ci: Add Windows Vulkan backend testing on Intel (#21292)
* experimenting CI
* Experimenting CI fix for MinGW
* experimenting CI on Windows
* modified script for integration with VisualStudio
* added proxy handling
* adding python version for Windows execution
* fix iterator::end() dereference
* fixed proxy handling
* Fix errors occurring on Windows
* fixed ci script
* Reverted to master
* Stripping test items to simplify Windows test
* adjusting script for windows testing
* Changed shell
* Fixed shell
* Fixed shell
* Fix CI setting
* Fix CI setting
* Fix CI setting
* Experimenting ci fix
* Experimenting ci fix
* Experimenting ci fix
* Experimenting ci fix
* experimenting fix for unit test error
* Changed to use BUILD_LOW_PERF to skip python tests
* Fix CI
* Added option to specify Ninja generator
* Reverted proxy related changes
This commit is contained in:
parent
50e0ad08fb
commit
e439700992
2 changed files with 26 additions and 0 deletions
21
.github/workflows/build-self-hosted.yml
vendored
21
.github/workflows/build-self-hosted.yml
vendored
|
|
@@ -213,6 +213,27 @@ jobs:
|
||||||
vulkaninfo --summary
|
vulkaninfo --summary
|
||||||
GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp
|
GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp
|
||||||
|
|
||||||
|
ggml-ci-win-intel-vulkan:
|
||||||
|
runs-on: [self-hosted, Windows, X64, Intel]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Clone
|
||||||
|
id: checkout
|
||||||
|
uses: actions/checkout@v6
|
||||||
|
|
||||||
|
- name: Test
|
||||||
|
id: ggml-ci
|
||||||
|
shell: C:\msys64\usr\bin\bash.exe --noprofile --norc -eo pipefail "{0}"
|
||||||
|
env:
|
||||||
|
MSYSTEM: UCRT64
|
||||||
|
CHERE_INVOKING: 1
|
||||||
|
PATH: C:\msys64\ucrt64\bin;C:\msys64\usr\bin;C:\Windows\System32;${{ env.PATH }}
|
||||||
|
run: |
|
||||||
|
vulkaninfo --summary
|
||||||
|
# Skip python related tests with GG_BUILD_LOW_PERF=1 since Windows MSYS2 UCRT64 currently fails to create
|
||||||
|
# a valid python environment for testing
|
||||||
|
LLAMA_FATAL_WARNINGS=OFF GG_BUILD_NINJA=1 GG_BUILD_VULKAN=1 GG_BUILD_LOW_PERF=1 ./ci/run.sh ./results/llama.cpp ./mnt/llama.cpp
|
||||||
|
|
||||||
ggml-ci-intel-openvino-gpu-low-perf:
|
ggml-ci-intel-openvino-gpu-low-perf:
|
||||||
runs-on: [self-hosted, Linux, Intel, OpenVINO]
|
runs-on: [self-hosted, Linux, Intel, OpenVINO]
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -119,6 +119,11 @@ if [ ! -z ${GG_BUILD_VULKAN} ]; then
|
||||||
CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=OFF -DGGML_BLAS=OFF"
|
CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=OFF -DGGML_BLAS=OFF"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Build shared libs on Windows
|
||||||
|
# to reduce binary size and avoid errors in library loading unit tests
|
||||||
|
if uname -s | grep -qi nt; then
|
||||||
|
CMAKE_EXTRA="${CMAKE_EXTRA} -DBUILD_SHARED_LIBS=ON"
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -z ${GG_BUILD_WEBGPU} ]; then
|
if [ ! -z ${GG_BUILD_WEBGPU} ]; then
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue