ci : remove vulkaninfo calls (#16169)
.github/workflows/build.yml (2 changed lines)

@@ -1396,7 +1396,6 @@ jobs:
       - name: Test
         id: ggml-ci
         run: |
-          vulkaninfo
           GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
   ggml-ci-x64-amd-v710-rocm:
@@ -1410,7 +1409,6 @@ jobs:
       - name: Test
         id: ggml-ci
         run: |
-          vulkaninfo
           GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
   ggml-ci-mac-metal:
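For reference, a minimal sketch of the Vulkan test step as it reads after this change; the indentation and the enclosing job definition are assumed from the diff context and are not reproduced in full here:

      - name: Test
        id: ggml-ci
        run: |
          # vulkaninfo is no longer invoked before running the CI script
          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp

The ROCm job follows the same pattern, with GG_BUILD_ROCM=1 and GG_BUILD_AMDGPU_TARGETS="gfx1101" in place of GG_BUILD_VULKAN=1.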