ci : disable AMD workflows + update NVIDIA workflows (#16200)

* ci : disable AMD workflows + update NVIDIA workflows

* cont : fixes

* cont : update nvidia vulkan workflows
This commit is contained in:
Georgi Gerganov
2025-09-23 20:41:40 +03:00
committed by GitHub
parent 0889589dbe
commit f505bd83ca

View File

@@ -1302,8 +1302,8 @@ jobs:
       run: |
         GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-  ggml-ci-x64-nvidia-v100-cuda:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, V100]
+  ggml-ci-x64-nvidia-cuda:
+    runs-on: [self-hosted, Linux, X64, NVIDIA]
     steps:
       - name: Clone
@@ -1316,8 +1316,8 @@ jobs:
         nvidia-smi
         GG_BUILD_CUDA=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-  ggml-ci-x64-nvidia-v100-vulkan:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, V100]
+  ggml-ci-x64-nvidia-vulkan-cm:
+    runs-on: [self-hosted, Linux, X64, NVIDIA]
     steps:
      - name: Clone
@@ -1327,51 +1327,23 @@ jobs:
     - name: Test
       id: ggml-ci
       run: |
-        vulkaninfo
+        vulkaninfo --summary
-        GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
-  ggml-ci-x64-nvidia-t4-cuda:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, T4]
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-      - name: Test
-        id: ggml-ci
-        run: |
-          nvidia-smi
-          GG_BUILD_CUDA=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
-  ggml-ci-x64-nvidia-t4-vulkan:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, T4]
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-      - name: Test
-        id: ggml-ci
-        run: |
-          vulkaninfo
-          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
-  ggml-ci-x64-nvidia-t4-vulkan-coopmat1:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, T4]
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-      - name: Test
-        id: ggml-ci
-        run: |
-          vulkaninfo
         GG_BUILD_VULKAN=1 GGML_VK_DISABLE_COOPMAT2=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
+  ggml-ci-x64-nvidia-vulkan-cm2:
+    runs-on: [self-hosted, Linux, X64, NVIDIA, COOPMAT2]
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+      - name: Test
+        id: ggml-ci
+        run: |
+          vulkaninfo --summary
+          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp

   ggml-ci-x64-cpu-amx:
     runs-on: [self-hosted, Linux, X64, CPU, AMX]
@@ -1385,31 +1357,33 @@ jobs:
       run: |
         bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-  ggml-ci-x64-amd-v710-vulkan:
-    runs-on: [self-hosted, Linux, X64, AMD, V710]
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-      - name: Test
-        id: ggml-ci
-        run: |
-          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
-  ggml-ci-x64-amd-v710-rocm:
-    runs-on: [self-hosted, Linux, X64, AMD, V710]
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-      - name: Test
-        id: ggml-ci
-        run: |
-          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+  # ggml-ci-x64-amd-vulkan:
+  #   runs-on: [self-hosted, Linux, X64, AMD]
+  #
+  #   steps:
+  #     - name: Clone
+  #       id: checkout
+  #       uses: actions/checkout@v4
+  #
+  #     - name: Test
+  #       id: ggml-ci
+  #       run: |
+  #         vulkaninfo --summary
+  #         GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+  #
+  # ggml-ci-x64-amd-rocm:
+  #   runs-on: [self-hosted, Linux, X64, AMD]
+  #
+  #   steps:
+  #     - name: Clone
+  #       id: checkout
+  #       uses: actions/checkout@v4
+  #
+  #     - name: Test
+  #       id: ggml-ci
+  #       run: |
+  #         amd-smi static
+  #         GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp

   ggml-ci-mac-metal:
     runs-on: [self-hosted, macOS, ARM64]
@@ -1435,4 +1409,5 @@ jobs:
     - name: Test
       id: ggml-ci
       run: |
+        vulkaninfo --summary
         GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp