* ci : add AMD runners and workflows
* ci : move AMD jobs to separate workflow
* cont : fix paths
53 lines · 1.1 KiB · YAML
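# Dedicated CI workflow for llama.cpp's self-hosted AMD runners: one Vulkan
# job and one ROCm job, split out of the main build workflow.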
name: CI (AMD)

on:
  workflow_dispatch: # allows manual triggering
  push:
    branches:
      - master
    paths: [
      '.github/workflows/build-amd.yml',
      '**/CMakeLists.txt',
      '**/.cmake',
      '**/*.h',
      '**/*.hpp',
      '**/*.c',
      '**/*.cpp',
      '**/*.cu',
      '**/*.cuh',
      '**/*.comp'
    ]
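
# one concurrency group per workflow+ref; github.head_ref is only set for
# pull requests, so push and dispatch runs fall back to run_id and are
# never cancelled by later runs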
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
  cancel-in-progress: true
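
# both jobs run on the same pool of self-hosted AMD runners; one exercises the
# Vulkan backend, the other ROCm. Both drive the same ci/run.sh harness, which
# takes a results directory and a mount directory as positional arguments.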
jobs:
  ggml-ci-x64-amd-vulkan:
    runs-on: [self-hosted, Linux, X64, AMD]

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Test
        id: ggml-ci
        run: |
          vulkaninfo --summary
          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
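
  # same runner pool, but building against ROCm instead of Vulkan, with the
  # GPU target pinned to gfx1101 (RDNA3)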
  ggml-ci-x64-amd-rocm:
    runs-on: [self-hosted, Linux, X64, AMD]

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Test
        id: ggml-ci
        run: |
          amd-smi static
          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
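
# a sketch of kicking off a run by hand (not part of the original file):
# since workflow_dispatch is enabled above, the GitHub CLI can trigger it, e.g.
#
#   gh workflow run build-amd.yml --ref master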