ci : add AMD runners and workflows (#16249)
* ci : add AMD runners and workflows
* ci : move AMD jobs to separate workflow
* cont : fix paths
.github/workflows/build-amd.yml (vendored, new file, 52 lines added)
@@ -0,0 +1,52 @@
name: CI (AMD)

on:
  workflow_dispatch: # allows manual triggering
  push:
    branches:
      - master
    paths: [
      '.github/workflows/build-amd.yml',
      '**/CMakeLists.txt',
      '**/.cmake',
      '**/*.h',
      '**/*.hpp',
      '**/*.c',
      '**/*.cpp',
      '**/*.cu',
      '**/*.cuh',
      '**/*.comp'
    ]

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
  cancel-in-progress: true

jobs:
  ggml-ci-x64-amd-vulkan:
    runs-on: [self-hosted, Linux, X64, AMD]

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Test
        id: ggml-ci
        run: |
          vulkaninfo --summary
          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp

  ggml-ci-x64-amd-rocm:
    runs-on: [self-hosted, Linux, X64, AMD]

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Test
        id: ggml-ci
        run: |
          amd-smi static
          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
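Because the new workflow exposes workflow_dispatch, it can also be started by hand. A minimal sketch using the GitHub CLI, assuming gh is installed and authenticated for the repository:

    # trigger the AMD CI workflow manually on master
    gh workflow run build-amd.yml --ref master

    # list recent runs of the workflow to check the result
    gh run list --workflow=build-amd.yml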
.github/workflows/build.yml (vendored, 28 lines removed)
@@ -1461,34 +1461,6 @@ jobs:
        run: |
          bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp

-  # ggml-ci-x64-amd-vulkan:
-  #   runs-on: [self-hosted, Linux, X64, AMD]
-  #
-  #   steps:
-  #     - name: Clone
-  #       id: checkout
-  #       uses: actions/checkout@v4
-  #
-  #     - name: Test
-  #       id: ggml-ci
-  #       run: |
-  #         vulkaninfo --summary
-  #         GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-  #
-  # ggml-ci-x64-amd-rocm:
-  #   runs-on: [self-hosted, Linux, X64, AMD]
-  #
-  #   steps:
-  #     - name: Clone
-  #       id: checkout
-  #       uses: actions/checkout@v4
-  #
-  #     - name: Test
-  #       id: ggml-ci
-  #       run: |
-  #         amd-smi static
-  #         GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
  ggml-ci-mac-metal:
    runs-on: [self-hosted, macOS, ARM64]
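Both AMD jobs target machines labeled [self-hosted, Linux, X64, AMD]. As a minimal sketch, registering such a runner with the standard actions/runner config script might look like the following (the registration token is a placeholder; the extra AMD label is added explicitly, while self-hosted, Linux and X64 are applied automatically):

    # inside an unpacked actions-runner directory
    ./config.sh --url https://github.com/ggml-org/llama.cpp \
                --token <REGISTRATION_TOKEN> \
                --labels AMD \
                --unattended
    ./run.sh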
ci/run.sh
@@ -114,6 +114,7 @@ if [ ! -z ${GG_BUILD_NO_SVE} ]; then
    # arm 9 and newer enables sve by default, adjust these flags depending on the cpu used
    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8.5-a+fp16+i8mm"
fi

## helpers

# download a file if it does not exist or if it is outdated
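The workflows drive everything through ci/run.sh, so the same jobs can be reproduced outside GitHub Actions. A minimal sketch of a local run on an AMD machine, assuming ROCm and Vulkan are installed and using local scratch directories (here ./tmp/results and ./tmp/mnt) in place of the runner paths:

    # Vulkan build + tests, as in the ggml-ci-x64-amd-vulkan job
    vulkaninfo --summary
    GG_BUILD_VULKAN=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt

    # ROCm build + tests restricted to gfx1101, as in the ggml-ci-x64-amd-rocm job
    amd-smi static
    GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ./tmp/results ./tmp/mnt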