ggml : automatic selection of best CPU backend (#10606)
* ggml : automatic selection of best CPU backend

* amx : minor opt

* add GGML_AVX_VNNI to enable avx-vnni, fix checks
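With GGML_BACKEND_DL, the CPU backend can be built several times for different instruction sets and the best supported variant loaded at runtime based on the features the running CPU reports. The selection logic itself lives in ggml's C code; as a rough illustration only (assuming Linux, where the kernel exposes the same feature flags), the features it keys on can be inspected by hand:

#!/bin/bash
# Illustrative sketch, not part of this commit: list which of the relevant
# x86 features the running CPU advertises, using /proc/cpuinfo (Linux-only).
for feat in avx avx2 avx_vnni avx512f; do
    if grep -qw "$feat" /proc/cpuinfo; then
        echo "$feat: supported"
    else
        echo "$feat: not supported"
    fi
done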
scripts/build-cpu.sh (new executable file, 12 lines added)
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+name="$1"
+args="${@:2}"
+
+echo "Building $name with args: $args"
+
+rm -fr build-cpu-$1
+cmake -S . -B build-cpu-$1 -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF $args
+cmake --build build-cpu-$1 --config Release -t ggml-cpu -j $(nproc)
+cp build-cpu-$1/bin/libggml-cpu.so ./libggml-cpu-$1.so
+rm -fr build-cpu-$1
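A hypothetical invocation of the new script, building two CPU backend variants so the best one can be picked at runtime. The variant names are arbitrary; GGML_AVX, GGML_AVX2 and GGML_FMA are existing ggml CMake options, and GGML_AVX_VNNI is the option this commit adds. The exact set of flags per variant is an assumption for illustration:

# Each run produces a standalone ./libggml-cpu-<name>.so
./scripts/build-cpu.sh avx2 -DGGML_AVX=ON -DGGML_AVX2=ON -DGGML_FMA=ON
./scripts/build-cpu.sh avx-vnni -DGGML_AVX=ON -DGGML_AVX2=ON -DGGML_AVX_VNNI=ON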