Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-02 09:12:03 +00:00)
			
		
		
		
	* scripts : add lib.sh and lib_test.sh * scripts : stub out new ci-run.sh script * scripts : switch to PascalCase for functions This looks a little odd at first, but I find it very useful as a convention to know if a command is part of our code vs a builtin. * scripts : add some fancy conversion from snake_case to PascalCase * Add venv to ci/run.sh * Revert scripts work * scripts : add wrapper script for local use of ci/run.sh * Simplify .gitignore for tests, clang-tidy fixes * Label all ctest tests * ci : ctest uses -L main * Attempt at writing ctest_with_model * Update test-model-load-cancel * ci : add ctest_with_model for debug and release ggml-ci * Fix gg_get_model function ggml-ci * got stuck on CMake * Add get_model.cpp to tests/CMakeLists.txt ggml-ci * Fix README.md output for ctest_with_model ggml-ci * workflows : use `-L main` for all ctest ggml-ci * Fixes * GG_RUN_CTEST_MODELFILE => LLAMACPP_TESTMODELFILE * Always show warning rather than failing if model file variable is not set * scripts : update usage text for ci-run.sh
		
			
				
	
	
		
			51 lines
		
	
	
		
			1.3 KiB
		
	
	
	
		
			Bash
		
	
	
		
			Executable File
		
	
	
	
	
			
		
		
	
	
			51 lines
		
	
	
		
			1.3 KiB
		
	
	
	
		
			Bash
		
	
	
		
			Executable File
		
	
	
	
	
#!/bin/bash

set -euo pipefail

# Resolve the absolute path of this script, then anchor the cwd to the
# script's own directory so relative paths below are caller-independent.
this=$(realpath "$0"); readonly this
cd "$(dirname "$this")"

# Self-lint before doing anything else.  Fail early with a clear message
# if shellcheck is missing (previously this died with a bare
# "shellcheck: command not found" from the shell).
if ! command -v shellcheck >/dev/null 2>&1; then
    echo >&2 "error: shellcheck is required but was not found in PATH"
    exit 1
fi
shellcheck "$this"
# Accept exactly one or two positional arguments; anything else prints
# the usage text to stderr and aborts.
case $# in
    1|2) ;;
    *)
        cat >&2 <<'EOF'
usage:
    ci-run.sh <tmp_dir> [<cache_dir>]

This script wraps ci/run.sh:
* If <tmp_dir> is a ramdisk, you can reduce writes to your SSD. If <tmp_dir> is not a ramdisk, keep in mind that total writes will increase by the size of <cache_dir>.
    (openllama_3b_v2: quantized models are about 30GB)
* Persistent model and data files are synced to and from <cache_dir>,
    excluding generated .gguf files.
    (openllama_3b_v2: persistent files are about 6.6GB)
* <cache_dir> defaults to  ~/.cache/llama.cpp
EOF
        exit 1
        ;;
esac
cd .. # => llama.cpp repo root

# Working directory: create it if needed, then canonicalize the path.
tmp="$1"
mkdir -p "$tmp"
tmp=$(realpath "$tmp")
echo >&2 "Using tmp=$tmp"

# Cache directory: fall back to the default when the argument is absent
# *or* empty.  The previous "${2-...}" expansion only covered the unset
# case, so an explicitly empty second argument made `mkdir -p ''` fail
# under `set -e` with a confusing error.
cache="${2:-$HOME/.cache/llama.cpp}"
mkdir -p "$cache"
cache=$(realpath "$cache")
echo >&2 "Using cache=$cache"
# Mirror $1 into $2 with rsync; any additional arguments are passed
# through to rsync verbatim.  Both endpoints are created up front so a
# fresh cache or tmp directory never makes rsync fail.
_sync() {
    local src=$1
    local dst=$2
    shift 2

    echo >&2 "Syncing from $src to $dst"
    mkdir -p "$src" "$dst"
    rsync -a "$src" "$dst" --delete-during "$@"
}
# Stage the repo checkout and the cached models into the tmp tree, run
# the CI script from there, then persist the model files back to the
# cache -- excluding generated .gguf files, per the usage text above.
repo_dir="$tmp/llama.cpp"

_sync "$(realpath .)/" "$repo_dir"
_sync "$cache/ci-mnt/models/" "$repo_dir/ci-mnt/models/"

cd "$repo_dir"
bash ci/run.sh ci-out ci-mnt

_sync 'ci-mnt/models/' "$cache/ci-mnt/models/" --exclude='*.gguf' -P