set context default to avoid memory issue, update guide

@@ -11,16 +11,17 @@ source /opt/intel/oneapi/setvars.sh
 #ZES_ENABLE_SYSMAN=1, Support to get free memory of GPU by sycl::aspect::ext_intel_free_memory. Recommended to use when --split-mode = layer.
 
 INPUT_PROMPT="Building a website can be done in 10 simple steps:\nStep 1:"
-MODEL_FILE=llama-2-7b.Q4_0.gguf
+MODEL_FILE=models/llama-2-7b.Q4_0.gguf
 NGL=33
+CONEXT=8192
 
 if [ $# -gt 0 ]; then
     GGML_SYCL_DEVICE=$1
     echo "use $GGML_SYCL_DEVICE as main GPU"
     #use signle GPU only
-    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -mg $GGML_SYCL_DEVICE -sm none
+    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m ${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONEXT} -mg $GGML_SYCL_DEVICE -sm none
 
 else
     #use multiple GPUs with same max compute units
-    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0
+    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m ${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONEXT}
 fi
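
Why capping the context helps with memory: the KV cache grows linearly with context length. A back-of-envelope sketch for llama-2-7b (32 layers, 4096 embedding dim, and llama.cpp's default f16 KV cache; these model figures are general facts, not taken from this commit):

# per-token KV cost = 2 (K and V) * n_layer * n_embd * 2 bytes (f16)
# at the new n_ctx cap of 8192:
echo $(( 2 * 32 * 8192 * 4096 * 2 / 1024 / 1024 / 1024 ))   # prints 4 (GiB)

That ~4 GiB sits on top of the few GiB of Q4_0 weights, so an uncapped or larger context can push a single GPU past its free memory.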
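
Usage after this change, as a sketch; the script path below is an assumption for illustration (the file name is not visible in this hunk):

# no argument: multi-GPU layer split across SYCL devices, context capped at 8192
./examples/sycl/run-llama2.sh

# with a device id (0 assumed here): single-GPU run, i.e. the script adds -mg 0 -sm none
./examples/sycl/run-llama2.sh 0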