Mirror of https://github.com/ggml-org/llama.cpp.git
* Implement customizable RoPE

  The original RoPE has pre-defined parameters

      theta_i = 10000^(-2(i-1)/d), for i in [1, 2, ..., d/2]

  Our customizable RoPE, ggml_rope_custom_inplace, uses

      theta_i = scale * base^(-2(i-1)/d), for i in [1, 2, ..., d/2]

  with defaults that match the original: scale = 1.0, base = 10000.
  The new command line arguments --rope-freq-base and --rope-freq-scale
  set the two new RoPE parameters. Recent research shows that changing
  these two parameters extends the context limit with minimal loss:

  1. Extending Context to 8K
     kaiokendev
     https://kaiokendev.github.io/til#extending-context-to-8k

  2. Extending Context Window of Large Language Models via Positional
     Interpolation
     Shouyuan Chen, Sherman Wong, Liangjian Chen, Yuandong Tian
     https://arxiv.org/abs/2306.15595

  3. NTK-Aware Scaled RoPE allows LLaMA models to have extended (8k+)
     context size without any fine-tuning and minimal perplexity
     degradation
     https://www.reddit.com/user/bloc97
     https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/

  For the bold, try adding the following command line parameters to
  your favorite model: -c 16384 --rope-freq-base 80000 --rope-freq-scale 0.5
  (a numeric sketch of the formula follows this message).

* ggml-metal: fix custom rope

* common: fix argument names in help

* llama: increase MEM_REQ_EVAL for MODEL_3B

  This avoids a crash with quantized weights on CPU. A more principled
  way to calculate the required buffer size would still be preferable.

* llama: make MEM_REQ_EVAL depend on n_ctx

* server: use proper Content-Type in curl examples

  Without the header Content-Type: application/json, curl POSTs with
  Content-Type: application/x-www-form-urlencoded. Though our simple
  server doesn't care, the bundled httplib.h caps such payloads at
  CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH = 8192 bytes. With
  Content-Type: application/json we can send large JSON data (a short
  demonstration follows this message).

* style : minor fixes, mostly indentations

* ggml : fix asserts

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
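To make the RoPE change above concrete, here is a small standalone shell sketch, not part of the commit or the repo; the head dimension d = 128 and the helper name rope_theta are assumptions for illustration only:

# Illustrative only: print theta_i = scale * base^(-2(i-1)/d) for a few i.
# d = 128 is an assumed head dimension, not taken from the commit.
rope_theta() { # usage: rope_theta <scale> <base> <d>
    awk -v s="$1" -v b="$2" -v d="$3" 'BEGIN {
        for (i = 1; i <= 3; i++)
            printf "theta_%d = %.6g\n", i, s * b^(-2 * (i - 1) / d)
    }'
}

rope_theta 1.0 10000 128   # original RoPE defaults
rope_theta 0.5 80000 128   # values suggested above for extended context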
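The Content-Type fix can also be demonstrated outside the server. This hypothetical snippet uses the public echo service httpbin.org, which is not part of the repo, to show which header curl actually sends:

# Without an explicit header, curl --data-raw defaults to form encoding:
curl --silent --request POST --data-raw '{"content":"hi"}' https://httpbin.org/post \
    | jq -r '.headers["Content-Type"]'
# -> application/x-www-form-urlencoded

# With the header set, as in the script below, large JSON bodies are fine:
curl --silent --request POST --header "Content-Type: application/json" \
    --data-raw '{"content":"hi"}' https://httpbin.org/post \
    | jq -r '.headers["Content-Type"]'
# -> application/json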
80 lines · 1.9 KiB · Bash
#!/bin/bash

API_URL="${API_URL:-http://127.0.0.1:8080}"

# Seed dialogue; user and assistant turns alternate.
CHAT=(
    "Hello, Assistant."
    "Hello. How may I help you today?"
    "Please tell me the largest city in Europe."
    "Sure. The largest city in Europe is Moscow, the capital of Russia."
)

INSTRUCTION="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."

# Strip leading and trailing whitespace.
trim() {
    shopt -s extglob
    set -- "${1##+([[:space:]])}"
    printf "%s" "${1%%+([[:space:]])}"
}

# Strip trailing whitespace only.
trim_trailing() {
    shopt -s extglob
    printf "%s" "${1%%+([[:space:]])}"
}

# Render the instruction plus the chat history, ending with the new
# user message and an open assistant turn for the model to complete.
format_prompt() {
    echo -n "${INSTRUCTION}"
    printf "\n### Human: %s\n### Assistant: %s" "${CHAT[@]}" "$1"
}

# POST text to /tokenize and emit one token id per line.
tokenize() {
    curl \
        --silent \
        --request POST \
        --url "${API_URL}/tokenize" \
        --header "Content-Type: application/json" \
        --data-raw "$(jq -ns --arg content "$1" '{content:$content}')" \
    | jq '.tokens[]'
}

# Keep the instruction's tokens in the context when it fills up.
N_KEEP=$(tokenize "${INSTRUCTION}" | wc -l)

chat_completion() {
    PROMPT="$(trim_trailing "$(format_prompt "$1")")"
    DATA="$(echo -n "$PROMPT" | jq -Rs --argjson n_keep $N_KEEP '{
        prompt: .,
        temperature: 0.2,
        top_k: 40,
        top_p: 0.9,
        n_keep: $n_keep,
        n_predict: 256,
        stop: ["\n### Human:"],
        stream: true
    }')"

    ANSWER=''

    # /completion streams server-sent events; each payload line starts
    # with "data:" followed by a JSON object carrying the next chunk.
    while IFS= read -r LINE; do
        if [[ $LINE = data:* ]]; then
            CONTENT="$(echo "${LINE:5}" | jq -r '.content')"
            printf "%s" "${CONTENT}"
            ANSWER+="${CONTENT}"
        fi
    done < <(curl \
        --silent \
        --no-buffer \
        --request POST \
        --url "${API_URL}/completion" \
        --header "Content-Type: application/json" \
        --data-raw "${DATA}")

    printf "\n"

    # Append the exchange to the history for the next turn.
    CHAT+=("$1" "$(trim "$ANSWER")")
}

while true; do
    read -r -e -p "> " QUESTION
    chat_completion "${QUESTION}"
done
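To try the script against a server built with this commit, something like the following should work; the model path and the chat.sh filename are assumptions, while the flags come from the commit message above:

# Start the server with the extended-context settings suggested above
# (model path is illustrative):
./server -m models/7B/ggml-model-q4_0.bin -c 16384 \
    --rope-freq-base 80000 --rope-freq-scale 0.5 &

# Point the chat script at it (the default URL already matches):
API_URL=http://127.0.0.1:8080 ./chat.sh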