mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-29 08:41:22 +00:00 
			
		
		
		
	llama : add custom RoPE (#2054)
* Implement customizable RoPE The original RoPE has pre-defined parameters theta_i = 10000^(−2(i−1)/d), for i in [1, 2, ..., d/2] Our customizable RoPE, ggml_rope_custom_inplace, uses theta_i = scale * base^(−2(i−1)/d), for i in [1, 2, ..., d/2] with the defaults matching the original: scale = 1.0, base = 10000. The new command line arguments --rope-freq-base --rope-freq-scale set the two new RoPE parameters. Recent research shows that changing these two parameters extends the context limit with minimal loss. 1. Extending Context to 8K kaiokendev https://kaiokendev.github.io/til#extending-context-to-8k 2. Extending Context Window of Large Language Models via Positional Interpolation Shouyuan Chen, Sherman Wong, Liangjian Chen, Yuandong Tian https://arxiv.org/abs/2306.15595 3. NTK-Aware Scaled RoPE allows LLaMA models to have extended (8k+) context size without any fine-tuning and minimal perplexity degradation. https://www.reddit.com/user/bloc97 https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ For the bold, try adding the following command line parameters to your favorite model: -c 16384 --rope-freq-base 80000 --rope-freq-scale 0.5 * ggml-metal: fix custom rope * common: fix argument names in help * llama: increase MEM_REQ_EVAL for MODEL_3B It avoids crashing for quantized weights on CPU. A better way to calculate the required buffer size would be preferable. * llama: make MEM_REQ_EVAL depend on n_ctx * server: use proper Content-Type in curl examples Without the header Content-Type: application/json, curl will POST with Content-Type: application/x-www-form-urlencoded Though our simple server doesn't care, the httplib.h used has a limit with CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 8192 With Content-Type: application/json, we can send large json data. * style : minor fixes, mostly indentations * ggml : fix asserts --------- Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
This commit is contained in:
		| @@ -66,6 +66,7 @@ Using [curl](https://curl.se/). On Windows `curl.exe` should be available in the | ||||
| ```sh | ||||
| curl --request POST \ | ||||
|     --url http://localhost:8080/completion \ | ||||
|     --header "Content-Type: application/json" \ | ||||
|     --data '{"prompt": "Building a website can be done in 10 simple steps:","n_predict": 128}' | ||||
| ``` | ||||
|  | ||||
|   | ||||
| @@ -32,6 +32,7 @@ tokenize() { | ||||
|         --silent \ | ||||
|         --request POST \ | ||||
|         --url "${API_URL}/tokenize" \ | ||||
|         --header "Content-Type: application/json" \ | ||||
|         --data-raw "$(jq -ns --arg content "$1" '{content:$content}')" \ | ||||
|     | jq '.tokens[]' | ||||
| } | ||||
| @@ -64,6 +65,7 @@ chat_completion() { | ||||
|         --no-buffer \ | ||||
|         --request POST \ | ||||
|         --url "${API_URL}/completion" \ | ||||
|         --header "Content-Type: application/json" \ | ||||
|         --data-raw "${DATA}") | ||||
|  | ||||
|     printf "\n" | ||||
|   | ||||
| @@ -608,6 +608,8 @@ static void server_print_usage(const char *argv0, const gpt_params &params, | ||||
|     fprintf(stderr, "  -v, --verbose         verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled"); | ||||
|     fprintf(stderr, "  -t N, --threads N     number of threads to use during computation (default: %d)\n", params.n_threads); | ||||
|     fprintf(stderr, "  -c N, --ctx-size N    size of the prompt context (default: %d)\n", params.n_ctx); | ||||
|     fprintf(stderr, "  --rope-freq-base N    RoPE base frequency (default: %.1f)\n", params.rope_freq_base); | ||||
|     fprintf(stderr, "  --rope-freq-scale N   RoPE frequency scaling factor (default: %g)\n", params.rope_freq_scale); | ||||
|     fprintf(stderr, "  -b N, --batch-size N  batch size for prompt processing (default: %d)\n", params.n_batch); | ||||
|     fprintf(stderr, "  --memory-f32          use f32 instead of f16 for memory key+value (default: disabled)\n"); | ||||
|     fprintf(stderr, "                        not recommended: doubles context memory required and no measurable increase in quality\n"); | ||||
| @@ -722,6 +724,22 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, | ||||
|             } | ||||
|             params.n_ctx = std::stoi(argv[i]); | ||||
|         } | ||||
|         else if (arg == "--rope-freq-base") | ||||
|         { | ||||
|             if (++i >= argc) { | ||||
|                 invalid_param = true; | ||||
|                 break; | ||||
|             } | ||||
|             params.rope_freq_base = std::stof(argv[i]); | ||||
|         } | ||||
|         else if (arg == "--rope-freq-scale") | ||||
|         { | ||||
|             if (++i >= argc) { | ||||
|                 invalid_param = true; | ||||
|                 break; | ||||
|             } | ||||
|             params.rope_freq_scale = std::stof(argv[i]); | ||||
|         } | ||||
|         else if (arg == "--memory-f32" || arg == "--memory_f32") | ||||
|         { | ||||
|             params.memory_f16 = false; | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Xiao-Yong Jin
					Xiao-Yong Jin