	server : add --numa support (#2524)
@@ -16,6 +16,7 @@ Command line options:
 -   `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. Not recommended.
 -   `--mlock`: Lock the model in memory, preventing it from being swapped out when memory-mapped.
 -   `--no-mmap`: Do not memory-map the model. By default, models are mapped into memory, which allows the system to load only the necessary parts of the model as needed.
+-   `--numa`: Attempt optimizations that help on some NUMA systems.
 -   `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains.
 -   `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation.
 -   `-to N`, `--timeout N`: Server read/write timeout in seconds. Default `600`.
@@ -666,6 +666,7 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
     {
         fprintf(stdout, "  --no-mmap             do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
     }
+    fprintf(stdout, "  --numa                attempt optimizations that help on some NUMA systems\n");
 #ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
     fprintf(stdout, "  -ngl N, --n-gpu-layers N\n");
     fprintf(stdout, "                        number of layers to store in VRAM\n");
@@ -940,6 +941,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
         {
             params.use_mmap = false;
         }
+        else if (arg == "--numa")
+        {
+            params.numa = true;
+        }
         else if (arg == "--embedding")
         {
             params.embedding = true;
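For context, below is a minimal, self-contained sketch of the same flag-parsing pattern the second hunk adds: scan argv for "--numa" and record it in a params struct. The struct name demo_params and the printf output are illustrative stand-ins, not llama.cpp code; in the actual server the branch above sets the existing params.numa field, which the rest of the program consumes when initializing the backend.

    // Illustrative sketch only: parses "--numa" the same way the new
    // else-if branch above does. demo_params is a hypothetical stand-in
    // for gpt_params.
    #include <cstdio>
    #include <cstring>

    struct demo_params {
        bool numa = false; // corresponds to the params.numa field set above
    };

    int main(int argc, char ** argv) {
        demo_params params;
        for (int i = 1; i < argc; ++i) {
            if (std::strcmp(argv[i], "--numa") == 0) {
                params.numa = true; // flag present: enable NUMA optimizations
            }
        }
        std::printf("numa optimizations: %s\n", params.numa ? "enabled" : "disabled");
        return 0;
    }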
Cheng Shao