	ggml : add numa options (#5377)
* Added NUMA options to allow finer-grained control, plus plumbing for a new mirror mode that will require numa.h
* Reverted Makefile
* Fixed include
* Removed sched.h from ggml.h, moved ggml_get_numa_affinity into ggml.c, removed trailing whitespace and fixed up a few inconsistent variables
* Fixed a number of issues with the move from bool to ggml_numa_strategies; added a note about mirror mode not being implemented yet
* Removed MIRROR_MODE code for this PR
* Removed an unneeded branch in the server.cpp example; moved get_numa_affinity and made it static
* Fixed lingering init_llama_backend() bool calls in tests and examples
* Removed enum llama_numa_strategies
* Reverted a bad merge with the dynatemp flags
* Added the missing enum ggml_numa_strategies declaration and reverted a sync problem with master
* Fixed the ggml_init_numa variable
* Updated the READMEs with info about the NUMA flags, renamed the INTERLEAVE strategy to DISTRIBUTE everywhere, implemented the improved distribution strategy from @rankaiyx, fixed a spelling mistake and un-merged some bad merges
* Split NUMA init out of llama_backend_init and created llama_numa_init; updated all code paths and samples
* Fixed up some boolean vs. enum comparisons
* Added #ifdefs for non-Linux OSes that lack the cpu_set_t datatype
* Aligned enum values in ggml.h; removed whitespace and aligned parameters in ggml.c
* Removed whitespace and aligned braces in examples/server/server.cpp and common/common.cpp
* Unified the ggml_numa_strategy enum and fixed text alignment in the server.cpp example
* Simplified the return in ggml.c for platforms without NUMA support
* Removed a redundant else from the --numa CLI argument processing
* Whitespace cleanup

Co-authored-by: root <root@nenya.lothlorien.ca>
Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Co-authored-by: Jared Van Bortel <jared@nomic.ai>
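The headline API change is that NUMA setup moves out of llama_backend_init() into a dedicated llama_numa_init() call. A minimal caller-side sketch, assuming the post-PR llama.h declarations (llama_backend_init now takes no arguments, and llama_numa_init takes a ggml_numa_strategy value):

    // Migrating a caller: the old llama_backend_init(bool numa) flag becomes
    // an explicit strategy passed to the new llama_numa_init().
    #include "llama.h"

    int main(void) {
        llama_backend_init();                           // NUMA flag removed from this call
        llama_numa_init(GGML_NUMA_STRATEGY_DISTRIBUTE); // spread execution evenly over all nodes
        // ... load the model and run inference here ...
        llama_backend_free();
        return 0;
    }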
@@ -671,7 +671,15 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
         } else if (arg == "--no-mmap") {
             params.use_mmap = false;
         } else if (arg == "--numa") {
-            params.numa = true;
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            std::string value(argv[i]);
+            /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
+            else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
+            else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
+            else { invalid_param = true; break; }
         } else if (arg == "--verbose-prompt") {
             params.verbose_prompt = true;
         } else if (arg == "--no-display-prompt") {
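The hunk above makes --numa consume one extra argv entry and map it onto the new strategy enum (the `/**/` is only alignment padding so the branches line up). A hypothetical standalone helper doing the same mapping, with the enum layout assumed from ggml.h after this PR, e.g. for quick unit testing:

    #include <string>

    // Assumed enum layout (ggml.h after this PR); DISABLED is the default.
    enum ggml_numa_strategy {
        GGML_NUMA_STRATEGY_DISABLED   = 0,
        GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
        GGML_NUMA_STRATEGY_ISOLATE    = 2,
        GGML_NUMA_STRATEGY_NUMACTL    = 3,
    };

    // Hypothetical helper mirroring the CLI mapping above; returns false on
    // an unknown value, matching the invalid_param handling in the diff.
    static bool parse_numa_strategy(const std::string & value, enum ggml_numa_strategy & out) {
        if (value == "distribute" || value.empty()) { out = GGML_NUMA_STRATEGY_DISTRIBUTE; return true; }
        if (value == "isolate")                     { out = GGML_NUMA_STRATEGY_ISOLATE;    return true; }
        if (value == "numactl")                     { out = GGML_NUMA_STRATEGY_NUMACTL;    return true; }
        return false;
    }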
@@ -935,7 +943,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf("  -tb N, --threads-batch N\n");
     printf("                        number of threads to use during batch and prompt processing (default: same as --threads)\n");
     printf("  -td N, --threads-draft N");
-    printf("                        number of threads to use during generation (default: same as --threads)");
+    printf("                        number of threads to use during generation (default: same as --threads)\n");
     printf("  -tbd N, --threads-batch-draft N\n");
     printf("                        number of threads to use during batch and prompt processing (default: same as --threads-draft)\n");
     printf("  -p PROMPT, --prompt PROMPT\n");
@@ -1005,7 +1013,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf("  --winogrande-tasks N  number of tasks to use when computing the Winogrande score (default: %zu)\n", params.winogrande_tasks);
     printf("  --multiple-choice     compute multiple choice score over random tasks from datafile supplied with -f\n");
     printf("  --multiple-choice-tasks N number of tasks to use when computing the multiple choice score (default: %zu)\n", params.winogrande_tasks);
-    printf("  --kl-divergence       computes KL-divergence to logits provided via --kl-divergence-base");
+    printf("  --kl-divergence       computes KL-divergence to logits provided via --kl-divergence-base\n");
     printf("  --keep N              number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
     printf("  --draft N             number of tokens to draft for speculative decoding (default: %d)\n", params.n_draft);
     printf("  --chunks N            max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks);
@@ -1022,7 +1030,10 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     if (llama_supports_mmap()) {
         printf("  --no-mmap             do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
     }
-    printf("  --numa                attempt optimizations that help on some NUMA systems\n");
+    printf("  --numa TYPE           attempt optimizations that help on some NUMA systems\n");
+    printf("                          - distribute: spread execution evenly over all nodes\n");
+    printf("                          - isolate: only spawn threads on CPUs on the node that execution started on\n");
+    printf("                          - numactl: use the CPU map provided by numactl\n");
     printf("                        if run without this previously, it is recommended to drop the system page cache before using this\n");
     printf("                        see https://github.com/ggerganov/llama.cpp/issues/1437\n");
     if (llama_supports_gpu_offload()) {
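For intuition only, and not taken from this PR's ggml.c: a sketch of the mechanism behind "distribute" on Linux, where each worker thread is pinned round-robin across all CPUs so threads land on every NUMA node. cpu_set_t is Linux-specific, which is why the PR adds #ifdef guards for other platforms:

    // Illustrative sketch: pin thread number `ith` to CPU (ith % n_cpus), so a
    // thread pool spreads evenly over every core, and therefore every node.
    #ifdef __gnu_linux__
    #define _GNU_SOURCE                  // for the CPU_* macros and pthread_setaffinity_np
    #include <pthread.h>
    #include <sched.h>

    static void distribute_pin(int ith, int n_cpus) {
        cpu_set_t mask;
        CPU_ZERO(&mask);
        CPU_SET(ith % n_cpus, &mask);    // thread ith -> CPU (ith mod n_cpus)
        pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &mask);
    }
    #else
    static void distribute_pin(int ith, int n_cpus) {
        (void) ith; (void) n_cpus;       // no cpu_set_t here: leave placement to the OS
    }
    #endif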
@@ -1689,7 +1700,6 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false");
     fprintf(stream, "no_mul_mat_q: %s # default: false\n", !params.mul_mat_q ? "true" : "false");
     fprintf(stream, "no_penalize_nl: %s # default: false\n", !sparams.penalize_nl ? "true" : "false");
-    fprintf(stream, "numa: %s # default: false\n", params.numa ? "true" : "false");
     fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
     fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride);
     fprintf(stream, "presence_penalty: %f # default: 0.0\n", sparams.penalty_present);
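Taken together: --numa now takes a value (distribute, isolate, or numactl; an empty value falls back to distribute), and the boolean numa: line disappears from the YAML dump in the last hunk because params.numa is no longer a bool but the strategy enum.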
Author: bmwl