mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-28 08:31:25 +00:00)

commit f486f6e1e5
* Added numa options to allow finer-grained control, plus plumbing for a new mirror mode that will require numa.h
* Reverted Makefile
* Fixed include
* Removed sched.h from ggml.h, moved ggml_get_numa_affinity into ggml.c, removed trailing whitespace, and fixed up a few inconsistent variables
* Removed trailing whitespace
* Added numa options to allow finer-grained control, plus plumbing for a new mirror mode that will require numa.h
* Reverted Makefile
* Fixed a number of issues with the move from BOOL to ggml_numa_strategies; added a note about mirror mode not being implemented yet
* Removed MIRROR_MODE code for this PR
* Removed the last bit of MIRROR_MODE code for this PR
* Removed an unneeded branch in the server.cpp example and made get_numa_affinity static
* Fixed lingering init_llama_backend() bool calls in tests and examples
* Removed enum llama_numa_strategies
* Reverted bad merge with dynatemp flags
* Added missing enum ggml_numa_strategies declaration and reverted sync problem with master
* Added missing enum ggml_numa_strategies declaration
* Fixed ggml_init_numa variable
* Update ggml.h
  Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
* Updated READMEs with info about numa flags, changed the INTERLEAVE strategy name to DISTRIBUTE everywhere, implemented the improved distribution strategy from @rankaiyx, fixed a spelling mistake, and un-merged some bad merges
* Split numa init out from llama_backend_init and created llama_numa_init; updated all code paths and samples
* Fixed up some boolean vs. enum comparisons
* Added #ifdefs for non-Linux OSes that don't have the cpu_set_t datatype
* Update ggml.h: align enum values
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* Update ggml.c: remove whitespace
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* Update ggml.c: align parameters
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* Update examples/server/server.cpp: remove whitespace and align brace
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* Update common/common.cpp: remove whitespace and align brace
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* Unified the ggml_numa_strategy enum and fixed text alignment in the server.cpp example
* Update ggml.c: simplified return for platforms without NUMA support
  Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
* Removed redundant else from CLI argument processing of --numa
* Whitespace

---------

Co-authored-by: root <root@nenya.lothlorien.ca>
Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Co-authored-by: Jared Van Bortel <jared@nomic.ai>
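The headline change for API users is the split of NUMA setup out of llama_backend_init() into a separate llama_numa_init() call that takes a ggml_numa_strategy. A minimal sketch of the new initialization sequence follows; the exact enum spelling (GGML_NUMA_STRATEGY_DISTRIBUTE, the renamed INTERLEAVE strategy) is an assumption here, so check ggml.h for the authoritative names.

// Sketch only: the post-split initialization sequence described in the
// commit message above. Enum value names are assumptions; see ggml.h.
#include "llama.h"

int main(void) {
    llama_backend_init();                           // no longer takes a NUMA bool
    llama_numa_init(GGML_NUMA_STRATEGY_DISTRIBUTE); // optional, separate NUMA setup
    // ... load a model, create a context, run inference ...
    llama_backend_free();
    return 0;
}

On the command line, the same choice presumably surfaces through the --numa flag mentioned in the bullets above (e.g. --numa distribute), per the README update.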
25 lines · 719 B · C++
// ref: https://github.com/ggerganov/llama.cpp/issues/4952#issuecomment-1892864763

#include <cstdio>
#include <string>
#include <thread>

#include "llama.h"
#include "get-model.h"

// This creates a new context inside a std::thread and then tries to exit cleanly.
int main(int argc, char ** argv) {
    auto * model_path = get_model_or_exit(argc, argv);

    std::thread([&model_path]() {
        llama_backend_init();
        auto * model = llama_load_model_from_file(model_path, llama_model_default_params());
        auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
        llama_free(ctx);
        llama_free_model(model);
        llama_backend_free();
    }).join();

    return 0;
}
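The get-model.h helper used above is not shown on this page. For context, a plausible sketch of what it provides, assuming it takes the model path from argv and otherwise falls back to an environment variable; the variable name LLAMACPP_TEST_MODELFILE and the skip-on-missing behavior are assumptions, not confirmed by this page.

// Hypothetical sketch of get_model_or_exit(); not the actual helper.
#include <cstdio>
#include <cstdlib>

static char * get_model_or_exit(int argc, char ** argv) {
    if (argc > 1) {
        return argv[1];                                    // explicit path wins
    }
    char * model_path = getenv("LLAMACPP_TEST_MODELFILE"); // assumed variable name
    if (model_path == NULL || model_path[0] == '\0') {
        fprintf(stderr, "no model file provided, skipping test\n");
        exit(EXIT_SUCCESS);                                // assumed: missing model == skip
    }
    return model_path;
}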