	common : Update the docs on -t --threads (#16236)
* Update the docs on -t --threads
* Revert "Update the docs on -t --threads"
This reverts commit eba97345e2.
* docs: clarify -t/--threads parameter uses CPU threads and defaults to all available cores
* Update arg.cpp
@@ -1760,7 +1760,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
     add_opt(common_arg(
         {"-t", "--threads"}, "N",
-        string_format("number of threads to use during generation (default: %d)", params.cpuparams.n_threads),
+        string_format("number of CPU threads to use during generation (default: %d)", params.cpuparams.n_threads),
         [](common_params & params, int value) {
             params.cpuparams.n_threads = value;
             if (params.cpuparams.n_threads <= 0) {
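The hunk is cut off just before the handling of non-positive values. As context for the "defaults to all available cores" wording in the commit message, here is a minimal sketch of such a fallback; this is an illustration only, not the repository's actual code, and the helper name resolve_n_threads is hypothetical:

#include <thread>

// Illustrative sketch: resolve a user-supplied thread count, where any value
// <= 0 falls back to all available CPU cores, matching the behaviour the
// updated -t/--threads documentation describes.
static int resolve_n_threads(int requested) {
    if (requested <= 0) {
        // hardware_concurrency() may return 0 when the core count is unknown.
        unsigned hw = std::thread::hardware_concurrency();
        return hw > 0 ? static_cast<int>(hw) : 1;
    }
    return requested;
}

With a fallback like this, passing -t 0 (or a negative value) would use every core the runtime reports rather than failing or running single-threaded.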