Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-30 08:42:00 +00:00)
	common : refactor arg parser (#9308)
* (wip) argparser v3
* migrated
* add test
* handle env
* fix linux build
* add export-docs example
* fix build (2)
* skip build test-arg-parser on windows
* update server docs
* bring back missing --alias
* bring back --n-predict
* clarify test-arg-parser
* small correction
* add comments
* fix args with 2 values
* refine example-specific args
* no more lambda capture

Co-authored-by: slaren <slaren@users.noreply.github.com>

* params.sparams
* optimize more
* export-docs --> gen-docs
@@ -17,9 +17,7 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
-static void print_usage(int argc, char ** argv, const gpt_params & params) {
-    gpt_params_print_usage(argc, argv, params);
-
+static void print_usage(int, char ** argv) {
     LOG_TEE("\nexample usage:\n");
     LOG_TEE("\n    %s \\\n"
             "       -m model.gguf -f some-text.txt [-o imatrix.dat] [--process-output] [--verbosity 1] \\\n"
@@ -579,8 +577,8 @@ int main(int argc, char ** argv) {
     params.logits_all = true;
     params.verbosity = 1;
 
-    if (!gpt_params_parse(argc, argv, params)) {
-        print_usage(argc, argv, params);
+    auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON, print_usage);
+    if (!gpt_params_parse(argc, argv, params, options)) {
         return 1;
     }
 
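For reference, here is a minimal self-contained sketch of the new calling convention, modeled on the imatrix hunks above. Only gpt_params_parser_init, gpt_params_parse, LLAMA_EXAMPLE_COMMON, params.verbosity, and the print_usage callback signature come from the diff itself; the common.h include, the gpt_params::model field, and the use of fprintf instead of LOG_TEE are assumptions made for illustration.

// Sketch only: assumes the parser declarations live in common.h as in the
// other examples; exact header and field names are not verified here.
#include "common.h"

#include <cstdio>

static void print_usage(int /*argc*/, char ** argv) {
    // Example-specific usage text; the common parser invokes this callback
    // in addition to printing the shared option list.
    fprintf(stderr, "usage: %s -m model.gguf [options]\n", argv[0]);
}

int main(int argc, char ** argv) {
    gpt_params params;

    // Set example-specific defaults before parsing, as imatrix does above.
    params.verbosity = 1;

    // Build the option table for this example type, then parse argv into params.
    auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON, print_usage);
    if (!gpt_params_parse(argc, argv, params, options)) {
        // The parser reports its own errors; the caller only needs to bail out.
        return 1;
    }

    // params is now populated; "model" is assumed to be a std::string field of gpt_params.
    fprintf(stderr, "model: %s\n", params.model.c_str());
    return 0;
}

The notable design change visible in the diff is that each example no longer calls a monolithic gpt_params_print_usage; instead it passes an example identifier and a usage callback to gpt_params_parser_init, which returns the option set consumed by gpt_params_parse.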
Author: Xuan Son Nguyen