Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-30 08:42:00 +00:00)
	llama : improve llama_batch API + simplify parallel example
@@ -10,10 +10,12 @@ int main(int argc, char ** argv) {
     gpt_params params;
 
     if (argc == 1 || argv[1][0] == '-') {
-        printf("usage: %s MODEL_PATH [PROMPT]\n" , argv[0]);
+        printf("usage: %s MODEL_PATH [PROMPT] [PARALLEL]\n" , argv[0]);
         return 1 ;
     }
 
+    int n_parallel = 1;
+
     if (argc >= 2) {
         params.model = argv[1];
     }
@@ -22,6 +24,10 @@ int main(int argc, char ** argv) {
         params.prompt = argv[2];
     }
 
+    if (argc >= 4) {
+        n_parallel = std::atoi(argv[3]);
+    }
+
     if (params.prompt.empty()) {
         params.prompt = "Hello my name is";
     }
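For context, a minimal standalone sketch of the argument-parsing pattern the hunks above introduce (MODEL_PATH [PROMPT] [PARALLEL]). It is not part of the commit: the n_parallel < 1 guard and the final printf are illustrative additions, added because std::atoi returns 0 for non-numeric input.

// Hypothetical, self-contained sketch; mirrors the parsing added in the diff above.
#include <cstdio>
#include <cstdlib>
#include <string>

int main(int argc, char ** argv) {
    std::string model_path;
    std::string prompt = "Hello my name is";
    int n_parallel = 1;

    if (argc == 1 || argv[1][0] == '-') {
        printf("usage: %s MODEL_PATH [PROMPT] [PARALLEL]\n", argv[0]);
        return 1;
    }

    if (argc >= 2) { model_path = argv[1]; }
    if (argc >= 3) { prompt     = argv[2]; }
    if (argc >= 4) { n_parallel = std::atoi(argv[3]); }

    // std::atoi returns 0 on invalid input; fall back to a single sequence (illustrative guard).
    if (n_parallel < 1) {
        n_parallel = 1;
    }

    printf("model = %s, prompt = '%s', n_parallel = %d\n",
           model_path.c_str(), prompt.c_str(), n_parallel);
    return 0;
}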
Georgi Gerganov