mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	common : refactor downloading system, handle mmproj with -hf option (#12694)
* (wip) refactor downloading system [no ci]
* fix all examples
* fix mmproj with -hf
* gemma3: update readme
* only handle mmproj in llava example
* fix multi-shard download
* windows: fix problem with std::min and std::max
* fix 2
This commit is contained in:
		| @@ -405,7 +405,7 @@ int main(int argc, char ** argv) { | ||||
|         params.prompt_file = "used built-in defaults"; | ||||
|     } | ||||
|     LOG_INF("External prompt file: \033[32m%s\033[0m\n", params.prompt_file.c_str()); | ||||
|     LOG_INF("Model and path used:  \033[32m%s\033[0m\n\n", params.model.c_str()); | ||||
|     LOG_INF("Model and path used:  \033[32m%s\033[0m\n\n", params.model.path.c_str()); | ||||
|  | ||||
|     LOG_INF("Total prompt tokens: %6d, speed: %5.2f t/s\n", n_total_prompt, (double) (n_total_prompt              ) / (t_main_end - t_main_start) * 1e6); | ||||
|     LOG_INF("Total gen tokens:    %6d, speed: %5.2f t/s\n", n_total_gen,    (double) (n_total_gen                 ) / (t_main_end - t_main_start) * 1e6); | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Xuan-Son Nguyen
					Xuan-Son Nguyen