Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-31 08:51:55 +00:00
			
		
		
		
	quantize : fix precedence of cli args (#6541)
This commit is contained in:
@@ -13562,10 +13562,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             if (!params->pure && ggml_is_quantized(default_type)) {
                 new_type = llama_tensor_get_type(qs, new_type, tensor, ftype);
             }
-            else if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) {
+            if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) {
                 new_type = params->token_embedding_type;
             }
-            else if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
+            if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
                 new_type = params->output_tensor_type;
             }
Reference in new issue · Block a user
Author: Georgi Gerganov