	llama : fix non-quantization of expert gating tensors (#5754)
This reverts a single line from #5475
@@ -11162,7 +11162,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         quantize &= !params->only_copy;
 
         // do not quantize expert gating tensors
-        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_FFN_GATE_INP, "weight");
+        // NOTE: can't use LLM_TN here because the layer number is not known
+        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
 
         // do not quantize positional embeddings and token types (BERT)
         quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD,    "weight");
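For context: per-layer tensor names carry a block index (e.g. "blk.0.ffn_gate_inp.weight"), so an exact comparison against a name built without the layer number never matches, and the expert gating tensors ended up being quantized anyway. The substring test skips the gating tensor in every layer. Below is a minimal standalone sketch of that check; the helper name should_quantize and the sample tensor names are illustrative only, not part of llama.cpp.

#include <iostream>
#include <string>

// Sketch: decide whether a tensor should be quantized.
// Expert gating tensors are skipped regardless of which layer they belong to,
// because the substring match works for any block index.
static bool should_quantize(const std::string & name) {
    bool quantize = true;
    // do not quantize expert gating tensors (any layer)
    quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
    return quantize;
}

int main() {
    std::cout << should_quantize("blk.0.ffn_gate_inp.weight") << "\n"; // 0: skipped
    std::cout << should_quantize("blk.0.ffn_down.weight")     << "\n"; // 1: quantized
}

An exact string comparison could still be used if the layer index were threaded through to the check, but matching on the suffix keeps the fix to a single line, which is what this commit restores.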