	llama : fix qs.n_attention_wv for DeepSeek-V2 (#9156)
@@ -16822,7 +16822,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
         // TODO: avoid hardcoded tensor names - use the TN_* constants
         if (name.find("attn_v.weight")   != std::string::npos ||
-            name.find("attn_qkv.weight") != std::string::npos) {
+            name.find("attn_qkv.weight") != std::string::npos ||
+            name.find("attn_kv_b.weight")!= std::string::npos) {
             ++qs.n_attention_wv;
         } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
             qs.has_output = true;
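For context: the quantization pre-pass counts tensors that hold attention V weights so that later per-layer quant-type decisions (qs.i_attention_wv / qs.n_attention_wv) and a sanity check against n_layer line up. DeepSeek-V2's MLA attention folds the V up-projection into attn_kv_b.weight rather than a separate attn_v.weight, so that name must be counted too. The following is a minimal, self-contained sketch of this counting heuristic, not the actual llama.cpp code; the quantize_state struct, the is_attention_wv helper, and the tensor names are illustrative stand-ins.

// Sketch of the name-based counting heuristic this commit fixes.
// quantize_state is a hypothetical stand-in for llama.cpp's qs.
#include <cstdio>
#include <string>
#include <vector>

struct quantize_state {
    int n_attention_wv = 0;
};

static bool is_attention_wv(const std::string & name) {
    return name.find("attn_v.weight")    != std::string::npos ||
           name.find("attn_qkv.weight")  != std::string::npos ||
           name.find("attn_kv_b.weight") != std::string::npos; // the fix
}

int main() {
    quantize_state qs;
    // Illustrative DeepSeek-V2-style tensor names for a 2-layer model.
    const std::vector<std::string> names = {
        "blk.0.attn_kv_b.weight", "blk.0.ffn_down.weight",
        "blk.1.attn_kv_b.weight", "blk.1.ffn_down.weight",
    };
    for (const auto & name : names) {
        if (is_attention_wv(name)) {
            ++qs.n_attention_wv;
        }
    }
    // Without counting attn_kv_b.weight, this would stay 0 for
    // DeepSeek-V2 and the "n_attention_wv is unexpected" check
    // downstream could trip.
    std::printf("n_attention_wv = %d\n", qs.n_attention_wv); // prints 2
    return 0;
}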
Author: compilade