Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-30 08:42:00 +00:00.
	llama : use Q4_K for attn_v for Q2_K_S when n_gqa >= 4 (#4996)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
This commit is contained in:
@@ -8477,7 +8477,12 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty
         }
         else if (name == "token_embd.weight") new_type = GGML_TYPE_Q2_K;
     } else if (name.find("attn_v.weight") != std::string::npos) {
-        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
+            new_type = GGML_TYPE_Q4_K;
+        }
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
             new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
         }
Reference in New Issue
Block a user
Author: Kawrakow