Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-30 08:42:00 +00:00)
	llama : support attention bias on LLaMA architecture (#4283)
* Support attention_bias on LLaMA architecture

  QKVO bias; should fix InternLM (https://github.com/ggerganov/llama.cpp/issues/3133) and works for LLaMAfied Qwen models (https://github.com/ggerganov/llama.cpp/pull/3743#issuecomment-1825923608).

* check existence of qkvo bias while loading llama models

  Tested on LLaMA2, CUDA and CPU.

* Update llama.cpp
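For context on the diff below: each attention projection becomes an affine map, Q = Wq*x + bq, K = Wk*x + bk, V = Wv*x + bv, plus an output bias bo, and every bias is optional (NULL when the model was trained without one). A minimal standalone sketch of that optional-bias pattern, in plain C++ with a hypothetical project() helper rather than the real ggml API:

    #include <cstddef>
    #include <vector>

    // y = W*x + b, where the bias b may be absent; a null b mirrors the
    // bq/bk/bv/bo == NULL case in the loader below.
    static std::vector<float> project(const std::vector<std::vector<float>> & W,
                                      const std::vector<float> & x,
                                      const std::vector<float> * b) {
        std::vector<float> y(W.size(), 0.0f);
        for (size_t r = 0; r < W.size(); ++r) {
            for (size_t c = 0; c < x.size(); ++c) {
                y[r] += W[r][c] * x[c];
            }
            if (b) {
                y[r] += (*b)[r]; // optional attention bias term
            }
        }
        return y;
    }

The same check appears twice in the commit: once at load time (the bias tensors may be missing from the model file) and once at graph-build time (the ggml_add is only emitted when the bias exists).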
 llama.cpp | 52 ++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 48 insertions(+), 4 deletions(-)
@@ -1266,6 +1266,9 @@ struct llama_layer {
     struct ggml_tensor * wqkv;
 
     // attention bias
+    struct ggml_tensor * bq;
+    struct ggml_tensor * bk;
+    struct ggml_tensor * bv;
     struct ggml_tensor * bo;
     struct ggml_tensor * bqkv;
 
@@ -2809,6 +2812,30 @@ static void llm_load_tensors(
                         layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, backend_split);
                         layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd},     backend_split);
 
+                        try {
+                            layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend);
+                        } catch (const std::runtime_error& e) {
+                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bq = NULL; else throw;
+                        }
+
+                        try {
+                            layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, backend);
+                        } catch (const std::runtime_error& e) {
+                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bk = NULL; else throw;
+                        }
+
+                        try {
+                            layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, backend);
+                        } catch (const std::runtime_error& e) {
+                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bv = NULL; else throw;
+                        }
+
+                        try {
+                            layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
+                        } catch (const std::runtime_error& e) {
+                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bo = NULL; else throw;
+                        }
+
                         layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
 
                         layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, backend_split);
@@ -2817,9 +2844,14 @@ static void llm_load_tensors(
 
                         if (backend == GGML_BACKEND_GPU) {
                             vram_weights +=
-                                ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq)       + ggml_nbytes(layer.wk)       +
-                                ggml_nbytes(layer.wv)        + ggml_nbytes(layer.wo)       + ggml_nbytes(layer.ffn_norm) +
-                                ggml_nbytes(layer.ffn_gate)  + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up);
+                                ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
+                                ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) +
+                                (layer.bq ? ggml_nbytes(layer.bq) : 0) +
+                                (layer.bk ? ggml_nbytes(layer.bk) : 0) +
+                                (layer.bv ? ggml_nbytes(layer.bv) : 0) +
+                                (layer.bo ? ggml_nbytes(layer.bo) : 0) +
+                                ggml_nbytes(layer.ffn_norm) + ggml_nbytes(layer.ffn_gate) +
+                                ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up);
                         }
                     }
                 } break;
@@ -3983,12 +4015,24 @@ struct llm_build_context {
                 // compute Q and K and RoPE them
                 struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                 cb(Qcur, "Qcur", il);
+                if (model.layers[il].bq) {
+                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+                    cb(Qcur, "Qcur", il);
+                }
 
                 struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
                 cb(Kcur, "Kcur", il);
+                if (model.layers[il].bk) {
+                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+                    cb(Kcur, "Kcur", il);
+                }
 
                 struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                 cb(Vcur, "Vcur", il);
+                if (model.layers[il].bv) {
+                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+                    cb(Vcur, "Vcur", il);
+                }
 
                 Qcur = ggml_rope_custom(
                     ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens), inp_pos,
@@ -4007,7 +4051,7 @@ struct llm_build_context {
                 llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
 
                 cur = llm_build_kqv(ctx0, hparams, kv_self,
-                        model.layers[il].wo, NULL,
+                        model.layers[il].wo, model.layers[il].bo,
                         Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
                 cb(cur, "kqv_out", il);
             }
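The load-time probe above repeats the same try/catch four times. A hedged sketch of how it could be factored out, assuming only what the diff itself assumes (ml.create_tensor throws std::runtime_error with "not found" in the message when a tensor is absent); create_optional() is a hypothetical helper, not part of llama.cpp:

    #include <stdexcept>
    #include <string>

    struct ggml_tensor; // opaque here; the real definition lives in ggml.h

    // Run a tensor-creating callable and treat "not found" as "tensor absent":
    // return NULL for a missing optional bias, rethrow anything else.
    template <typename F>
    static ggml_tensor * create_optional(F && create) {
        try {
            return create();
        } catch (const std::runtime_error & e) {
            if (std::string(e.what()).find("not found") != std::string::npos) {
                return nullptr; // model was exported without this tensor
            }
            throw; // any other load error stays fatal
        }
    }

Each probe in the loader hunk would then collapse to a one-liner, e.g. layer.bq = create_optional([&] { return ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend); });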
Author: CausalLM