mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-31 08:51:55 +00:00)
	llama : add StarCoder2 support (#5795)
* Add support for starcoder2
* handle rope type
* skip rope freq and rotary embeddings from being serialized
* resolve comments
* Update llama.cpp
* remove redundant changes
* handle `rope-theta`
* llama : change starcoder2 rope type
* address comment

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
This commit is contained in:

Sourab Mangrulkar
committed by GitHub

parent 38d16b1426
commit c29af7e225

1 changed file: llama.cpp (199 additions, 0 deletions)
@@ -211,6 +211,7 @@ enum llm_arch {
     LLM_ARCH_INTERNLM2,
     LLM_ARCH_MINICPM,
     LLM_ARCH_GEMMA,
+    LLM_ARCH_STARCODER2,
     LLM_ARCH_UNKNOWN,
 };
 
@@ -238,6 +239,7 @@ static std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_INTERNLM2,       "internlm2"  },
     { LLM_ARCH_MINICPM,         "minicpm"    },
     { LLM_ARCH_GEMMA,           "gemma"      },
+    { LLM_ARCH_STARCODER2,      "starcoder2" },
 };
 
 enum llm_kv {
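
The new architecture becomes reachable once its enum value and its "starcoder2" name string are registered: the loader resolves the general.architecture field of a GGUF file back to the enum by scanning this map. A minimal standalone sketch of that reverse lookup (the trimmed two-entry map here is illustrative, not the full table):

// sketch: resolve a GGUF architecture string to the enum; llama.cpp's
// own llm_arch_from_string iterates LLM_ARCH_NAMES the same way
#include <cstdio>
#include <map>
#include <string>

enum llm_arch { LLM_ARCH_GEMMA, LLM_ARCH_STARCODER2, LLM_ARCH_UNKNOWN };

static std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
    { LLM_ARCH_GEMMA,      "gemma"      },
    { LLM_ARCH_STARCODER2, "starcoder2" },
};

static llm_arch llm_arch_from_string(const std::string & name) {
    for (const auto & kv : LLM_ARCH_NAMES) {
        if (kv.second == name) {
            return kv.first;
        }
    }
    return LLM_ARCH_UNKNOWN; // unrecognized model files fail loudly later
}

int main() {
    printf("%d\n", llm_arch_from_string("starcoder2") == LLM_ARCH_STARCODER2); // prints 1
}
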
@@ -779,6 +781,24 @@ static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES =
             { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
         },
     },
+    {
+        LLM_ARCH_STARCODER2,
+        {
+            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+            { LLM_TENSOR_OUTPUT,          "output" },
+            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
+            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
+            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+        },
+    },
     {
         LLM_ARCH_UNKNOWN,
         {
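
These per-block entries are templates: at load time the %d is filled with the layer index and a "weight" or "bias" suffix is appended to form the concrete tensor name looked up in the GGUF file. A small sketch approximating what llama.cpp's tn(...) helper produces (standalone, simplified signature):

// sketch: expand "blk.%d.attn_q" + layer index + suffix into the
// concrete tensor name stored in the GGUF file
#include <cstdio>
#include <string>

static std::string tn(const char * pattern, const char * suffix, int bid) {
    char buf[256];
    snprintf(buf, sizeof(buf), pattern, bid); // substitute the block index
    return std::string(buf) + "." + suffix;   // append "weight" or "bias"
}

int main() {
    // query projection of layer 5 -> "blk.5.attn_q.weight"
    printf("%s\n", tn("blk.%d.attn_q", "weight", 5).c_str());
}
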
@@ -3320,6 +3340,16 @@ static void llm_load_hparams(
                     default: model.type = e_model::MODEL_UNKNOWN;
                }
             } break;
+        case LLM_ARCH_STARCODER2:
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                switch (hparams.n_layer) {
+                    case 30: model.type = e_model::MODEL_3B; break;
+                    case 32: model.type = e_model::MODEL_7B; break;
+                    case 40: model.type = e_model::MODEL_15B; break;
+                    default: model.type = e_model::MODEL_UNKNOWN;
+                }
+            } break;
         default: (void)0;
     }
 
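
GGUF stores no explicit size label, so the variant is inferred from the layer count read out of the file header. The same mapping as a standalone function, using the constants from the hunk above (30/32/40 layers for the 3B/7B/15B checkpoints):

// sketch: the size heuristic from llm_load_hparams as a plain function
#include <cstdio>

static const char * starcoder2_type(int n_layer) {
    switch (n_layer) {
        case 30: return "3B";
        case 32: return "7B";
        case 40: return "15B";
        default: return "unknown";
    }
}

int main() {
    printf("%s\n", starcoder2_type(40)); // prints "15B"
}
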
@@ -4490,6 +4520,56 @@ static bool llm_load_tensors(
                         layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd});
                     }
                 } break;
+            case LLM_ARCH_STARCODER2:
+                {
+                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+
+                    // output
+                    {
+                        model.output_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
+                        model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd});
+
+                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
+                        // if output is NULL, init from the input tok embed
+                        if (model.output == NULL) {
+                            model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+                            ml.n_created--; // artificial tensor
+                            ml.size_data += ggml_nbytes(model.output);
+                        }
+
+                    }
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        ggml_context * ctx_layer = ctx_for_layer(i);
+                        ggml_context * ctx_split = ctx_for_layer_split(i);
+
+                        auto & layer = model.layers[i];
+
+                        layer.attn_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
+                        layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd});
+
+                        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd});
+                        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa});
+                        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa});
+                        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
+
+                        // optional bias tensors
+                        layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd});
+                        layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa});
+                        layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa});
+                        layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
+
+                        layer.ffn_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
+                        layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd});
+
+                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd});
+                        layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff});
+
+                        // optional bias tensors
+                        layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
+                        layer.ffn_up_b   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP ,  "bias", i), {  n_ff});
+                    }
+                } break;
             default:
                 throw std::runtime_error("unknown architecture");
         }
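
Note that wk/wv are created with n_embd_gqa columns rather than n_embd: StarCoder2 uses grouped-query attention, so the key/value projections are only n_embd_head * n_head_kv wide. A sketch of the shape arithmetic, with illustrative head counts (not read from a real config file):

// sketch: K/V projection widths under grouped-query attention
#include <cstdio>

int main() {
    // illustrative values, not taken from an actual StarCoder2 GGUF
    const int n_embd    = 3072; // hidden size
    const int n_head    = 24;   // query heads
    const int n_head_kv = 2;    // key/value heads (GQA)

    const int n_embd_head = n_embd / n_head;         // width of one head
    const int n_embd_gqa  = n_embd_head * n_head_kv; // width of wk/wv output

    printf("wq: {%d, %d}\n", n_embd, n_embd);
    printf("wk: {%d, %d}\n", n_embd, n_embd_gqa); // much narrower than wq
    printf("wv: {%d, %d}\n", n_embd, n_embd_gqa);
}
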
@@ -7559,6 +7639,120 @@ struct llm_build_context {
 
         return gf;
     }
+
+    struct ggml_cgraph * build_starcoder2() {
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
+
+        const int64_t n_embd_head = hparams.n_embd_head_v;
+        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+        GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+        struct ggml_tensor * cur;
+        struct ggml_tensor * inpL;
+
+        inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
+        cb(inpL, "inp_embd", -1);
+
+        // inp_pos - contains the positions
+        struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
+        cb(inp_pos, "inp_pos", -1);
+
+        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+        struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
+        cb(KQ_mask, "KQ_mask", -1);
+
+        for (int il = 0; il < n_layer; ++il) {
+            struct ggml_tensor * inpSA = inpL;
+
+            // norm
+            cur = llm_build_norm(ctx0, inpL, hparams,
+                    model.layers[il].attn_norm, model.layers[il].attn_norm_b,
+                    LLM_NORM, cb, il);
+            cb(cur, "attn_norm", il);
+
+            // self-attention
+            {
+                // compute Q and K and RoPE them
+                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
+                cb(Qcur, "Qcur", il);
+                if (model.layers[il].bq) {
+                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+                    cb(Qcur, "Qcur", il);
+                }
+
+                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
+                cb(Kcur, "Kcur", il);
+                if (model.layers[il].bk) {
+                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+                    cb(Kcur, "Kcur", il);
+                }
+
+                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
+                cb(Vcur, "Vcur", il);
+                if (model.layers[il].bv) {
+                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+                    cb(Vcur, "Vcur", il);
+                }
+
+                Qcur = ggml_rope_custom(
+                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
+                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                );
+                cb(Qcur, "Qcur", il);
+
+                Kcur = ggml_rope_custom(
+                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
+                    n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                );
+                cb(Kcur, "Kcur", il);
+
+                cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
+                        model.layers[il].wo, model.layers[il].bo,
+                        Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                cb(cur, "kqv_out", il);
+            }
+
+            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+            cb(ffn_inp, "ffn_inp", il);
+
+            // feed-forward network
+
+            cur = llm_build_norm(ctx0, ffn_inp, hparams,
+                    model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
+                    LLM_NORM, cb, il);
+            cb(cur, "ffn_norm", il);
+
+            cur = llm_build_ffn(ctx0, cur,
+                        model.layers[il].ffn_up,   model.layers[il].ffn_up_b,
+                        NULL,                      NULL,
+                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
+                        NULL,
+                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
+            cb(cur, "ffn_out", il);
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
+        }
+
+        cur = inpL;
+
+        cur = llm_build_norm(ctx0, cur, hparams,
+                model.output_norm, model.output_norm_b,
+                LLM_NORM, cb, -1);
+        cb(cur, "result_norm", -1);
+
+        // lm_head
+        cur = ggml_mul_mat(ctx0, model.output, cur);
+        cb(cur, "result_output", -1);
+
+        ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
 };
 
 static struct ggml_cgraph * llama_build_graph_defrag(llama_context & lctx, const std::vector<uint32_t> & ids) {
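
The feed-forward block is built with LLM_FFN_GELU and LLM_FFN_SEQ, i.e. a plain sequential MLP (up-projection, GELU, down-projection, each with a bias) rather than LLaMA's gated SiLU FFN. A sketch of the element-wise activation that sits between the two matmuls, assuming ggml's tanh-approximation GELU:

// sketch: the activation applied between ffn_up and ffn_down
#include <cmath>
#include <cstdio>

static float gelu(float x) {
    const float c = 0.79788456f; // sqrt(2/pi)
    return 0.5f * x * (1.0f + tanhf(c * (x + 0.044715f * x * x * x)));
}

int main() {
    // toy activations as they might look after ffn_up (+ ffn_up_b);
    // ffn_down (+ ffn_down_b) would then be applied to the result
    const float h[3] = { -1.0f, 0.0f, 2.0f };
    for (float v : h) {
        printf("%.4f ", gelu(v));
    }
    printf("\n");
}
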
@@ -7705,6 +7899,10 @@ static struct ggml_cgraph * llama_build_graph(
             {
                 result = llm.build_gemma();
             } break;
+        case LLM_ARCH_STARCODER2:
+            {
+                result = llm.build_starcoder2();
+            } break;
         default:
             GGML_ASSERT(false);
     }
@@ -12084,6 +12282,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
         case LLM_ARCH_QWEN2:
         case LLM_ARCH_PHI2:
         case LLM_ARCH_GEMMA:
+        case LLM_ARCH_STARCODER2:
             return LLAMA_ROPE_TYPE_NEOX;
 
         // all model arches should be listed explicitly here
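
LLAMA_ROPE_TYPE_NEOX selects the rotation layout in which dimension i is paired with dimension i + n_rot/2, rather than with its adjacent neighbour as in the original (NORM) layout; this matches the convention the StarCoder2 weights were trained with. A minimal sketch of the rotation for a single head, where freq_base stands in for the model's rope-theta value:

// sketch: NEOX-style RoPE applied in place to one head of size n_rot
#include <cmath>
#include <cstdio>

static void rope_neox(float * x, int n_rot, int pos, float freq_base) {
    const int half = n_rot / 2;
    for (int i = 0; i < half; ++i) {
        // theta_i = pos * base^(-2i/n_rot), the per-dimension angle
        const float theta = pos * powf(freq_base, -2.0f * i / n_rot);
        const float c = cosf(theta);
        const float s = sinf(theta);
        const float x0 = x[i];
        const float x1 = x[i + half]; // paired element, half a head away
        x[i]        = x0 * c - x1 * s;
        x[i + half] = x0 * s + x1 * c;
    }
}

int main() {
    float q[8] = { 1, 0, 1, 0, 1, 0, 1, 0 };
    rope_neox(q, 8, /*pos=*/3, /*freq_base=*/10000.0f);
    for (float v : q) {
        printf("%.3f ", v);
    }
    printf("\n");
}
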