Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-31 08:51:55 +00:00)
	chat : fix kimi-k2 chat template (#14852)
@@ -1933,12 +1933,6 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_TOKEN_EMBD_NORM,   "token_embd_norm" },
         }
     },
-    {
-        LLM_ARCH_UNKNOWN,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-        },
-    },
     {
         LLM_ARCH_DREAM,
         {
@@ -1956,6 +1950,12 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
         },
     },
+    {
+        LLM_ARCH_UNKNOWN,
+        {
+            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+        },
+    },
 };
 
 static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
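A note on these two hunks: the hunk header shows the table is a std::map<llm_arch, std::map<llm_tensor, const char *>> (presumably LLM_TENSOR_NAMES), so the position of the LLM_ARCH_UNKNOWN entry in the initializer list does not affect lookups; moving it to the end appears to be purely organizational, keeping the fallback entry last. A minimal sketch of why the reordering is behavior-neutral, using simplified stand-in names (arch, ARCH_A, ARCH_B, ARCH_UNKNOWN are hypothetical, not the real llama.cpp enums):

#include <cassert>
#include <map>
#include <string>

// Hypothetical, simplified stand-in for llm_arch.
enum arch { ARCH_A, ARCH_B, ARCH_UNKNOWN };

int main() {
    // Same entries, different initializer order: the maps compare equal,
    // because std::map orders elements by key, not by insertion order.
    std::map<arch, std::string> names1 = {
        { ARCH_UNKNOWN, "token_embd" },
        { ARCH_A,       "a"          },
        { ARCH_B,       "b"          },
    };
    std::map<arch, std::string> names2 = {
        { ARCH_A,       "a"          },
        { ARCH_B,       "b"          },
        { ARCH_UNKNOWN, "token_embd" },
    };
    assert(names1 == names2);
    return 0;
}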
@@ -718,10 +718,9 @@ int32_t llm_chat_apply_template(
             }
 
             ss << message->content << "<|im_end|>";
-
-            if (add_ass) {
-                ss << "<|im_assistant|>assistant<|im_middle|>";
-            }
         }
+        if (add_ass) {
+            ss << "<|im_assistant|>assistant<|im_middle|>";
+        }
     } else {
         // template not supported
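This is the actual fix: the assistant generation header used to be appended inside the per-message loop, so it appeared after every message; it is now appended once, after the loop. A minimal standalone sketch (not the actual llama.cpp code) of what the patched control flow produces; struct msg and kimi_k2_prompt are hypothetical names, and the per-message role header is only an approximation of the Kimi-K2 format, with just "<|im_end|>" and "<|im_assistant|>assistant<|im_middle|>" taken from the diff:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical, simplified message type; the real code iterates llama chat messages.
struct msg { std::string role, content; };

// Builds a Kimi-K2-style prompt the way the patched loop does: the assistant
// generation header is appended once, after all messages, not per message.
static std::string kimi_k2_prompt(const std::vector<msg> & msgs, bool add_ass) {
    std::ostringstream ss;
    for (const auto & m : msgs) {
        // Approximated role header, e.g. "<|im_user|>user<|im_middle|>".
        ss << "<|im_" << m.role << "|>" << m.role << "<|im_middle|>";
        ss << m.content << "<|im_end|>";
    }
    if (add_ass) {
        ss << "<|im_assistant|>assistant<|im_middle|>";
    }
    return ss.str();
}

int main() {
    std::vector<msg> msgs = {
        { "user",      "Hello"    },
        { "assistant", "Hi there" },
        { "user",      "Thanks"   },
    };
    // With the old placement, "<|im_assistant|>assistant<|im_middle|>" would
    // have appeared after every message; now it appears exactly once at the end.
    std::cout << kimi_k2_prompt(msgs, /*add_ass=*/true) << "\n";
    return 0;
}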
Xuan-Son Nguyen