mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-30 08:42:00 +00:00 
			
		
		
		
	llama : add phi 3 chat template (#6857)
* Add phi 3 chat template & tests
* test : fix chat template result

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
This commit is contained in:
		| @@ -17447,6 +17447,15 @@ static int32_t llama_chat_apply_template_internal( | ||||
|         if (add_ass) { | ||||
|             ss << "<|start_header_id|>assistant<|end_header_id|>\n\n"; | ||||
|         } | ||||
|     } else if (tmpl == "phi3" || (tmpl.find("<|assistant|>") != std::string::npos && tmpl.find("<|end|>") != std::string::npos )) { | ||||
|         // Phi 3 | ||||
|         for (auto message : chat) { | ||||
|             std::string role(message->role); | ||||
|             ss << "<|" << role << "|>\n" << trim(message->content) << "<|end|>\n"; | ||||
|         } | ||||
|         if (add_ass) { | ||||
|             ss << "<|assistant|>\n"; | ||||
|         } | ||||
|     } else { | ||||
|         // template not supported | ||||
|         return -1; | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Tristan Druyen
					Tristan Druyen