	llama : add Orion chat template (#6066)
Author: Xuan Son Nguyen

llama.cpp | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
@@ -14242,6 +14242,26 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "<start_of_turn>model\n";
         }
+    } else if (tmpl == "orion" || tmpl.find("'\\n\\nAssistant: ' + eos_token") != std::string::npos) {
+        // OrionStarAI/Orion-14B-Chat
+        std::string system_prompt = "";
+        for (auto message : chat) {
+            std::string role(message->role);
+            if (role == "system") {
+                // Orion has no system message support; merge it into the next user prompt
+                system_prompt = message->content;
+                continue;
+            } else if (role == "user") {
+                ss << "Human: ";
+                if (!system_prompt.empty()) {
+                    ss << system_prompt << "\n\n";
+                    system_prompt = "";
+                }
+                ss << message->content << "\n\nAssistant: </s>";
+            } else {
+                ss << message->content << "</s>";
+            }
+        }
     } else {
         // template not supported
         return -1;
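For reference, the new branch is reached through the public llama_chat_apply_template() API declared in llama.h. Below is a minimal sketch of rendering a conversation with the Orion template; the messages, buffer size, and error handling are illustrative assumptions, not part of this commit. Note that "Assistant: </s>" is intentional: it reproduces the '\n\nAssistant: ' + eos_token sequence from the model's own Jinja template, which is also the substring the detection logic sniffs for.

// Minimal sketch: format a system + user conversation with the Orion
// template via the public API. Message contents and buffer size are
// illustrative; the function signature and the "orion" template name
// come from llama.h and this commit.
#include "llama.h"

#include <cstdio>
#include <vector>

int main() {
    std::vector<llama_chat_message> chat = {
        {"system", "You are a helpful assistant."},
        {"user",   "Hello!"},
    };

    std::vector<char> buf(1024);
    // A null model plus an explicit template name skips the lookup of the
    // tokenizer.chat_template GGUF metadata. add_ass is not used by the
    // Orion branch, since "Assistant: " is emitted with each user turn.
    int32_t n = llama_chat_apply_template(nullptr, "orion",
                                          chat.data(), chat.size(),
                                          /*add_ass=*/true,
                                          buf.data(), (int32_t) buf.size());
    if (n < 0) {
        fprintf(stderr, "template not supported\n");
        return 1;
    }
    // Expected output, with the system prompt merged into the user turn:
    // "Human: You are a helpful assistant.\n\nHello!\n\nAssistant: </s>"
    printf("%.*s\n", n, buf.data());
    return 0;
}

Calling the API with a null model in this way is the same pattern tests/test-chat-template.cpp uses to exercise each template branch in isolation.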