	llama : add jinja template for rwkv-world (#14665)
* llama : add jinja template for rwkv-world

Signed-off-by: Molly Sophia <mollysophia379@gmail.com>

* Update convert_hf_to_gguf.py

Co-authored-by: Sigbjørn Skjæret <sigbjorn.skjaeret@scala.com>

---------

Signed-off-by: Molly Sophia <mollysophia379@gmail.com>
Co-authored-by: Sigbjørn Skjæret <sigbjorn.skjaeret@scala.com>
convert_hf_to_gguf.py
@@ -1082,7 +1082,14 @@ class TextModel(ModelBase):
         self.gguf_writer.add_token_list(tokens)
         self.gguf_writer.add_token_types(toktypes)
         special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
-        special_vocab.chat_template = "rwkv-world"
+        if special_vocab.chat_template is None:
+            template_path = Path(__file__).parent / "models" / "templates" / "llama-cpp-rwkv-world.jinja"
+            if template_path.is_file():
+                with open(template_path, "r", encoding="utf-8") as f:
+                    template = f.read()
+            else:
+                template = "rwkv-world"
+            special_vocab.chat_template = template
         # hack: Add '\n\n' as the EOT token to make it chat normally
         special_vocab._set_special_token("eot", 261)
         # hack: Override these as they have already been set (incorrectly)
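With this change, conversion embeds the full jinja file (when it exists next to the script) into the GGUF metadata key tokenizer.chat_template, and only falls back to the short name "rwkv-world" otherwise. A minimal sketch for inspecting what a converted model actually carries, assuming the gguf-py reader API (GGUFReader / get_field); the parts/data string decoding below may vary across gguf-py versions, and "model.gguf" is a placeholder path:

    # Sketch: print the chat template embedded in a converted GGUF.
    from gguf import GGUFReader

    reader = GGUFReader("model.gguf")
    field = reader.get_field("tokenizer.chat_template")
    if field is None:
        print("no chat template embedded")
    else:
        # For a string field, the last data part holds the UTF-8 bytes.
        print(bytes(field.parts[field.data[-1]]).decode("utf-8"))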
models/templates/llama-cpp-rwkv-world.jinja (new file, 34 lines)
@@ -0,0 +1,34 @@
{%- if not add_generation_prompt is defined -%}
    {%- set add_generation_prompt = true -%}
{%- endif -%}
{%- set ns = namespace(system_prompt='') -%}
{%- for message in messages -%}
    {%- if message['role'] == 'system' -%}
        {%- set ns.system_prompt = message['content'] -%}
    {%- endif -%}
{%- endfor -%}
{{bos_token}}
{%- if ns.system_prompt != '' -%}
{{- 'System: ' + ns.system_prompt + '\n\n' -}}
{%- endif -%}
{%- for message in messages -%}
    {%- if message['role'] == 'user' -%}
        {{- 'User: ' + message['content']|trim + '\n\n' -}}
    {%- endif -%}
    {%- if message['role'] == 'assistant' and message['content'] is not none -%}
        {%- set content = message['content'] -%}
        {%- if '</think>' in content -%}
            {%- set content = content.split('</think>')[-1] -%}
        {%- endif -%}
        {{- 'Assistant: ' + content|trim + '\n\n' -}}
    {%- endif -%}
{%- endfor -%}
{%- if add_generation_prompt -%}
    {{- 'Assistant:' -}}
    {%- if enable_thinking is defined and enable_thinking is false %}
        {{- ' <think>\n</think>' }}
    {%- endif %}
    {%- if enable_thinking is defined and enable_thinking is true %}
        {{- ' <think>' }}
    {%- endif %}
{%- endif -%}
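To see the prompt this template produces, here is a minimal sketch that renders it with the jinja2 package; the empty bos_token and the message contents are illustrative assumptions, not part of this commit:

    # Sketch: render the new template to inspect the conversation layout.
    from jinja2 import Template

    with open("models/templates/llama-cpp-rwkv-world.jinja", encoding="utf-8") as f:
        template = Template(f.read())

    prompt = template.render(
        bos_token="",  # illustrative; the model's real BOS comes from its vocab
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"},
        ],
        # add_generation_prompt is defaulted to true by the template itself
    )
    print(prompt)
    # -> "System: You are a helpful assistant.\n\nUser: Hello!\n\nAssistant:"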
src/llama-chat.cpp
@@ -170,7 +170,7 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
         // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
         // EXAONE-3.0-7.8B-Instruct
         return LLM_CHAT_TEMPLATE_EXAONE_3;
-    } else if (tmpl_contains("rwkv-world")) {
+    } else if (tmpl_contains("rwkv-world") || tmpl_contains("{{- 'User: ' + message['content']|trim + '\\n\\n' -}}")) {
         return LLM_CHAT_TEMPLATE_RWKV_WORLD;
     } else if (tmpl_contains("<|start_of_role|>")) {
         return LLM_CHAT_TEMPLATE_GRANITE;
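Detection now matches either the registered name or a distinctive literal line from the new jinja file; the doubled backslashes in the C++ string are needed because the raw template text contains a literal backslash-n sequence, not a newline. The same heuristic as a Python sketch (the function name is illustrative, not a llama.cpp API):

    # Sketch of the substring heuristic; the raw string keeps '\n' literal.
    NEEDLE = r"{{- 'User: ' + message['content']|trim + '\n\n' -}}"

    def is_rwkv_world_template(tmpl: str) -> bool:
        return "rwkv-world" in tmpl or NEEDLE in tmpl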