Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-28 08:31:25 +00:00
			
		
		
		
llama : add BPE pre-tokenization for Qwen2 (#7114)

* Add BPE pre-tokenization for Qwen2.
* minor: fixes

---------

Co-authored-by: Ren Xuancheng <17811943+jklj077@users.noreply.github.com>
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
This commit is contained in:
		
							
								
								
									
										5
									
								
								llama.h
									
									
									
									
									
								
							
							
						
						
									
										5
									
								
								llama.h
									
									
									
									
									
								
							| @@ -81,8 +81,9 @@ extern "C" { | ||||
|         LLAMA_VOCAB_PRE_TYPE_GPT2           = 7, | ||||
|         LLAMA_VOCAB_PRE_TYPE_REFACT         = 8, | ||||
|         LLAMA_VOCAB_PRE_TYPE_COMMAND_R      = 9, | ||||
|         LLAMA_VOCAB_PRE_TYPE_OLMO           = 10, | ||||
|         LLAMA_VOCAB_PRE_TYPE_DBRX           = 11, | ||||
|         LLAMA_VOCAB_PRE_TYPE_QWEN2          = 10, | ||||
|         LLAMA_VOCAB_PRE_TYPE_OLMO           = 11, | ||||
|         LLAMA_VOCAB_PRE_TYPE_DBRX           = 12, | ||||
|     }; | ||||
|  | ||||
|     // note: these values should be synchronized with ggml_rope | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Ren Xuancheng
					Ren Xuancheng