	convert : fix Qwen/Qwen-7b conversion (#7308)
@@ -526,7 +526,7 @@ class Model:
 
         # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
         added_vocab = tokenizer.special_tokens
-        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in (vocab | added_vocab).items()}
+        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}
 
         for i in range(vocab_size):
             if i not in reverse_vocab:
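Note on the change: `(vocab | added_vocab)` relies on the `dict` union operator, which only exists on Python 3.9+ and only accepts another plain `dict` as the right-hand operand, while `{**vocab, **added_vocab}` unpacks any mapping. The commit itself does not state which of these failed for Qwen/Qwen-7b, so the following is just a minimal sketch of the difference; the `SpecialTokens` class and the token values are hypothetical and not taken from the Qwen tokenizer.

from collections.abc import Mapping

# Hypothetical stand-in for a tokenizer attribute that behaves like a
# dict but is not a plain dict instance.
class SpecialTokens(Mapping):
    def __init__(self, data):
        self._data = dict(data)
    def __getitem__(self, key):
        return self._data[key]
    def __iter__(self):
        return iter(self._data)
    def __len__(self):
        return len(self._data)

vocab = {"hello": 0, "world": 1}                   # plain dict
added_vocab = SpecialTokens({"<|endoftext|>": 2})  # mapping, but not a dict

# Old form: raises TypeError, because dict.__or__ only accepts another dict
# (and the | operator for dicts requires Python 3.9+).
try:
    merged = vocab | added_vocab
except TypeError as exc:
    print("dict union failed:", exc)

# New form: ** unpacking accepts any mapping; later keys win on collision.
merged = {**vocab, **added_vocab}
reverse_vocab = {id_: tok for tok, id_ in merged.items()}
print(reverse_vocab)  # {0: 'hello', 1: 'world', 2: '<|endoftext|>'}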
amd-lalithnc