Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-30 08:42:00 +00:00
			
		
		
		
	llama : add option to render special/control tokens (#6807)
* make : fix common dep on llama.h
* llama : add option to render special tokens
* readme : add API change notice

ggml-ci

* swift : fix build
This commit is contained in:
@@ -229,7 +229,7 @@ private func tokenize(text: String, add_bos: Bool) -> [llama_token] {
 
 private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String? {
     var result = [CChar](repeating: 0, count: 8)
-    let nTokens = llama_token_to_piece(model, token, &result, Int32(result.count))
+    let nTokens = llama_token_to_piece(model, token, &result, Int32(result.count), false)
     if nTokens < 0 {
         let actualTokensCount = -Int(nTokens)
         result = .init(repeating: 0, count: actualTokensCount)
@@ -237,7 +237,8 @@ private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String
             model,
             token,
             &result,
-            Int32(result.count)
+            Int32(result.count),
+            false
         )
         assert(check == actualTokensCount)
     } else {
		Reference in New Issue
	
	Block a user
Georgi Gerganov