mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-30 08:42:00 +00:00 
			
		
		
		
	server : use common_token_to_piece instead of common_detokenize (#11740)
* server : use common_token_to_piece instead of common_detokenize This commit replaces the call to common_detokenize with common_token_to_piece in the populate_token_probs function. The motivation for this change is to avoid an issue where common_detokenize would remove the word boundary character for tokens, which caused a regression in the server-generated token probabilities. Resolves: https://github.com/ggerganov/llama.cpp/issues/11728 * squash! server : use common_token_to_piece instead of common_detokenize Use common_token_to_piece for post_sampling_probs as well.
This commit is contained in:
		| @@ -2279,7 +2279,7 @@ struct server_context { | ||||
|             for (size_t i = 0; i < std::min(max_probs, n_probs); i++) { | ||||
|                 result.probs.push_back({ | ||||
|                     cur_p->data[i].id, | ||||
|                     common_detokenize(ctx, {cur_p->data[i].id}, special), | ||||
|                     common_token_to_piece(ctx, cur_p->data[i].id, special), | ||||
|                     cur_p->data[i].p | ||||
|                 }); | ||||
|             } | ||||
| @@ -2301,7 +2301,7 @@ struct server_context { | ||||
|             for (size_t i = 0; i < std::min(n_vocab, n_probs); i++) { | ||||
|                 result.probs.push_back({ | ||||
|                     cur[i].id, | ||||
|                     common_detokenize(ctx, {cur[i].id}, special), | ||||
|                     common_token_to_piece(ctx, cur[i].id, special), | ||||
|                     cur[i].p | ||||
|                 }); | ||||
|             } | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Daniel Bevenius
					Daniel Bevenius