	llama : add DRY sampler (#9702)
* sampling : add DRY sampler (post-refactor)
* DRY: Trying to fix coauthors, removed unneeded line
* DRY: Fixed redundant code
* DRY: Fixed crash issue due to DRY being in chain but uninitialized
---------
Co-authored-by: l3utterfly <gc.pthzfoldr@gmail.com>
Co-authored-by: pi6am <34464159+pi6am@users.noreply.github.com>
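The crash fix in the message above points at a general rule of the post-refactor sampling API: a sampler must be fully initialized before it is added to the chain. Below is a minimal sketch of wiring DRY into a sampler chain; the chain helpers exist in llama.h, but the exact llama_sampler_init_dry signature and the parameter values shown are assumptions for illustration, not taken from this commit.

// Hedged sketch: assembling a sampler chain that includes DRY.
// The llama_sampler_init_dry signature and the numeric defaults below
// are assumptions and may differ from the version merged here.
#include "llama.h"

static struct llama_sampler * make_chain(const struct llama_model * model) {
    struct llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());

    // Sequence breakers reset DRY's repetition matching (assumed defaults).
    const char * seq_breakers[] = { "\n", ":", "\"", "*" };

    // dry_multiplier = 0.8, dry_base = 1.75, allowed_length = 2,
    // penalty_last_n = -1 (whole context) -- illustrative values only.
    llama_sampler_chain_add(chain, llama_sampler_init_dry(
        model, 0.8f, 1.75f, 2, -1, seq_breakers, 4));

    llama_sampler_chain_add(chain, llama_sampler_init_greedy());
    return chain;
}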
		@@ -1966,3 +1966,19 @@ int32_t llama_detokenize_impl(
    return total <= text_len_max ? total : -total;
}

std::string llama_detokenize(const struct llama_vocab & vocab, const std::vector<llama_token> & tokens, bool special) {
    std::string text;
    text.resize(std::max(text.capacity(), tokens.size()));
    int32_t n_chars = llama_detokenize_impl(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
    if (n_chars < 0) {
        text.resize(-n_chars);
        n_chars = llama_detokenize_impl(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
        GGML_ASSERT(n_chars <= (int32_t)text.size());  // whitespace trimming is performed after per-token detokenization
    }

    text.resize(n_chars);

    // NOTE: the original tokenizer decodes bytes after collecting the pieces.
    return text;
}
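The wrapper relies on a two-pass sizing convention: llama_detokenize_impl returns the number of bytes written, or the negated required size when the supplied buffer is too small, so the wrapper resizes once and retries. Callers therefore get a correctly sized string in a single call. A minimal usage sketch, assuming a populated llama_vocab and an already-tokenized prompt:

// Usage sketch for the wrapper above. `vocab` and `tokens` are assumed
// to come from an already-loaded model and a prior tokenization step.
std::string render(const struct llama_vocab & vocab,
                   const std::vector<llama_token> & tokens) {
    // special = false: control/special tokens are not rendered as text.
    return llama_detokenize(vocab, tokens, /*special =*/ false);
}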