mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-11-06 09:46:50 +00:00
Only remove the leading space when params.escape is set: remove the space character if possible (it is added back during tokenization), otherwise remove the added space token.
This commit is contained in:
@@ -344,11 +344,16 @@ struct llama_server_context
|
||||
|
||||
void loadInfill()
|
||||
{
|
||||
params.input_suffix.erase(0, params.input_suffix.find_first_not_of(" "));
|
||||
bool suff_rm_leading_spc = params.escape;
|
||||
if (suff_rm_leading_spc && params.input_suffix.find_first_of(" ") == 0 && params.input_suffix.size() > 1) {
|
||||
params.input_suffix.erase(0, 1);
|
||||
suff_rm_leading_spc = false;
|
||||
}
|
||||
|
||||
auto prefix_tokens = tokenize(params.input_prefix, false);
|
||||
auto suffix_tokens = tokenize(params.input_suffix, false);
|
||||
const int space_token = 29871;
|
||||
if (params.escape && suffix_tokens.size() > 1 && suffix_tokens[0] == space_token) {
|
||||
if (suff_rm_leading_spc && suffix_tokens[0] == space_token) {
|
||||
suffix_tokens.erase(suffix_tokens.begin());
|
||||
}
|
||||
prefix_tokens.insert(prefix_tokens.begin(), llama_token_prefix(ctx));
|
||||
|
||||
Reference in New Issue
Block a user