mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-10-27 08:21:30 +00:00
vocab : mark EOT token for Granite models (#16499)
* vocab : mark EOT token for Granite models

* sampling : fallback to EOS when EOT is not found
This commit is contained in:
@@ -2541,8 +2541,13 @@ static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_
     if (n_non_eog == 0) {
         cur_p->size = 1;
         cur_p->data[0].id = ctx->vocab->token_eot();
+        if (cur_p->data[0].id == LLAMA_TOKEN_NULL) {
+            cur_p->data[0].id = ctx->vocab->token_eos();
+        }
         cur_p->data[0].logit = 1.0f;

+        GGML_ASSERT(cur_p->data[0].id != LLAMA_TOKEN_NULL);
+
         return;
     }

@@ -2171,6 +2171,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                         || t.first == "<|end|>"
                         || t.first == "<end_of_turn>"
                         || t.first == "<|endoftext|>"
+                        || t.first == "<|end_of_text|>" // granite
                         || t.first == "<EOT>"
                         || t.first == "_<EOT>"
                         || t.first == "<|end▁of▁sentence|>" // DeepSeek
Reference in New Issue
Block a user