llama : fix handling of "future" tokens when loading sessions

Georgi Gerganov
2023-10-03 18:29:22 +03:00
parent 0f332a9104
commit 337120cc0d
6 changed files with 41 additions and 40 deletions


@@ -448,7 +448,7 @@ struct llama_server_context
     n_past = common_part(embd, prompt_tokens);
     // since #3228 we now have to manually manage the KV cache
-    llama_kv_cache_seq_rm(ctx, 0, n_past, params.n_ctx);
+    llama_kv_cache_seq_rm(ctx, 0, n_past, -1);
     embd = prompt_tokens;
     if (n_past == num_prompt_tokens)
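
The change passes -1 as the final argument so that the eviction covers everything from n_past to the end of the sequence instead of being tied to params.n_ctx; this keeps stale "future" tokens from lingering in the KV cache after a shorter prompt is reused. A minimal sketch of this prompt-reuse path, assuming the llama.cpp C API as it existed around this commit (llama_kv_cache_seq_rm with a negative p1 meaning "to the end") and a common_part helper paraphrased from the server example:

```cpp
#include "llama.h"
#include <vector>

// length of the shared prefix between the cached tokens and the new prompt
// (paraphrased from the server example's common_part helper)
static size_t common_part(const std::vector<llama_token> & a,
                          const std::vector<llama_token> & b) {
    size_t i = 0;
    while (i < a.size() && i < b.size() && a[i] == b[i]) {
        i++;
    }
    return i;
}

// hypothetical helper illustrating the fixed call
void reuse_prompt(llama_context * ctx,
                  std::vector<llama_token> & embd,                 // tokens already in the KV cache
                  const std::vector<llama_token> & prompt_tokens)  // new prompt
{
    // keep only the prefix that matches the new prompt
    const size_t n_past = common_part(embd, prompt_tokens);

    // evict all cells of sequence 0 at positions >= n_past;
    // p1 = -1 means "up to the end of the sequence", so no stale
    // "future" tokens survive into the next evaluation
    llama_kv_cache_seq_rm(ctx, 0, (llama_pos) n_past, -1);

    embd = prompt_tokens;
}
```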