llama : fix handling of "future" tokens when loading sessions

Georgi Gerganov
2023-10-03 18:29:22 +03:00
parent 0f332a9104
commit 337120cc0d
6 changed files with 41 additions and 40 deletions


@@ -543,6 +543,9 @@ int main(int argc, char ** argv) {
                 if (i > 0) {
                     embd.erase(embd.begin(), embd.begin() + i);
                 }
+
+                // remove any "future" tokens that we might have inherited from the session from the KV cache
+                llama_kv_cache_tokens_rm(ctx, n_past, -1);
             }
 
             // evaluate tokens in batches
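For readers without the full file: the hunk sits in the session-reuse path of the main example, where a saved KV cache is matched against the new prompt. Below is a minimal sketch of that logic, assuming the llama.cpp C API of this period (llama_load_session_file, llama_kv_cache_tokens_rm); the helper name reuse_session_prefix, the prompt vector embd_inp, and the session path are illustrative, not taken from the commit.

#include <vector>
#include "llama.h"

// Restore a saved session and return n_past, the number of cached tokens
// that are still valid for the new prompt.
static int reuse_session_prefix(llama_context * ctx,
                                const std::vector<llama_token> & embd_inp,
                                const char * path_session, int n_ctx) {
    std::vector<llama_token> session_tokens(n_ctx);
    size_t n_token_count = 0;

    // restore the saved KV cache and token list; on failure, start fresh
    if (!llama_load_session_file(ctx, path_session,
                                 session_tokens.data(), session_tokens.size(),
                                 &n_token_count)) {
        return 0;
    }
    session_tokens.resize(n_token_count);

    // n_past = length of the prefix shared by the session and the new prompt;
    // only these positions of the restored KV cache remain usable
    int n_past = 0;
    while (n_past < (int) session_tokens.size() &&
           n_past < (int) embd_inp.size() &&
           session_tokens[n_past] == embd_inp[n_past]) {
        n_past++;
    }

    // the fix: the session may have cached tokens *past* the shared prefix;
    // evict those "future" cells ([n_past, end)) so they cannot leak into
    // attention when the remaining prompt tokens are decoded
    llama_kv_cache_tokens_rm(ctx, n_past, -1);

    return n_past;
}

Without the final call, a session longer than the matching prefix leaves stale KV cells at positions >= n_past, and the subsequent batched evaluation would silently attend to them.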