mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-13 10:57:15 +00:00)
server : handle failures to restore host cache (#17078)
* server : handle failures to restore host cache
* server : add tests for the prompt cache
@@ -1690,6 +1690,9 @@ struct server_slot {
         bool res = prompt_cache.load(prompt, tokens, ctx, id);
         if (!res) {
             SLT_WRN(*this, "%s", "failed to load prompt from cache\n");
+
+            llama_memory_seq_rm(llama_get_memory(ctx), id, -1, -1);
+            prompt.tokens.clear();
         }
     }
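For illustration, here is a minimal self-contained sketch of the fallback pattern this hunk implements: when the host-cache restore fails, the slot discards its stale prompt state so the prompt is reprocessed from scratch. All types and names below are hypothetical stand-ins for the server internals; the real code additionally calls llama_memory_seq_rm(llama_get_memory(ctx), id, -1, -1) to wipe the sequence's memory cells, which a toy example cannot do without a loaded model.

// Minimal sketch of the "restore failed -> reset the slot" pattern.
// All names here are hypothetical stand-ins for the server internals;
// only the comments reference the real llama.h / server calls.
#include <cstdio>
#include <vector>

struct slot_prompt {
    std::vector<int> tokens; // cached prompt tokens for this slot
};

// stand-in for prompt_cache.load(): returns false when the host-cache
// entry cannot be restored (e.g. truncated file, deserialization error)
static bool cache_load(slot_prompt & prompt, bool simulate_failure) {
    if (simulate_failure) {
        return false;
    }
    prompt.tokens = {1, 2, 3};
    return true;
}

int main() {
    slot_prompt prompt;
    prompt.tokens = {9, 9, 9}; // stale tokens from a previous prompt

    const bool res = cache_load(prompt, /*simulate_failure=*/true);
    if (!res) {
        fprintf(stderr, "failed to load prompt from cache\n");
        // in the server this is where llama_memory_seq_rm(..., id, -1, -1)
        // wipes the sequence's memory cells; here we only drop stale tokens
        prompt.tokens.clear();
    }

    // an empty token list forces the slot to reprocess the prompt from scratch
    printf("cached tokens after failed restore: %zu\n", prompt.tokens.size());
    return 0;
}

The key invariant is that a failed restore must leave both the model memory and the slot's cached token list empty, so the server falls back to a cold prompt evaluation instead of decoding against half-restored state.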