kv-cache : fix find_slot to not search for continuous slot (#15638)
ggml-ci
@@ -540,7 +540,7 @@ llama_kv_cache::slot_info_vec_t llama_kv_cache::prepare(const std::vector<llama_
     for (const auto & ubatch : ubatches) {
         // only find a suitable slot for the ubatch. don't modify the cells yet
-        const auto sinfo_new = find_slot(ubatch, true);
+        const auto sinfo_new = find_slot(ubatch, false);
         if (sinfo_new.empty()) {
             success = false;
             break;
         }
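The second argument to find_slot selects between requiring one contiguous run of free KV cells and gathering free cells from anywhere in the cache. A fragmented cache can fail the continuous search even when enough free cells exist in total, which is what passing false here avoids. Below is a minimal sketch of that distinction, not the actual llama.cpp implementation: the cell bookkeeping is reduced to one used flag per cell, and slot_info_sketch / find_slot_sketch are illustrative stand-ins for the real structures.

#include <cstdint>
#include <vector>

// simplified stand-in for the slot info: the cell indices chosen for the ubatch
struct slot_info_sketch {
    std::vector<uint32_t> idxs;

    bool empty() const { return idxs.empty(); }
};

// cont == true  : require n_tokens adjacent free cells (continuous slot)
// cont == false : gather any n_tokens free cells, wherever they are
static slot_info_sketch find_slot_sketch(const std::vector<bool> & used, uint32_t n_tokens, bool cont) {
    slot_info_sketch res;

    const uint32_t n = (uint32_t) used.size();

    if (cont) {
        // scan for a run of n_tokens consecutive free cells
        uint32_t run = 0;
        for (uint32_t i = 0; i < n; ++i) {
            run = used[i] ? 0 : run + 1;
            if (run == n_tokens) {
                for (uint32_t j = i + 1 - n_tokens; j <= i; ++j) {
                    res.idxs.push_back(j);
                }
                return res;
            }
        }

        return {}; // no contiguous block of n_tokens free cells
    }

    // collect free cells regardless of adjacency
    for (uint32_t i = 0; i < n && (uint32_t) res.idxs.size() < n_tokens; ++i) {
        if (!used[i]) {
            res.idxs.push_back(i);
        }
    }

    if ((uint32_t) res.idxs.size() < n_tokens) {
        return {}; // not enough free cells in total
    }

    return res;
}

For example, with cells {free, used, free, used, free} and n_tokens == 3, the cont == true search fails while cont == false returns indices {0, 2, 4}. Writing keys and values into such scattered indices depends on row-scatter support (in llama.cpp this is backed by ggml_set_rows), which is what makes the non-continuous path safe for prepare() to use.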