examples : replace llama_kv_cache_seq_* with llama_past_seq_*
@@ -455,7 +455,7 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
         const auto t_start = std::chrono::high_resolution_clock::now();
 
         // clear the KV cache
-        llama_kv_cache_clear(ctx);
+        llama_past_clear(ctx);
 
         for (int j = 0; j < num_batches; ++j) {
             const int batch_start = start + j * n_batch;
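
For readers following the rename, here is a minimal sketch of the per-chunk pattern the hunk above modifies, using only identifiers visible in the diff context (ctx, num_batches, start, n_batch from compute_imatrix). It assumes llama_past_clear is a drop-in replacement for llama_kv_cache_clear, as the commit title indicates for the rest of the llama_kv_cache_seq_* family; batch construction and decoding are elided.

    // before each chunk, drop all cached state so chunks are independent
    llama_past_clear(ctx);  // was: llama_kv_cache_clear(ctx)

    // process the chunk in n_batch-sized pieces
    for (int j = 0; j < num_batches; ++j) {
        const int batch_start = start + j * n_batch;
        // ... build a batch starting at batch_start, then llama_decode(ctx, batch) ...
    }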