mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-11-01 09:01:57 +00:00
kv-cache : support layer reuse (#15504)
* kv-cache : support layer reuse (ggml-ci)
* cont : update comments [no ci]
This commit is contained in:
@@ -22,9 +22,26 @@ llama_kv_cache_iswa::llama_kv_cache_iswa(
                 uint32_t kv_size,
                 uint32_t n_seq_max,
                 uint32_t n_ubatch,
-                uint32_t n_pad) : hparams(model.hparams), unified(unified) {
-    llama_kv_cache::layer_filter_cb filter_base = [&](int32_t il) { return !model.hparams.is_swa(il); };
-    llama_kv_cache::layer_filter_cb filter_swa  = [&](int32_t il) { return  model.hparams.is_swa(il); };
+                uint32_t n_pad,
+        const layer_filter_cb & filter,
+        const layer_reuse_cb  & reuse) : hparams(model.hparams), unified(unified) {
+
+    // chain filters
+    const layer_filter_cb filter_base = [&](int32_t il) {
+        if (filter && !filter(il)) {
+            return false;
+        }
+
+        return !model.hparams.is_swa(il);
+    };
+
+    const layer_filter_cb filter_swa = [&](int32_t il) {
+        if (filter && !filter(il)) {
+            return false;
+        }
+
+        return model.hparams.is_swa(il);
+    };
 
     const uint32_t size_base = kv_size;
 
@@ -41,16 +58,16 @@ llama_kv_cache_iswa::llama_kv_cache_iswa(
     LLAMA_LOG_INFO("%s: creating non-SWA KV cache, size = %u cells\n", __func__, size_base);
 
     kv_base = std::make_unique<llama_kv_cache>(
-            model, std::move(filter_base), type_k, type_v,
+            model, type_k, type_v,
             v_trans, offload, unified, size_base, n_seq_max, n_pad,
-            0, LLAMA_SWA_TYPE_NONE);
+            0, LLAMA_SWA_TYPE_NONE, filter_base, reuse);
 
     LLAMA_LOG_INFO("%s: creating SWA KV cache, size = %u cells\n", __func__, size_swa);
 
     kv_swa = std::make_unique<llama_kv_cache>(
-            model, std::move(filter_swa), type_k, type_v,
+            model, type_k, type_v,
             v_trans, offload, unified, size_swa, n_seq_max, n_pad,
-            hparams.n_swa, hparams.swa_type);
+            hparams.n_swa, hparams.swa_type, filter_swa, reuse);
 }
 
 void llama_kv_cache_iswa::clear(bool data) {
Reference in New Issue
Block a user