fix: Use per-layer sizing everywhere in kv caches

Branch: GraniteFour

Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
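Review note: the new call sites assume llama_hparams grows per-layer overloads of n_embd_k_s() and n_embd_v_s(), so a hybrid model (attention plus recurrent layers, as targeted by the GraniteFour branch) can report a zero recurrent-state size for its attention-only layers. A minimal sketch of what such overloads could look like, assuming a per-layer recurrent_layer(il) predicate and Mamba-style state sizes; the real definitions in llama-hparams.cpp may differ:

    // Sketch only: per-layer recurrent state sizes.
    // recurrent_layer(il) is an assumed predicate marking SSM layers.
    uint32_t llama_hparams::n_embd_k_s(uint32_t il) const {
        if (!recurrent_layer(il)) {
            return 0; // attention-only layers carry no conv state
        }
        // corresponds to Mamba's conv_states size per sequence
        return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner;
    }

    uint32_t llama_hparams::n_embd_v_s(uint32_t il) const {
        if (!recurrent_layer(il)) {
            return 0; // attention-only layers carry no ssm state
        }
        // corresponds to Mamba's ssm_states size per sequence
        return ssm_d_state * ssm_d_inner;
    }
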
@@ -69,8 +69,8 @@ llama_kv_cache_recurrent::llama_kv_cache_recurrent(
             continue;
         }
 
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s(i);
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s(i);
 
         const char * dev_name = "CPU";
@@ -754,7 +754,7 @@ void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std
     // Iterate and write all the keys first, each row is a cell
     // Get whole range at a time
     for (uint32_t il = 0; il < n_layer; ++il) {
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);
 
         // Write key type
         const int32_t k_type_i = (int32_t)k_l[il]->type;
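The per-layer size matters here because the serialized row size is derived from it just below this hunk: each cell's key row is sized from n_embd_k_gqa. A sketch of that write path, assuming the usual ggml_row_size-based layout (the surrounding file is the source of truth):

    // Sketch: one key row per cell, sized per layer. With per-layer
    // sizing, an attention-only layer in a hybrid model
    // (n_embd_k_s(il) == 0) serializes a smaller row than an SSM layer.
    const uint64_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa);
    io.write(&k_size_row, sizeof(k_size_row));
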
@@ -774,7 +774,7 @@ void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std
 
     if (!v_trans) {
         for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
             // Write value type
             const int32_t v_type_i = (int32_t)v_l[il]->type;
@@ -795,7 +795,7 @@ void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std
         // When v is transposed, we also need the element size and get the element ranges from each row
         const uint32_t kv_size = size;
         for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
             // Write value type
             const int32_t v_type_i = (int32_t)v_l[il]->type;
@@ -942,7 +942,7 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce
 
     // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
     for (uint32_t il = 0; il < n_layer; ++il) {
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);
 
         // Read type of key
         int32_t k_type_i_ref;
@@ -970,7 +970,7 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce
 
     if (!v_trans) {
         for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
             // Read type of value
             int32_t v_type_i_ref;
@@ -998,7 +998,7 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce
     } else {
         // For each layer, read the values for each cell (transposed)
         for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
             // Read type of value
             int32_t v_type_i_ref;

@@ -68,8 +68,8 @@ llama_kv_cache_unified::llama_kv_cache_unified(
             continue;
         }
 
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
         const char * dev_name = "CPU";
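As in the recurrent cache, these sizes feed directly into tensor allocation, so the per-layer form changes how much memory each layer's K/V buffers get. A sketch of the allocation this constructor performs, with shapes assumed from the computed sizes (the actual context and buffer handling is more involved):

    // Sketch: per-layer K/V tensors sized by the per-layer embedding widths.
    ggml_tensor * k = ggml_new_tensor_2d(ctx, type_k, n_embd_k_gqa, kv_size);
    ggml_tensor * v = ggml_new_tensor_2d(ctx, type_v, n_embd_v_gqa, kv_size);
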
@@ -1430,7 +1430,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;
 
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);
 
         // Write key type
         const int32_t k_type_i = (int32_t)layer.k->type;
@@ -1452,7 +1452,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::
         for (const auto & layer : layers) {
             const uint32_t il = layer.il;
 
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
             // Write value type
             const int32_t v_type_i = (int32_t)layer.v->type;
@@ -1476,7 +1476,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::
         for (const auto & layer : layers) {
             const uint32_t il = layer.il;
 
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
             // Write value type
             const int32_t v_type_i = (int32_t)layer.v->type;
@@ -1621,7 +1621,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;
 
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);
 
         // Read type of key
         int32_t k_type_i_ref;
@@ -1651,7 +1651,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell
         for (const auto & layer : layers) {
             const uint32_t il = layer.il;
 
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
             // Read type of value
             int32_t v_type_i_ref;
@@ -1681,7 +1681,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell
         for (const auto & layer : layers) {
             const uint32_t il = layer.il;
 
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
             // Read type of value
             int32_t v_type_i_ref;
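On the read side the recomputed row size doubles as a consistency check against the serialized state, which is why the write and read paths must switch to per-layer sizing together. A sketch of that check, assuming read_to from the llama_io_read_i interface named in the hunk headers (logging elided):

    // Sketch: reject a saved state whose stored row size does not match
    // the per-layer size recomputed from hparams.
    uint64_t k_size_row_ref;
    io.read_to(&k_size_row_ref, sizeof(k_size_row_ref));
    const size_t k_size_row = ggml_row_size(layer.k->type, n_embd_k_gqa);
    if (k_size_row != k_size_row_ref) {
        return false; // mismatched layer sizing between save and load
    }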
 