#pragma once

#include "llama-batch.h"
#include "llama-graph.h"
#include "llama-kv-cells.h"
#include "llama-memory.h"

#include <cassert>
#include <functional>
#include <unordered_map>
#include <vector>

struct llama_cparams;
struct llama_hparams;
struct llama_model;
struct llama_context;

//
// llama_kv_cache_unified
//

class llama_kv_cache_unified : public llama_memory_i {
public:
    static uint32_t get_padding(const llama_cparams & cparams);

    // this callback is used to filter out layers that should not be included in the cache
    using layer_filter_cb = std::function<bool(int32_t il)>;

    struct defrag_info {
        bool empty() const {
            return ids.empty();
        }

        // contains information about which cell moves where:
        //  - cell i moves to ids[i]
        //  - if ids[i] == i || ids[i] == ids.size(), then cell i is not moved
        std::vector<uint32_t> ids;
    };
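
    // example of a defrag_info (illustrative values only): with 4 cells and ids = { 0, 4, 1, 2 },
    //   cell 0 stays in place (ids[0] == 0), cell 1 is not moved (ids[1] == ids.size()),
    //   cell 2 moves to cell 1 and cell 3 moves to cell 2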

    struct stream_copy_info {
        bool empty() const {
            assert(ssrc.size() == sdst.size());
            return ssrc.empty();
        }

        std::vector<uint32_t> ssrc;
        std::vector<uint32_t> sdst;
    };

    // for each ubatch, create a slot_info that contains information about where the ubatch should be inserted in the
    //   KV cells. for example, cell indices for each token, such that: token[i] -> goes to cells[idxs[i]]
    struct slot_info {
        // data for ggml_set_rows
        using idx_vec_t = std::vector<uint32_t>;

        // number of streams: ns = s1 - s0 + 1
        llama_seq_id s0;
        llama_seq_id s1;

        std::vector<llama_seq_id> strm; // [ns]
        std::vector<idx_vec_t>    idxs; // [ns]

        uint32_t head() const {
            GGML_ASSERT(idxs.size() == 1);
            GGML_ASSERT(!idxs[0].empty());

            return idxs[0][0];
        }

        void resize(size_t n) {
            strm.resize(n);
            idxs.resize(n);
        }

        size_t size() const {
            GGML_ASSERT(idxs.size() == strm.size());
            GGML_ASSERT(!idxs.empty());

            return idxs[0].size();
        }

        size_t n_stream() const {
            return strm.size();
        }

        bool empty() const {
            return idxs.empty();
        }

        void clear() {
            idxs.clear();
        }
    };
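
    // example of a slot_info (illustrative values only): a ubatch of 3 tokens assigned to a single
    //   stream could have s0 = 0, s1 = 0, strm = { 0 } and idxs = { { 10, 11, 12 } },
    //   i.e. token i of the ubatch is stored in KV cell idxs[0][i] of stream 0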

    using slot_info_vec_t = std::vector<slot_info>;

    llama_kv_cache_unified(
            const llama_model &  model,
              layer_filter_cb && filter,
                    ggml_type    type_k,
                    ggml_type    type_v,
                         bool    v_trans,
                         bool    offload,
                         bool    unified,
                     uint32_t    kv_size,
                     uint32_t    n_seq_max,
                     uint32_t    n_pad,
                     uint32_t    n_swa,
               llama_swa_type    swa_type);

    ~llama_kv_cache_unified() = default;

    //
    // llama_memory_i
    //

    llama_memory_context_ptr init_batch(
            llama_batch_allocr & balloc,
            uint32_t n_ubatch,
            bool embd_all) override;

    llama_memory_context_ptr init_full() override;

    llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override;

    bool get_can_shift() const override;

    void clear(bool data) override;

    bool seq_rm  (llama_seq_id seq_id,                              llama_pos p0, llama_pos p1) override;
    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
    void seq_keep(llama_seq_id seq_id)                                                          override;
    void seq_add (llama_seq_id seq_id,                              llama_pos p0, llama_pos p1, llama_pos shift) override;
    void seq_div (llama_seq_id seq_id,                              llama_pos p0, llama_pos p1, int d) override;

    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
    llama_pos seq_pos_max(llama_seq_id seq_id) const override;

    // state write/load

    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1)       override;

    //
    // llama_kv_cache_unified specific API
    //

    uint32_t get_size()     const;
    uint32_t get_n_stream() const;

    bool get_has_shift() const;

    //
    // graph_build API
    //

    uint32_t get_n_kv() const;

    // TODO: temporary
    bool get_supports_set_rows() const;

    // get views of the current state of the cache
    ggml_tensor * get_k(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const;
    ggml_tensor * get_v(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const;

    // store k_cur and v_cur in the cache based on the provided head location
    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il, const slot_info & sinfo) const;
    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il, const slot_info & sinfo) const;
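
    // note: as suggested by the API, the tensors returned by cpy_k()/cpy_v() represent the operation
    //   that writes k_cur/v_cur into the cache view (via ggml_set_rows() when supported - see
    //   get_supports_set_rows()); the caller is expected to add the result to the compute graph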

    //
    // preparation API
    //

    // find places for the provided ubatches in the cache, returns the slot infos
    // return empty vector on failure
    slot_info_vec_t prepare(const std::vector<llama_ubatch> & ubatches);

    bool update(llama_context * lctx, bool do_shift, const defrag_info & dinfo, const stream_copy_info & sc_info);

    // find a slot of kv cells that can hold the ubatch
    // if cont == true, then the slot must be contiguous
    // return empty slot_info on failure
    slot_info find_slot(const llama_ubatch & ubatch, bool cont) const;

    // emplace the ubatch context into slot: [sinfo.idxs[0...ubatch.n_tokens - 1]]
    void apply_ubatch(const slot_info & sinfo, const llama_ubatch & ubatch);
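
    // illustrative sketch of how the preparation API fits together - roughly what init_batch()
    //   and the resulting memory context do:
    //
    //     slot_info_vec_t sinfos = prepare(ubatches);   // find a slot for every ubatch up front
    //     if (sinfos.empty()) {
    //         // the batch does not fit -> the caller has to split it differently or fail
    //     }
    //     ...
    //     apply_ubatch(sinfos[i], ubatches[i]);          // later, commit ubatch i into its slot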

    //
    // input API
    //

    ggml_tensor * build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;
    ggml_tensor * build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;

    void set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const;
    void set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const;

    void set_input_k_shift(ggml_tensor * dst) const;

    void set_input_kq_mask   (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;
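
    // note: as the names suggest, the build_input_*() methods create the graph input tensors
    //   (e.g. the target cell indices for the current ubatch), while the set_input_*() methods
    //   fill those tensors (and the KQ mask / position buckets) right before graph computation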

private:
    const llama_model & model;
    const llama_hparams & hparams;

    struct kv_layer {
        // layer index in the model
        // note: can be different from the layer index in the KV cache
        uint32_t il;

        ggml_tensor * k;
        ggml_tensor * v;

        std::vector<ggml_tensor *> k_stream;
        std::vector<ggml_tensor *> v_stream;
    };

    bool v_trans = true;  // the value tensor is transposed

    const uint32_t n_seq_max = 1;
    const uint32_t n_stream  = 1;

    // required padding
    const uint32_t n_pad = 1;

    // SWA
    const uint32_t n_swa = 0;

    // env: LLAMA_KV_CACHE_DEBUG
    int debug = 0;

    // env: LLAMA_SET_ROWS (temporary)
    // ref: https://github.com/ggml-org/llama.cpp/pull/14285
    bool supports_set_rows = false;

    const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;

    std::vector<ggml_context_ptr>        ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;

    // the current index from where we start searching for a free slot in the ring buffer of KV cells (see find_slot())
    // note: this is not part of the KV state and it's only used to speed up the find_slot() method
    std::vector<uint32_t> v_heads;

    std::vector<llama_kv_cells_unified> v_cells;

    // maps from a sequence id to a stream id
    std::vector<uint32_t> seq_to_stream;

    // pending stream copies that will be applied during the next update
    stream_copy_info sc_info;

    std::vector<kv_layer> layers;

    // model layer id -> KV cache layer id
    std::unordered_map<int32_t, int32_t> map_layer_ids;

    // return non-empty vector if cells have been moved
    defrag_info defrag_prepare(int32_t n_max_nodes) const;

    size_t total_size() const;

    size_t size_k_bytes() const;
    size_t size_v_bytes() const;

    bool is_masked_swa(llama_pos p0, llama_pos p1) const;

    ggml_tensor * build_rope_shift(
            const llama_cparams & cparams,
                   ggml_context * ctx,
                    ggml_tensor * cur,
                    ggml_tensor * shift,
                    ggml_tensor * factors,
                          float   freq_base,
                          float   freq_scale) const;

    ggml_cgraph * build_graph_shift(
               llm_graph_result * res,
                  llama_context * lctx) const;

    ggml_cgraph * build_graph_defrag(
               llm_graph_result * res,
                  llama_context * lctx,
              const defrag_info & dinfo) const;

    struct cell_ranges_t {
        uint32_t strm;

        std::vector<std::pair<uint32_t, uint32_t>> data; // ranges, from inclusive, to exclusive
    };

    void state_write_meta(llama_io_write_i & io, const cell_ranges_t & cr, llama_seq_id seq_id = -1) const;
    void state_write_data(llama_io_write_i & io, const cell_ranges_t & cr) const;

    bool state_read_meta(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
    bool state_read_data(llama_io_read_i & io, uint32_t strm, uint32_t cell_count);
};

class llama_kv_cache_unified_context : public llama_memory_context_i {
public:
    // some shorthands
    using slot_info_vec_t  = llama_kv_cache_unified::slot_info_vec_t;
    using defrag_info      = llama_kv_cache_unified::defrag_info;
    using stream_copy_info = llama_kv_cache_unified::stream_copy_info;

    // used for errors
    llama_kv_cache_unified_context(llama_memory_status status);

    // used to create a full-cache context
    llama_kv_cache_unified_context(
            llama_kv_cache_unified * kv);

    // used to create an update context
    llama_kv_cache_unified_context(
            llama_kv_cache_unified * kv,
            llama_context * lctx,
            bool do_shift,
            defrag_info dinfo,
            stream_copy_info sc_info);

    // used to create a batch processing context from a batch
    llama_kv_cache_unified_context(
            llama_kv_cache_unified * kv,
            slot_info_vec_t sinfos,
            std::vector<llama_ubatch> ubatches);

    virtual ~llama_kv_cache_unified_context();

    //
    // llama_memory_context_i
    //

    bool next()  override;
    bool apply() override;

    llama_memory_status  get_status() const override;
    const llama_ubatch & get_ubatch() const override;
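
    // illustrative sketch (non-normative) of how a consumer typically drives this context,
    //   assuming the usual llama_memory_context_i contract:
    //
    //     while (true) {
    //         const llama_ubatch & ub = mctx->get_ubatch();  // the ubatch to process next
    //         mctx->apply();                                 // apply pending updates / slot assignment
    //         // ... build and compute the graph for ub ...
    //         if (!mctx->next()) {
    //             break;                                     // no more ubatches
    //         }
    //     }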

    //
    // llama_kv_cache_unified_context specific API
    //

    uint32_t get_n_kv() const;

    // TODO: temporary
    bool get_supports_set_rows() const;

    // get views of the current state of the cache
    ggml_tensor * get_k(ggml_context * ctx, int32_t il) const;
    ggml_tensor * get_v(ggml_context * ctx, int32_t il) const;

    // store k_cur and v_cur in the cache based on the provided head location
    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il) const;
    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il) const;

    ggml_tensor * build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;
    ggml_tensor * build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;

    void set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const;
    void set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const;

    void set_input_k_shift   (ggml_tensor * dst) const;
    void set_input_kq_mask   (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;

private:
    llama_memory_status status;

    llama_kv_cache_unified * kv;
    llama_context * lctx;

    //
    // update context
    //

    bool do_shift = false;

    defrag_info dinfo;

    stream_copy_info sc_info;

    //
    // batch processing context
    //

    // the index of the cur ubatch to process
    size_t i_cur = 0;

    slot_info_vec_t sinfos;

    std::vector<llama_ubatch> ubatches;

    //
    // data needed for building the compute graph for the current ubatch:
    //

    // a heuristic, to avoid attending to the full cache if it is not yet utilized
    // as the cache gets filled, the benefit from this heuristic disappears
    int32_t n_kv;
};