Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-02 09:12:03 +00:00).
fix: Remove errant virtual destructor leftover from previous impl attempt

Branch: HybridRecurrentCache
Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

This commit is contained in:
@@ -243,7 +243,7 @@ public:
         cparams(cparams),
         kv_state(kv_state) {
     }
-    virtual ~llm_graph_input_attn_kv_unified() = default;
+    ~llm_graph_input_attn_kv_unified() = default;

     void set_input(const llama_ubatch * ubatch) override;

Reference in New Issue
Block a user