From 7ba463b38c5d6e3eae83bab3093022997deb39f6 Mon Sep 17 00:00:00 2001
From: Gabe Goodhart
Date: Thu, 12 Jun 2025 14:01:28 -0600
Subject: [PATCH] fix: Remove llama_model_is_hybrid_recurrent public API

https://github.com/ggml-org/llama.cpp/pull/13979#discussion_r2141728423

Branch: HybridRecurrentCache

Signed-off-by: Gabe Goodhart
---
 include/llama.h     | 3 ---
 src/llama-model.cpp | 4 ----
 2 files changed, 7 deletions(-)

diff --git a/include/llama.h b/include/llama.h
index 10f58b278d..635508b10f 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -572,9 +572,6 @@ extern "C" {
     // Returns true if the model is recurrent (like Mamba, RWKV, etc.)
     LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model);
 
-    // Returns true if the model is hybrid-recurrent (like Jamba, Bamba, etc.)
-    LLAMA_API bool llama_model_is_hybrid_recurrent(const struct llama_model * model);
-
     // Returns 0 on success
     LLAMA_API uint32_t llama_model_quantize(
             const char * fname_inp,
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 6e9dd53223..971d1df199 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -14405,10 +14405,6 @@ bool llama_model_is_recurrent(const llama_model * model) {
     return llm_arch_is_recurrent(model->arch);
 }
 
-bool llama_model_is_hybrid_recurrent(const llama_model * model) {
-    return llm_arch_is_hybrid_recurrent(model->arch);
-}
-
 const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {
     return model->tensors_by_name;
 }
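
Note for reviewers: the removed function was a thin public wrapper, so the capability check itself is unaffected; internal code keeps calling the arch-level helper directly. Below is a minimal sketch of such an internal call site, assuming this branch's internal headers (llm_arch_is_hybrid_recurrent() as visible from src/llama-model.cpp, and the llama_model definition from src/llama-model.h); the helper name needs_hybrid_recurrent_cache is hypothetical and for illustration only.

// Minimal sketch, not part of the patch: an internal call site that
// replaces the removed public API after this change.
#include "llama-arch.h"   // assumption: declares llm_arch_is_hybrid_recurrent() on this branch
#include "llama-model.h"  // defines struct llama_model with its public `arch` member

// Hypothetical internal helper: same logic as the removed public wrapper,
// now kept private to src/ instead of being exported via include/llama.h.
static bool needs_hybrid_recurrent_cache(const llama_model & model) {
    return llm_arch_is_hybrid_recurrent(model.arch);
}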