diff --git a/include/llama.h b/include/llama.h
index 10f58b278d..635508b10f 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -572,9 +572,6 @@ extern "C" {
     // Returns true if the model is recurrent (like Mamba, RWKV, etc.)
     LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model);
 
-    // Returns true if the model is hybrid-recurrent (like Jamba, Bamba, etc.)
-    LLAMA_API bool llama_model_is_hybrid_recurrent(const struct llama_model * model);
-
     // Returns 0 on success
     LLAMA_API uint32_t llama_model_quantize(
             const char * fname_inp,
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 6e9dd53223..971d1df199 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -14405,10 +14405,6 @@ bool llama_model_is_recurrent(const llama_model * model) {
     return llm_arch_is_recurrent(model->arch);
 }
 
-bool llama_model_is_hybrid_recurrent(const llama_model * model) {
-    return llm_arch_is_hybrid_recurrent(model->arch);
-}
-
 const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {
     return model->tensors_by_name;
 }