fix: Remove llama_model_is_hybrid_recurrent public API
https://github.com/ggml-org/llama.cpp/pull/13979#discussion_r2141728423

Branch: HybridRecurrentCache

Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
include/llama.h
@@ -572,9 +572,6 @@ extern "C" {
     // Returns true if the model is recurrent (like Mamba, RWKV, etc.)
     LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model);
 
-    // Returns true if the model is hybrid-recurrent (like Jamba, Bamba, etc.)
-    LLAMA_API bool llama_model_is_hybrid_recurrent(const struct llama_model * model);
-
     // Returns 0 on success
     LLAMA_API uint32_t llama_model_quantize(
             const char * fname_inp,

src/llama-model.cpp
@@ -14405,10 +14405,6 @@ bool llama_model_is_recurrent(const llama_model * model) {
     return llm_arch_is_recurrent(model->arch);
 }
 
-bool llama_model_is_hybrid_recurrent(const llama_model * model) {
-    return llm_arch_is_hybrid_recurrent(model->arch);
-}
-
 const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {
     return model->tensors_by_name;
 }
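Note: with llama_model_is_hybrid_recurrent removed from llama.h, external callers can only query plain recurrence through the public API; the hybrid check stays internal. A minimal sketch of what downstream code looks like after this change (the model path "model.gguf" and the surrounding setup are illustrative assumptions, not part of this commit):

// Sketch only: assumes a local GGUF file; uses the public API that remains
// in llama.h after this commit.
#include "llama.h"
#include <stdio.h>

int main(void) {
    llama_backend_init();

    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_model_load_from_file("model.gguf", mparams);
    if (model == NULL) {
        fprintf(stderr, "failed to load model\n");
        llama_backend_free();
        return 1;
    }

    // The hybrid-recurrent query is now internal-only (llm_arch_is_hybrid_recurrent);
    // the public header exposes just the plain recurrent check.
    printf("recurrent: %s\n", llama_model_is_recurrent(model) ? "yes" : "no");

    llama_model_free(model);
    llama_backend_free();
    return 0;
}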