Mirror of https://github.com/ggml-org/llama.cpp.git
rm unused MAMBA_CHUNK_SIZE
convert_hf_to_gguf.py

@@ -6672,7 +6672,6 @@ class FalconH1Model(Mamba2Model):
 
         # Add Falcon Mamba2 specific configuration
-        self.gguf_writer.add_uint32("falcon_h1.ssm.mamba_chunk_size", self.hparams["mamba_chunk_size"])
         self.gguf_writer.add_uint32("falcon_h1.attention.head_dim", self.hparams["head_dim"])
         self.gguf_writer.add_uint32("falcon_h1.ssm.mamba_d_ssm", self.hparams["mamba_d_ssm"])
         self.gguf_writer.add_uint32("falcon_h1.num_attention_heads", self.find_hparam(["num_attention_heads"]))
 
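For context, the writer side of these keys is the `gguf` Python package that convert_hf_to_gguf.py builds on: each add_uint32() call stores one architecture-prefixed key/value pair in the GGUF header. A minimal sketch of what the surviving calls produce, with a made-up file name and made-up values standing in for the HF checkpoint's hparams (none of these numbers come from this commit):

from gguf import GGUFWriter

# Illustrative only: arch string and values are placeholders.
writer = GGUFWriter("falcon-h1-metadata.gguf", "falcon-h1")

# After this commit only these Falcon-H1 uint32 keys are emitted;
# "falcon_h1.ssm.mamba_chunk_size" is no longer written.
writer.add_uint32("falcon_h1.attention.head_dim", 64)
writer.add_uint32("falcon_h1.ssm.mamba_d_ssm", 1024)
writer.add_uint32("falcon_h1.num_attention_heads", 32)

# Metadata-only file: header and KV data, no tensors.
writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.close()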
src/llama-arch.h

@@ -165,7 +165,6 @@ enum llm_kv {
     LLM_KV_FALCON_H1_USE_MLP,
     LLM_KV_FALCON_H1_MAMBA_NORM_BEFORE_GATE,
     LLM_KV_FALCON_H1_MAMBA_RMS_NORM,
-    LLM_KV_FALCON_H1_MAMBA_CHUNK_SIZE,
 
     LLM_KV_ROPE_DIMENSION_COUNT,
     LLM_KV_ROPE_DIMENSION_SECTIONS,
src/llama-model.cpp

@@ -1563,7 +1563,6 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                 ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
                 ml.get_key(LLM_KV_SSM_GROUP_COUNT, hparams.ssm_n_group);
                 ml.get_key(LLM_KV_SSM_HEAD_DIM, hparams.ssm_head_dim);
-                ml.get_key(LLM_KV_FALCON_H1_MAMBA_CHUNK_SIZE, hparams.chunk_size);
 
                 // Falcon-H1 parameters
                 ml.get_key(LLM_KV_ATTN_HEAD_DIM, hparams.attn_head_dim);
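On the loader side, ml.get_key() looks the enum's string key up in the GGUF metadata, so once the converter stops writing falcon_h1.ssm.mamba_chunk_size and the loader stops reading it, the key drops out of the format entirely. A quick way to check that against a converted file, again a sketch using the `gguf` Python package and the hypothetical file from the previous snippet:

from gguf import GGUFReader

reader = GGUFReader("falcon-h1-metadata.gguf")

# Fields are keyed by their full GGUF name; files produced after this
# commit should no longer carry the chunk-size key at all.
assert "falcon_h1.ssm.mamba_chunk_size" not in reader.fields

# Reading back one of the keys the loader still consumes; for a scalar
# field, data[0] indexes the part holding the value.
field = reader.fields["falcon_h1.attention.head_dim"]
head_dim = int(field.parts[field.data[0]][0])
print("attention head_dim:", head_dim)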