mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-04 09:32:00 +00:00)
@@ -6669,14 +6669,6 @@ class FalconH1Model(Mamba2Model):
         ## Validation ##
         assert self.hparams.get("hidden_act") in [None, "silu"], "Only SILU activation supported"
         assert self.d_inner % d_head == 0, f"SSM inner size {self.d_inner} not a multiple of head dim {d_head}"
 
-
-        # Add Falcon Mamba2 specific configuration
-        self.gguf_writer.add_ssm_head_dim(self.hparams["mamba_d_head"])
-        self.gguf_writer.add_ssm_inner_size(self.hparams["mamba_d_ssm"])
-        self.gguf_writer.add_head_count(self.find_hparam(["num_attention_heads"]))
-        self.gguf_writer.add_key_length(self.hparams["head_dim"])
-        self.gguf_writer.add_value_length(self.hparams["head_dim"])
-
         self.gguf_writer.add_head_count_kv(self.find_hparam(["num_key_value_heads"], optional=True) or
             self.find_hparam(["num_attention_heads"]))
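The multi-line call kept by this hunk, add_head_count_kv(), relies on find_hparam(..., optional=True) returning None when num_key_value_heads is absent, so that num_attention_heads serves as the fallback KV head count. A minimal standalone sketch of that fallback pattern, assuming a plain dict of hyperparameters and a simplified helper rather than the actual convert_hf_to_gguf.py method:

# Sketch of the optional-hparam fallback used in the kept add_head_count_kv() call.
# The dict and helper below are simplified stand-ins, not the real converter code.
from typing import Any


def find_hparam(hparams: dict[str, Any], keys: list[str], optional: bool = False) -> Any:
    # Return the value of the first key present; None only when the lookup is optional.
    for key in keys:
        if key in hparams:
            return hparams[key]
    if optional:
        return None
    raise KeyError(f"could not find any of {keys}")


# Config without num_key_value_heads: fall back to num_attention_heads,
# i.e. the KV head count defaults to full multi-head attention.
hparams = {"num_attention_heads": 32}
n_head_kv = (find_hparam(hparams, ["num_key_value_heads"], optional=True)
             or find_hparam(hparams, ["num_attention_heads"]))
assert n_head_kv == 32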