Mirror of https://github.com/ggml-org/llama.cpp.git
more cleanups on python conversion;
@@ -6665,7 +6665,6 @@ class FalconH1Model(Mamba2Model):
         self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
         self.gguf_writer.add_key_length(self.hparams["head_dim"])
         self.gguf_writer.add_value_length(self.hparams["head_dim"])
-        self.gguf_writer.add_float64("falcon_h1.key_multiplier", self.hparams["key_multiplier"])
 
         ## Validation ##
         assert self.hparams.get("hidden_act") in [None, "silu"], "Only SILU activation supported"
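For context, the calls kept by this hunk all go through gguf-py's GGUFWriter, which turns HF hparams into GGUF key/value metadata; the dropped add_float64("falcon_h1.key_multiplier", ...) line is simply no longer emitted here. The snippet below is a minimal, standalone sketch of what those writer calls do, not the converter itself: the hparams values, the output filename, and the architecture string are hypothetical placeholders.

# Minimal sketch (not convert_hf_to_gguf.py): emit the same FalconH1 metadata
# fields shown in the diff using gguf-py's GGUFWriter. All concrete values,
# the output path, and the arch string are placeholders for illustration.
import gguf

hparams = {
    "rms_norm_eps": 1e-5,   # placeholder value
    "head_dim": 64,         # placeholder value
    "hidden_act": "silu",
}

# Same validation as in the converter: only SiLU is supported.
assert hparams.get("hidden_act") in [None, "silu"], "Only SILU activation supported"

writer = gguf.GGUFWriter("falcon-h1-metadata-only.gguf", arch="falcon-h1")
writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
writer.add_key_length(hparams["head_dim"])
writer.add_value_length(hparams["head_dim"])
# Note: no add_float64("falcon_h1.key_multiplier", ...) call here, matching
# the line removed by this commit.

writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.close()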