mirror of https://github.com/ggml-org/llama.cpp.git
	convert : fix missing ftype for gemma (#5690)
@@ -1803,6 +1803,7 @@ class GemmaModel(Model):
         self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
         self.gguf_writer.add_key_length(hparams["head_dim"])
         self.gguf_writer.add_value_length(hparams["head_dim"])
+        self.gguf_writer.add_file_type(self.ftype)
 
     def write_tensors(self):
         block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
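
Before this fix, GemmaModel never called add_file_type, so Gemma models converted to GGUF were missing the general.file_type metadata key. A minimal sketch of how one might check a converted file for that key, assuming the gguf-py package from this repository is installed (the file name gemma-f16.gguf is a hypothetical output path):

# Sketch: verify that a converted model carries general.file_type.
# Assumes gguf-py from this repo; "gemma-f16.gguf" is a hypothetical path.
from gguf import GGUFReader

reader = GGUFReader("gemma-f16.gguf")
field = reader.fields.get("general.file_type")
if field is None:
    print("general.file_type is missing (pre-fix behaviour)")
else:
    # Scalar fields store their value in parts, indexed by data.
    print("general.file_type =", int(field.parts[field.data[0]][0]))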
Jared Van Bortel