	convert-llama-h5-to-gguf.py : n_layer --> n_block
--- a/convert-llama-h5-to-gguf.py
+++ b/convert-llama-h5-to-gguf.py
@@ -76,7 +76,7 @@ gguf_writer.add_name(last_dir)
 gguf_writer.add_architecture(llm_arch)
 gguf_writer.add_context_length(llm_arch, hparams["max_position_embeddings"])
 gguf_writer.add_embedding_length(llm_arch, hparams["hidden_size"])
-gguf_writer.add_layer_count(llm_arch, block_count)
+gguf_writer.add_block_count(llm_arch, block_count)
 gguf_writer.add_feed_forward_length(llm_arch, hparams["intermediate_size"])
 gguf_writer.add_rope_dimension_count(llm_arch, hparams["hidden_size"] // hparams["num_attention_heads"])
 gguf_writer.add_head_count(llm_arch, head_count)
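For context, the values passed to these writer calls come from the checkpoint's Hugging Face hyperparameter file. Below is a minimal sketch of that derivation, not part of the commit; the file path and the exact config keys are assumptions based on standard LLaMA config.json files:

    import json

    # Load the Hugging Face hyperparameter file (path is hypothetical).
    with open("config.json") as f:
        hparams = json.load(f)

    # GGUF's "block count" is the number of transformer blocks, which
    # Hugging Face LLaMA configs expose as "num_hidden_layers" -- hence
    # the n_layer --> n_block rename in this commit.
    block_count = hparams["num_hidden_layers"]
    head_count = hparams["num_attention_heads"]

    # RoPE dimensions per head, exactly as computed in the diff above.
    rope_dimension_count = hparams["hidden_size"] // hparams["num_attention_heads"]

    print(block_count, head_count, rope_dimension_count)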