	convert-llama-hf-to-gguf.py : remove extra kv
convert-llama-hf-to-gguf.py
@@ -114,7 +114,6 @@ else:
     sys.exit()
 
 
-gguf_writer.add_architecture()
 gguf_writer.add_name(last_dir)
 gguf_writer.add_source_hf_repo(hf_repo)
 gguf_writer.add_tensor_data_layout("Meta AI original pth")
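For readers without the full script at hand, below is a minimal, hypothetical sketch (KVLogger is not part of llama.cpp or gguf-py) that simply records the metadata calls visible in the hunk's context, together with the GGUF key names each call is assumed to map to. The only behavioural change in this commit is that gguf_writer.add_architecture() is no longer called from this block, so that extra key-value pair is not written here; last_dir and hf_repo are replaced with made-up placeholder values.

# Hypothetical stand-in for the script's gguf_writer; it only logs which
# key-value pairs the remaining metadata calls would write.
class KVLogger:
    def __init__(self) -> None:
        self.kv: list[tuple[str, str]] = []

    def add_name(self, name: str) -> None:
        # assumed key name per the GGUF spec of the time
        self.kv.append(("general.name", name))

    def add_source_hf_repo(self, repo: str) -> None:
        self.kv.append(("general.source.huggingface.repository", repo))

    def add_tensor_data_layout(self, layout: str) -> None:
        self.kv.append(("llama.tensor_data_layout", layout))

gguf_writer = KVLogger()
gguf_writer.add_name("my-llama-model")                # last_dir in the script
gguf_writer.add_source_hf_repo("org/my-llama-model")  # hf_repo in the script
gguf_writer.add_tensor_data_layout("Meta AI original pth")
# No add_architecture() call any more; that was the "extra kv" of the title.
print(gguf_writer.kv)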