	convert-llama-hf-to-gguf.py : rope scale fix
		@@ -126,7 +126,7 @@ gguf_writer.add_head_count(head_count)
 gguf_writer.add_head_count_kv(head_count_kv)
 gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
 
-if "rope_scaling" in hparams and "factor" in hparams["rope_scaling"]:
+if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]:
     if "type" in hparams["rope_scaling"]:
         if hparams["rope_scaling"]["type"] == "linear":
             gguf_writer.add_rope_scale_linear(hparams["rope_scaling"]["factor"])
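
The one-line change guards the membership test against configs where "rope_scaling" is present but set to null. Below is a minimal sketch of the failure mode and the fix, assuming a Hugging Face style config.json; it is an illustration, not part of the commit.

import json

# Many Hugging Face config.json files carry "rope_scaling": null, which
# json.loads turns into Python None.
hparams = json.loads('{"rms_norm_eps": 1e-05, "rope_scaling": null}')

# Old check: `"factor" in None` raises
#   TypeError: argument of type 'NoneType' is not iterable
# if "rope_scaling" in hparams and "factor" in hparams["rope_scaling"]:

# Fixed check: the added comparison short-circuits on the None value
# before the membership test is evaluated.
if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]:
    if hparams["rope_scaling"].get("type") == "linear":
        print("rope scale linear:", hparams["rope_scaling"]["factor"])
else:
    print("no rope scaling metadata to write")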