	Remove .attention from skipped tensors to match more accurately (#7051)
Author: Bartowski
@@ -1427,7 +1427,7 @@ class LlamaModel(Model):
         experts = dict()
         for name, data_torch in self.get_tensors():
             # we don't need these
-            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
+            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                 continue

             old_dtype = data_torch.dtype
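For context, a minimal sketch of why shortening the suffix matters, assuming Hugging Face-style tensor names (the specific names below are illustrative, not taken from the commit): str.endswith with a tuple skips a tensor only if its name ends with one of the suffixes exactly, so ".attention.rotary_emb.inv_freq" would miss names like "model.layers.0.self_attn.rotary_emb.inv_freq", while the broader ".rotary_emb.inv_freq" catches them and still covers names nested under ".attention.".

# Sketch of the suffix matching before and after this commit.
# Tensor names are assumptions modeled on common HF checkpoint layouts.
old_suffixes = (".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")
new_suffixes = (".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")

names = [
    "model.layers.0.self_attn.rotary_emb.inv_freq",       # Llama-style: not under ".attention."
    "gpt_neox.layers.0.attention.rotary_emb.inv_freq",    # NeoX-style: under ".attention."
    "model.layers.0.self_attn.q_proj.weight",             # real weight that must not be skipped
]

for name in names:
    # endswith() accepts a tuple and returns True if any suffix matches
    print(f"{name}: old skip={name.endswith(old_suffixes)}, new skip={name.endswith(new_suffixes)}")

Running this prints old skip=False for the Llama-style inv_freq tensor but new skip=True, showing how the change lets the converter skip that tensor as well, while the ordinary weight is kept under both suffix sets.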