	model : remove unnecessary prefix for tensor loading constants
Co-authored-by: Sigbjørn Skjæret <sigbjorn.skjaeret@scala.com>
@@ -3220,10 +3220,10 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     {
                         output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);

-                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
                         // if output is NULL, init from the input tok embed, duplicated to allow offloading
                         if (output == NULL) {
-                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
+                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                         }
                     }

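The hunk above (and the identical one that follows) relies on two tensor-loading flags: a tensor created with TENSOR_NOT_REQUIRED may be absent from the model file, in which case create_tensor yields NULL, while TENSOR_DUPLICATED marks the token-embedding weights as reused for the output head so they can still be offloaded. Below is a minimal, self-contained C++ sketch of that lookup-with-fallback pattern; the fake_tensor type, lookup_tensor helper, map-backed store, and flag values are illustrative assumptions, not the real llama_model_loader API.

    // Sketch of the assumed flag semantics (not the real llama_model_loader API).
    #include <cstdio>
    #include <map>
    #include <stdexcept>
    #include <string>

    enum tensor_flags : int {
        TENSOR_NOT_REQUIRED = 1 << 0, // the tensor may be missing; return nullptr instead of failing
        TENSOR_DUPLICATED   = 1 << 1, // the tensor's data is shared with another entry (e.g. tok embd -> output)
    };

    struct fake_tensor {
        std::string name;
    };

    // illustrative stand-in for the tensors present in a model file
    static std::map<std::string, fake_tensor> g_tensors = {
        { "token_embd.weight", { "token_embd.weight" } },
        // no "output.weight" entry, so the optional lookup below returns nullptr
    };

    static fake_tensor * lookup_tensor(const std::string & name, int flags) {
        auto it = g_tensors.find(name);
        if (it == g_tensors.end()) {
            if (flags & TENSOR_NOT_REQUIRED) {
                return nullptr; // optional tensor: absence is not an error
            }
            throw std::runtime_error("missing required tensor: " + name);
        }
        return &it->second;
    }

    int main() {
        fake_tensor * output = lookup_tensor("output.weight", TENSOR_NOT_REQUIRED);
        // if output is NULL, init from the input tok embed, duplicated to allow offloading
        if (output == NULL) {
            output = lookup_tensor("token_embd.weight", TENSOR_DUPLICATED);
        }
        std::printf("output head backed by: %s\n", output->name.c_str());
        return 0;
    }
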
@@ -3266,10 +3266,10 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     {
                         output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);

-                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
                         // if output is NULL, init from the input tok embed, duplicated to allow offloading
                         if (output == NULL) {
-                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
+                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
                         }
                     }

@@ -3316,7 +3316,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {

                         layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

-                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, TENSOR_NOT_REQUIRED);

                         if (layer.ffn_gate_inp) {
                             // MoE
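The shortened names only compile if TENSOR_NOT_REQUIRED and TENSOR_DUPLICATED are visible inside load_tensors without the class prefix; the declaration that makes that possible is outside this diff. A plausible minimal sketch, assuming local constants that alias the loader's enum values near the top of the function (hypothetical, not confirmed by this excerpt):

    // Hypothetical sketch of how the bare names could be in scope; the real
    // declaration lives outside this diff and may differ.
    struct llama_model_loader {
        enum {
            TENSOR_NOT_REQUIRED = 1 << 0,
            TENSOR_DUPLICATED   = 1 << 1,
        };
    };

    struct llama_model {
        bool load_tensors(llama_model_loader & ml);
    };

    bool llama_model::load_tensors(llama_model_loader & ml) {
        // local aliases let the rest of the function drop the llama_model_loader:: prefix
        const int TENSOR_NOT_REQUIRED = llama_model_loader::TENSOR_NOT_REQUIRED;
        const int TENSOR_DUPLICATED   = llama_model_loader::TENSOR_DUPLICATED;

        (void) ml;
        // ... create_tensor(..., TENSOR_NOT_REQUIRED) / create_tensor(..., TENSOR_DUPLICATED) ...
        return TENSOR_NOT_REQUIRED != TENSOR_DUPLICATED;
    }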