llama : assume tied weights if lm_head/output weights are missing (#5824)
This is to support model configurations with "tie_word_embeddings" set to true.

Co-authored-by: Don Mahurin <2797413+dmahurin@users.noreply.github.com>
@@ -3888,7 +3888,13 @@ static bool llm_load_tensors(
                     {
                         model.output_norm = ml.create_tensor(ctx_output,       tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                         if (model.arch != LLM_ARCH_MINICPM){
-                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab});
+                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
+                            // if output is NULL, init from the input tok embed
+                            if (model.output == NULL) {
+                                model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+                                ml.n_created--; // artificial tensor
+                                ml.size_data += ggml_nbytes(model.output);
+                            }
                         }
                     }
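For context, "tie_word_embeddings" means the token-embedding matrix doubles as the output (lm_head) projection, so a checkpoint can omit the output tensor entirely. The standalone C++ sketch below illustrates the idea only; it is not llama.cpp code, and the sizes and values are made up. With tied weights, each logit is the dot product of the hidden state with the same matrix row that serves as that token's input embedding.

#include <cstdio>
#include <vector>

int main() {
    const int n_embd  = 4;   // toy embedding width
    const int n_vocab = 3;   // toy vocabulary size

    // One weight matrix, one row per token: tok_embd[n_vocab][n_embd].
    // This single matrix plays both roles when weights are tied.
    std::vector<std::vector<float>> tok_embd = {
        {0.1f, 0.2f, 0.3f, 0.4f},
        {0.5f, 0.6f, 0.7f, 0.8f},
        {0.9f, 1.0f, 1.1f, 1.2f},
    };

    // Input side: embedding lookup is a row read.
    int token = 1;
    std::vector<float> h = tok_embd[token]; // stand-in for the transformer output

    // Output side: with tied weights the logit for vocab entry v is
    // dot(h, tok_embd[v]) -- no separate output matrix is ever loaded.
    for (int v = 0; v < n_vocab; ++v) {
        float logit = 0.0f;
        for (int i = 0; i < n_embd; ++i) {
            logit += h[i] * tok_embd[v][i];
        }
        std::printf("logit[%d] = %f\n", v, logit);
    }
    return 0;
}

In the patch itself, the trailing false argument makes the LLM_TENSOR_OUTPUT tensor optional; when it is absent from the file, the loader creates the output tensor from the LLM_TENSOR_TOKEN_EMBD weights instead and adjusts its bookkeeping (ml.n_created and ml.size_data) for the artificial tensor, as the inline comments note.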