Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-31 08:51:55 +00:00
			
		
		
		
	llama : avoid using "optional" keyword (#4283)
This commit is contained in:
		
							
								
								
									
										12
									
								
								llama.cpp
									
									
									
									
									
								
							
							
						
						
									
										12
									
								
								llama.cpp
									
									
									
									
									
								
							| @@ -1991,11 +1991,11 @@ struct llama_model_loader { | ||||
|         return tensor; | ||||
|     } | ||||
|  | ||||
|     struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool optional = false) { | ||||
|     struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool required = true) { | ||||
|         struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str()); | ||||
|  | ||||
|         if (cur == NULL) { | ||||
|             if (optional) { | ||||
|             if (!required) { | ||||
|                 return NULL; | ||||
|             } | ||||
|             throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str())); | ||||
| @@ -2816,10 +2816,10 @@ static void llm_load_tensors( | ||||
|                         layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd},     backend_split); | ||||
|  | ||||
|                         // optional bias tensors | ||||
|                         layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     backend, true); | ||||
|                         layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, backend, true); | ||||
|                         layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, backend, true); | ||||
|                         layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     backend, true); | ||||
|                         layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     backend, false); | ||||
|                         layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, backend, false); | ||||
|                         layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, backend, false); | ||||
|                         layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     backend, false); | ||||
|  | ||||
|                         layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); | ||||
|  | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Georgi Gerganov
					Georgi Gerganov