	llama : support optional tensors (#4283)
@@ -1469,7 +1469,7 @@ struct llama_server_context
 
     int split_multiprompt_task(task_server& multiprompt_task)
     {
-        auto prompt_count = multiprompt_task.data.at("prompt").size();
+        int prompt_count = multiprompt_task.data.at("prompt").size();
         assert(prompt_count > 1);
 
         int multitask_id = id_gen++;
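The server-side change pins the deduced type: `.size()` returns an unsigned type, and carrying that through signed arithmetic and comparisons is a common source of warnings and wraparound bugs. A minimal standalone sketch of the pitfall, using a std::vector in place of the JSON object (an illustrative substitution, not the patch itself):

#include <cassert>
#include <cstdio>
#include <vector>

int main() {
    std::vector<int> prompts = {1, 2, 3};

    auto n_auto = prompts.size(); // deduced as std::size_t (unsigned)
    int  n_int  = prompts.size(); // explicit, possibly narrowing, conversion

    // Unsigned arithmetic wraps: size() - 4 becomes a huge positive value.
    std::printf("unsigned: %zu\n", n_auto - 4);

    // Signed arithmetic behaves as most callers expect.
    std::printf("signed:   %d\n", n_int - 4);

    assert(n_int > 1);
    return 0;
}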
							
								
								
									
llama.cpp (33 changed lines)
@@ -1991,10 +1991,13 @@ struct llama_model_loader {
         return tensor;
     }
 
-    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend) {
+    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool optional = false) {
         struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());
 
         if (cur == NULL) {
+            if (optional) {
+                return NULL;
+            }
             throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
         }
 
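The new optional flag turns a missing tensor into a NULL return instead of an exception, which is cheaper and far less brittle than the old pattern of catching the runtime_error and string-matching "not found" in its message. A standalone sketch of the pattern, using a std::map in place of the real ggml metadata context (TensorStore and find_tensor are illustrative names, not llama.cpp API):

#include <map>
#include <stdexcept>
#include <string>

struct Tensor { /* payload omitted */ };

struct TensorStore {
    std::map<std::string, Tensor> tensors;

    // Mirrors create_tensor(): a missing name throws by default, but
    // callers that pass optional = true get NULL back instead.
    Tensor * find_tensor(const std::string & name, bool optional = false) {
        auto it = tensors.find(name);
        if (it == tensors.end()) {
            if (optional) {
                return nullptr;
            }
            throw std::runtime_error("tensor '" + name + "' not found");
        }
        return &it->second;
    }
};

int main() {
    TensorStore store;
    store.tensors["attn_q.weight"] = Tensor{};

    Tensor * w = store.find_tensor("attn_q.weight");     // must exist
    Tensor * b = store.find_tensor("attn_q.bias", true); // may be absent

    return (w != nullptr && b == nullptr) ? 0 : 1;
}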
@@ -2812,29 +2815,11 @@ static void llm_load_tensors(
                         layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, backend_split);
                         layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd},     backend_split);
 
-                        try {
-                            layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend);
-                        } catch (const std::runtime_error& e) {
-                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bq = NULL; else throw;
-                        }
-
-                        try {
-                            layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, backend);
-                        } catch (const std::runtime_error& e) {
-                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bk = NULL; else throw;
-                        }
-
-                        try {
-                            layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, backend);
-                        } catch (const std::runtime_error& e) {
-                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bv = NULL; else throw;
-                        }
-
-                        try {
-                            layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
-                        } catch (const std::runtime_error& e) {
-                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bo = NULL; else throw;
-                        }
+                        // optional bias tensors
+                        layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     backend, true);
+                        layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, backend, true);
+                        layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, backend, true);
+                        layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     backend, true);
 
                         layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
 
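On the consuming side, a NULL bias simply skips the add when the attention graph is built. A hedged sketch of that guard, assuming the caller checks each optional tensor before use (ggml_add is real ggml API; the helper name and its placement are illustrative, since the actual call sites are not part of this diff):

#include "ggml.h"

// Apply a bias only if the model actually shipped one; models whose
// checkpoints lack bias tensors (bias == NULL after the optional load)
// skip the add.
static struct ggml_tensor * add_optional_bias(
        struct ggml_context * ctx0,
        struct ggml_tensor  * cur,
        struct ggml_tensor  * bias) {
    if (bias != NULL) {
        cur = ggml_add(ctx0, cur, bias);
    }
    return cur;
}

With this guard in place, the same graph-building code serves both biased and bias-free checkpoints.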