	llama : mark LLM_ARCH_STARCODER as full offload supported (#3945)
as done in https://github.com/ggerganov/llama.cpp/pull/3827
Author: Meng Zhang

llama.cpp: 11 changed lines (+6 / -5)
@@ -5164,11 +5164,12 @@ static int llama_decode_internal(
 
     // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
     const bool full_offload_supported =
-        model.arch == LLM_ARCH_LLAMA    ||
-        model.arch == LLM_ARCH_BAICHUAN ||
-        model.arch == LLM_ARCH_FALCON   ||
-        model.arch == LLM_ARCH_REFACT   ||
-        model.arch == LLM_ARCH_MPT;
+        model.arch == LLM_ARCH_LLAMA      ||
+        model.arch == LLM_ARCH_BAICHUAN   ||
+        model.arch == LLM_ARCH_FALCON     ||
+        model.arch == LLM_ARCH_REFACT     ||
+        model.arch == LLM_ARCH_MPT        ||
+        model.arch == LLM_ARCH_STARCODER;
 
     const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
     if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
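For context, the check extended by this commit gates a single-thread fallback during decoding: when cuBLAS is available, the architecture supports full offload, and every layer (plus the non-repeating output tensors, hence the "+ 3") is on the GPU, extra CPU threads only add synchronization overhead. The sketch below is a minimal, self-contained illustration rather than the upstream code: the enum values mirror llama.cpp's LLM_ARCH_* names, pick_n_threads is a hypothetical helper, and the assumption that the truncated if-body reduces the thread count to 1 matches upstream behaviour around this revision but is not shown in the diff above.

// Standalone sketch of the single-thread fallback; names are illustrative.
#include <cstdio>

enum llm_arch {
    LLM_ARCH_LLAMA,
    LLM_ARCH_BAICHUAN,
    LLM_ARCH_FALCON,
    LLM_ARCH_REFACT,
    LLM_ARCH_MPT,
    LLM_ARCH_STARCODER,
    LLM_ARCH_OTHER,
};

// Returns the thread count to use for decoding, dropping to 1 when the whole
// graph is expected to run on the GPU. The "+ 3" mirrors the upstream check,
// which counts the non-repeating tensors beyond the per-layer blocks.
static int pick_n_threads(llm_arch arch, int n_gpu_layers, int n_layer,
                          int n_threads, bool has_cublas) {
    const bool full_offload_supported =
        arch == LLM_ARCH_LLAMA      ||
        arch == LLM_ARCH_BAICHUAN   ||
        arch == LLM_ARCH_FALCON     ||
        arch == LLM_ARCH_REFACT     ||
        arch == LLM_ARCH_MPT        ||
        arch == LLM_ARCH_STARCODER;   // newly added by this commit

    const bool fully_offloaded = n_gpu_layers >= n_layer + 3;

    if (has_cublas && full_offload_supported && fully_offloaded) {
        return 1; // assumption: upstream sets n_threads = 1 in this branch
    }
    return n_threads;
}

int main() {
    // StarCoder with all 40 layers (+3 extra) offloaded: expect 1 thread.
    printf("%d\n", pick_n_threads(LLM_ARCH_STARCODER, 43, 40, 8, true));
    // Same model, CPU-only build: keep the requested 8 threads.
    printf("%d\n", pick_n_threads(LLM_ARCH_STARCODER, 0, 40, 8, false));
    return 0;
}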