Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-30 08:42:00 +00:00)
llama : use n_threads_batch only when n_tokens >= 32

ggml-ci
Changed file: llama.cpp (14 changed lines)
@@ -5433,7 +5433,7 @@ static int llama_decode_internal(
 
     GGML_ASSERT(n_tokens <= n_batch);
 
-    int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
+    int n_threads = n_tokens < 32 ? cparams.n_threads : cparams.n_threads_batch;
     GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
 
     const int64_t t_start_us = ggml_time_us();
@@ -5550,18 +5550,8 @@ static int llama_decode_internal(
         n_threads = std::min(4, n_threads);
     }
 
-    // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
-    const bool full_offload_supported =
-        model.arch == LLM_ARCH_LLAMA      ||
-        model.arch == LLM_ARCH_BAICHUAN   ||
-        model.arch == LLM_ARCH_FALCON     ||
-        model.arch == LLM_ARCH_REFACT     ||
-        model.arch == LLM_ARCH_MPT        ||
-        model.arch == LLM_ARCH_STARCODER  ||
-        model.arch == LLM_ARCH_STABLELM;
-
     const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
-    if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
+    if (ggml_cpu_has_cublas() && fully_offloaded) {
         n_threads = 1;
     }
 
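For reference, a minimal usage sketch (not part of the commit) showing where the two thread counts come from on the API side. It assumes the llama_context_params fields n_threads and n_threads_batch and the llama_new_context_with_model() constructor from the llama.cpp API of that period; the helper name and the thread values are illustrative only.

#include "llama.h"

// Hedged sketch: configuring separate thread counts for generation and batch decoding.
static llama_context * make_context(llama_model * model) {
    llama_context_params cparams = llama_context_default_params();

    // Used for small batches; after this change that means n_tokens < 32,
    // i.e. typical token-by-token generation.
    cparams.n_threads       = 8;

    // Used only for larger batches (n_tokens >= 32), e.g. prompt processing.
    cparams.n_threads_batch = 16;

    return llama_new_context_with_model(model, cparams);
}

Before this change, n_threads_batch was used for any batch larger than one token; the new 32-token threshold keeps the generation thread count for small batches, where spinning up the full batch thread pool is presumably not worth the overhead.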
Georgi Gerganov