Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-10-31 08:51:55 +00:00
	llama : use the same threshold for OpenBLAS and ggml thread limiting (#577)
@@ -856,7 +856,7 @@ static bool llama_eval_internal(
     // for big prompts, if BLAS is enabled, it is better to use only one thread
     // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
     ggml_cgraph gf = {};
-    gf.n_threads = N > 255 && ggml_cpu_has_blas() ? 1 : n_threads;
+    gf.n_threads = N >= 32 && ggml_cpu_has_blas() ? 1 : n_threads;
 
     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, tokens, N*ggml_element_size(embd));
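For context, below is a minimal standalone sketch (not part of the commit) of the thread-limiting heuristic this change adjusts: when BLAS is available and the prompt batch is at least 32 tokens, the graph is evaluated on a single thread so the remaining threads do not spin-wait on the BLAS calls. The names pick_n_threads and cpu_has_blas are illustrative stand-ins, not part of the llama.cpp API.

    // thread_limit_sketch.cpp -- illustrative only, assumes the threshold from this commit
    #include <cstdio>

    // Stand-in for ggml_cpu_has_blas(): pretend BLAS support was compiled in.
    static bool cpu_has_blas() { return true; }

    // For large prompt batches with BLAS, fall back to a single thread;
    // otherwise use the user-requested thread count.
    static int pick_n_threads(int N, int n_threads) {
        return (N >= 32 && cpu_has_blas()) ? 1 : n_threads;
    }

    int main() {
        std::printf("N=8   -> %d threads\n", pick_n_threads(8, 8));   // small batch: keep 8 threads
        std::printf("N=512 -> %d threads\n", pick_n_threads(512, 8)); // big prompt + BLAS: 1 thread
        return 0;
    }

With the new threshold, thread limiting kicks in for the same batch sizes at which ggml hands matrix multiplications to OpenBLAS, which is the point of the commit title.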
Author: Maël Kerbiriou