mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-10-28 08:31:25 +00:00
llama : revert n_threads_batch logic
ggml-ci
This commit is contained in:
@@ -5433,7 +5433,7 @@ static int llama_decode_internal(

     GGML_ASSERT(n_tokens <= n_batch);

-    int n_threads = n_tokens < 32 ? cparams.n_threads : cparams.n_threads_batch;
+    int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;

     GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT

     const int64_t t_start_us = ggml_time_us();
Reference in New Issue
Block a user