Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-28 08:31:25 +00:00)
server : fix incoming tasks not processed in order (#15395)
@@ -1729,7 +1729,7 @@ struct server_queue {
     void pop_deferred_task() {
         std::unique_lock<std::mutex> lock(mutex_tasks);
         if (!queue_tasks_deferred.empty()) {
-            queue_tasks.emplace_back(std::move(queue_tasks_deferred.front()));
+            queue_tasks.emplace_front(std::move(queue_tasks_deferred.front()));
             queue_tasks_deferred.pop_front();
         }
         condition_tasks.notify_one();
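For context, a minimal sketch of why re-queuing a deferred task at the front rather than the back preserves arrival order. This is a hypothetical, simplified model (tasks reduced to integer ids, no locking), not the real llama.cpp server_queue:

    #include <cstdio>
    #include <deque>

    int main() {
        std::deque<int> queue_tasks;          // tasks ready to run (lower id = arrived earlier)
        std::deque<int> queue_tasks_deferred; // tasks postponed, e.g. while no slot was free

        // Task 1 arrives and is deferred; task 2 arrives later and is queued normally.
        queue_tasks_deferred.push_back(1);
        queue_tasks.push_back(2);

        // Old behaviour: emplace_back would place the deferred task 1 *behind* task 2,
        // so processing order would be 2, 1 -- out of arrival order.
        // New behaviour (this commit): emplace_front restores task 1 ahead of task 2.
        queue_tasks.emplace_front(queue_tasks_deferred.front());
        queue_tasks_deferred.pop_front();

        for (int id : queue_tasks) {
            std::printf("task %d\n", id); // prints: task 1, then task 2
        }
        return 0;
    }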