tests : fix test-thread-safety when compiling with multiple backends (#16699)
* run one test per backend/device (even if it's the same device)
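
For context: per the comment lines visible in the first hunk below, the test creates n_parallel (--parallel) contexts per model and runs inference on each context from its own thread. A minimal sketch of that shape, with illustrative names (run_contexts_in_parallel is not the actual test code):

    #include <atomic>
    #include <thread>
    #include <vector>

    #include "llama.h"

    // Illustrative sketch only: one thread per context, all sharing one
    // model, with failures reported through an atomic flag.
    static bool run_contexts_in_parallel(llama_model * model, int num_contexts) {
        std::atomic<bool> failed{false};
        std::vector<std::thread> threads;
        for (int c = 0; c < num_contexts; ++c) {
            threads.emplace_back([&]() {
                llama_context_params cparams = llama_context_default_params();
                cparams.n_seq_max = 1;
                llama_context * ctx = llama_init_from_model(model, cparams);
                if (ctx == nullptr) {
                    failed = true;
                    return;
                }
                // ... run inference on ctx ...
                llama_free(ctx);
            });
        }
        for (auto & t : threads) {
            t.join();
        }
        return !failed;
    }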
@@ -3,6 +3,7 @@
 // - Creates n_parallel (--parallel) contexts per model
 // - Runs inference in parallel on each context
 
+#include <array>
 #include <thread>
 #include <vector>
 #include <atomic>
@@ -38,13 +39,14 @@ int main(int argc, char ** argv) {
     cparams.n_seq_max = 1;
 
     int dev_count = ggml_backend_dev_count();
-    int gpu_dev_count = 0;
+    std::vector<std::array<ggml_backend_dev_t, 2>> gpus;
     for (int i = 0; i < dev_count; ++i) {
         auto * dev = ggml_backend_dev_get(i);
         if (dev && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) {
-            gpu_dev_count++;
+            gpus.push_back({dev, nullptr});
         }
     }
+    const int gpu_dev_count = (int)gpus.size();
     const int num_models = gpu_dev_count + 1 + 1; // GPUs + 1 CPU model + 1 layer split
     //const int num_models = std::max(1, gpu_dev_count);
     const int num_contexts = std::max(1, params.n_parallel);
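
A note on the new gpus vector: each entry is a std::array<ggml_backend_dev_t, 2> pairing one GPU device with a trailing nullptr, because llama_model_params::devices takes a NULL-terminated list of devices; every element is therefore a ready-made single-device list. A hedged sketch of the enumeration as a standalone helper (list_gpu_devices is illustrative, not part of the patch):

    #include <array>
    #include <vector>

    #include "ggml-backend.h"

    // Illustrative helper: collect every GPU as its own null-terminated,
    // single-entry device list, ready for llama_model_params::devices.
    static std::vector<std::array<ggml_backend_dev_t, 2>> list_gpu_devices() {
        std::vector<std::array<ggml_backend_dev_t, 2>> gpus;
        const int dev_count = (int) ggml_backend_dev_count();
        for (int i = 0; i < dev_count; ++i) {
            ggml_backend_dev_t dev = ggml_backend_dev_get(i);
            if (dev && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) {
                gpus.push_back({dev, nullptr}); // nullptr terminates the list
            }
        }
        return gpus;
    }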
@@ -58,12 +60,12 @@ int main(int argc, char ** argv) {
 
         if (m < gpu_dev_count) {
             mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
-            mparams.main_gpu = m;
+            mparams.devices = gpus[m].data();
         } else if (m == gpu_dev_count) {
             mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
             mparams.main_gpu = -1; // CPU model
         } else {
-            mparams.split_mode = LLAMA_SPLIT_MODE_LAYER;;
+            mparams.split_mode = LLAMA_SPLIT_MODE_LAYER;
         }
 
         llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
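
Putting the pieces together: one model is loaded per GPU device (pinned via mparams.devices), plus one CPU-only model and one layer-split model, matching num_models = gpu_dev_count + 1 + 1 above. A usage sketch under the assumptions of the earlier sketches (model_path, num_contexts, and the helper names are placeholders):

    // Illustrative usage, combining the sketches above.
    const char * model_path   = "model.gguf"; // placeholder path
    const int    num_contexts = 2;            // placeholder, normally --parallel

    auto gpus = list_gpu_devices();
    const int gpu_dev_count = (int) gpus.size();
    const int num_models    = gpu_dev_count + 1 + 1; // GPUs + 1 CPU model + 1 layer split

    for (int m = 0; m < num_models; ++m) {
        llama_model_params mparams = llama_model_default_params();
        if (m < gpu_dev_count) {
            mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
            mparams.devices    = gpus[m].data(); // pin this model to a single device
        } else if (m == gpu_dev_count) {
            mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
            mparams.main_gpu   = -1;             // CPU model
        } else {
            mparams.split_mode = LLAMA_SPLIT_MODE_LAYER; // split layers across all GPUs
        }
        llama_model * model = llama_model_load_from_file(model_path, mparams);
        if (model != nullptr) {
            run_contexts_in_parallel(model, num_contexts);
            llama_model_free(model);
        }
    }

Passing an explicit device list rather than a main_gpu index keeps each single-GPU model unambiguous when several backends are compiled in, which appears to be the point of the fix.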