Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-28 08:31:25 +00:00)
llama : allow using iGPUs with --device (#15951)
* llama : allow using iGPUs with --device
* mtmd : allow iGPU
* rpc-server : allow iGPU
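For context, below is a minimal illustrative sketch (not code from this commit) of what a --device style selection boils down to with the public ggml-backend device API from ggml-backend.h: enumerate the registered devices, which after this change can also include integrated GPUs, and initialize the one whose name matches the user's argument. The helper name init_named_device is hypothetical.

    // Illustrative sketch only, assuming the public ggml-backend device API
    // (ggml-backend.h); init_named_device is a hypothetical helper, not code
    // from this commit.
    #include "ggml-backend.h"
    #include <cstdio>

    static ggml_backend_t init_named_device(const char * name) {
        // list every device the backend registry knows about
        for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
            ggml_backend_dev_t dev = ggml_backend_dev_get(i);
            fprintf(stderr, "  %s - %s\n",
                    ggml_backend_dev_name(dev), ggml_backend_dev_description(dev));
        }
        // look the requested device up by name, as a --device argument would
        ggml_backend_dev_t dev = ggml_backend_dev_by_name(name);
        if (!dev) {
            fprintf(stderr, "device %s not found\n", name);
            return nullptr;
        }
        // create a backend instance bound to that device
        return ggml_backend_dev_init(dev, /*params =*/ nullptr);
    }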
@@ -227,15 +227,7 @@ static ggml_backend_t create_backend(const rpc_server_params & params) {
         }
     }
 
-    // try to initialize a GPU backend first
-    if (!backend) {
-        backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
-    }
-
-    // if there aren't GPU backends fallback to CPU backend
-    if (!backend) {
-        backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
-    }
+    backend = ggml_backend_init_best();
 
     if (backend) {
         fprintf(stderr, "%s: using %s backend\n", __func__, ggml_backend_name(backend));
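The hunk above replaces the rpc-server's hand-rolled GPU-then-CPU fallback with a single call. A hedged standalone sketch of that simplified path, assuming ggml-backend.h from this repository:

    // Minimal sketch, assuming ggml-backend.h from this repository: initialize
    // whatever backend ggml considers best (a GPU if available, otherwise CPU;
    // per this commit's intent, integrated GPUs are usable as well) instead of
    // probing GPU and CPU by hand.
    #include "ggml-backend.h"
    #include <cstdio>

    int main() {
        ggml_backend_t backend = ggml_backend_init_best();
        if (!backend) {
            fprintf(stderr, "failed to initialize any backend\n");
            return 1;
        }
        fprintf(stderr, "using %s backend\n", ggml_backend_name(backend));
        ggml_backend_free(backend);
        return 0;
    }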