llama : allow using iGPUs with --device (#15951)

* llama : allow using iGPUs with --device

* mtmd : allow iGPU

* rpc-server : allow iGPU
commit 50f4281a6f
parent 55758b00ca
Author: Diego Devesa
Date:   2025-09-13 07:49:49 -07:00 (committed via GitHub)

3 changed files with 8 additions and 24 deletions
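The gist of the change: integrated GPUs get their own device type (GGML_BACKEND_DEVICE_TYPE_IGPU), so they can be selected explicitly with --device (and show up in --list-devices) rather than being skipped during backend selection. A minimal sketch, not part of this commit, of listing the registered devices and their types via the public ggml-backend registry API:

// illustrative only: enumerate devices so an iGPU can be found by name
// and passed to --device
#include "ggml-backend.h"
#include <cstdio>

int main() {
    for (size_t i = 0; i < ggml_backend_dev_count(); i++) {
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
        const char * type = "other";
        switch (ggml_backend_dev_type(dev)) {
            case GGML_BACKEND_DEVICE_TYPE_CPU:  type = "CPU";  break;
            case GGML_BACKEND_DEVICE_TYPE_GPU:  type = "GPU";  break;
            case GGML_BACKEND_DEVICE_TYPE_IGPU: type = "iGPU"; break;
            default: break;
        }
        printf("%-12s %-6s %s\n", ggml_backend_dev_name(dev), type,
               ggml_backend_dev_description(dev));
    }
    return 0;
}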

tools/mtmd/clip.cpp

@@ -406,6 +406,7 @@ struct clip_ctx {
         }
         if (!backend) {
             backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
+            backend = backend ? backend : ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU, nullptr);
         }
     }
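The added ternary makes clip try a discrete GPU first and fall back to an integrated GPU only when no GPU backend initializes. Spelled out long-hand (illustrative only; the helper name is hypothetical):

// equivalent spelled-out form of the added one-liner
#include "ggml-backend.h"

static ggml_backend_t init_gpu_or_igpu(void) {
    ggml_backend_t backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
    if (backend == nullptr) {
        // no discrete GPU backend came up: try an integrated GPU before giving up
        backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU, nullptr);
    }
    return backend;
}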

tools/rpc/rpc-server.cpp

@@ -227,15 +227,7 @@ static ggml_backend_t create_backend(const rpc_server_params & params) {
         }
     }
-    // try to initialize a GPU backend first
-    if (!backend) {
-        backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
-    }
-    // if there aren't GPU backends fallback to CPU backend
-    if (!backend) {
-        backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
-    }
+    backend = ggml_backend_init_best();
     if (backend) {
         fprintf(stderr, "%s: using %s backend\n", __func__, ggml_backend_name(backend));
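Here the hand-rolled GPU-then-CPU fallback is replaced by a single call to ggml_backend_init_best(), which centralizes the preference order instead of duplicating it in the rpc-server. A rough sketch of what that call is assumed to do after this commit (the exact order lives inside ggml and is an assumption here, as is the helper name):

// assumption: prefer a discrete GPU, then an integrated GPU, then the CPU
#include "ggml-backend.h"

static ggml_backend_t init_best_sketch(void) {
    ggml_backend_t backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
    if (backend == nullptr) {
        backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU, nullptr);
    }
    if (backend == nullptr) {
        backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
    }
    return backend;
}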