mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-11-01 09:01:57 +00:00
common : better n_gpu_layers assignment
This commit is contained in:
@@ -702,7 +702,9 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     lparams.n_ctx        = params.n_ctx;
     lparams.n_batch      = params.n_batch;
-    lparams.n_gpu_layers = params.n_gpu_layers != -1 ? params.n_gpu_layers : lparams.n_gpu_layers;
+    if (params.n_gpu_layers != -1) {
+        lparams.n_gpu_layers = params.n_gpu_layers;
+    }
     lparams.main_gpu     = params.main_gpu;
     lparams.tensor_split = params.tensor_split;
     lparams.low_vram     = params.low_vram;
Reference in New Issue
Block a user