ggml : add is_ram_shared to ggml_backend
Metal can share the host's RAM, so it can use mmap without a temporary buffer
@@ -1834,8 +1834,9 @@ ggml_backend ggml_backend_cuda_init(void) {
     ggml_backend_cuda_context * ctx = new ggml_backend_cuda_context;
 
     ggml_backend cuda_backend = {
-        /* .interface = */ &cuda_backend_interface,
-        /* .context   = */ ctx
+        /* .interface     = */ &cuda_backend_interface,
+        /* .context       = */ ctx,
+        /* .is_ram_shared = */ false,
     };
     return cuda_backend;
 }