Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-05 09:36:52 +00:00).
ggml : add is_ram_shared to ggml_backend
Metal can share RAM with the CPU and can therefore utilize mmap without a temporary buffer.
This commit is contained in:
@@ -255,8 +255,9 @@ struct ggml_backend ggml_backend_cpu_init(void) {
     ctx->work_size = 0;

     struct ggml_backend cpu_backend = {
-        /* .interface = */ &cpu_backend_interface,
-        /* .context   = */ ctx
+        /* .interface     = */ &cpu_backend_interface,
+        /* .context       = */ ctx,
+        /* .is_ram_shared = */ true,
     };

     return cpu_backend;
 }
Reference in New Issue
Block a user