server: add memory breakdown print (#16740)
@@ -5714,6 +5714,7 @@ int main(int argc, char ** argv) {
 
     clean_up();
     t.join();
+    llama_memory_breakdown_print(ctx_server.ctx);
 
     return 0;
 }
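For context, here is a minimal standalone sketch of how the same breakdown print could be exercised outside the server. It is an illustration, not the server code: the API names (llama_backend_init, llama_model_load_from_file, llama_init_from_model, llama_memory_breakdown_print) are assumed to match the llama.cpp revision you build against, and "model.gguf" is a placeholder path.

// Minimal sketch: load a model, create a context, and print the memory
// breakdown. Assumes a recent llama.h; "model.gguf" is a placeholder path.
#include "llama.h"

#include <cstdio>

int main() {
    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_model_load_from_file("model.gguf", mparams);
    if (model == nullptr) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    llama_context_params cparams = llama_context_default_params();
    llama_context * ctx = llama_init_from_model(model, cparams);
    if (ctx == nullptr) {
        fprintf(stderr, "failed to create context\n");
        llama_model_free(model);
        return 1;
    }

    // ... run prompt processing / decoding here so the KV cache and compute
    //     buffers are allocated and show up in the breakdown ...

    // per-device summary of memory use (weights, KV cache, compute buffers)
    llama_memory_breakdown_print(ctx);

    llama_free(ctx);
    llama_model_free(model);
    llama_backend_free();
    return 0;
}

The commit itself only adds the single llama_memory_breakdown_print call to the server's shutdown path, after the HTTP thread has joined, so the breakdown is printed once per run just before exit.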