Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-30 08:42:00 +00:00
			
		
		
		
	llama: print memory breakdown on exit (#15860)
* llama: print memory breakdown on exit
This commit is contained in:
		| @@ -2,6 +2,7 @@ | ||||
|  | ||||
| #include "llama.h" | ||||
|  | ||||
| #include <map> | ||||
| #include <memory> | ||||
| #include <functional> | ||||
|  | ||||
| @@ -108,6 +109,8 @@ struct llama_memory_i { | ||||
|     virtual llama_pos seq_pos_min(llama_seq_id seq_id) const = 0; | ||||
|     virtual llama_pos seq_pos_max(llama_seq_id seq_id) const = 0; | ||||
|  | ||||
|     virtual std::map<ggml_backend_buffer_type_t, size_t> memory_breakdown() const = 0; | ||||
|  | ||||
|     // | ||||
|     // state write/read | ||||
|     // | ||||
|   | ||||
Reference in New Issue · Block a user
Johannes Gäßler