llama: print memory breakdown on exit (#15860)

* llama: print memory breakdown on exit
Johannes Gäßler
2025-09-24 16:53:48 +02:00
committed by GitHub
parent f2a789e334
commit e789095502
18 changed files with 243 additions and 12 deletions


@@ -473,6 +473,14 @@ llama_pos llama_kv_cache::seq_pos_max(llama_seq_id seq_id) const {
     return cells.seq_pos_max(seq_id);
 }
 
+std::map<ggml_backend_buffer_type_t, size_t> llama_kv_cache::memory_breakdown() const {
+    std::map<ggml_backend_buffer_type_t, size_t> ret;
+    for (const ggml_backend_buffer_ptr & buf_ptr : bufs) {
+        ret[ggml_backend_buffer_get_type(buf_ptr.get())] += ggml_backend_buffer_get_size(buf_ptr.get());
+    }
+    return ret;
+}
+
 llama_memory_context_ptr llama_kv_cache::init_batch(
         llama_batch_allocr & balloc,
         uint32_t n_ubatch,
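
The added memory_breakdown() helper just accumulates buffer sizes per backend buffer type so that a caller can report them at shutdown. Below is a minimal, self-contained sketch of that same aggregate-then-print pattern; it uses hypothetical plain-string "buffer type" names and a fake_buffer struct instead of the real ggml_backend_buffer_type_t handles, which are outside this diff.

// Sketch only: mirrors the aggregation shape of llama_kv_cache::memory_breakdown(),
// with made-up buffer type names in place of ggml backend handles.
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct fake_buffer {          // stand-in for a backend buffer
    std::string type;         // e.g. "CUDA0", "CPU"
    size_t      size;         // size in bytes
};

int main() {
    std::vector<fake_buffer> bufs = {
        {"CUDA0", 512ull * 1024 * 1024},
        {"CUDA0", 128ull * 1024 * 1024},
        {"CPU",    64ull * 1024 * 1024},
    };

    // Accumulate sizes per buffer type, as memory_breakdown() does per ggml buffer type.
    std::map<std::string, size_t> breakdown;
    for (const fake_buffer & buf : bufs) {
        breakdown[buf.type] += buf.size;
    }

    // On exit, print one line per buffer type.
    for (const auto & [type, size] : breakdown) {
        printf("%-6s: %8.2f MiB\n", type.c_str(), size / (1024.0 * 1024.0));
    }
    return 0;
}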