mirror of https://github.com/ggml-org/llama.cpp.git
examples(gguf): GGUF example outputs (#17025)
* feat(llama-gguf): Print out the tensor type in llama-gguf r
  Branch: Mamba2Perf
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat(off-topic): print the number of elements in tensors with llama-gguf
  Branch: Mamba2SSD
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* style: valign
  Branch: GGUFToolOutputs
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* Update examples/gguf/gguf.cpp

---------

Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
examples/gguf/gguf.cpp
@@ -184,8 +184,13 @@ static bool gguf_ex_read_1(const std::string & fname, bool check_data) {
             const char * name   = gguf_get_tensor_name  (ctx, i);
             const size_t size   = gguf_get_tensor_size  (ctx, i);
             const size_t offset = gguf_get_tensor_offset(ctx, i);
+            const auto   type   = gguf_get_tensor_type  (ctx, i);
 
-            printf("%s: tensor[%d]: name = %s, size = %zu, offset = %zu\n", __func__, i, name, size, offset);
+            const char * type_name  = ggml_type_name(type);
+            const size_t type_size  = ggml_type_size(type);
+            const size_t n_elements = size / type_size;
+
+            printf("%s: tensor[%d]: name = %s, size = %zu, offset = %zu, type = %s, n_elts = %zu\n", __func__, i, name, size, offset, type_name, n_elements);
         }
     }
 
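For readers who want the same inspection as a standalone program rather than a patch, below is a minimal sketch built on the public gguf/ggml C API used in the hunk above (gguf_init_from_file, gguf_get_n_tensors, gguf_get_tensor_name/size/offset/type, ggml_type_name, ggml_type_size). The main() wrapper, the header names, and the error handling are illustrative assumptions layered on top of the commit, not part of it.

// Minimal sketch (not part of the commit): list each tensor's name, size,
// offset, type, and element count from a GGUF file, mirroring the loop above.
#include "ggml.h"
#include "gguf.h"

#include <cstdio>

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s file.gguf\n", argv[0]);
        return 1;
    }

    // load metadata only; tensor data is not read into memory
    struct gguf_init_params params = {
        /*.no_alloc =*/ true,
        /*.ctx      =*/ nullptr,
    };

    struct gguf_context * ctx = gguf_init_from_file(argv[1], params);
    if (ctx == nullptr) {
        fprintf(stderr, "failed to open %s as GGUF\n", argv[1]);
        return 1;
    }

    const int64_t n_tensors = gguf_get_n_tensors(ctx);
    for (int64_t i = 0; i < n_tensors; ++i) {
        const char * name   = gguf_get_tensor_name  (ctx, i);
        const size_t size   = gguf_get_tensor_size  (ctx, i);
        const size_t offset = gguf_get_tensor_offset(ctx, i);
        const auto   type   = gguf_get_tensor_type  (ctx, i);

        // same computation as the patch: byte size divided by ggml_type_size(type)
        const size_t n_elements = size / ggml_type_size(type);

        printf("tensor[%lld]: name = %s, size = %zu, offset = %zu, type = %s, n_elts = %zu\n",
               (long long) i, name, size, offset, ggml_type_name(type), n_elements);
    }

    gguf_free(ctx);
    return 0;
}

One nuance worth noting: for block-quantized tensor types, ggml_type_size() returns the byte size of one block, so the size / type_size ratio (the same ratio the patched example prints as n_elts) is effectively a block count rather than a scalar element count for those types.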