gguf : start implementing quantization (WIP)

M. Yusuf Sarıgöz
2023-08-12 14:28:17 +03:00
parent c4f02b4f74
commit b2571af255
3 changed files with 14 additions and 6 deletions

@@ -738,15 +738,19 @@ struct gguf_file_saver {
         info_offset = file.tell();
         size_t count = gguf_get_data_offset(fl->gguf_ctx) - info_offset;
         file.write_zeros(count);
+        printf("info_offset = %zu\n", info_offset);
+        file.seek(info_offset, SEEK_SET);
+        GGML_ASSERT(info_offset == file.tell());
     }
 
     size_t write_tensor_info(llama_load_tensor & tensor) {
         size_t total_written = 0;
-        file.seek(0, info_offset);
+        file.seek(info_offset, SEEK_SET);
+        GGML_ASSERT(info_offset == file.tell());
         total_written += file.write_str(tensor.name);
         int32_t n_dims = tensor.ne.size();
-        file.write_i32(n_dims);
+        total_written += file.write_i32(n_dims);
         for (int32_t i = 0; i < n_dims; ++i) {
             total_written += file.write_i32(i);
         }
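
For context, the new lines follow a reserve-then-backfill pattern: the tensor-info region is first padded with zeros, its offset is remembered, and the writer later seeks back to fill in the real records, asserting that the file position matches. Below is a minimal, self-contained sketch of that pattern, assuming plain C stdio instead of the repo's llama_file wrapper; the file name, section size, and tensor name are made up for illustration and are not part of this commit.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

int main() {
    std::FILE * f = std::fopen("dummy.bin", "wb+");   // hypothetical output file
    assert(f != nullptr);

    // Reserve space for the tensor-info section and remember where it starts.
    long info_offset = std::ftell(f);
    std::vector<uint8_t> zeros(64, 0);                 // hypothetical section size
    std::fwrite(zeros.data(), 1, zeros.size(), f);
    std::printf("info_offset = %ld\n", info_offset);

    // ... tensor data would be written here ...

    // Seek back to the reserved region and write the real tensor info,
    // asserting the file position is where we expect it.
    std::fseek(f, info_offset, SEEK_SET);
    assert(std::ftell(f) == info_offset);

    std::string name = "tok_embeddings.weight";        // hypothetical tensor name
    int32_t  n_dims  = 2;
    uint32_t len     = (uint32_t) name.size();

    // Track how many bytes of tensor info were written, as write_tensor_info does.
    size_t total_written = 0;
    total_written += std::fwrite(&len, 1, sizeof(len), f);
    total_written += std::fwrite(name.data(), 1, name.size(), f);
    total_written += std::fwrite(&n_dims, 1, sizeof(n_dims), f);
    std::printf("wrote %zu bytes of tensor info\n", total_written);

    std::fclose(f);
    return 0;
}
```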