gguf : start implementing quantization (WIP)

M. Yusuf Sarıgöz
2023-08-12 10:40:56 +03:00
parent 186c496fdf
commit 4fa017a1f9
2 changed files with 20 additions and 7 deletions


@@ -614,6 +614,7 @@ struct ggml_context * ctx_data = NULL;
 struct gguf_file_saver {
     gguf_file file;
     gguf_file_loader * fl;
+    size_t info_offset;
     gguf_file_saver(const char * fname, gguf_file_loader * fl, enum llama_ftype new_ftype)
         : file(fname, "wb"), fl(fl) {
         fprintf(stderr, "llama.cpp: saving model to %s\n", fname);
@@ -734,6 +735,9 @@ struct gguf_file_saver {
             }
         }
+        info_offset = file.tell();
+        size_t count = gguf_get_data_offset(fl->gguf_ctx) - info_offset;
+        file.write_zeros(count);
     }
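
The second hunk records the current write position in info_offset and then zero-fills the file up to the original data offset, reserving a region the size of the existing tensor-info section, presumably so it can be overwritten once the quantized tensor metadata is known. The sketch below illustrates that reserve-then-backpatch pattern in plain C stdio rather than the gguf_file helpers from the diff; the helper names (write_placeholder, backpatch) and the file name are hypothetical, not part of the commit.

// Minimal sketch of reserving space in an output file and filling it in later.
// Uses plain C stdio; names here are illustrative, not llama.cpp APIs.
#include <cstdio>
#include <vector>

// Write `count` zero bytes at the current position and return the offset
// where the placeholder starts (analogous to info_offset in the diff).
static long write_placeholder(FILE * f, size_t count) {
    long offset = ftell(f);
    std::vector<char> zeros(count, 0);
    fwrite(zeros.data(), 1, zeros.size(), f);
    return offset;
}

// Seek back to the reserved region, overwrite it with the real bytes,
// then restore the file position to the end.
static void backpatch(FILE * f, long offset, const void * data, size_t size) {
    long end = ftell(f);
    fseek(f, offset, SEEK_SET);
    fwrite(data, 1, size, f);
    fseek(f, end, SEEK_SET);
}

int main() {
    FILE * f = fopen("out.bin", "wb");
    if (!f) return 1;

    long info_offset = write_placeholder(f, 16);    // reserve 16 bytes for metadata
    const char payload[] = "tensor-data";
    fwrite(payload, 1, sizeof(payload), f);         // data written after the placeholder

    const char info[16] = "info-goes-here";         // zero-padded to 16 bytes
    backpatch(f, info_offset, info, sizeof(info));  // fill the reserved region in place

    fclose(f);
    return 0;
}

Reserving exactly gguf_get_data_offset(fl->gguf_ctx) - info_offset bytes keeps the tensor data at the same offset as in the source file, so only the info section needs to be rewritten later.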