mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	gguf : add 64-bit support (GGUF v2) (#2821)
* gguf : bump version to 2 * gguf : add support for 64-bit (no backwards comp yet) * gguf : v1 backwards comp * gguf.py : bump GGUF version * gguf.py : uint64_t on all lengths, sizes and counts, enums still uint32_t * gguf.py : string lengths uint32_t * gguf : update all counts to 64-bit * gguf.py : string len uint64_t and n_dims uint32_t * gguf : fix typo * llama.cpp : print gguf version --------- Co-authored-by: klosax <131523366+klosax@users.noreply.github.com>
This commit is contained in:
		| @@ -1144,11 +1144,13 @@ static bool llama_kv_cache_init( | ||||
|  | ||||
| enum llama_fver { | ||||
|     GGUF_FILE_VERSION_V1 = 1, | ||||
|     GGUF_FILE_VERSION_V2 = 2, | ||||
| }; | ||||
|  | ||||
| static const char * llama_file_version_name(llama_fver version) { | ||||
|     switch (version) { | ||||
| -    case GGUF_FILE_VERSION_V1: return "GGUF V1 (latest)"; | ||||
| +    case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)"; | ||||
| +    case GGUF_FILE_VERSION_V2: return "GGUF V2 (latest)"; | ||||
|     } | ||||
|  | ||||
|     return "unknown"; | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Georgi Gerganov
					Georgi Gerganov