mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	llama : add option to override model tensor buffers (#11397)
* llama : add option to override tensor buffers
* ggml : fix possible underflow in ggml_nbytes
This commit is contained in:
		| @@ -527,7 +527,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: | ||||
|     } | ||||
|  | ||||
|     std::vector<std::string> splits = {}; | ||||
|     llama_model_loader ml(fname_inp, splits, use_mmap, /*check_tensors*/ true, kv_overrides); | ||||
|     llama_model_loader ml(fname_inp, splits, use_mmap, /*check_tensors*/ true, kv_overrides, nullptr); | ||||
|     ml.init_mappings(false); // no prefetching | ||||
|  | ||||
|     llama_model model(llama_model_default_params()); | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Diego Devesa
					Diego Devesa