Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-10-30 08:42:00 +00:00
stdout : vertical align outputs for better readability
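Two files change: the tensor-writing loop in the Python conversion script pads the progress index, tensor name, shape, and data type into fixed-width columns, and llama.cpp does the same for its quantization progress output.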
@@ -951,8 +951,9 @@ class OutputFile:
 
         ndarrays = bounded_parallel_map(do_item, model.items(), concurrency=8)
         for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)):
-            size = ' x '.join(map(str, lazy_tensor.shape))
-            print(f"[{i+1}/{len(model)}] Writing tensor {name}, size {size}...")
+            size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
+            padi = len(str(len(model)))
+            print(f"[{i+1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type}")
             of.write_tensor_header(name, lazy_tensor.shape, lazy_tensor.data_type)
             ndarray.tofile(of.fout)
         of.fout.close()
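As a quick illustration of the new format specifiers, here is a minimal standalone sketch; the tensor names, shapes, and the F16 type are made up for the example, while the real loop reads them from the model:

# Hedged sketch of the alignment technique used above; the tensor list is hypothetical.
tensors = {
    "tok_embeddings.weight":        (4096, 32000),
    "layers.0.attention.wq.weight": (4096, 4096),
    "norm.weight":                  (4096,),
}

padi = len(str(len(tensors)))  # width needed for the running index
for i, (name, shape) in enumerate(tensors.items()):
    # right-align every dimension to 6 characters so the shape columns line up
    size = ' x '.join(f"{dim:6d}" for dim in shape)
    print(f"[{i+1:{padi}d}/{len(tensors)}] Writing tensor {name:38s} | size {size:16} | type F16")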
							
								
								
									
llama.cpp
@@ -262,12 +262,12 @@ static size_t checked_div(size_t a, size_t b) {
 }
 
 static std::string llama_format_tensor_shape(const std::vector<uint32_t> & ne) {
-    std::string ret = "[" + std::to_string(ne.at(0));
+    char buf[256];
+    snprintf(buf, sizeof(buf), "%5u", ne.at(0));
     for (size_t i = 1; i < ne.size(); i++) {
-        ret += " x " + std::to_string(ne.at(i));
+        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i));
     }
-    ret += "]";
-    return ret;
+    return buf;
 }
 
 static size_t llama_calc_tensor_size(const std::vector<uint32_t> & ne, enum ggml_type type) {
@@ -1570,7 +1570,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         tensor.data = read_data.addr;
         model_loader->load_data_for(tensor);
 
-        printf("[%zu/%zu] %36s - %s, type = %6s, ",
+        printf("[%4zu/%4zu] %36s - %16s, type = %6s, ",
                ++idx, model_loader->tensors_map.tensors.size(),
                tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(),
               ggml_type_name(tensor.type));
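For reference, a self-contained sketch of the new snprintf-based formatting: the shape function mirrors the right-hand side of the first hunk, while the driver loop, the example shapes, and the name "example.weight" are hypothetical.

// Hedged, standalone sketch: the shape formatting mirrors the new
// llama_format_tensor_shape, but the driver data below is made up.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

static std::string format_tensor_shape(const std::vector<uint32_t> & ne) {
    char buf[256];
    // right-align each dimension to 5 characters so the "x" columns line up
    snprintf(buf, sizeof(buf), "%5u", ne.at(0));
    for (size_t i = 1; i < ne.size(); i++) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i));
    }
    return buf;
}

int main() {
    // hypothetical shapes; llama.cpp reads these from the model file
    std::vector<std::vector<uint32_t>> shapes = { {4096, 32000}, {4096, 4096}, {4096} };
    for (size_t idx = 0; idx < shapes.size(); idx++) {
        // %4zu pads the counter and %16s the shape column, as in the printf change above
        printf("[%4zu/%4zu] %36s - %16s, type = %6s\n",
               idx + 1, shapes.size(), "example.weight",
               format_tensor_shape(shapes[idx]).c_str(), "f16");
    }
    return 0;
}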
Georgi Gerganov