mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-30 08:42:00 +00:00)
py : removed unused `model` variable; verified the script still works correctly with the `vocab_only` setting, and with reduced memory usage now that the checkpoint is no longer loaded just to be deleted (#547)
This commit is contained in:
DooWoong Lee (David), committed by GitHub

parent 96f9c0506f
commit 692ce3164e
@@ -145,13 +145,11 @@ def main():
 
         print(f"Extracting only the vocab from '{fname_model}'\n")
 
-        model = torch.load(fname_model, map_location="cpu")
 
         with open(fname_out, "wb") as fout:
             write_header(fout, hparams, ftype)
             write_tokens(fout, tokenizer)
 
-        del model
 
         print(f"Done. Output file: {fname_out}\n")
 
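As context for the diff above, here is a minimal sketch of what the vocab-only path looks like after the change. The helper names (write_header, write_tokens) are taken from the diff; the extract_vocab wrapper and the stub bodies are assumptions for illustration, not the script's actual code.

    # Minimal sketch of the vocab-only export path after this commit.
    # write_header/write_tokens mirror the helper names visible in the diff;
    # their bodies here are placeholder stubs, not the script's real format.

    def write_header(fout, hparams, ftype):
        pass  # stub: the real helper serializes hparams and ftype

    def write_tokens(fout, tokenizer):
        pass  # stub: the real helper serializes the tokenizer vocabulary

    def extract_vocab(fname_model, fname_out, hparams, ftype, tokenizer):
        print(f"Extracting only the vocab from '{fname_model}'\n")

        # Before this commit, the full checkpoint was loaded here via
        # torch.load(fname_model, map_location="cpu") and later deleted
        # without ever being read. Dropping that load keeps peak memory
        # near the tokenizer's size instead of the whole checkpoint's.

        with open(fname_out, "wb") as fout:
            write_header(fout, hparams, ftype)
            write_tokens(fout, tokenizer)

        print(f"Done. Output file: {fname_out}\n")

The point of the change is simply that vocab extraction never reads the weights, so loading the checkpoint only inflated peak memory.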