The following request payload will reproduce the issue with llama-13b:
{
    'prompt': 'Q: hello world \nA: ',
    'stop': ['\n'],
    'temperature': 0.0,
    'n_predict': 10,
    'cache_prompt': True,
    'n_probs': 10
}
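
For reference, a minimal sketch of sending this payload to the llama.cpp server's `/completion` endpoint with Python. It assumes the server is already running locally with a 13B model loaded and listening on the default port 8080; the `completion_probabilities` field is where the server reports the per-token probabilities requested via `n_probs`.

```python
import requests

# The payload from the report above: greedy decoding (temperature 0.0),
# stop at the first newline, reuse the prompt KV cache, and ask for the
# top-10 token probabilities at each generated position.
payload = {
    'prompt': 'Q: hello world \nA: ',
    'stop': ['\n'],
    'temperature': 0.0,
    'n_predict': 10,
    'cache_prompt': True,
    'n_probs': 10,
}

# Assumes a llama.cpp server on the default local address/port.
resp = requests.post('http://localhost:8080/completion', json=payload)
resp.raise_for_status()
data = resp.json()

print(data['content'])

# Inspect the per-token probability data returned because n_probs > 0;
# comparing this output across repeated requests (with cache_prompt on)
# is how the discrepancy can be observed.
for tok in data.get('completion_probabilities', []):
    print(tok)
```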