Mirror of https://github.com/ggml-org/llama.cpp.git
Temporarily bump the memory buffer size - hopefully fix issues from 483bab2e
@@ -632,7 +632,7 @@ static bool llama_eval_internal(
     auto & mem_per_token = lctx.mem_per_token;
 
     // TODO: fix this hardcoded size
-    static size_t buf_size = 512u*1024*1024;
+    static size_t buf_size = 2048u*1024*1024; // TMP !!!
     static void * buf = malloc(buf_size);
 
     if (mem_per_token > 0 && mem_per_token*N > buf_size) {
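The change only enlarges the hardcoded scratch buffer from 512 MiB to 2 GiB; the guard below it still merely detects when mem_per_token*N outgrows buf_size rather than recovering from it. As a minimal sketch of how the TODO might eventually be resolved, the hypothetical helper below (ensure_eval_buffer is not part of llama.cpp) grows the buffer on demand instead of relying on a fixed size:

#include <cstdio>
#include <cstdlib>

// Sketch only, not the upstream implementation.
static size_t buf_size = 2048u*1024*1024; // matches the bumped default above
static void * buf      = nullptr;

// Make sure the scratch buffer can hold the working memory for N tokens,
// growing it if necessary. Returns false on allocation failure.
static bool ensure_eval_buffer(size_t mem_per_token, size_t N) {
    const size_t needed = mem_per_token*N;
    if (mem_per_token > 0 && needed > buf_size) {
        // realloc(nullptr, n) behaves like malloc(n), so this also covers
        // the very first call when the estimate is already available.
        void * new_buf = realloc(buf, needed);
        if (new_buf == nullptr) {
            fprintf(stderr, "%s: failed to grow buffer to %zu bytes\n", __func__, needed);
            return false;
        }
        buf      = new_buf;
        buf_size = needed;
    } else if (buf == nullptr) {
        // First use, before mem_per_token is known: allocate the default size.
        buf = malloc(buf_size);
        if (buf == nullptr) {
            return false;
        }
    }
    return true;
}

Called at the top of llama_eval_internal, something along these lines would replace both the static initialization and the hard failure path taken when mem_per_token*N exceeds the buffer.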
Author: Georgi Gerganov