Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-30 08:42:00 +00:00
	llama : fix unused warning
This commit is contained in:
		| @@ -1053,6 +1053,8 @@ static void llama_model_load_internal( | |||||||
|  |  | ||||||
|         fprintf(stderr, "%s: [cublas] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024); |         fprintf(stderr, "%s: [cublas] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024); | ||||||
|     } |     } | ||||||
|  | #else | ||||||
|  |     (void) n_gpu_layers; | ||||||
| #endif | #endif | ||||||
|  |  | ||||||
|     // loading time will be recalculate after the first eval, so |     // loading time will be recalculate after the first eval, so | ||||||
|   | |||||||
		Reference in New Issue
	
	Block a user
Georgi Gerganov