Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-31 08:51:55 +00:00)

llama : support WinXP build with MinGW 8.1.0 (#3419)
This commit is contained in this mirror. Authored by Karthik Kumar Viswanathan; committed by GitHub.
parent 147b17ac94
commit ac32902a87
			| @@ -987,6 +987,7 @@ struct llama_mmap { | ||||
|         } | ||||
|  | ||||
|         if (prefetch > 0) { | ||||
| #if _WIN32_WINNT >= 0x602 | ||||
|             // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it | ||||
|             BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG); | ||||
|             HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll"); | ||||
| @@ -1004,6 +1005,9 @@ struct llama_mmap { | ||||
|                             llama_format_win_err(GetLastError()).c_str()); | ||||
|                 } | ||||
|             } | ||||
| #else | ||||
|             throw std::runtime_error("PrefetchVirtualMemory unavailable"); | ||||
| #endif | ||||
|         } | ||||
|     } | ||||
|  | ||||
|   | ||||
Reference in New Issue · Block a user