Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-10-31 08:51:55 +00:00
	llama : update stubs for systems without mmap and mlock (#1266)
Co-authored-by: John Doe <john.doe@example.com>
llama-util.h | 10
@@ -243,7 +243,8 @@ struct llama_mmap {
 #else
     static constexpr bool SUPPORTED = false;
 
-    llama_mmap(struct llama_file *) {
+    llama_mmap(struct llama_file *, bool prefetch = true) {
+        (void)prefetch;
         throw std::string("mmap not supported");
     }
 #endif
@@ -382,8 +383,13 @@ struct llama_mlock {
 #else
     static constexpr bool SUPPORTED = false;
 
-    void raw_lock(const void * addr, size_t size) {
+    size_t lock_granularity() {
+        return (size_t) 65536;
+    }
+
+    bool raw_lock(const void * addr, size_t size) {
         fprintf(stderr, "warning: mlock not supported on this system\n");
+        return false;
     }
 
     void raw_unlock(const void * addr, size_t size) {}
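For context, a minimal sketch of how calling code might interact with these stubs on a system where mmap and mlock are unavailable. The standalone main(), the simplified struct bodies, and the fallback messages are assumptions for illustration, not part of this commit; only the stub signatures mirror the diff above.

    // Minimal sketch (assumption: not part of this commit) of the stubbed
    // llama_mmap / llama_mlock interfaces and how a caller might degrade
    // gracefully when neither mmap nor mlock is available.
    #include <cstddef>
    #include <cstdio>
    #include <string>

    struct llama_file;  // forward declaration, assumed here for illustration

    struct llama_mmap {
        static constexpr bool SUPPORTED = false;

        // Matches the updated stub: accepts (and ignores) the prefetch flag
        // so callers can pass it unconditionally, then throws as before.
        llama_mmap(struct llama_file *, bool prefetch = true) {
            (void) prefetch;
            throw std::string("mmap not supported");
        }
    };

    struct llama_mlock {
        static constexpr bool SUPPORTED = false;

        size_t lock_granularity() {
            return (size_t) 65536;
        }

        // Returns false instead of void so callers can detect the failure.
        bool raw_lock(const void *, size_t) {
            fprintf(stderr, "warning: mlock not supported on this system\n");
            return false;
        }

        void raw_unlock(const void *, size_t) {}
    };

    int main() {
        // Checking SUPPORTED avoids constructing llama_mmap, which would throw.
        if (!llama_mmap::SUPPORTED) {
            fprintf(stderr, "mmap unavailable, using buffered file reads instead\n");
        }

        llama_mlock lock;
        if (!lock.raw_lock(nullptr, lock.lock_granularity())) {
            // Continue without locked pages rather than aborting.
        }
        return 0;
    }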
xloem