Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-31 08:51:55 +00:00
			
		
		
		
	vulkan: properly initialize vulkan devices for LLAMA_SPLIT_MODE_NONE (#7552)
This commit is contained in:
@@ -6012,6 +6012,8 @@ static ggml_backend_buffer_type_i ggml_backend_vk_buffer_type_interface = {
 };
 
 GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num) {
+    ggml_vk_instance_init();
+
 #ifdef GGML_VULKAN_DEBUG
     std::cerr << "ggml_backend_vk_buffer_type(" << dev_num << ")" << std::endl;
 #endif
		Reference in New Issue
	
	Block a user
k.h.lai