Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-30 08:42:00 +00:00

Commit: vulkan: lock accesses of pinned_memory vector (#14333)
This commit is contained in:
@@ -305,7 +305,7 @@ static vk_device_architecture get_device_architecture(const vk::PhysicalDevice&
 }
 
 struct vk_device_struct {
-    std::mutex mutex;
+    std::recursive_mutex mutex;
 
     vk::PhysicalDevice physical_device;
     vk::PhysicalDeviceProperties properties;
@@ -1197,7 +1197,7 @@ static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipelin
     }
 
     {
-        std::lock_guard<std::mutex> guard(device->mutex);
+        std::lock_guard<std::recursive_mutex> guard(device->mutex);
         device->pipelines.insert({ pipeline->name, pipeline });
     }
 
@@ -1411,7 +1411,7 @@ static uint32_t ggml_vk_find_queue_family_index(std::vector<vk::QueueFamilyPrope
 
 static void ggml_vk_create_queue(vk_device& device, vk_queue& q, uint32_t queue_family_index, uint32_t queue_index, vk::PipelineStageFlags&& stage_flags, bool transfer_only) {
     VK_LOG_DEBUG("ggml_vk_create_queue()");
-    std::lock_guard<std::mutex> guard(device->mutex);
+    std::lock_guard<std::recursive_mutex> guard(device->mutex);
 
     q.queue_family_index = queue_family_index;
     q.transfer_only = transfer_only;
@@ -4124,6 +4124,7 @@ static void * ggml_vk_host_malloc(vk_device& device, size_t size) {
         return nullptr;
     }
 
+    std::lock_guard<std::recursive_mutex> guard(device->mutex);
     device->pinned_memory.push_back(std::make_tuple(buf->ptr, size, buf));
 
     return buf->ptr;
@@ -4134,6 +4135,8 @@ static void ggml_vk_host_free(vk_device& device, void* ptr) {
         return;
     }
     VK_LOG_MEMORY("ggml_vk_host_free(" << ptr << ")");
+    std::lock_guard<std::recursive_mutex> guard(device->mutex);
+
     vk_buffer buf;
     size_t index;
     for (size_t i = 0; i < device->pinned_memory.size(); i++) {
@@ -4156,6 +4159,7 @@ static void ggml_vk_host_free(vk_device& device, void* ptr) {
 }
 
 static void ggml_vk_host_get(vk_device& device, const void * ptr, vk_buffer& buf, size_t& buf_offset) {
+    std::lock_guard<std::recursive_mutex> guard(device->mutex);
     buf = nullptr;
     buf_offset = 0;
     for (size_t i = 0; i < device->pinned_memory.size(); i++) {
@@ -4457,7 +4461,7 @@ static void ggml_vk_buffer_write_2d(vk_buffer& dst, size_t offset, const void *
             memcpy((uint8_t *)dst->ptr + offset + i * width, (const uint8_t *) src + i * spitch, width);
         }
     } else {
-        std::lock_guard<std::mutex> guard(dst->device->mutex);
+        std::lock_guard<std::recursive_mutex> guard(dst->device->mutex);
 
         vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue.cmd_pool);
         ggml_vk_ctx_begin(dst->device, subctx);
@@ -4548,7 +4552,7 @@ static void ggml_vk_buffer_read(vk_buffer& src, size_t offset, void * dst, size_
 
         memcpy(dst, (uint8_t *) src->ptr + offset, size);
     } else {
-        std::lock_guard<std::mutex> guard(src->device->mutex);
+        std::lock_guard<std::recursive_mutex> guard(src->device->mutex);
 
         vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue.cmd_pool);
         ggml_vk_ctx_begin(src->device, subctx);
@@ -4578,7 +4582,7 @@ static void ggml_vk_buffer_copy_async(vk_context& ctx, vk_buffer& dst, size_t ds
 
 static void ggml_vk_buffer_copy(vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) {
     if (src->device == dst->device) {
-        std::lock_guard<std::mutex> guard(src->device->mutex);
+        std::lock_guard<std::recursive_mutex> guard(src->device->mutex);
         VK_LOG_DEBUG("ggml_vk_buffer_copy(SINGLE_DEVICE, " << size << ")");
         // Copy within the device
         vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue.cmd_pool);
@@ -4613,7 +4617,7 @@ static void ggml_vk_buffer_memset_async(vk_context& ctx, vk_buffer& dst, size_t
 static void ggml_vk_buffer_memset(vk_buffer& dst, size_t offset, uint32_t c, size_t size) {
     VK_LOG_DEBUG("ggml_vk_buffer_memset(" << offset << ", " << c << ", " << size << ")");
 
-    std::lock_guard<std::mutex> guard(dst->device->mutex);
+    std::lock_guard<std::recursive_mutex> guard(dst->device->mutex);
     vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue.cmd_pool);
     ggml_vk_ctx_begin(dst->device, subctx);
     subctx->s->buffer.fillBuffer(dst->buffer, offset, size, c);
Reference in New Issue
Block a user
Author: Jeff Bolz