Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-31 08:51:55 +00:00
			
		
		
		
	ggml-backend : only offload from host buffers (#11120)
This commit is contained in:
@@ -761,7 +761,7 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
         }
         // skip ROPE since the rope freqs tensor is too small to choose a backend based on it
         // not an ideal solution
-        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
+        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS && ggml_backend_buffer_is_host(src->buffer)) {
             int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
             // check if a backend with higher prio wants to offload the op
             if (src_backend_id == sched->n_backends - 1) {
		Reference in New Issue
	
	Block a user
	 Diego Devesa
					Diego Devesa