mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-30 08:42:00 +00:00 
			
		
		
		
	RoPE: fix back, CUDA support for back + noncont. (#11240)
* RoPE: fix back, CUDA support for back + noncont.
* fix comments reg. non-cont. RoPE support [no-ci]
This commit is contained in:
@@ -403,8 +403,6 @@ static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const st
                 op->type != GGML_TYPE_IQ1_M; // missing type_traits.from_float
         case GGML_OP_MUL_MAT:
             return src1->type == GGML_TYPE_F32 || src1->type == ggml_get_type_traits_cpu(src0->type)->vec_dot_type;
-        case GGML_OP_ROPE_BACK:
-            return op->src[2] == NULL && (op->op_params[2] & 4) == 0;
         case GGML_OP_IM2COL_BACK:
             return src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32;
         case GGML_OP_OUT_PROD:
		Reference in New Issue
	
	Block a user
	 Johannes Gäßler
					Johannes Gäßler