	cpu: de-duplicate some of the operators and refactor (ggml/1144)
* cpu: de-duplicate some of the operators and refactor
* Fix PR comments
* Fix PR comments
@@ -23,6 +23,11 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
         ggml-cpu/amx/mmq.cpp
         ggml-cpu/amx/mmq.h
         ggml-cpu/ggml-cpu-impl.h
+        ggml-cpu/common.h
+        ggml-cpu/binary-ops.h
+        ggml-cpu/binary-ops.cpp
+        ggml-cpu/unary-ops.h
+        ggml-cpu/unary-ops.cpp
         )
 
     target_compile_features(${GGML_CPU_NAME} PRIVATE c_std_11 cxx_std_17)
ggml/src/ggml-cpu/binary-ops.cpp (new file, 158 lines)
@@ -0,0 +1,158 @@
#include "binary-ops.h"

#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>

using vDSP_fn_t = void (*)(const float *, vDSP_Stride, const float *, vDSP_Stride, float *, vDSP_Stride, vDSP_Length);
#endif

static inline float op_add(float a, float b) {
    return a + b;
}

static inline float op_sub(float a, float b) {
    return a - b;
}

static inline float op_mul(float a, float b) {
    return a * b;
}

static inline float op_div(float a, float b) {
    return a / b;
}

template <float (*op)(float, float), typename src0_t, typename src1_t, typename dst_t>
static inline void vec_binary_op_contiguous(const int64_t n, dst_t * z, const src0_t * x, const src1_t * y) {
    constexpr auto src0_to_f32 = type_conversion_table<src0_t>::to_f32;
    constexpr auto src1_to_f32 = type_conversion_table<src1_t>::to_f32;
    constexpr auto f32_to_dst  = type_conversion_table<dst_t >::from_f32;

    for (int i = 0; i < n; i++) {
        z[i] = f32_to_dst(op(src0_to_f32(x[i]), src1_to_f32(y[i])));
    }
}

template <float (*op)(float, float), typename src0_t, typename src1_t, typename dst_t>
static inline void vec_binary_op_non_contiguous(const int64_t n, const int64_t ne10, const int64_t nb10, dst_t * z, const src0_t * x, const src1_t * y) {
    constexpr auto src0_to_f32 = type_conversion_table<src0_t>::to_f32;
    constexpr auto src1_to_f32 = type_conversion_table<src1_t>::to_f32;
    constexpr auto f32_to_dst  = type_conversion_table<dst_t >::from_f32;

    for (int i = 0; i < n; i++) {
        int i10 = i % ne10;
        const src1_t * y_ptr = (const src1_t *)((const char *)y + i10*nb10);
        z[i] = f32_to_dst(op(src0_to_f32(x[i]), src1_to_f32(*y_ptr)));
    }
}

template <float (*op)(float, float), typename src0_t, typename src1_t, typename dst_t>
static void apply_binary_op(const ggml_compute_params * params, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(dst_t));
    GGML_ASSERT(nb00 == sizeof(src0_t));

    const auto [ir0, ir1] = get_thread_range(params, src0);
    const bool is_src1_contiguous = (nb10 == sizeof(src1_t));

    if (!is_src1_contiguous) { // broadcast not implemented yet for non-contiguous
        GGML_ASSERT(ggml_are_same_shape(src0, src1));
    }

#ifdef GGML_USE_ACCELERATE
    vDSP_fn_t vDSP_op = nullptr;
    // TODO - avoid the f32-only check using type 'trait' lookup tables and row-based src-to-float conversion functions
    if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
        if (op == op_add) {
            vDSP_op = vDSP_vadd;
        } else if (op == op_sub) {
            vDSP_op = vDSP_vsub;
        } else if (op == op_mul) {
            vDSP_op = vDSP_vmul;
        } else if (op == op_div) {
            vDSP_op = vDSP_vdiv;
        }
    }
#endif

    for (int64_t ir = ir0; ir < ir1; ++ir) {
        const int64_t i03 = ir/(ne02*ne01);
        const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
        const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

        const int64_t i13 = i03 % ne13;
        const int64_t i12 = i02 % ne12;
        const int64_t i11 = i01 % ne11;

        dst_t        * dst_ptr  = (dst_t  *)       ((char *)       dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
        const src0_t * src0_ptr = (const src0_t *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
        const src1_t * src1_ptr = (const src1_t *) ((const char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);

        if (is_src1_contiguous) {
            // src1 is broadcastable across src0 and dst in i1, i2, i3
            const int64_t nr0 = ne00 / ne10;

            for (int64_t r = 0; r < nr0; ++r) {
#ifdef GGML_USE_ACCELERATE
                if constexpr (std::is_same_v<src0_t, float> && std::is_same_v<src1_t, float> && std::is_same_v<dst_t, float>) {
                    if (vDSP_op != nullptr) {
                        vDSP_op(src1_ptr, 1, src0_ptr + r*ne10, 1, dst_ptr + r*ne10, 1, ne10);
                        continue;
                    }
                }
#endif
                vec_binary_op_contiguous<op>(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
            }
        } else {
            vec_binary_op_non_contiguous<op>(ne0, ne10, nb10, dst_ptr, src0_ptr, src1_ptr);
        }
    }
}

// TODO: Use the 'traits' lookup table (for type conversion fns), instead of a mass of 'if' conditions with long templates
template <float (*op)(float, float)>
static void binary_op(const ggml_compute_params * params, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    /*  */ if (src0->type == GGML_TYPE_F32  && src1->type == GGML_TYPE_F32  && dst->type == GGML_TYPE_F32) { // all f32
        apply_binary_op<op, float, float, float>(params, dst);
    } else if (src0->type == GGML_TYPE_F16  && src1->type == GGML_TYPE_F16  && dst->type == GGML_TYPE_F16) { // all f16
        apply_binary_op<op, ggml_fp16_t, ggml_fp16_t, ggml_fp16_t>(params, dst);
    } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_BF16) { // all bf16
        apply_binary_op<op, ggml_bf16_t, ggml_bf16_t, ggml_bf16_t>(params, dst);
    } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_F32  && dst->type == GGML_TYPE_BF16) {
        apply_binary_op<op, ggml_bf16_t, float, ggml_bf16_t>(params, dst);
    } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_F32  && dst->type == GGML_TYPE_F32) {
        apply_binary_op<op, ggml_bf16_t, float, float>(params, dst);
    } else if (src0->type == GGML_TYPE_F16  && src1->type == GGML_TYPE_F32  && dst->type == GGML_TYPE_F16) {
        apply_binary_op<op, ggml_fp16_t, float, ggml_fp16_t>(params, dst);
    } else if (src0->type == GGML_TYPE_F16  && src1->type == GGML_TYPE_F32  && dst->type == GGML_TYPE_F32) {
        apply_binary_op<op, ggml_fp16_t, float, float>(params, dst);
    } else {
        GGML_ABORT("%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__,
            ggml_type_name(dst->type), ggml_type_name(src0->type), ggml_type_name(src1->type));
    }
}

void ggml_compute_forward_add_non_quantized(const ggml_compute_params * params, ggml_tensor * dst) {
    binary_op<op_add>(params, dst);
}

void ggml_compute_forward_sub(const ggml_compute_params * params, ggml_tensor * dst) {
    binary_op<op_sub>(params, dst);
}

void ggml_compute_forward_mul(const ggml_compute_params * params, ggml_tensor * dst) {
    binary_op<op_mul>(params, dst);
}

void ggml_compute_forward_div(const ggml_compute_params * params, ggml_tensor * dst) {
    binary_op<op_div>(params, dst);
}
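A note on the contiguous fast path above: ggml_can_repeat allows src1 to be shorter than src0 along dim 0, in which case the same chunk of ne10 src1 values is re-applied nr0 = ne00/ne10 times per row. A minimal, self-contained float-only sketch of that indexing follows; add_row_broadcast is an illustrative name, not a ggml function.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Sketch of the row-repeat broadcast used by apply_binary_op's contiguous
    // path: src1 holds ne10 values that repeat nr0 = ne00/ne10 times across
    // one row of src0/dst.
    static void add_row_broadcast(int64_t ne00, int64_t ne10,
                                  float * dst, const float * src0, const float * src1) {
        assert(ne00 % ne10 == 0);
        const int64_t nr0 = ne00 / ne10;
        for (int64_t r = 0; r < nr0; ++r) {
            for (int64_t i = 0; i < ne10; ++i) {
                dst[r*ne10 + i] = src0[r*ne10 + i] + src1[i];
            }
        }
    }

    int main() {
        const float src0[6] = {1, 2, 3, 4, 5, 6};
        const float src1[3] = {10, 20, 30};    // broadcast twice across the row
        float dst[6];
        add_row_broadcast(6, 3, dst, src0, src1);
        for (float v : dst) printf("%g ", v);  // prints: 11 22 33 14 25 36
        printf("\n");
        return 0;
    }

The Accelerate fast path sits inside the same r loop for the same reason: one vDSP call per repeated chunk of ne10 elements.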
ggml/src/ggml-cpu/binary-ops.h (new file, 16 lines)
@@ -0,0 +1,16 @@
#pragma once

#include "common.h"

#ifdef __cplusplus
extern "C" {
#endif

void ggml_compute_forward_add_non_quantized(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_sub(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_mul(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_div(const struct ggml_compute_params * params, struct ggml_tensor * dst);

#ifdef __cplusplus
}
#endif
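These extern "C" declarations replace per-op code that previously lived in the CPU backend's main source file (whose diff is suppressed below as too large). A hedged sketch of how a caller might route ops to them; dispatch_binary is a hypothetical name, not the actual wiring. GGML_OP_ADD is left out here because ggml_compute_forward_add_non_quantized, per its name, covers only non-quantized inputs, so quantized add keeps a separate path.

    #include "binary-ops.h"

    // Hypothetical dispatcher (illustrative only; the real call sites are in
    // the suppressed diff below): route tensor ops to the shared kernels.
    static void dispatch_binary(const struct ggml_compute_params * params, struct ggml_tensor * dst) {
        switch (dst->op) {
            case GGML_OP_SUB: ggml_compute_forward_sub(params, dst); break;
            case GGML_OP_MUL: ggml_compute_forward_mul(params, dst); break;
            case GGML_OP_DIV: ggml_compute_forward_div(params, dst); break;
            default:          GGML_ABORT("unexpected op %d", (int) dst->op);
        }
    }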
ggml/src/ggml-cpu/common.h (new file, 72 lines)
@@ -0,0 +1,72 @@
#pragma once

#include "ggml.h"
#include "ggml-cpu-traits.h"
#include "ggml-cpu-impl.h"
#include "ggml-impl.h"

#ifdef __cplusplus

#include <utility>

// convenience functions/macros for use in template calls
// note: these won't be required after the 'traits' lookup table is used.
static inline ggml_fp16_t f32_to_f16(float x) {
    return GGML_FP32_TO_FP16(x);
}

static inline float f16_to_f32(ggml_fp16_t x) {
    return GGML_FP16_TO_FP32(x);
}

static inline ggml_bf16_t f32_to_bf16(float x) {
    return GGML_FP32_TO_BF16(x);
}

static inline float bf16_to_f32(ggml_bf16_t x) {
    return GGML_BF16_TO_FP32(x);
}

static inline float f32_to_f32(float x) {
    return x;
}

// TODO - merge this into the traits table, after using row-based conversions
template <class T>
struct type_conversion_table;

template <>
struct type_conversion_table<ggml_fp16_t> {
    static constexpr float (*to_f32)(ggml_fp16_t) = f16_to_f32;
    static constexpr ggml_fp16_t (*from_f32)(float) = f32_to_f16;
};

template <>
struct type_conversion_table<float> {
    static constexpr float (*to_f32)(float) = f32_to_f32;
    static constexpr float (*from_f32)(float) = f32_to_f32;
};

template <>
struct type_conversion_table<ggml_bf16_t> {
    static constexpr float (*to_f32)(ggml_bf16_t) = bf16_to_f32;
    static constexpr ggml_bf16_t (*from_f32)(float) = f32_to_bf16;
};

static std::pair<int64_t, int64_t> get_thread_range(const struct ggml_compute_params * params, const struct ggml_tensor * src0) {
    const int64_t ith = params->ith;
    const int64_t nth = params->nth;

    const int64_t nr  = ggml_nrows(src0);

    // rows per thread
    const int64_t dr = (nr + nth - 1)/nth;

    // row range for this thread
    const int64_t ir0 = dr*ith;
    const int64_t ir1 = MIN(ir0 + dr, nr);

    return {ir0, ir1};
}

#endif
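Two things in common.h do the heavy lifting: the type_conversion_table specializations give each scalar type a pair of constexpr to/from-f32 conversion pointers that templates resolve at compile time, and get_thread_range is a plain ceil-divide split of rows across threads, with the last thread taking the remainder. A standalone sketch of the latter with a worked example; thread_range is an illustrative restatement, not the ggml helper:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <utility>

    // Same arithmetic as get_thread_range: dr = ceil(nr/nth) rows per thread,
    // with the upper bound clamped at nr. With nr = 10 rows and nth = 4
    // threads, dr = 3 and the ranges are [0,3), [3,6), [6,9), [9,10).
    static std::pair<int64_t, int64_t> thread_range(int64_t ith, int64_t nth, int64_t nr) {
        const int64_t dr  = (nr + nth - 1)/nth;
        const int64_t ir0 = dr*ith;
        const int64_t ir1 = std::min(ir0 + dr, nr);
        return {ir0, ir1};
    }

    int main() {
        for (int64_t ith = 0; ith < 4; ++ith) {
            const auto [ir0, ir1] = thread_range(ith, 4, 10);
            printf("thread %lld: rows [%lld, %lld)\n",
                   (long long) ith, (long long) ir0, (long long) ir1);
        }
        return 0;
    }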
(File diff suppressed because it is too large.)
ggml/src/ggml-cpu/unary-ops.cpp (new file, 186 lines)
@@ -0,0 +1,186 @@
#include "unary-ops.h"

static inline float op_abs(float x) {
    return fabsf(x);
}

static inline float op_sgn(float x) {
    return (x > 0.f) ? 1.f : ((x < 0.f) ? -1.f : 0.f);
}

static inline float op_neg(float x) {
    return -x;
}

static inline float op_step(float x) {
    return (x > 0.f) ? 1.f : 0.f;
}

static inline float op_tanh(float x) {
    return tanhf(x);
}

static inline float op_elu(float x) {
    return (x > 0.f) ? x : expm1f(x);
}

static inline float op_relu(float x) {
    return (x > 0.f) ? x : 0.f;
}

static inline float op_sigmoid(float x) {
    return 1.f / (1.f + expf(-x));
}

static inline float op_hardsigmoid(float x) {
    return fminf(1.0f, fmaxf(0.0f, (x + 3.0f) / 6.0f));
}

static inline float op_exp(float x) {
    return expf(x);
}

static inline float op_hardswish(float x) {
    return x * fminf(1.0f, fmaxf(0.0f, (x + 3.0f) / 6.0f));
}

static inline float op_sqr(float x) {
    return x * x;
}

static inline float op_sqrt(float x) {
    return sqrtf(x);
}

static inline float op_sin(float x) {
    return sinf(x);
}

static inline float op_cos(float x) {
    return cosf(x);
}

static inline float op_log(float x) {
    return logf(x);
}

template <float (*op)(float), typename src0_t, typename dst_t>
static inline void vec_unary_op(int64_t n, dst_t * y, const src0_t * x) {
    constexpr auto src0_to_f32 = type_conversion_table<src0_t>::to_f32;
    constexpr auto f32_to_dst  = type_conversion_table<dst_t >::from_f32;

    for (int i = 0; i < n; i++) {
        y[i] = f32_to_dst(op(src0_to_f32(x[i])));
    }
}

template <float (*op)(float), typename src0_t, typename dst_t>
static void apply_unary_op(const ggml_compute_params * params, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(ggml_is_contiguous_1(src0) && ggml_is_contiguous_1(dst) && ggml_are_same_shape(src0, dst));

    GGML_TENSOR_UNARY_OP_LOCALS

    GGML_ASSERT( nb0 == sizeof(dst_t));
    GGML_ASSERT(nb00 == sizeof(src0_t));

    const auto [ir0, ir1] = get_thread_range(params, src0);

    for (int64_t ir = ir0; ir < ir1; ++ir) {
        const int64_t i03 = ir/(ne02*ne01);
        const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
        const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

        dst_t        * dst_ptr  = (dst_t  *)       ((char *)       dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
        const src0_t * src0_ptr = (const src0_t *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);

        vec_unary_op<op>(ne0, dst_ptr, src0_ptr);
    }
}

// TODO: Use the 'traits' lookup table (for type conversion fns), instead of a mass of 'if' conditions with long templates
template <float (*op)(float)>
static void unary_op(const ggml_compute_params * params, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];

    /*  */ if (src0->type == GGML_TYPE_F32  && dst->type == GGML_TYPE_F32) { // all f32
        apply_unary_op<op, float, float>(params, dst);
    } else if (src0->type == GGML_TYPE_F16  && dst->type == GGML_TYPE_F16) { // all f16
        apply_unary_op<op, ggml_fp16_t, ggml_fp16_t>(params, dst);
    } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_BF16) { // all bf16
        apply_unary_op<op, ggml_bf16_t, ggml_bf16_t>(params, dst);
    } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_F32) {
        apply_unary_op<op, ggml_bf16_t, float>(params, dst);
    } else if (src0->type == GGML_TYPE_F16  && dst->type == GGML_TYPE_F32) {
        apply_unary_op<op, ggml_fp16_t, float>(params, dst);
    } else {
        fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s\n", __func__,
            ggml_type_name(dst->type), ggml_type_name(src0->type));
        GGML_ABORT("fatal error");
    }
}

void ggml_compute_forward_abs(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_abs>(params, dst);
}

void ggml_compute_forward_sgn(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_sgn>(params, dst);
}

void ggml_compute_forward_neg(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_neg>(params, dst);
}

void ggml_compute_forward_step(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_step>(params, dst);
}

void ggml_compute_forward_tanh(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_tanh>(params, dst);
}

void ggml_compute_forward_elu(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_elu>(params, dst);
}

void ggml_compute_forward_relu(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_relu>(params, dst);
}

void ggml_compute_forward_sigmoid(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_sigmoid>(params, dst);
}

void ggml_compute_forward_hardsigmoid(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_hardsigmoid>(params, dst);
}

void ggml_compute_forward_exp(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_exp>(params, dst);
}

void ggml_compute_forward_hardswish(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_hardswish>(params, dst);
}

void ggml_compute_forward_sqr(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_sqr>(params, dst);
}

void ggml_compute_forward_sqrt(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_sqrt>(params, dst);
}

void ggml_compute_forward_sin(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_sin>(params, dst);
}

void ggml_compute_forward_cos(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_cos>(params, dst);
}

void ggml_compute_forward_log(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_log>(params, dst);
}
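Each ggml_compute_forward_* wrapper above is the same two-line pattern: the scalar op is passed as a non-type template argument, so the compiler stamps out one specialized loop per op with the call inlined, rather than dispatching through a runtime function pointer. A minimal float-only sketch of the pattern, with illustrative names (vec_op, my_relu, my_sqr are not ggml identifiers):

    #include <cstdint>
    #include <cstdio>

    // The scalar op is a compile-time template argument, so each instantiation
    // of vec_op gets its own inner loop with the op inlined.
    template <float (*op)(float)>
    static void vec_op(int64_t n, float * y, const float * x) {
        for (int64_t i = 0; i < n; i++) {
            y[i] = op(x[i]);
        }
    }

    static float my_relu(float x) { return x > 0.f ? x : 0.f; }
    static float my_sqr (float x) { return x * x; }

    int main() {
        const float x[4] = {-2.f, -0.5f, 0.5f, 2.f};
        float y[4];
        vec_op<my_relu>(4, y, x);
        printf("relu: %g %g %g %g\n", y[0], y[1], y[2], y[3]); // 0 0 0.5 2
        vec_op<my_sqr>(4, y, x);
        printf("sqr:  %g %g %g %g\n", y[0], y[1], y[2], y[3]); // 4 0.25 0.25 4
        return 0;
    }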
ggml/src/ggml-cpu/unary-ops.h (new file, 28 lines)
@@ -0,0 +1,28 @@
#pragma once

#include "common.h"

#ifdef __cplusplus
extern "C" {
#endif

void ggml_compute_forward_abs(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_sgn(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_neg(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_step(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_tanh(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_elu(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_relu(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_sigmoid(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_hardsigmoid(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_exp(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_hardswish(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_sqr(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_sqrt(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_sin(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_cos(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_log(const struct ggml_compute_params * params, struct ggml_tensor * dst);

#ifdef __cplusplus
}
#endif
Author: cmdr2