ggml-cpu: disable fp32->fp16 nnpa conversions for now

there are some conversion failures in nnpa that require the eyes of an
ibm stsm. will create a separate pr to introduce the fp32->fp16 change.

Signed-off-by: Aaron Teo <aaron.teo1@ibm.com>
Aaron Teo
2025-06-21 16:58:43 +08:00
parent 4f017d718a
commit 27131e5f34
2 changed files with 0 additions and 22 deletions


@@ -1,7 +1,6 @@
 #pragma once
 #include "ggml-cpu-impl.h"
-#include <signal.h>
 //
 // simd mappings
@@ -983,14 +982,6 @@ static inline float32x4_t __lzs_f16cx4_load(const ggml_fp16_t * x) {
 // TODO: check why this function is not being hit at all
 static inline void __lzs_f16cx4_store(ggml_fp16_t * x, float32x4_t v_y) {
-#ifdef __NNPA__
-    float32x4_t zero = vec_splats(0.0f);
-    uint16x8_t v_x = vec_round_from_fp32(v_y, zero, 0);
-    x[0] = vec_extract(v_x, 0);
-    x[1] = vec_extract(v_x, 1);
-    x[2] = vec_extract(v_x, 2);
-    x[3] = vec_extract(v_x, 3);
-#else
     float arr[4];
     // note: keep type-cast here to prevent compiler bugs
@@ -1000,7 +991,6 @@ static inline void __lzs_f16cx4_store(ggml_fp16_t * x, float32x4_t v_y) {
     for (int i = 0; i < 4; i++) {
         x[i] = GGML_FP32_TO_FP16(arr[i]);
     }
-#endif
 }
 #define GGML_F16_VEC GGML_F32x4
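
For context, below is a minimal standalone sketch (not the repository's code) of the scalar path that __lzs_f16cx4_store falls back to once the NNPA branch is removed. It assumes ggml's public ggml_fp32_to_fp16() / ggml_fp16_to_fp32() helpers from ggml.h behave like the GGML_FP32_TO_FP16 macro used in the SIMD mappings header.

// sketch: store four fp32 lanes as fp16 via the scalar fallback, then
// round-trip them back to fp32 to inspect the precision loss.
#include <stdio.h>
#include "ggml.h"

static void store_fp16x4_scalar(ggml_fp16_t * x, const float * v) {
    // mirrors the remaining (non-NNPA) branch: element-wise conversion
    for (int i = 0; i < 4; i++) {
        x[i] = ggml_fp32_to_fp16(v[i]);
    }
}

int main(void) {
    const float v[4] = { 0.5f, -1.25f, 3.1415926f, 65504.0f };
    ggml_fp16_t out[4];
    store_fp16x4_scalar(out, v);
    for (int i = 0; i < 4; i++) {
        printf("%f -> 0x%04x -> %f\n", v[i], (unsigned) out[i], ggml_fp16_to_fp32(out[i]));
    }
    return 0;
}

Comparing these round-tripped values against what the NNPA vec_round_from_fp32 path produces is the kind of check that would surface the conversion failures mentioned in the commit message.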