Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-11-15 11:17:31 +00:00
ggml-cpu: disable fp32->fp16 nnpa conversions for now
there are some conversion failures in nnpa that require the eyes of an ibm stsm. will create a separate pr to introduce the fp32->fp16 change.

Signed-off-by: Aaron Teo <aaron.teo1@ibm.com>
@@ -1,7 +1,6 @@
 #pragma once
 
 #include "ggml-cpu-impl.h"
-#include <signal.h>
 
 //
 // simd mappings
@@ -983,14 +982,6 @@ static inline float32x4_t __lzs_f16cx4_load(const ggml_fp16_t * x) {
 
 // TODO: check why this function is not being hit at all
 static inline void __lzs_f16cx4_store(ggml_fp16_t * x, float32x4_t v_y) {
-#ifdef __NNPA__
-    float32x4_t zero = vec_splats(0.0f);
-    uint16x8_t v_x = vec_round_from_fp32(v_y, zero, 0);
-    x[0] = vec_extract(v_x, 0);
-    x[1] = vec_extract(v_x, 1);
-    x[2] = vec_extract(v_x, 2);
-    x[3] = vec_extract(v_x, 3);
-#else
     float arr[4];
 
     // note: keep type-cast here to prevent compiler bugs
@@ -1000,7 +991,6 @@ static inline void __lzs_f16cx4_store(ggml_fp16_t * x, float32x4_t v_y) {
     for (int i = 0; i < 4; i++) {
         x[i] = GGML_FP32_TO_FP16(arr[i]);
    }
-#endif
 }
 
 #define GGML_F16_VEC GGML_F32x4
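With the NNPA branch removed, __lzs_f16cx4_store always falls through to the scalar loop that converts each lane with GGML_FP32_TO_FP16. Below is a minimal, self-contained sketch of that fallback path, not ggml's actual implementation: it assumes a compiler with native _Float16 support, and the helper names fp32_to_fp16 and f16cx4_store_scalar are illustrative stand-ins for the GGML_FP32_TO_FP16 macro and the store routine.

// Sketch of the scalar fp32 -> fp16 store path that remains after this commit.
// Assumption: the compiler provides _Float16 (recent GCC/Clang); ggml itself
// uses its own conversion macros rather than these helper names.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint16_t ggml_fp16_t;   // fp16 carried as raw IEEE-754 binary16 bits

// Illustrative stand-in for GGML_FP32_TO_FP16: a _Float16 cast performs the
// round-to-nearest-even conversion, then the raw bits are copied out.
static inline ggml_fp16_t fp32_to_fp16(float f) {
    _Float16 h = (_Float16) f;
    ggml_fp16_t bits;
    memcpy(&bits, &h, sizeof(bits));
    return bits;
}

// Mirrors the scalar body of __lzs_f16cx4_store: spill the four fp32 lanes
// into a local array, then convert element by element.
static void f16cx4_store_scalar(ggml_fp16_t * x, const float v_y[4]) {
    float arr[4];
    memcpy(arr, v_y, sizeof(arr));
    for (int i = 0; i < 4; i++) {
        x[i] = fp32_to_fp16(arr[i]);
    }
}

int main(void) {
    const float v[4] = { 1.0f, 0.5f, -2.0f, 65504.0f };   // 65504 is the largest finite fp16
    ggml_fp16_t out[4];
    f16cx4_store_scalar(out, v);
    for (int i = 0; i < 4; i++) {
        printf("%g -> 0x%04x\n", v[i], (unsigned) out[i]);
    }
    return 0;
}

On an s390x build with NNPA enabled, the removed branch would instead have packed the four lanes with vec_round_from_fp32 and vec_extract; until the conversion failures are understood, only the scalar path is compiled.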