ggml-cpu: reattempt fp32->fp16

Signed-off-by: Aaron Teo <aaron.teo1@ibm.com>
@@ -3142,10 +3142,12 @@ void ggml_cpu_fp32_to_fp16(const float * x, ggml_fp16_t * y, int64_t n) {
         _mm_storel_epi64((__m128i *)(y + i), y_vec);
     }
 #elif defined(__NNPA__)
-    for (; i + 3 < n; i += 4) {
-        float32x4_t v_x = vec_xl(0, (const float *)(x + i));
-        uint16x8_t v_xd = vec_convert_to_fp16(v_x, 0);
-        vec_xst(v_xd, 0, (ggml_fp16_t *)(y + i));
+    for (; i + 7 < n; i += 8) {
+        float32x4_t v_xh = vec_xl(0, (const float *)(x + i + 0));
+        float32x4_t v_xl = vec_xl(0, (const float *)(x + i + 4));
+        __vector short int v_xd = vec_round_from_fp32(v_xh, v_xl, 0);
+        __vector short int v_xfp16 = vec_convert_to_fp16(v_xd, 0);
+        vec_xst(v_xfp16, 0, (ggml_fp16_t *)(y + i));
     }
 #endif
     for (; i < n; ++i) {
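For readers without the surrounding file: the hunk widens the NNPA fast path in ggml_cpu_fp32_to_fp16 from 4 to 8 floats per iteration. NNPA is the Neural Network Processing Assist facility introduced with IBM z16 (s390x). Per its use in the hunk, vec_round_from_fp32 packs two 4-lane fp32 vectors into one 16-bit NNP-format vector, and vec_convert_to_fp16 then rewrites those lanes as IEEE fp16 bits before a single 128-bit store. Below is a minimal, self-contained sketch of the same pattern, including the scalar tail that the hunk's last context line opens. The float32x4_t typedef, the fp16_t alias, and the scalar fallback are illustrative assumptions, not ggml's definitions (ggml has its own types and scalar conversion macro), and the exact intrinsic signatures depend on the s390x toolchain (e.g. gcc with -march=z16 -mzvector):

// Sketch only, assuming an s390x compiler exposing the z/Architecture
// vector-NNP intrinsics used in the hunk above (vecintrin.h).
#include <stdint.h>
#include <string.h>
#include <vecintrin.h>

typedef __vector float float32x4_t;   // assumed alias, mirrors the hunk
typedef uint16_t fp16_t;              // raw IEEE-754 binary16 bits

// Hypothetical scalar fallback for the tail. Simplified on purpose: it
// truncates the mantissa and flushes subnormals to zero.
static fp16_t fp32_to_fp16_scalar(float f) {
    uint32_t u;
    memcpy(&u, &f, sizeof(u));
    uint32_t sign = (u >> 16) & 0x8000;
    uint32_t frac =  u        & 0x007fffff;
    int32_t  e    = (int32_t)((u >> 23) & 0xff);
    if (e == 0xff) return (fp16_t)(sign | 0x7c00 | (frac ? 0x0200 : 0)); // Inf/NaN
    e += 15 - 127;                                 // rebias the exponent
    if (e <= 0)  return (fp16_t)sign;              // underflow -> signed zero
    if (e >= 31) return (fp16_t)(sign | 0x7c00);   // overflow  -> infinity
    return (fp16_t)(sign | ((uint32_t)e << 10) | (frac >> 13));
}

static void fp32_to_fp16_row(const float * x, fp16_t * y, int64_t n) {
    int64_t i = 0;
#if defined(__NNPA__)
    // 8 floats per iteration: two 4-lane loads, one rounding into the
    // NNP 16-bit format, one conversion to IEEE fp16, one 128-bit store.
    for (; i + 7 < n; i += 8) {
        float32x4_t v_xh = vec_xl(0, (const float *)(x + i + 0));
        float32x4_t v_xl = vec_xl(0, (const float *)(x + i + 4));
        __vector short int v_xd    = vec_round_from_fp32(v_xh, v_xl, 0);
        __vector short int v_xfp16 = vec_convert_to_fp16(v_xd, 0);
        vec_xst(v_xfp16, 0, (fp16_t *)(y + i));
    }
#endif
    // Scalar tail for the trailing 0..7 elements.
    for (; i < n; ++i) {
        y[i] = fp32_to_fp16_scalar(x[i]);
    }
}

An 8-element stride is the natural granularity here: vec_round_from_fp32 consumes two 4-lane fp32 vectors per 8-lane result, so the wider loop does one rounding, one conversion and one store per 8 elements instead of one conversion and store per 4.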