mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-10-30 08:42:00 +00:00
	ggml-cpu: remove mistaken fallback macro
The fallback logic was already implemented, but I was too sleepy to realise it at the time.

Signed-off-by: Aaron Teo <aaron.teo1@ibm.com>
@@ -135,10 +135,6 @@
         uint16x8_t v_h = vec_convert_to_fp16(v_hd, 0);
         return vec_extract(v_h, 0);
     }
-#else
-    // fallback to the generic implementation
-    #define GGML_CPU_FP16_TO_FP32(x) GGML_FP16_TO_FP32(x)
-    #define GGML_CPU_FP32_TO_FP16(x) GGML_FP32_TO_FP16(x)
 #endif
 
 // On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
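For context, the redundancy works as follows: when an architecture-specific conversion macro is defined under a feature guard and a generic fallback is already provided elsewhere for platforms that did not define it, an extra #else branch inside the feature-guarded block defines the same macros twice. The following is a minimal, self-contained C sketch of that pattern, not the actual llama.cpp sources; the guard __NNPA__, the helpers arch_fp16_to_fp32 and generic_fp16_to_fp32, and the placement of the trailing fallback are assumptions made for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the generic GGML_FP16_TO_FP32 conversion (the real one is
 * table-based and handles all cases); correct for normal fp16 values only. */
static inline float generic_fp16_to_fp32(uint16_t h) {
    uint32_t sign = (uint32_t)(h >> 15) << 31;
    uint32_t exp  = (h >> 10) & 0x1F;
    uint32_t man  = h & 0x3FF;
    uint32_t bits = sign | ((exp + 112) << 23) | (man << 13);
    float f;
    memcpy(&f, &bits, sizeof f);
    return f;
}

/* Placeholder for a vector-intrinsic implementation; reuses the generic
 * helper so this sketch compiles on any platform. */
static inline float arch_fp16_to_fp32(uint16_t h) {
    return generic_fp16_to_fp32(h);
}

/* Architecture-specific fast path, defined only when the feature is present.
 * __NNPA__ is used here as an illustrative guard. */
#if defined(__NNPA__)
    #define CPU_FP16_TO_FP32(x) arch_fp16_to_fp32(x)
#endif

/* Generic fallback, picked up by any platform that did not define the macro
 * above. This is the "fallback logic that was already implemented", which is
 * why the extra #else fallback removed in the diff above was redundant. */
#if !defined(CPU_FP16_TO_FP32)
    #define CPU_FP16_TO_FP32(x) generic_fp16_to_fp32(x)
#endif

int main(void) {
    printf("%f\n", CPU_FP16_TO_FP32((uint16_t)0x3C00)); /* 1.0 in fp16 */
    return 0;
}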