Commit 6769e944c7
* k_quants: WIP super-blocks with 64 weights
* k_quants: WIP super-blocks with 64 weights. Q6_K scalar and AVX2 work.
* k_quants: WIP super-blocks with 64 weights. Q4_K scalar and AVX2 work.
* k_quants: WIP super-blocks with 64 weights. Q2_K scalar and AVX2 work. Q2_K is way too slow (it is actually slower than the scalar implementation).
* k_quants: WIP super-blocks with 64 weights. Q3_K scalar and AVX2 work.
* k_quants: WIP super-blocks with 64 weights. Q5_K scalar and AVX2 work, and with that all k_quants are done on AVX2 and scalar.
* k_quants: WIP super-blocks with 64 weights. Q6_K working on CUDA. Cannot make it run quite as fast as with super-blocks of 256 weights: 8% slower on 4080, 20% slower on the 1660 (but there we fit 1 less layer on the GPU because of the larger model size), so some fraction of these 20% is due to that.
* k_quants: WIP super-blocks with 64 weights. Q4_K working on CUDA. ~10% slower on GTX-1660, 16% slower on 4080.
* k_quants: WIP super-blocks with 64 weights. Q2_K working on CUDA. ~3% slower on GTX-1660, 10% slower on 4080.
* k_quants: WIP super-blocks with 64 weights. Q3_K working on CUDA.
* k_quants: WIP super-blocks with 64 weights. Q5_K working on CUDA, and with this CUDA is done.
* k_quants: WIP super-blocks with 64 weights. Q6_K working on ARM_NEON.
* k_quants: WIP super-blocks with 64 weights. Q4_K working on ARM_NEON, but quite a bit slower than 256 weights.
* k_quants: WIP super-blocks with 64 weights. Q2_K working on ARM_NEON, but quite a bit slower than 256 weights.
* k_quants: WIP super-blocks with 64 weights. Q3_K working on ARM_NEON, but quite a bit slower than 256 weights.
* k_quants: WIP super-blocks with 64 weights. Q5_K working on ARM_NEON, but quite a bit slower than 256 weights. With that, we have full support for ARM_NEON, although performance is not quite there.
* k_quants: WIP super-blocks with 64 weights. Slightly more efficient Q3_K and Q5_K.
* k_quants: WIP super-blocks with 64 weights. Another small improvement for Q3_K and Q5_K on ARM_NEON.
* k_quants: WIP super-blocks with 64 weights. Yet another speedup for Q5_K on ARM_NEON. We are now within 10% of the QK_K = 256 version.
* k_quants: WIP super-blocks with 64 weights. We are able to pass preprocessor macros to the Metal compiler; Q6_K works and is actually slightly more efficient than the QK_K = 256 version (25.2 ms vs 25.8 ms).
* k_quants: WIP super-blocks with 64 weights. Q4_K works on Metal and is actually slightly faster than QK_K = 256 (21.95 ms vs 24.0 ms).
* k_quants: WIP super-blocks with 64 weights. Q2_K works on Metal and is very slightly faster than QK_K = 256 (23.8 ms vs 24.2 ms).
* k_quants: WIP super-blocks with 64 weights. Q3_K works on Metal and is slightly faster than QK_K = 256 (26.6 ms vs 28.3 ms).
* k_quants: WIP super-blocks with 64 weights. Q5_K works on Metal and is slightly faster than QK_K = 256 (23.7 ms vs 26.3 ms).
* k_quants: call them _K, not _k, also on Metal.
* k_quants: correctly define QK_K in llama.cpp.
* Fixed bug in q4_K quantization added with the 64-block addition.
* Simplify via lambda.
* k_quants: switch Q3_K to 4-bit scales when QK_K = 64. Otherwise there isn't much benefit from this quantization type. There is some very slight loss in accuracy, but we reduce size by ~7%. E.g., for OpenLLaMA-3B, Q3_K_S perplexity is 8.6131 with 8-bit scales and 8.6352 with 4-bit, while file size decreases from 1.53G to 1.44G.
* k_quants: switch Q4_K to 4-bit scales when QK_K = 64. Here the loss in accuracy is greater than for Q3_K, but the Q4_K points still move further to the left on the perplexity vs size curve.
* k_quants: forgot to add the Metal changes in last commit.
* k_quants: change Q5_K to be type 0 when QK_K = 64. Still needs AVX2 implementation.
* k_quants: AVX2 implementation for new 64-weight Q5_K.
* k_quants: 10% faster ARM_NEON Q5_K dot product.
* k_quants: fixed issue caused by merging with master.

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
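Several items above note that the QK_K = 64 variants yield larger files than QK_K = 256: the fixed per-super-block overhead (fp16 super-block scale plus quantized block scales) is amortized over four times fewer weights. A minimal self-contained sketch of that arithmetic for Q6_K, not part of the commit; ggml_fp16_t is assumed to be 2 bytes, as in ggml:

// Sketch: per-weight cost is sizeof(block) * 8 / QK_K.
#include <stdio.h>
#include <stdint.h>

typedef uint16_t fp16; // stand-in for ggml_fp16_t (2 bytes)

static double bpw(size_t block_bytes, int qk_k) {
    return 8.0 * (double)block_bytes / (double)qk_k;
}

int main(void) {
    // block_q6_K with QK_K = 256: ql[128] + qh[64] + scales[16] + d
    size_t q6_256 = 128 + 64 + 16 + sizeof(fp16);   // 210 bytes
    // block_q6_K with QK_K = 64: ql[32] + qh[16] + scales[4] + d
    size_t q6_64  = 32 + 16 + 4 + sizeof(fp16);     //  54 bytes

    printf("Q6_K, QK_K=256: %.4f bits/weight\n", bpw(q6_256, 256)); // 6.5625
    printf("Q6_K, QK_K=64 : %.4f bits/weight\n", bpw(q6_64, 64));   // 6.7500
    return 0;
}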
k_quants.h · 158 lines · 6.8 KiB · C
#pragma once

#include "ggml.h"

#include <stdint.h>
#include <assert.h>
#include <stddef.h>

// Super-block size
#ifdef GGML_QKK_64
#define QK_K 64
#define K_SCALE_SIZE 4
#else
#define QK_K 256
#define K_SCALE_SIZE 12
#endif
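Everything below keys off this compile-time switch, so every translation unit (and any GPU kernels) must agree on GGML_QKK_64 or the block layouts silently diverge. A quick sanity sketch, not part of the header, assuming "k_quants.h" and the ggml headers are on the include path; building it with and without -DGGML_QKK_64 shows the geometry flip:

// Sketch: print the super-block geometry this build was compiled with.
#include <stdio.h>
#include "k_quants.h"

int main(void) {
    printf("QK_K = %d, K_SCALE_SIZE = %d\n", QK_K, K_SCALE_SIZE);
    printf("sizeof(block_q4_K) = %zu bytes\n", sizeof(block_q4_K));
    return 0;
}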
//
// Super-block quantization structures
//

// 2-bit quantization
// weight is represented as x = a * q + b
// 16 blocks of 16 elements each
// Effectively 2.625 bits per weight
typedef struct {
    uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
    uint8_t qs[QK_K/4];      // quants
    ggml_fp16_t d;           // super-block scale for quantized scales
    ggml_fp16_t dmin;        // super-block scale for quantized mins
} block_q2_K;
static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding");
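Each scales[] byte packs a 4-bit block scale in the low nibble and a 4-bit block min in the high nibble, so a weight reconstructs as x = d * (sc & 0xF) * q - dmin * (sc >> 4). A hedged sketch of that affine decode for one 16-element block; ggml_fp16_to_fp32() is the converter declared in ggml.h, and the reference kernels in k_quants.c walk qs in an interleaved order, so the straight "4 consecutive 2-bit quants per byte" packing assumed here is for readability only:

// Sketch, not the reference kernel: decode block ib (16 weights) of one
// block_q2_K super-block.
static void dequant_q2_K_block_sketch(const block_q2_K * b, int ib, float * out) {
    const float d    = ggml_fp16_to_fp32(b->d);
    const float dmin = ggml_fp16_to_fp32(b->dmin);

    const uint8_t sc = b->scales[ib];
    const float dl = d    * (sc & 0xF); // a in x = a*q + b
    const float ml = dmin * (sc >> 4);  // -b in x = a*q + b

    for (int j = 0; j < 16; ++j) {
        const int i = ib*16 + j;
        const int q = (b->qs[i/4] >> (2*(i%4))) & 3; // assumed layout
        out[j] = dl * q - ml;
    }
}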
// 3-bit quantization
// weight is represented as x = a * q
// 16 blocks of 16 elements each
// Effectively 3.4375 bits per weight
#ifdef GGML_QKK_64
typedef struct {
    uint8_t hmask[QK_K/8];     // quants - high bit
    uint8_t qs[QK_K/4];        // quants - low 2 bits
    uint8_t scales[2];         // 4-bit block scales
    ggml_fp16_t d;             // super-block scale
} block_q3_K;
static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 2, "wrong q3_K block size/padding");
#else
typedef struct {
    uint8_t hmask[QK_K/8];     // quants - high bit
    uint8_t qs[QK_K/4];        // quants - low 2 bits
    uint8_t scales[12];        // scales, quantized with 6 bits
    ggml_fp16_t d;             // super-block scale
} block_q3_K;
static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding");
#endif
// 4-bit quantization
// 8 blocks of 32 elements each
// weight is represented as x = a * q + b
// Effectively 4.5 bits per weight
#ifdef GGML_QKK_64
typedef struct {
    ggml_fp16_t d[2];          // super-block scales/mins
    uint8_t scales[2];         // 4-bit block scales/mins
    uint8_t qs[QK_K/2];        // 4-bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + QK_K/2 + 2, "wrong q4_K block size/padding");
#else
typedef struct {
    ggml_fp16_t d;             // super-block scale for quantized scales
    ggml_fp16_t dmin;          // super-block scale for quantized mins
    uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
    uint8_t qs[QK_K/2];        // 4-bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2, "wrong q4_K block size/padding");
#endif
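In the QK_K = 256 branch, the 12 scales bytes carry 8 six-bit scales plus 8 six-bit mins (exactly 96 bits). A sketch of a packing that achieves this, modeled on the get_scale_min_k4 helper in k_quants.c (reproduced from memory, so treat the exact byte layout as an assumption):

// Unpack the j-th (0..7) 6-bit scale and min from the 12-byte scales[]
// array. Scales 0..3 and mins 0..3 sit in the low 6 bits of bytes 0..3
// and 4..7; entries 4..7 are split between the low/high nibbles of bytes
// 8..11 and the spare top 2 bits of bytes 0..7.
static inline void get_scale_min_sketch(int j, const uint8_t * q, uint8_t * d, uint8_t * m) {
    if (j < 4) {
        *d = q[j] & 63;
        *m = q[j + 4] & 63;
    } else {
        *d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
        *m = (q[j + 4] >>  4) | ((q[j - 0] >> 6) << 4);
    }
}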
// 5-bit quantization
// 8 blocks of 32 elements each
// weight is represented as x = a * q + b
// Effectively 5.5 bits per weight
#ifdef GGML_QKK_64
typedef struct {
    ggml_fp16_t d;               // super-block scale
    int8_t  scales[QK_K/16];     // 8-bit block scales
    uint8_t qh[QK_K/8];          // quants, high bit
    uint8_t qs[QK_K/2];          // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding");
#else
typedef struct {
    ggml_fp16_t d;               // super-block scale for quantized scales
    ggml_fp16_t dmin;            // super-block scale for quantized mins
    uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
    uint8_t qh[QK_K/8];          // quants, high bit
    uint8_t qs[QK_K/2];          // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding");
#endif
// 6-bit quantization
// weight is represented as x = a * q
// 16 blocks of 16 elements each
// Effectively 6.5625 bits per weight
typedef struct {
    uint8_t ql[QK_K/2];      // quants, lower 4 bits
    uint8_t qh[QK_K/4];      // quants, upper 2 bits
    int8_t  scales[QK_K/16]; // scales, quantized with 8 bits
    ggml_fp16_t d;           // super-block scale
} block_q6_K;
static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + QK_K / 16 + 3*QK_K/4, "wrong q6_K block size/padding");
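Q6_K is a "type 0" quantization (x = a * q, no separate min): each 6-bit quant is assembled from 4 low bits in ql and 2 high bits in qh, then recentred around zero by subtracting 32. A hedged single-weight decode; as with the q2_K sketch above, the element ordering here is simplified relative to the real kernels:

// Sketch, not the reference kernel: reconstruct weight i of a block_q6_K.
static float dequant_q6_K_weight_sketch(const block_q6_K * b, int i) {
    const float d  = ggml_fp16_to_fp32(b->d);
    const int   lo = (b->ql[i/2] >> (4*(i%2))) & 0xF; // assumed layout
    const int   hi = (b->qh[i/4] >> (2*(i%4))) & 0x3; // assumed layout
    const int   q  = (hi << 4) | lo;                  // 0..63
    return d * b->scales[i/16] * (q - 32);
}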
// This is only used for intermediate quantization and dot products
typedef struct {
    float   d;              // delta
    int8_t  qs[QK_K];       // quants
    int16_t bsums[QK_K/16]; // sum of quants in groups of 16
} block_q8_K;
static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding");
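The bsums field is what makes the min terms of the k-quants cheap in dot products: with x = dl*q - ml per 16-element group, the product against a q8_K row splits into a scale part needing the individual quant products and a min part needing only the per-group sums. A hedged sketch of that correction for a q2_K times q8_K pairing (illustrative; the real kernels fuse this into their SIMD loops):

// For y_i = y.d * q8_i:
//   sum_i x_i*y_i = y.d * sum_g ( dl_g * sum_{i in g} q_i*q8_i - ml_g * bsums[g] )
static float q8_K_min_correction_sketch(const block_q2_K * xb, const block_q8_K * yb) {
    const float dmin = ggml_fp16_to_fp32(xb->dmin);
    float acc = 0.0f;
    for (int g = 0; g < QK_K/16; ++g) {
        acc += (xb->scales[g] >> 4) * (float)yb->bsums[g]; // 4-bit min times group sum
    }
    return yb->d * dmin * acc; // the kernel subtracts this from the scale part
}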
// Quantization
void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k);
void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k);
void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k);
void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k);
void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k);
void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k);

void quantize_row_q2_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q3_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q4_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q5_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q6_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q8_K(const float * restrict x, void * restrict y, int k);
// Dequantization
void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k);
void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k);
void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k);
void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k);
void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k);
void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k);
// Dot product
void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
// Quantization with histogram collection
size_t ggml_quantize_q2_K(const float * src, void * dst, int n, int k, int64_t * hist);
size_t ggml_quantize_q3_K(const float * src, void * dst, int n, int k, int64_t * hist);
size_t ggml_quantize_q4_K(const float * src, void * dst, int n, int k, int64_t * hist);
size_t ggml_quantize_q5_K(const float * src, void * dst, int n, int k, int64_t * hist);
size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist);
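All of these declarations are implemented in k_quants.c. A hedged end-to-end sketch of the API, assuming you compile and link against k_quants.c and ggml; n must be a multiple of QK_K:

// Usage sketch: quantize one row to Q4_K and Q8_K, take the dot product,
// and compare against the fp32 reference.
#include <stdio.h>
#include "k_quants.h"

int main(void) {
    enum { N = 2 * 256 };           // a multiple of QK_K for either build
    float x[N], y[N], ref = 0.0f, dot = 0.0f;
    for (int i = 0; i < N; ++i) {
        x[i] = (float)(i % 17) / 17.0f - 0.5f;
        y[i] = (float)(i % 13) / 13.0f - 0.5f;
        ref += x[i] * y[i];
    }

    block_q4_K xq[N/QK_K];
    block_q8_K yq[N/QK_K];
    quantize_row_q4_K(x, xq, N);    // 4.5 bpw "weights"
    quantize_row_q8_K(y, yq, N);    // 8-bit "activations" with bsums

    ggml_vec_dot_q4_K_q8_K(N, &dot, xq, yq);
    printf("fp32 dot = %f, q4_K x q8_K dot = %f\n", ref, dot);

    float x2[N];
    dequantize_row_q4_K(xq, x2, N); // round-trip check
    printf("x[0] = %f, round-trip = %f\n", x[0], x2[0]);
    return 0;
}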
 |