Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-30 08:42:00 +00:00)

Commit 99009e72f8
* Starting to add k-quantization to ggml. I think it is better to have quantization separate from ggml. For now just adding the k-quants there, but it would be better to also factor out the existing ggml quantizations.
* Adding Q3_K and Q8_K (de)-quantization.
* Q3_K now working on CUDA and AVX2/scalar. CUDA is not ideal - ~50% slower than Q4_0 for single token prediction, about the same in batch mode (perplexity). CPU single token is ~55 ms (on a Ryzen 7950X).
* Some improvement for Q3_K on CUDA. It is now ~22.5 ms/token on my GPU, so ~30% slower than Q4_0.
* Some more CUDA optimizations for Q3_K. Single token is now 20.5 ms/token (~20% slower than Q4_0). Perplexity is on par with Q4_0.
* Adding Q4_K - scalar, AVX2, CUDA. Performance is the same or perhaps very slightly better than Q4_0 on the CPU. On the GPU, single token prediction is ~10% better than Q4_0; batch mode (perplexity) is about the same.
* Adding Q6_K - scalar, AVX2, CUDA. Performance is ~40% lower compared to Q4_K on the CPU. This is to be expected, considering that we are memory bound on the CPU and the 6-bit model is ~44% larger than the 4-bit. On the GPU, single token prediction is ~6% lower than Q4_0; batch mode (perplexity) is even closer (but still slower).
* Adding Q5_K - scalar, AVX2, CUDA. Performance is ~20% lower compared to Q4_K on the CPU. This is to be expected, considering that we are memory bound on the CPU and the 5-bit model is ~22% larger than the 4-bit. On the GPU, performance is about the same as Q4_0 for both single token and batch prediction.
* Per convention, all QX_K quantizations use Q5_K for output.weight.
* Adding quantization mixes.
* Quantization mixes: didn't quite get what I wanted in the last commit.
* Q4_K dot product for ARM_NEON.
* Q6_K dot product for ARM_NEON.
* Q5_K dot product for ARM_NEON.
* Adding Q3_K dot for ARM_NEON. It is 22% slower than Q4_K, despite the smaller model size. On x86_64, where we are memory bound, the Q3_K model is quite a bit faster than Q4_K.
* A very slightly faster ARM_NEON Q3_K dot.
* Adding Q2_K - just CUDA for now. Token prediction is pretty good - about 15.5 ms on an RTX 4080. Perplexity is about the same as Q4_K.
* Adding scalar and AVX2 Q2_K dot.
* Adding ARM_NEON Q2_K dot. About the same performance as Q4_K.
* A slightly faster ARM_NEON Q2_K dot. Single token prediction is now ~36 ms on M2 Max. The code is much simpler too.
* Fixed bug in Q2_K CUDA dot product kernel. Strangely enough, for the few prompts I tried with the 7B model the responses looked perfectly reasonable. I only realized something was not quite right when I tried the larger models and started getting nonsense back. In any case, Q2_K single token evaluation times on an RTX 4080 in a Ryzen 7950X box, using CUDA with the model fully loaded on the GPU, are ~15.5 ms for 7B, ~25.4 ms for 13B, and ~55.8 ms for 30B. The max number of layers that fit in VRAM for the 65B is 32. With that, we get ~330 ms per token, which is not that much faster than just running on the CPU (~470 ms per token).
* Don't print zeros/NaNs when no count histogram has been collected.
* A 10% faster CUDA vector dot kernel for Q3_K. Q3_K is now running at ~18.5 ms/token on CUDA, so the gap to Q4_0 is only 10%. It seems the memory access pattern is more important for performance than the amount of computation the kernel does.
* A slightly faster Q4_K AVX2 dot product. For perplexity, where we are less memory bound, time per pass drops by ~5%. Barely measurable difference for single token prediction.
* A slightly faster ARM_NEON Q4_K dot product.
* Minor.
* Fix quantization error test. We cannot possibly be expecting rmse < 0.002 for 2- and 3-bit quantization variants.
* Fix docker build. I have been sloppy with vector reinterpret casts on ARM_NEON. It seems clang is very forgiving in that regard.
* Added forgotten ggml.o dependence on k_quants.h to the Makefile.
* Had unintentionally committed the Makefile with -Ofast enabled.
* ggml : rename k_quants -> ggml-quants-k, use lowercase in code.

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
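All of the items above revolve around the two primitives that the test file below exercises: a block-wise quantize/dequantize round trip, and a dot product computed directly in the quantized domain. The following is a rough, self-contained C++ sketch only - it is not the ggml Q4_K layout (which packs super-blocks with 6-bit scales), and all names in it are invented for illustration.

// Toy 4-bit block quantization - illustrative only, NOT the ggml/k-quants format.
// Each block of 32 floats is mapped to integers in [-7, 7] plus one float scale.
// (A real 4-bit format would pack two values per byte; one per byte here for clarity.)
#include <cmath>
#include <cstdint>

constexpr int BLOCK = 32;

struct ToyBlockQ4 {
    float  scale;      // per-block scale
    int8_t q[BLOCK];   // quantized values, one per byte for readability
};

void toy_quantize(const float * x, ToyBlockQ4 * out, int nblocks) {
    for (int b = 0; b < nblocks; ++b) {
        float amax = 0.0f;
        for (int i = 0; i < BLOCK; ++i) {
            amax = std::fmax(amax, std::fabs(x[b*BLOCK + i]));
        }
        const float scale = amax / 7.0f;            // largest magnitude maps to +-7
        out[b].scale = scale;
        for (int i = 0; i < BLOCK; ++i) {
            const float v = scale != 0.0f ? x[b*BLOCK + i] / scale : 0.0f;
            out[b].q[i] = (int8_t) std::lround(v);  // v is already in [-7, 7]
        }
    }
}

void toy_dequantize(const ToyBlockQ4 * in, float * y, int nblocks) {
    for (int b = 0; b < nblocks; ++b) {
        for (int i = 0; i < BLOCK; ++i) {
            y[b*BLOCK + i] = in[b].scale * in[b].q[i];
        }
    }
}

// Dot product in the quantized domain: integer multiply-accumulate per block,
// with the two scales applied once per block. The CUDA/AVX2/NEON Q*_K dot
// kernels discussed above follow this same broad structure.
float toy_vec_dot(const ToyBlockQ4 * a, const ToyBlockQ4 * b, int nblocks) {
    float sum = 0.0f;
    for (int bl = 0; bl < nblocks; ++bl) {
        int32_t isum = 0;
        for (int i = 0; i < BLOCK; ++i) {
            isum += (int32_t) a[bl].q[i] * b[bl].q[i];
        }
        sum += a[bl].scale * b[bl].scale * (float) isum;
    }
    return sum;
}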
		
			
				
	
	
		
160 lines · 5.4 KiB · C++
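The file below, viewed at this commit, is the quantization unit test touched by the "Fix quantization error test" item above. For every ggml type that provides quantization functions it checks three things against fixed thresholds: the quantize/dequantize round-trip error on synthetic data, the drift of the optimized quantizer from the reference implementation, and the accuracy of the quantized dot product. The 2- and 3-bit k-quants get the looser error bounds defined at the top of the file.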
// Unit tests for quantization specific functions - quantize, dequantize and dot product

#include "ggml.h"

#undef NDEBUG
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string>
#include <vector>


const float MAX_QUANTIZATION_REFERENCE_ERROR = 0.0001;
const float MAX_QUANTIZATION_TOTAL_ERROR = 0.002;
const float MAX_QUANTIZATION_TOTAL_ERROR_2BITS = 0.0075;
const float MAX_QUANTIZATION_TOTAL_ERROR_3BITS = 0.0040;
const float MAX_DOT_PRODUCT_ERROR = 0.02;

const char* RESULT_STR[] = {"ok", "FAILED"};


// Generate synthetic data
void generate_data(float offset, size_t n, float * dst) {
    for (size_t i = 0; i < n; i++) {
        dst[i] = 0.1 + 2*cosf(i + offset);
    }
}

// Normalized error between two float arrays: sqrt(sum of squared differences) / n
// (the error thresholds above are calibrated to this measure)
float array_rmse(const float * a1, const float * a2, size_t n) {
    double sum = 0;
    for (size_t i = 0; i < n; i++) {
        double diff = a1[i] - a2[i];
        sum += diff * diff;
    }
    return sqrtf(sum) / n;
}

// Total quantization error on test data
float total_quantization_error(quantize_fns_t & qfns, size_t test_size, const float * test_data) {
    std::vector<uint8_t> tmp_q(2*test_size); // generous upper bound on the quantized row size
    std::vector<float> tmp_out(test_size);

    qfns.quantize_row_q(test_data, tmp_q.data(), test_size);
    qfns.dequantize_row_q(tmp_q.data(), tmp_out.data(), test_size);
    return array_rmse(test_data, tmp_out.data(), test_size);
}

// Error of the optimized quantization relative to the reference implementation
float reference_quantization_error(quantize_fns_t & qfns, size_t test_size, const float * test_data) {
    std::vector<uint8_t> tmp_q(2*test_size);
    std::vector<float> tmp_out(test_size);
    std::vector<float> tmp_out_ref(test_size);

    qfns.quantize_row_q(test_data, tmp_q.data(), test_size);
    qfns.dequantize_row_q(tmp_q.data(), tmp_out.data(), test_size);

    qfns.quantize_row_q_reference(test_data, tmp_q.data(), test_size);
    qfns.dequantize_row_q(tmp_q.data(), tmp_out_ref.data(), test_size);

    return array_rmse(tmp_out.data(), tmp_out_ref.data(), test_size);
}

float dot_product(const float * a1, const float * a2, size_t test_size) {
    double sum = 0;
    for (size_t i = 0; i < test_size; i++) {
        sum += a1[i] * a2[i];
    }
    return sum;
}

// Total dot product error
float dot_product_error(quantize_fns_t & qfns, size_t test_size, const float * test_data1, const float * test_data2) {
    std::vector<uint8_t> tmp_q1(2*test_size);
    std::vector<uint8_t> tmp_q2(2*test_size);

    // the second operand is quantized with the companion type used on the
    // activation side of vec_dot_q
    qfns.quantize_row_q    (test_data1, tmp_q1.data(), test_size);
    qfns.quantize_row_q_dot(test_data2, tmp_q2.data(), test_size);

    float result = INFINITY;
    qfns.vec_dot_q(test_size, &result, tmp_q1.data(), tmp_q2.data());

    const float dot_ref = dot_product(test_data1, test_data2, test_size);

    return fabsf(result - dot_ref) / test_size;
}

int main(int argc, char * argv[]) {
    bool verbose = false;
    const size_t test_size = 32 * 128;

    std::string arg;
    for (int i = 1; i < argc; i++) {
        arg = argv[i];

        if (arg == "-v") {
            verbose = true;
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            return 1;
        }
    }

    std::vector<float> test_data(test_size);
    std::vector<float> test_data2(test_size);

    generate_data(0.0, test_data.size(), test_data.data());
    generate_data(1.0, test_data2.size(), test_data2.data());

    // Initialize GGML, ensures float conversion tables are initialized
    struct ggml_init_params ggml_params = {
        /* .mem_size   = */ 1*1024,
        /* .mem_buffer = */ NULL,
        /* .no_alloc   = */ true,
    };
    struct ggml_context * ctx = ggml_init(ggml_params);

    int num_failed = 0;
    bool failed = false;

    for (int i = 0; i < GGML_TYPE_COUNT; i++) {
        ggml_type type = (ggml_type) i;
        quantize_fns_t qfns = ggml_internal_get_quantize_fn(i);

        if (qfns.quantize_row_q && qfns.dequantize_row_q) {
            const float total_error = total_quantization_error(qfns, test_size, test_data.data());
            // 2- and 3-bit types are allowed a larger round-trip error
            const float max_quantization_error =
                type == GGML_TYPE_Q2_K ? MAX_QUANTIZATION_TOTAL_ERROR_2BITS :
                type == GGML_TYPE_Q3_K ? MAX_QUANTIZATION_TOTAL_ERROR_3BITS : MAX_QUANTIZATION_TOTAL_ERROR;
            failed = !(total_error < max_quantization_error);
            num_failed += failed;
            if (failed || verbose) {
                printf("%5s absolute quantization error:    %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], total_error);
            }

            const float reference_error = reference_quantization_error(qfns, test_size, test_data.data());
            failed = !(reference_error < MAX_QUANTIZATION_REFERENCE_ERROR);
            num_failed += failed;
            if (failed || verbose) {
                printf("%5s reference implementation error: %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], reference_error);
            }

            const float vec_dot_error = dot_product_error(qfns, test_size, test_data.data(), test_data2.data());
            failed = !(vec_dot_error < MAX_DOT_PRODUCT_ERROR);
            num_failed += failed;
            if (failed || verbose) {
                printf("%5s dot product error:              %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], vec_dot_error);
            }
        }
    }

    if (num_failed || verbose) {
        printf("%d tests failed\n", num_failed);
    }

    ggml_free(ctx);

    return num_failed > 0;
}
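The test accepts a single optional flag, -v, which prints the per-type results even when they pass; without it, only failures are printed. The process exits with status 1 if any check failed and 0 otherwise, so the binary can be driven directly from a test runner such as ctest.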