mirror of https://github.com/ggml-org/llama.cpp.git
synced 2025-10-31 08:51:55 +00:00

Commit 4c4cb30736
* iq4_nl: squash commits for easier rebase
* Basics (quantize, dequantize)
* CUDA dequantize and dot product
* Slightly faster CUDA dot product (120 t/s)
* Switch to 6-bit scales
* Scalar dot product
* AVX2 dot product
* ARM_NEON dot product
* Works on metal, but still slow
* Slightly better Metal dot product
* Another small Metal improvement
* Metal dot product is getting there
* Faster CUDA dot product
* Add 1/8 ffn_down layers as Q5_K when no imatrix has been provided
* Report the actual bpw
* Add _xs mix that is 4.05 bpw for non-MoE models
* Remove IQ4_XS for now, slightly adjust kvalues_iq4nl
* AVX2 dot product uses Q8_0 instead of Q8_K
* Add to test-backend-ops
* Minor fix
* Also use Q5_K for attn_output in MoE models
* Fixes after merging latest master
* Switching to blocks of 32
* AVX2 for blocks of 32
* Scalar dot product for blocks of 32
* ARM_NEON dot product for blocks of 32
* Metal kernels for blocks of 32
* Slightly faster Metal kernels
* Resurrecting iq3_xs

  After all the experimentation, nothing was better than this.

* Minor PPL improvement via a block scale fudge factor
* Minor improvement via 3 neighbours
* iq3_xs: working scalar and AVX2 dot products
* iq3_xs: ARM_NEON dot product - works but extremely slow (10 t/s)
* iq3_xs: working Metal implementation
* Adding IQ3_M - IQ3_XS mix with mostly Q4_K
* iq3_xs: a 3.4375 bpw variant
* iq3_xs: make CUDA work for new version
* iq3_xs: make scalar and AVX2 work for new version
* iq3_s: make ARM_NEON work with new version
* iq3_xs: make new version work on metal

  Performance is very similar to Q3_K_S

* iq3_xs: tiny Metal speed improvement
* iq3_xs: tiny Metal speed improvement
* Fix stupid warning
* Q3_K_XS now uses a mix of IQ3_XS and IQ3_XXS
* iq3_xs: rename to iq3_s
* iq3_s: make tests pass
* Move Q3_K_XS mix to 3.25 bpw
* Attempt to fix failing tests
* Another attempt to fix the Windows builds
* Attempt to fix ROCm
* ROCm again
* iq3_s: partial fix for QK_K = 64
* iq3_s: make it work on metal for QK_K = 64

  Pleasant surprise: the coding was super-block size independent, so all it took was to delete some QK_K == 256 guards.

* Will this fix ROCm?

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
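Many of the commits above concern IQ4_NL: a 4-bit format whose 16 representable levels sit on a non-uniform grid (the kvalues_iq4nl table) rather than evenly spaced steps, stored in blocks of 32 values with one scale per block. The following is a minimal sketch of that round trip, not the actual ggml implementation: the real quantizer also refines the scale against the grid and packs two 4-bit indices per byte. The grid values below are copied from the kvalues_iq4nl table in the llama.cpp source; treat the rest as illustrative.

// Sketch: non-linear 4-bit quantization with a 16-entry value grid,
// blocks of 32, one float scale per block (in the spirit of IQ4_NL)
#include <math.h>
#include <stdint.h>
#include <stdio.h>

// Non-uniform grid; values taken from kvalues_iq4nl in llama.cpp
static const int8_t kvalues[16] = {
    -127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113,
};

// Quantize one block of 32 floats: pick a scale so the largest-magnitude
// value maps onto the end of the grid, then snap each value to the
// nearest grid entry
static void quantize_block(const float * x, uint8_t * q, float * scale) {
    float max = x[0];
    for (int i = 1; i < 32; i++) {
        if (fabsf(x[i]) > fabsf(max)) max = x[i];
    }
    const float d  = max / kvalues[0]; // kvalues[0] = -127 anchors the range
    const float id = d ? 1.0f/d : 0.0f;
    for (int i = 0; i < 32; i++) {
        const float v = x[i]*id;
        int best = 0;
        for (int j = 1; j < 16; j++) {
            if (fabsf(v - kvalues[j]) < fabsf(v - kvalues[best])) best = j;
        }
        q[i] = (uint8_t) best; // 4-bit index into the grid
    }
    *scale = d;
}

// Dequantize: a table lookup followed by scaling
static void dequantize_block(const uint8_t * q, float scale, float * y) {
    for (int i = 0; i < 32; i++) {
        y[i] = scale * kvalues[q[i]];
    }
}

int main() {
    float x[32], y[32], d;
    uint8_t q[32];
    // Same kind of synthetic data as the test below
    for (int i = 0; i < 32; i++) x[i] = 0.1f + 2.0f*cosf((float) i);
    quantize_block(x, q, &d);
    dequantize_block(q, d, y);
    double sum = 0;
    for (int i = 0; i < 32; i++) sum += (x[i]-y[i])*(x[i]-y[i]);
    printf("round-trip rmse: %g\n", sqrt(sum/32));
    return 0;
}

The test file below exercises exactly this kind of round trip (plus the quantized dot product) for every type ggml registers, against per-family error thresholds.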
189 lines · 6.6 KiB · C++
// Unit tests for quantization specific functions - quantize, dequantize and dot product

#include "ggml.h"

#undef NDEBUG
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string>
#include <vector>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

// Error thresholds, loosened for the lower-bit formats: 2- and 3-bit types
// are allowed proportionally larger round-trip and dot product errors
constexpr float MAX_QUANTIZATION_REFERENCE_ERROR = 0.0001f;
constexpr float MAX_QUANTIZATION_TOTAL_ERROR = 0.002f;
constexpr float MAX_QUANTIZATION_TOTAL_ERROR_2BITS = 0.0075f;
constexpr float MAX_QUANTIZATION_TOTAL_ERROR_3BITS = 0.0040f;
constexpr float MAX_QUANTIZATION_TOTAL_ERROR_3BITS_XXS = 0.0050f;
constexpr float MAX_DOT_PRODUCT_ERROR = 0.02f;
constexpr float MAX_DOT_PRODUCT_ERROR_LOWBIT = 0.04f;

static const char* RESULT_STR[] = {"ok", "FAILED"};

// Generate synthetic data
static void generate_data(float offset, size_t n, float * dst) {
    for (size_t i = 0; i < n; i++) {
        dst[i] = 0.1 + 2*cosf(i + offset);
    }
}

// Error between two float arrays; note this is sqrt(sum)/n rather than the
// textbook RMSE sqrt(sum/n) - the thresholds above are calibrated to this
static float array_rmse(const float * a1, const float * a2, size_t n) {
    double sum = 0;
    for (size_t i = 0; i < n; i++) {
        double diff = a1[i] - a2[i];
        sum += diff * diff;
    }
    return sqrtf(sum) / n;
}

// Total quantization error on test data
static float total_quantization_error(ggml_type_traits_t & qfns, size_t test_size, const float * test_data) {
    std::vector<uint8_t> tmp_q(2*test_size);
    std::vector<float> tmp_out(test_size);

    qfns.from_float(test_data, tmp_q.data(), test_size);
    qfns.to_float(tmp_q.data(), tmp_out.data(), test_size);
    return array_rmse(test_data, tmp_out.data(), test_size);
}

// Error of the optimized quantization relative to the reference implementation
static float reference_quantization_error(ggml_type_traits_t & qfns, size_t test_size, const float * test_data) {
    std::vector<uint8_t> tmp_q(2*test_size);
    std::vector<float> tmp_out(test_size);
    std::vector<float> tmp_out_ref(test_size);

    qfns.from_float(test_data, tmp_q.data(), test_size);
    qfns.to_float(tmp_q.data(), tmp_out.data(), test_size);

    qfns.from_float_reference(test_data, tmp_q.data(), test_size);
    qfns.to_float(tmp_q.data(), tmp_out_ref.data(), test_size);

    return array_rmse(tmp_out.data(), tmp_out_ref.data(), test_size);
}

static float dot_product(const float * a1, const float * a2, size_t test_size) {
    double sum = 0;
    for (size_t i = 0; i < test_size; i++) {
        sum += a1[i] * a2[i];
    }
    return sum;
}

// Total dot product error: quantizes the first operand with the tested type,
// the second with that type's vec_dot_type (a Q8 format for quantized types)
static float dot_product_error(
    ggml_type_traits_t & qfns, size_t test_size, const float * test_data1, const float * test_data2
) {
    std::vector<uint8_t> tmp_q1(2*test_size);
    std::vector<uint8_t> tmp_q2(2*test_size);

    auto vdot = ggml_internal_get_type_traits(qfns.vec_dot_type);

    qfns.from_float(test_data1, tmp_q1.data(), test_size);
    vdot.from_float(test_data2, tmp_q2.data(), test_size);

    float result = INFINITY;
    qfns.vec_dot(test_size, &result, 0, tmp_q1.data(), 0, tmp_q2.data(), 0, 1);

    const float dot_ref = dot_product(test_data1, test_data2, test_size);

    return fabsf(result - dot_ref) / test_size;
}

int main(int argc, char * argv[]) {
    bool verbose = false;
    const size_t test_size = 32 * 128;

    std::string arg;
    for (int i = 1; i < argc; i++) {
        arg = argv[i];

        if (arg == "-v") {
            verbose = true;
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            return 1;
        }
    }

    std::vector<float> test_data(test_size);
    std::vector<float> test_data2(test_size);

    generate_data(0.0, test_data.size(), test_data.data());
    generate_data(1.0, test_data2.size(), test_data2.data());

    // Initialize GGML, ensures float conversion tables are initialized
    struct ggml_init_params ggml_params = {
        /* .mem_size   = */ 1*1024,
        /* .mem_buffer = */ NULL,
        /* .no_alloc   = */ true,
    };
    struct ggml_context * ctx = ggml_init(ggml_params);

    int num_failed = 0;
    bool failed = false;

    for (int i = 0; i < GGML_TYPE_COUNT; i++) {
        const ggml_type type = (ggml_type) i;
        ggml_type_traits_t qfns = ggml_internal_get_type_traits(type);

        // deprecated - skip
        if (qfns.blck_size == 0) {
            continue;
        }

        if (type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS) {
            printf("Skip %s due to missing quantization functionality\n", ggml_type_name(type));
            continue;
        }

        printf("Testing %s\n", ggml_type_name(type));
        ggml_quantize_init(type);

        if (qfns.from_float && qfns.to_float) {
            const float total_error = total_quantization_error(qfns, test_size, test_data.data());
            const float max_quantization_error =
                type == GGML_TYPE_Q2_K    ? MAX_QUANTIZATION_TOTAL_ERROR_2BITS :
                type == GGML_TYPE_Q3_K    ? MAX_QUANTIZATION_TOTAL_ERROR_3BITS :
                type == GGML_TYPE_IQ3_S   ? MAX_QUANTIZATION_TOTAL_ERROR_3BITS :
                type == GGML_TYPE_IQ3_XXS ? MAX_QUANTIZATION_TOTAL_ERROR_3BITS_XXS : MAX_QUANTIZATION_TOTAL_ERROR;
            failed = !(total_error < max_quantization_error);
            num_failed += failed;
            if (failed || verbose) {
                printf("%5s absolute quantization error:    %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], total_error);
            }

            const float reference_error = reference_quantization_error(qfns, test_size, test_data.data());
            failed = !(reference_error < MAX_QUANTIZATION_REFERENCE_ERROR);
            num_failed += failed;
            if (failed || verbose) {
                printf("%5s reference implementation error: %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], reference_error);
            }

            const float vec_dot_error = dot_product_error(qfns, test_size, test_data.data(), test_data2.data());
            // 2- and 3-bit types get the looser dot product threshold
            const float max_allowed_error = type == GGML_TYPE_Q2_K || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ2_XXS ||
                                            type == GGML_TYPE_IQ3_XXS || type == GGML_TYPE_IQ3_S ? MAX_DOT_PRODUCT_ERROR_LOWBIT
                                          : MAX_DOT_PRODUCT_ERROR;
            failed = !(vec_dot_error < max_allowed_error);
            num_failed += failed;
            if (failed || verbose) {
                printf("%5s dot product error:              %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], vec_dot_error);
            }
        }
    }

    if (num_failed || verbose) {
        printf("%d tests failed\n", num_failed);
    }

    ggml_free(ctx);

    return num_failed > 0;
}
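A note on running this test: in a typical CMake build of llama.cpp the file above is compiled into a test-quantize-fns binary and registered with CTest (exact target names may differ between revisions), so it can be run via "ctest -R test-quantize-fns" or invoked directly. The only flag the argument parser accepts is -v, which prints the per-type error lines even when every check passes; the process exit code is nonzero if any threshold was exceeded.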