Mirror of https://github.com/ggml-org/llama.cpp.git
* Refactor shaders, extract GLSL code from ggml_vk_generate_shaders.py into vulkan-shaders directory
* Improve debug log code
* Add memory debug output option
* Fix flake8
* Fix unnecessary high llama-3 VRAM use
		
			
				
	
	
		
43 lines · 1.7 KiB · Plaintext
#version 450

#include "dequant_head.comp"

layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};

void main() {
    // Each workgroup walks up to 256 Q3_K super-blocks (QUANT_K values each).
    [[unroll]] for (uint wgy = 0; wgy < 256; wgy++) {
        const uint i = uint(gl_WorkGroupID.x * 256 + wgy);
        if (i >= p.M * p.K / QUANT_K) {
            return;
        }

        // Decompose the invocation index: each of the 64 invocations
        // dequantizes 4 consecutive values within one 32-value chunk.
        const uint r = gl_LocalInvocationID.x / 4;
        const uint tid = r / 2;
        const uint is0 = r % 2;
        const uint l0 = 16 * is0 + 4 * (gl_LocalInvocationID.x % 4);
        const uint n = tid / 4;
        const uint j = tid - 4*n;

        const uint8_t m = uint8_t(1 << (4*n + j)); // hmask bit holding the high (3rd) quant bit
        const uint is = 8*n + 2*j + is0;           // index of the 16-value sub-block (0..15)
        const uint shift = 2*j;                    // bit offset of the low 2 quant bits in qs

        // Unpack the 6-bit sub-block scale: its low 4 bits and high 2 bits
        // are bit-packed into the 12-byte scales array.
        const int8_t us = int8_t(is <  4 ? (data_a[i].scales[is-0] & 0xF) | (((data_a[i].scales[is+8] >> 0) & 3) << 4) :
                                 is <  8 ? (data_a[i].scales[is-0] & 0xF) | (((data_a[i].scales[is+4] >> 2) & 3) << 4) :
                                 is < 12 ? (data_a[i].scales[is-8] >>  4) | (((data_a[i].scales[is+0] >> 4) & 3) << 4) :
                                           (data_a[i].scales[is-8] >>  4) | (((data_a[i].scales[is-4] >> 6) & 3) << 4));
        const FLOAT_TYPE d_all = FLOAT_TYPE(data_a[i].d);     // super-block scale
        const FLOAT_TYPE dl    = d_all * FLOAT_TYPE(us - 32); // sub-block scales are stored with a +32 offset

        const uint y_idx = i * QUANT_K + 128 * n + 32 * j;
        const uint qs_idx = 32*n;

        // Combine the low 2 bits from qs with the high bit from hmask; an
        // unset hmask bit shifts the value down by 4, so quants span -4..3.
        for (uint l = l0; l < l0 + 4; ++l) {
            data_b[y_idx + l] = D_TYPE(dl * FLOAT_TYPE(int8_t((data_a[i].qs[qs_idx + l] >> shift) & 3) - (((data_a[i].hmask[l] & m) != 0) ? 0 : 4)));
        }
    }
}
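For reference, the fields the shader reads (d, scales, qs, hmask) follow ggml's Q3_K super-block layout. The sketch below is illustrative only, assuming QUANT_K = 256 and the field sizes ggml uses for block_q3_K; the authoritative definition lives with the A_TYPE declaration in the shader headers (and in ggml-common.h on the host side).

// Hedged sketch of the Q3_K super-block this shader binds as A_TYPE.
// Assumes QUANT_K = 256 and GL_EXT_shader_explicit_arithmetic_types
// for the 8-bit and 16-bit scalar types; verify against your tree.
struct block_q3_K {
    uint8_t   hmask[256 / 8]; // 3rd bit of each quant, 1 bit per weight
    uint8_t   qs[256 / 4];    // low 2 bits of each quant, 4 weights per byte
    uint8_t   scales[12];     // sixteen 6-bit sub-block scales, bit-packed
    float16_t d;              // fp16 super-block scale
};

Under these assumed sizes, one super-block of 256 weights occupies 32 + 64 + 12 + 2 = 110 bytes, i.e. about 3.44 bits per weight, which matches the published size of the Q3_K format.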