Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-03 09:22:01 +00:00)
			
		
		
		
	* Refactor shaders: extract GLSL code from ggml_vk_generate_shaders.py into the vulkan-shaders directory
	* Improve debug log code
	* Add memory debug output option
	* Fix flake8 warnings
	* Fix unnecessarily high llama-3 VRAM use
		
			
				
	
	
		
			36 lines
		
	
	
		
			1.0 KiB
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
			
		
		
	
	
			36 lines
		
	
	
		
			1.0 KiB
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
#version 450
 | 
						|
 | 
						|
#include "dequant_head.comp"
 | 
						|
 | 
						|
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
 | 
						|
 | 
						|
layout (binding = 0) readonly buffer A {block_q5_1 data_a[];};
 | 
						|
layout (binding = 1) writeonly buffer D {D_TYPE data_b[];};
 | 
						|
 | 
						|
void main() {
 | 
						|
    const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64;
 | 
						|
 | 
						|
    const uint tid = gl_LocalInvocationID.x % 64;
 | 
						|
    const uint il  = tid/32;
 | 
						|
    const uint ir  = tid%32;
 | 
						|
    const uint ib = 32*i + ir;
 | 
						|
    if (ib >= p.nel / 32) {
 | 
						|
        return;
 | 
						|
    }
 | 
						|
 | 
						|
    const uint b_idx = 1024*i + 32*ir + 8*il;
 | 
						|
 | 
						|
    const float d = float(data_a[ib].d);
 | 
						|
    const float m = float(data_a[ib].m);
 | 
						|
    const uint qh = data_a[ib].qh;
 | 
						|
 | 
						|
    const uint q_idx = 8*il;
 | 
						|
 | 
						|
    [[unroll]] for (uint l = 0; l < 8; ++l) {
 | 
						|
        const uint iqs = q_idx + l;
 | 
						|
        const uint vui = uint(data_a[ib].qs[iqs]);
 | 
						|
        data_b[b_idx + l +  0] = D_TYPE(d * (((vui & 0xF) | (((qh >> iqs) << 4) & 0x10))) + m);
 | 
						|
        data_b[b_idx + l + 16] = D_TYPE(d * (((vui >>  4) | ((qh >> (iqs + 12)) & 0x10))) + m);
 | 
						|
    }
 | 
						|
}
 |