Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-10-30 08:42:00 +00:00.
			
		
		
		
	 7c7836d9d4
			
		
	
	7c7836d9d4
	
	
	
		
			
* Refactor shaders, extract GLSL code from ggml_vk_generate_shaders.py into vulkan-shaders directory
* Improve debug log code
* Add memory debug output option
* Fix flake8
* Fix unnecessarily high llama-3 VRAM use
		
			
				
	
	
		
			32 lines
		
	
	
		
			970 B
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
			
		
		
	
	
			32 lines
		
	
	
		
			970 B
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
| #version 450
 | |
| 
 | |
| #include "types.comp"
 | |
| #include "generic_binary_head.comp"
 | |
| #include "dequant_funcs.comp"
 | |
| 
 | |
| void main() {
 | |
|     const uint i00 = (gl_GlobalInvocationID.x)*2;
 | |
|     const uint i10 = gl_GlobalInvocationID.y;
 | |
|     const uint i11 = (gl_GlobalInvocationID.z)/p.ne12;
 | |
|     const uint i12 = (gl_GlobalInvocationID.z)%p.ne12;
 | |
| 
 | |
|     if (i00 >= p.ne00) {
 | |
|         return;
 | |
|     }
 | |
| 
 | |
|     const uint i01 = data_b[i10*p.nb10 + i11*p.nb11 + i12*p.nb12];
 | |
| 
 | |
|     const uint a_offset = i01*p.nb01 + i11*p.nb02 + i12*p.nb03;
 | |
|     const uint d_offset = i10*p.nb21 + i11*p.nb22 + i12*p.nb23;
 | |
| 
 | |
|     const uint ib = a_offset + i00/QUANT_K; // block index
 | |
|     const uint iqs = (i00%QUANT_K)/QUANT_R; // quant index
 | |
|     const uint iybs = i00 - i00%QUANT_K; // dst block start index
 | |
|     const uint y_offset = QUANT_R == 1 ? 1 : QUANT_K/2;
 | |
| 
 | |
|     vec2 v = dequantize(ib, iqs, 0);
 | |
| 
 | |
|     data_d[d_offset + iybs + iqs           ] = D_TYPE(v.x);
 | |
|     data_d[d_offset + iybs + iqs + y_offset] = D_TYPE(v.y);
 | |
| }
 |