Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-11-03 09:22:01 +00:00
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Co-authored-by: niansa <anton-sa@web.de>
Co-authored-by: Adam Treat <treat.adam@gmail.com>
Co-authored-by: Aaron Miller <apage43@ninjawhale.com>
Co-authored-by: ToKiNoBug <tokinobug@163.com>
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Co-authored-by: slaren <slarengh@gmail.com>
void main() {
    // each workgroup handles one row of the output; i is that row's index
    const uint i = gl_WorkGroupID.x;
    // r is the source row in inA selected by the i-th index stored in inB
    const int r = inB[i + pcs.inBOff];

    // z counts the output elements written so far for this row
    int z = 0;
    // walk the selected source row in 16-element chunks (ne00/16 chunks per row)
    for (uint ind = gl_LocalInvocationID.x; ind < pcs.ne00/16; ind += gl_WorkGroupSize.x) {
        // offset of the quantized block that contains this chunk
        const uint inIndex = (r * pcs.nb01 + pcs.inAOff) + ind/NL * SIZE_OF_BLOCK;
        // dequantize chunk ind%NL of that block into 16 floats (a mat4)
        const mat4 result = dequantize_block(inIndex, ind%NL);
        for (uint j = 0; j < 4; ++j) {
            for (uint k = 0; k < 4; ++k) {
                // destination element within row i of the output tensor
                const uint outIndex = i * pcs.nb1/BYTES_FOR_TYPE + pcs.outOff + z;
                out_[outIndex] = result[j][k];
                ++z;
            }
        }
    }
}
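
The snippet above is only the body of the shader; the buffers, push constants, and helpers it references (inA, inB, out_, pcs, NL, SIZE_OF_BLOCK, BYTES_FOR_TYPE, dequantize_block) are declared elsewhere in the shader sources. Below is a minimal sketch of what those declarations could look like; the bindings, the workgroup size, and the q4_0-style block constants are illustrative assumptions, not the repository's exact definitions.

// Sketch of the declarations assumed by main() above (they would precede it
// in the assembled shader). Names follow the snippet; bindings, workgroup
// size, and the q4_0-style constants are assumptions for illustration.
#version 450
#extension GL_EXT_shader_8bit_storage: require
#extension GL_EXT_shader_explicit_arithmetic_types_int8: require

#define NL 2                // 16-float chunks per quantized block (assumed q4_0: 32 elements)
#define SIZE_OF_BLOCK 18    // bytes per quantized block (assumed q4_0: 2-byte scale + 16 bytes of nibbles)
#define BYTES_FOR_TYPE 4    // sizeof(float), the output element type

layout(local_size_x = 1) in;  // assumed: z only tracks elements written by this invocation

layout(binding = 0) readonly  buffer tensorInA { uint8_t inA[]; };  // quantized source rows
layout(binding = 1) readonly  buffer tensorInB { int     inB[]; };  // row indices to gather
layout(binding = 2) writeonly buffer tensorOut { float   out_[]; }; // dequantized output rows

layout(push_constant) uniform parameter {
    uint inAOff;   // offsets into the bound buffers
    uint inBOff;
    uint outOff;
    int  ne00;     // elements per source row
    int  nb01;     // byte stride between source rows in inA
    int  nb1;      // byte stride between output rows in out_
} pcs;

// Dequantizes the il-th 16-element chunk of the block starting at byte
// offset `index` in inA; the definition depends on the quantization format.
mat4 dequantize_block(uint index, uint il);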