mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-11-03 09:22:01 +00:00 
			
		
		
		
	op_getrows_f32 is required since https://github.com/ggerganov/llama.cpp/pull/6122 for the Vulkan w/ Kompute backend to be functional. As such, implement this op to make this backend functional again.
		
			
				
	
	
		
			32 lines
		
	
	
		
			762 B
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
			
		
		
	
	
			32 lines
		
	
	
		
			762 B
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
#version 450

#include "common.comp"

// One shader invocation per workgroup; each workgroup gathers one row.
layout(local_size_x = 1) in;

// inA:  source matrix data (f32 elements)
// inB:  row indices to gather from inA
// out_: destination buffer receiving the gathered rows
layout (binding = 0) readonly buffer tensorInA { float inA[]; };
layout (binding = 1) readonly buffer tensorInB { int inB[]; };
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };

layout (push_constant) uniform parameter {
    uint inAOff;  // element offset into inA
    uint inBOff;  // element offset into inB
    uint outOff;  // element offset into out_
    int ne00;     // number of elements per row
    int nb01;     // byte stride between rows of inA (divided by 4 at use to get float elements)
    int nb1;      // byte stride between rows of out_ (divided by 4 at use to get float elements)
} pcs;
 | 
						|
 | 
						|
// Copy k consecutive f32 elements from inA (starting at element index x)
// to out_ (starting at element index y). For f32 source data,
// "dequantization" is a straight element-wise copy.
void dequantize_row_f32(uint x /*Based from inA unaligned*/, uint y /*Based from out_*/, int k) {
    for (int j = 0; j < k; j++) {
        out_[y + j] = inA[x + j];
    }
}
 | 
						|
 | 
						|
void main() {
    // Workgroup index i selects both the entry of inB to read and the
    // destination row in out_.
    const uint i = gl_WorkGroupID.x;
    // r is the source row of inA to gather.
    const int r = inB[i + pcs.inBOff];

    // nb01/nb1 are byte strides; dividing by 4 (sizeof(float)) converts
    // them to float-element strides before adding the element offsets.
    dequantize_row_f32(r*pcs.nb01/4 + pcs.inAOff, i*pcs.nb1/4 + pcs.outOff, pcs.ne00);
}
 |