mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-11-03 09:22:01 +00:00 
			
		
		
		
	Signed-off-by: Jared Van Bortel <jared@nomic.ai> Co-authored-by: niansa <anton-sa@web.de> Co-authored-by: Adam Treat <treat.adam@gmail.com> Co-authored-by: Aaron Miller <apage43@ninjawhale.com> Co-authored-by: ToKiNoBug <tokinobug@163.com> Co-authored-by: Georgi Gerganov <ggerganov@gmail.com> Co-authored-by: slaren <slarengh@gmail.com>
		
			
				
	
	
		
			24 lines
		
	
	
		
			528 B
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
			
		
		
	
	
			24 lines
		
	
	
		
			528 B
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
#version 450

#include "common.comp"

// Single invocation per workgroup; each workgroup handles a contiguous
// run of 8 tensor elements (dispatch supplies nelements/8 workgroups).
layout(local_size_x = 1) in;

// Flat float views of the source and destination tensors; actual tensor
// start positions are carried as element offsets in the push constants.
layout(binding = 0) buffer restrict readonly tensorIn { float in_[]; };
layout(binding = 1) buffer restrict writeonly tensorOut { float out_[]; };

layout(push_constant) uniform PushConstants {
    uint inOff;   // element offset into in_ where the source tensor begins
    uint outOff;  // element offset into out_ where the destination begins
    float scale;  // multiplier applied to every element
} pcs;

// out[i] = in[i] * scale, over this workgroup's 8-element chunk.
void main() {
    // First element of the chunk owned by this workgroup.
    const uint first = gl_WorkGroupID.x * 8;

    for (uint k = 0u; k < 8u; ++k) {
        const uint elem = first + k;
        out_[elem + pcs.outOff] = in_[elem + pcs.inOff] * pcs.scale;
    }
}