mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	 7c7836d9d4
			
		
	
	7c7836d9d4
	
	
	
		
			
			* Refactor shaders, extract GLSL code from ggml_vk_generate_shaders.py into vulkan-shaders directory * Improve debug log code * Add memory debug output option * Fix flake8 * Fix unnecessary high llama-3 VRAM use
		
			
				
	
	
		
			14 lines
		
	
	
		
			367 B
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
			
		
		
	
	
			14 lines
		
	
	
		
			367 B
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
#version 450

#include "types.comp"
#include "generic_unary_head.comp"

// Element-wise clamp kernel: for every element of the source tensor,
// writes max(p.param1, min(p.param2, value)) to the destination.
// p.param1 is the lower bound, p.param2 the upper bound; p.ne is the
// total element count (one invocation per element).
void main() {
    const uint i = gl_GlobalInvocationID.x;

    // Dispatches are rounded up to the workgroup size, so trailing
    // invocations past the element count must bail out.
    if (i >= p.ne) {
        return;
    }

    const FLOAT_TYPE x = FLOAT_TYPE(data_a[src0_idx(i)]);

    // Explicit three-way selection: below range -> lower bound,
    // above range -> upper bound, otherwise pass through unchanged.
    FLOAT_TYPE result;
    if (x < p.param1) {
        result = p.param1;
    } else if (x > p.param2) {
        result = p.param2;
    } else {
        result = x;
    }

    data_d[p.d_offset + dst_idx(i)] = D_TYPE(result);
}