Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-03 09:22:01 +00:00).
			
		
		
		
	* Refactor shaders, extract GLSL code from ggml_vk_generate_shaders.py into vulkan-shaders directory * Improve debug log code * Add memory debug output option * Fix flake8 * Fix unnecessary high llama-3 VRAM use
		
			
				
	
	
		
			17 lines
		
	
	
		
			419 B
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
			
		
		
	
	
			17 lines
		
	
	
		
			419 B
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
#version 450

#include "types.comp"
#include "generic_unary_head.comp"

// Element-wise copy kernel: dst[i] = src0[i].
// One invocation handles one element; `p` (push constants from
// generic_unary_head.comp) supplies the element count and offsets,
// and dst_idx()/src0_idx() map the flat index to strided positions.
void main() {
    const uint i = gl_GlobalInvocationID.x;

    // Dispatches are rounded up to the workgroup size, so invocations
    // past the element count must bail out.
    if (i >= p.ne) {
        return;
    }

#ifndef OPTIMIZATION_ERROR_WORKAROUND
    // Normal path: convert the source element to the destination type on store.
    data_d[p.d_offset + dst_idx(i)] = D_TYPE(data_a[src0_idx(i)]);
#else
    // NOTE(review): workaround path stores without the explicit D_TYPE()
    // conversion — presumably for compilers where the cast miscompiles and
    // source/destination element types already match; confirm against the
    // shader generator that defines OPTIMIZATION_ERROR_WORKAROUND.
    data_d[p.d_offset + dst_idx(i)] = data_a[src0_idx(i)];
#endif
}