Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-31 08:51:55 +00:00)
Commit 7c7836d9d4
* Refactor shaders, extract GLSL code from ggml_vk_generate_shaders.py into vulkan-shaders directory
* Improve debug log code
* Add memory debug output option
* Fix flake8
* Fix unnecessary high llama-3 VRAM use
10 lines · 160 B · Plaintext
#extension GL_EXT_shader_16bit_storage : require

layout (push_constant) uniform parameter
{
    uint KX;
    uint KY;
    float param1;
    float param2;
} p;
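For reference, a push-constant block like this has to be mirrored byte-for-byte by a plain struct on the host side, which is then uploaded with vkCmdPushConstants before the compute dispatch is recorded. The sketch below shows one way such a host-side mirror could look; the struct and helper names are illustrative assumptions, not the actual ggml-vulkan code.

// Hypothetical host-side mirror of the GLSL push_constant block above.
// The field order and sizes must match the shader exactly (16 bytes total).
#include <cstdint>
#include <vulkan/vulkan.h>

struct op_push_constants {
    uint32_t KX;      // matches `uint KX` in the shader
    uint32_t KY;      // matches `uint KY` in the shader
    float    param1;  // matches `float param1`
    float    param2;  // matches `float param2`
};
static_assert(sizeof(op_push_constants) == 16, "layout must match the GLSL block");

// Record the constants into the command buffer before vkCmdDispatch.
static void push_op_constants(VkCommandBuffer cmd, VkPipelineLayout layout,
                              const op_push_constants & pc) {
    vkCmdPushConstants(cmd, layout, VK_SHADER_STAGE_COMPUTE_BIT,
                       0, (uint32_t) sizeof(pc), &pc);
}

Push constants suit small per-op parameters like these because Vulkan guarantees at least 128 bytes of push-constant space, so no per-dispatch uniform buffer allocation is needed.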