mirror of https://github.com/ggml-org/llama.cpp.git

commit 905d87b70a
* CUDA kernel for q4_0 dequant. + mat. vec. mult.

* Added q4_1 via template

* Added missing __syncthreads();

* --gpu_layers -> --gpu-layers

* Shorter dequantize_mul_mat_vec line

* q5_0 dequantize_mul_mat kernel

* More readable dequantize_mul_mat_vec logic

* dequantize_mul_mat_vec kernels for q5_1, q8_0, f16

* llama : offload "output" tensor to GPU too + coding style fixes

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
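The common pattern behind the kernels this commit message lists is fusing dequantization into the dot product: each CUDA block computes one row of the matrix-vector product, dequantizing the quantized weights on the fly instead of materializing an f32 copy of the matrix first. Below is a minimal sketch for q4_0 only, not the commit's actual templated kernel; it assumes the q4_0 block layout of the time (one float scale d per 32 weights, low nibbles holding the first 16 elements and high nibbles the last 16), a launch of one 32-thread warp per row, and ncols divisible by 32.

#include <stdint.h>

#define QK4_0 32

// assumed q4_0 block layout: one scale, 32 packed 4-bit weights
typedef struct {
    float   d;              // per-block scale
    uint8_t qs[QK4_0 / 2];  // 32 4-bit weights, two per byte
} block_q4_0;

// dst[row] = dot(x[row, :], y), dequantizing x on the fly;
// launch with one 32-thread warp per row
static __global__ void dequantize_mul_mat_vec_q4_0(
        const block_q4_0 * x, const float * y, float * dst, const int ncols) {
    const int row     = blockIdx.x;
    const int tid     = threadIdx.x;
    const int nblocks = ncols / QK4_0;

    float sum = 0.0f;

    // each thread accumulates a strided subset of this row's quant blocks
    for (int i = tid; i < nblocks; i += blockDim.x) {
        const block_q4_0 * b = &x[row*nblocks + i];
        const float d = b->d;

        for (int j = 0; j < QK4_0/2; ++j) {
            const int q = b->qs[j];
            sum += ((q & 0x0F) - 8)*d * y[i*QK4_0 + j];            // low nibble
            sum += ((q >>   4) - 8)*d * y[i*QK4_0 + j + QK4_0/2];  // high nibble
        }
    }

    // warp-level reduction of the per-thread partial sums
    for (int mask = 16; mask > 0; mask >>= 1) {
        sum += __shfl_xor_sync(0xffffffff, sum, mask);
    }

    if (tid == 0) {
        dst[row] = sum;
    }
}

A launch then looks like dequantize_mul_mat_vec_q4_0<<<nrows, 32>>>(x, y, dst, ncols). Keeping the reduction within a single warp sidesteps shared memory entirely; the commit's real kernels are more elaborate (templated over the dequantize function, with the __syncthreads() fix noted above).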
22 lines · 701 B · C
#include "ggml.h"

#ifdef  __cplusplus
extern "C" {
#endif

void   ggml_init_cublas(void);

bool   ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
size_t ggml_cuda_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
void   ggml_cuda_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);

// TODO: export these with GGML_API
void * ggml_cuda_host_malloc(size_t size);
void   ggml_cuda_host_free(void * ptr);

void ggml_cuda_transform_tensor(struct ggml_tensor * tensor);

#ifdef  __cplusplus
}
#endif
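For context, a hedged sketch of how these declarations compose. The wrapper name and the per-call scratch allocation below are illustrative assumptions; in the repo itself the work buffer comes from ggml's graph-compute scratch memory, and ggml_cuda_transform_tensor is what the --gpu-layers offloading path uses to move a weight tensor's data into VRAM.

#include "ggml.h"
#include "ggml-cuda.h"

#include <stdbool.h>
#include <stddef.h>

// hypothetical helper: run one mat. mul. on the GPU if supported;
// assumes ggml_init_cublas() was already called once at startup
static bool try_cuda_mul_mat(const struct ggml_tensor * src0,
                             const struct ggml_tensor * src1,
                             struct ggml_tensor * dst) {
    if (!ggml_cuda_can_mul_mat(src0, src1, dst)) {
        return false;  // this shape/type combination stays on the CPU
    }

    // scratch buffer used while staging/dequantizing data for the mat. mul.
    const size_t wsize = ggml_cuda_mul_mat_get_wsize(src0, src1, dst);
    void * wdata = ggml_cuda_host_malloc(wsize);  // pinned host memory

    ggml_cuda_mul_mat(src0, src1, dst, wdata, wsize);

    ggml_cuda_host_free(wdata);
    return true;
}

Pinned (page-locked) host memory from ggml_cuda_host_malloc is what makes the host-to-device copies inside ggml_cuda_mul_mat asynchronous-capable, which is why the header exposes the malloc/free pair alongside the mat. mul. entry points.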