mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-30 08:42:00 +00:00 
			
		
		
		
	 95f57bb5d5
			
		
	
	95f57bb5d5
	
	
	
		
			
			* ggml : remove ggml_task_type and GGML_PERF * check abort_callback on main thread only * vulkan : remove usage of ggml_compute_params * remove LLAMA_PERF
		
			
				
	
	
		
			15 lines
		
	
	
		
			302 B
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			15 lines
		
	
	
		
			302 B
		
	
	
	
		
			C
		
	
	
	
	
	
#pragma once

#include <stdint.h>
#include <stdbool.h>

#ifdef __cplusplus
extern "C" {
#endif

// Multithreaded matrix multiplication kernel (from llamafile).
// Computes a GEMM-style product over quantized/float blocks: conceptually
// C = A * B with the usual BLAS leading-dimension layout.
//
// Parameter names follow the BLAS GEMM convention:
//   m, n, k        — matrix dimensions (A is m x k, B is k x n, C is m x n)
//   A, lda         — left operand and its leading dimension
//   B, ldb         — right operand and its leading dimension
//   C, ldc         — output matrix and its leading dimension
//   ith, nth       — index of the calling thread and total thread count
//                    (caller partitions work across threads)
//   Atype, Btype, Ctype — element/quantization type tags for each matrix
//                    (NOTE(review): names for the five trailing ints are
//                    inferred from the llamafile implementation — TODO
//                    confirm against sgemm.cpp)
//
// Returns true if the kernel handled this shape/type combination,
// false if the caller must fall back to a generic implementation.
bool llamafile_sgemm(int64_t m, int64_t n, int64_t k,
                     const void *A, int64_t lda,
                     const void *B, int64_t ldb,
                     void *C, int64_t ldc,
                     int ith, int nth,
                     int Atype, int Btype, int Ctype);

#ifdef __cplusplus
}
#endif