Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-30 08:42:00 +00:00
			
		
		
		
	ggml: unify backend logging mechanism (#9709)
* Add scaffolding for ggml logging macros
* Metal backend now uses GGML logging
* Cuda backend now uses GGML logging
* Cann backend now uses GGML logging
* Add enum tag to parameters
* Use C memory allocation funcs
* Fix compile error
* Use GGML_LOG instead of GGML_PRINT
* Rename llama_state to llama_logger_state
* Prevent null format string
* Fix whitespace
* Remove log callbacks from ggml backends
* Remove cuda log statement
This commit is contained in:
@@ -33,6 +33,21 @@ extern "C" {
 #endif
 #endif
 
+//
+// logging
+//
+
+GGML_ATTRIBUTE_FORMAT(2, 3)
+void ggml_log_internal        (enum ggml_log_level level, const char * format, ...);
+void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data);
+
+#define GGML_LOG(...)       ggml_log_internal(GGML_LOG_LEVEL_NONE , __VA_ARGS__)
+#define GGML_LOG_INFO(...)  ggml_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
+#define GGML_LOG_WARN(...)  ggml_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
+#define GGML_LOG_ERROR(...) ggml_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
+#define GGML_LOG_DEBUG(...) ggml_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
+#define GGML_LOG_CONT(...)  ggml_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__)
+
 // bitset
 
 typedef uint32_t ggml_bitset_t;
		Reference in New Issue
	
	Block a user
	 bandoti