	Capture CUDA logging output (#7298)
* logging: output capture in cuda module

* fix compile error

* fix: vsnprintf terminates with 0, string use not correct

* post review

* Update llama.cpp
Co-authored-by: slaren <slarengh@gmail.com>

* Update llama.cpp
Co-authored-by: slaren <slarengh@gmail.com>

---------

Co-authored-by: slaren <slarengh@gmail.com>
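The vsnprintf note in the commit message refers to how the CUDA module formats a message before handing it to the registered callback. A minimal sketch of that pattern follows, assuming hypothetical module-level globals ggml_cuda_log_callback and ggml_cuda_log_user_data (the names in the actual CUDA module may differ):

#include <cstdarg>
#include <cstdio>
#include <vector>

#include "ggml.h"

// Hypothetical module-level state; the real CUDA module keeps equivalents,
// set via ggml_backend_cuda_log_set_callback.
static ggml_log_callback ggml_cuda_log_callback  = nullptr;
static void *            ggml_cuda_log_user_data = nullptr;

static void ggml_cuda_log(ggml_log_level level, const char * format, ...) {
    if (ggml_cuda_log_callback == nullptr) {
        return;
    }
    va_list args;
    va_start(args, format);
    char buffer[128];
    // vsnprintf always writes a terminating '\0'; its return value is the
    // length the full message would have, excluding that terminator.
    const int len = vsnprintf(buffer, sizeof(buffer), format, args);
    if (len < (int) sizeof(buffer)) {
        ggml_cuda_log_callback(level, buffer, ggml_cuda_log_user_data);
    } else {
        // message was truncated: retry with an exactly-sized buffer
        std::vector<char> big(len + 1);
        va_end(args);
        va_start(args, format);
        vsnprintf(big.data(), big.size(), format, args);
        ggml_cuda_log_callback(level, big.data(), ggml_cuda_log_user_data);
    }
    va_end(args);
}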
@@ -1697,6 +1697,8 @@ struct llama_state {
     llama_state() {
 #ifdef GGML_USE_METAL
         ggml_backend_metal_log_set_callback(log_callback, log_callback_user_data);
+#elif defined(GGML_USE_CUDA)
+        ggml_backend_cuda_log_set_callback(log_callback, log_callback_user_data);
 #endif
     }
 
@@ -18174,6 +18176,8 @@ void llama_log_set(ggml_log_callback log_callback, void * user_data) {
     g_state.log_callback_user_data = user_data;
 #ifdef GGML_USE_METAL
     ggml_backend_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
+#elif defined(GGML_USE_CUDA)
+    ggml_backend_cuda_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
 #endif
 }
 
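With this wired up, a single llama_log_set call is enough to capture CUDA backend messages along with the rest of llama.cpp's logging. A minimal caller-side sketch (the callback name and the stderr formatting are illustrative, not part of the API):

#include "llama.h"

#include <cstdio>

// Illustrative callback: tags each captured log line with its severity.
static void capture_log(ggml_log_level level, const char * text, void * user_data) {
    (void) user_data;
    const char * tag =
        level == GGML_LOG_LEVEL_ERROR ? "ERROR" :
        level == GGML_LOG_LEVEL_WARN  ? "WARN " : "INFO ";
    fprintf(stderr, "[%s] %s", tag, text);
}

int main() {
    // After this change, llama_log_set also forwards the callback to the
    // CUDA backend when built with GGML_USE_CUDA, so CUDA-side messages
    // no longer bypass it.
    llama_log_set(capture_log, /*user_data=*/nullptr);
    // ... load a model and run inference as usual ...
    return 0;
}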
fraxy-v