mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-30 08:42:00 +00:00 
			
		
		
		
	graph : Pass the graph placeholder message in debug mode (#14748)
Without that condition, this debug log clutters the screen for every batch processed during prompt processing, and for every token generated in Kobold.cpp.
This commit is contained in:
		| @@ -467,7 +467,9 @@ bool llm_graph_result::can_reuse(const llm_graph_params & params) { | ||||
|     for (auto & input : inputs) { | ||||
|         const bool cur = input->can_reuse(params); | ||||
|  | ||||
|         LLAMA_LOG_DEBUG("  %s: can_reuse = %d\n", "placeholder", cur); | ||||
|         if (debug > 1) { | ||||
|             LLAMA_LOG_DEBUG("%s: can_reuse = %d\n", "placeholder", cur); | ||||
|         } | ||||
|  | ||||
|         res = res && cur; | ||||
|     } | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Nexes the Elder
					Nexes the Elder