mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	tts : remove printfs (#12640)
* tts.cpp : llama tokens console output is done using LOG_INF instead of printf(). Therefore the options '--log-disable' and '--log-file' now have a uniform impact on all output.
This commit is contained in:
		| @@ -699,11 +699,13 @@ lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|14 | |||||||
|             const std::string voice_data = audio_data; |             const std::string voice_data = audio_data; | ||||||
|  |  | ||||||
|             auto tmp = common_tokenize(vocab, voice_data, false, true); |             auto tmp = common_tokenize(vocab, voice_data, false, true); | ||||||
|             printf("\n\n"); |  | ||||||
|  |             std::ostringstream tokens_oss; | ||||||
|             for (size_t i = 0; i < tmp.size(); ++i) { |             for (size_t i = 0; i < tmp.size(); ++i) { | ||||||
|                 printf("%d, ", tmp[i]); |                 tokens_oss << tmp[i] << ", "; | ||||||
|             } |             } | ||||||
|             printf("\n\n"); |             LOG_INF("\n\n%s: llama tokens: %s\n\n", __func__, tokens_oss.str().c_str()); | ||||||
|  |  | ||||||
|             prompt_add(prompt_inp, tmp); |             prompt_add(prompt_inp, tmp); | ||||||
| #else | #else | ||||||
|             prompt_add(prompt_inp, llama_tokens { |             prompt_add(prompt_inp, llama_tokens { | ||||||
|   | |||||||
		Reference in New Issue
	
	Block a user
	 marcoStocchi
					marcoStocchi