mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-30 08:42:00 +00:00 
			
		
		
		
	server : fix divide-by-zero in metrics reporting (#11915)
This commit is contained in:
		| @@ -3656,7 +3656,7 @@ int main(int argc, char ** argv) { | ||||
|             }, { | ||||
|                     {"name",  "n_busy_slots_per_decode"}, | ||||
|                     {"help",  "Average number of busy slots per llama_decode() call"}, | ||||
| -                   {"value",  (float) res_metrics->n_busy_slots_total / (float) res_metrics->n_decode_total} | ||||
| +                   {"value",  (float) res_metrics->n_busy_slots_total / std::max((float) res_metrics->n_decode_total, 1.f)} | ||||
|             }}}, | ||||
|             {"gauge", {{ | ||||
|                     {"name",  "prompt_tokens_seconds"}, | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Antoine Viallon