mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-30 08:42:00 +00:00 
			
		
		
		
	Another attempt by V3.1 non-thinking
This commit is contained in:
@@ -774,6 +774,9 @@ struct server_task_result_cmpl_final : server_task_result {
         if (!stream && !probs_output.empty()) {
             res["completion_probabilities"] = completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs);
         }
+        if (!oaicompat_msg.reasoning_content.empty()) {
+            res["reasoning_content"] = oaicompat_msg.reasoning_content;
+        }
         return response_fields.empty() ? res : json_get_nested_values(response_fields, res);
     }
 
		Reference in New Issue
	
	Block a user
	 Jesse CreateThis
					Jesse CreateThis