From 399d2a10e23fcf37cc7a703d7de50ffecc7e0c6f Mon Sep 17 00:00:00 2001
From: Woosuk Kwon
Date: Thu, 7 Aug 2025 08:54:39 -0700
Subject: [PATCH] Fix pre-commit error in main (#22462)

Signed-off-by: Woosuk Kwon
---
 vllm/entrypoints/openai/serving_responses.py | 36 ++++++++++----------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/vllm/entrypoints/openai/serving_responses.py b/vllm/entrypoints/openai/serving_responses.py
index d40231795b..a7554e0d68 100644
--- a/vllm/entrypoints/openai/serving_responses.py
+++ b/vllm/entrypoints/openai/serving_responses.py
@@ -422,24 +422,6 @@ class OpenAIServingResponses(OpenAIServing):
             usage=usage,
         )
 
-        # Log complete response if output logging is enabled
-        if self.enable_log_outputs and self.request_logger:
-            output_text = ""
-            if content:
-                output_text = content
-            elif reasoning_content:
-                output_text = f"[reasoning: {reasoning_content}]"
-
-            if output_text:
-                self.request_logger.log_outputs(
-                    request_id=request.request_id,
-                    outputs=output_text,
-                    output_token_ids=final_output.token_ids,
-                    finish_reason=final_output.finish_reason,
-                    is_streaming=False,
-                    delta=False,
-                )
-
         if request.store:
             async with self.response_store_lock:
                 stored_response = self.response_store.get(response.id)
@@ -469,6 +451,24 @@ class OpenAIServingResponses(OpenAIServing):
             reasoning_content = None
             content = final_output.text
 
+        # Log complete response if output logging is enabled
+        if self.enable_log_outputs and self.request_logger:
+            output_text = ""
+            if content:
+                output_text = content
+            elif reasoning_content:
+                output_text = f"[reasoning: {reasoning_content}]"
+
+            if output_text:
+                self.request_logger.log_outputs(
+                    request_id=request.request_id,
+                    outputs=output_text,
+                    output_token_ids=final_output.token_ids,
+                    finish_reason=final_output.finish_reason,
+                    is_streaming=False,
+                    delta=False,
+                )
+
         output = []
         if reasoning_content:
             reasoning_item = ResponseReasoningItem(
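
The diff moves the output-logging block from before the request.store branch to
after the point where reasoning_content and content are assigned. The patch
subject says only "Fix pre-commit error"; a plausible reading (an assumption,
not stated in the patch) is that a pre-commit linter such as ruff flagged the
block for reading names before they are bound on that code path (e.g. F821,
"undefined name"). The standalone sketch below, using hypothetical names
rather than vLLM code, illustrates that pattern and why relocating the block
below the assignments resolves it:

# Minimal standalone sketch (hypothetical names, not vLLM code) of the
# use-before-assignment pattern that pre-commit linters commonly reject,
# and how moving the dependent block below the assignments fixes it.

def build_log_text(final_text: str, has_reasoning: bool) -> str:
    # BAD (as in the pre-move code): reading the names here would be
    # flagged, because neither is bound yet at this point:
    #
    #     output_text = content or f"[reasoning: {reasoning_content}]"

    if has_reasoning:
        reasoning_content = final_text
        content = ""
    else:
        reasoning_content = ""
        content = final_text

    # GOOD (as in the post-move code): both names are bound on every
    # path before they are read.
    output_text = content or f"[reasoning: {reasoning_content}]"
    return output_text


if __name__ == "__main__":
    print(build_log_text("hello", has_reasoning=False))    # hello
    print(build_log_text("thinking", has_reasoning=True))  # [reasoning: thinking]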