Mirror of https://github.com/vllm-project/vllm.git
Fix pre-commit error in main (#22462)
Signed-off-by: Woosuk Kwon <woosuk.kwon@berkeley.edu>
@@ -422,24 +422,6 @@ class OpenAIServingResponses(OpenAIServing):
             usage=usage,
         )
-
-        # Log complete response if output logging is enabled
-        if self.enable_log_outputs and self.request_logger:
-            output_text = ""
-            if content:
-                output_text = content
-            elif reasoning_content:
-                output_text = f"[reasoning: {reasoning_content}]"
-
-            if output_text:
-                self.request_logger.log_outputs(
-                    request_id=request.request_id,
-                    outputs=output_text,
-                    output_token_ids=final_output.token_ids,
-                    finish_reason=final_output.finish_reason,
-                    is_streaming=False,
-                    delta=False,
-                )
 
         if request.store:
             async with self.response_store_lock:
                 stored_response = self.response_store.get(response.id)
@@ -469,6 +451,24 @@ class OpenAIServingResponses(OpenAIServing):
             reasoning_content = None
             content = final_output.text
 
+        # Log complete response if output logging is enabled
+        if self.enable_log_outputs and self.request_logger:
+            output_text = ""
+            if content:
+                output_text = content
+            elif reasoning_content:
+                output_text = f"[reasoning: {reasoning_content}]"
+
+            if output_text:
+                self.request_logger.log_outputs(
+                    request_id=request.request_id,
+                    outputs=output_text,
+                    output_token_ids=final_output.token_ids,
+                    finish_reason=final_output.finish_reason,
+                    is_streaming=False,
+                    delta=False,
+                )
+
         output = []
         if reasoning_content:
             reasoning_item = ResponseReasoningItem(
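In effect, the commit moves the non-streaming "log complete response" block from just after the response object is built to just after `reasoning_content` and `content` are assigned, so the logging code only runs once both names are bound. (That this is what the pre-commit hook flagged is an inference from the diff; the commit message does not say.) Below is a minimal, self-contained sketch of the relocated logic. The `log_outputs` call and its keyword arguments are copied from the diff; `FinalOutput`, `SimpleRequestLogger`, and `log_final_response` are hypothetical stand-ins introduced here for illustration, not vLLM APIs.

# Hypothetical stand-ins (not vLLM APIs): FinalOutput, SimpleRequestLogger,
# and log_final_response exist only to make the relocated logic runnable.
from dataclasses import dataclass
from typing import Optional


@dataclass
class FinalOutput:
    text: str
    token_ids: list[int]
    finish_reason: Optional[str]


class SimpleRequestLogger:
    def log_outputs(self, request_id, outputs, output_token_ids,
                    finish_reason, is_streaming, delta):
        # Stand-in for the request logger in the diff; just prints.
        print(f"[{request_id}] outputs={outputs!r} "
              f"token_ids={output_token_ids} finish_reason={finish_reason}")


def log_final_response(request_id: str,
                       final_output: FinalOutput,
                       reasoning_content: Optional[str],
                       request_logger: Optional[SimpleRequestLogger],
                       enable_log_outputs: bool) -> None:
    # `content` is bound first, mirroring the new position of the logging
    # block in the diff: it now runs only after both names are assigned.
    content = final_output.text
    if not (enable_log_outputs and request_logger):
        return
    output_text = ""
    if content:
        output_text = content
    elif reasoning_content:
        output_text = f"[reasoning: {reasoning_content}]"
    if output_text:
        request_logger.log_outputs(
            request_id=request_id,
            outputs=output_text,
            output_token_ids=final_output.token_ids,
            finish_reason=final_output.finish_reason,
            is_streaming=False,
            delta=False,
        )


# Example usage:
logger = SimpleRequestLogger()
final = FinalOutput(text="Hello!", token_ids=[9906, 0], finish_reason="stop")
log_final_response("req-1", final, reasoning_content=None,
                   request_logger=logger, enable_log_outputs=True)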