Mirror of https://github.com/vllm-project/vllm.git, synced 2025-10-20 14:53:52 +08:00
[Misc] refactor example - openai_transcription_client (#19851)
Signed-off-by: reidliu41 <reid201711@gmail.com>
Co-authored-by: reidliu41 <reid201711@gmail.com>
@@ -1,5 +1,23 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
This script demonstrates how to use the vLLM API server to perform audio
transcription with the `openai/whisper-large-v3` model.

Before running this script, you must start the vLLM server with the following command:

    vllm serve openai/whisper-large-v3

Requirements:
- vLLM with audio support
- openai Python SDK
- httpx for streaming support

The script performs:
1. Synchronous transcription using the OpenAI-compatible API.
2. Streaming transcription using a raw HTTP request to the vLLM server.
"""

import asyncio
import json
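The next hunk picks up partway through the `client = OpenAI(...)` construction, which this diff does not show in full. A minimal sketch of how such a client is usually pointed at a locally served model; the base URL, placeholder API key, and the audio-asset helper below are assumptions, not taken from this commit:

from openai import OpenAI

from vllm.assets.audio import AudioAsset  # assumed source of the sample clip

# Assumed defaults for a server started with `vllm serve openai/whisper-large-v3`.
openai_api_key = "EMPTY"  # vLLM does not validate the key by default
openai_api_base = "http://localhost:8000/v1"

# `mary_had_lamb` is the audio path the example opens below; how it is obtained
# here is an assumption.
mary_had_lamb = AudioAsset("mary_had_lamb").get_local_path()

client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)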
@@ -21,6 +39,9 @@ client = OpenAI(


def sync_openai():
    """
    Perform synchronous transcription using the OpenAI-compatible API.
    """
    with open(str(mary_had_lamb), "rb") as f:
        transcription = client.audio.transcriptions.create(
            file=f,
@@ -37,11 +58,11 @@ def sync_openai():
    print("transcription result:", transcription.text)


sync_openai()
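The hunk above cuts off inside the `transcriptions.create(...)` call. As a rough, self-contained sketch of what the full synchronous path looks like with the openai SDK (only `file=f` is visible in this diff, so the remaining parameters are illustrative assumptions):

def sync_openai():
    """
    Perform synchronous transcription using the OpenAI-compatible API.
    """
    with open(str(mary_had_lamb), "rb") as f:
        transcription = client.audio.transcriptions.create(
            file=f,
            model="openai/whisper-large-v3",
            language="en",            # assumed; matches the language used for streaming below
            response_format="json",
            temperature=0.0,          # assumed
        )
    print("transcription result:", transcription.text)

The point of this refactor is that the function is now called from `main()` rather than at module import time, as the last hunk shows.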
# OpenAI Transcription API client does not support streaming.
async def stream_openai_response():
    """
    Perform streaming transcription using vLLM's raw HTTP streaming API.
    """
    data = {
        "language": "en",
        "stream": True,
@@ -68,7 +89,15 @@ async def stream_openai_response():
            # Extract and print the content
            content = chunk["choices"][0].get("delta", {}).get("content")
            print(content, end="")
    print()  # Final newline after stream ends


# Run the asynchronous function
asyncio.run(stream_openai_response())
def main():
    sync_openai()

    # Run the asynchronous function
    asyncio.run(stream_openai_response())


if __name__ == "__main__":
    main()
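The streaming hunk above is also shown only in part. Since the OpenAI client does not support transcription streaming, the example talks to the server over raw HTTP. A minimal sketch with httpx that matches the chunk-parsing lines visible in the diff; the endpoint path, headers, and SSE handling details are assumptions, not taken verbatim from this commit:

import asyncio
import json

import httpx

from vllm.assets.audio import AudioAsset  # assumed, as above

OPENAI_API_BASE = "http://localhost:8000/v1"  # assumed local server address
mary_had_lamb = AudioAsset("mary_had_lamb").get_local_path()


async def stream_openai_response():
    """
    Perform streaming transcription using vLLM's raw HTTP streaming API.
    """
    data = {
        "language": "en",
        "stream": True,
        "model": "openai/whisper-large-v3",
    }
    url = f"{OPENAI_API_BASE}/audio/transcriptions"
    headers = {"Authorization": "Bearer EMPTY"}  # assumed; vLLM ignores the key by default
    print("transcription result:", end=" ")
    async with httpx.AsyncClient() as http_client:
        with open(str(mary_had_lamb), "rb") as f:
            async with http_client.stream(
                "POST", url, files={"file": f}, data=data, headers=headers
            ) as response:
                async for line in response.aiter_lines():
                    # Server-sent events: payload lines start with "data: "
                    if not line.startswith("data: "):
                        continue
                    payload = line.removeprefix("data: ")
                    if payload.strip() == "[DONE]":
                        break
                    chunk = json.loads(payload)
                    # Extract and print the content (same lines as in the diff above)
                    content = chunk["choices"][0].get("delta", {}).get("content")
                    print(content, end="")
    print()  # Final newline after stream ends

In the refactored example this coroutine is driven from `main()` via `asyncio.run(stream_openai_response())`, as the last hunk shows.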