mirror of
https://github.com/vllm-project/vllm.git
synced 2025-10-20 14:53:52 +08:00
Fix: Add missing EOFError handling in CLI complete command (#20896)
Signed-off-by: reidliu41 <reid201711@gmail.com>
This commit is contained in:
@@ -55,7 +55,7 @@ def chat(system_prompt: str | None, model_name: str, client: OpenAI) -> None:
         try:
             input_message = input("> ")
         except EOFError:
-            return
+            break
         conversation.append({"role": "user", "content": input_message})

         chat_completion = client.chat.completions.create(model=model_name,
@@ -118,7 +118,7 @@ class ChatCommand(CLISubcommand):
            try:
                input_message = input("> ")
            except EOFError:
-               return
+               break
            conversation.append({"role": "user", "content": input_message})

            chat_completion = client.chat.completions.create(
@@ -170,7 +170,10 @@ class CompleteCommand(CLISubcommand):

        print("Please enter prompt to complete:")
        while True:
-           input_prompt = input("> ")
+           try:
+               input_prompt = input("> ")
+           except EOFError:
+               break
            completion = client.completions.create(model=model_name,
                                                   prompt=input_prompt)
            output = completion.choices[0].text
|
Reference in New Issue
Block a user