[Flaky CI] Increase timeout tolerance for test_mp_crash_detection+test_default_mm_lora_chat_completions (#23028)
Signed-off-by: mgoin <mgoin64@gmail.com>
This commit is contained in:
@@ -48,7 +48,8 @@ def multimodal_server():  # noqa: F811
         f"{{\"audio\": \"{AUDIO_LORA_PATH}\"}}",
     ]

-    with RemoteOpenAIServer(MULTIMODAL_MODEL_NAME, args) as remote_server:
+    with RemoteOpenAIServer(MULTIMODAL_MODEL_NAME, args,
+                            max_wait_seconds=480) as remote_server:
         yield remote_server
@@ -255,8 +255,8 @@ async def test_mp_crash_detection(monkeypatch: pytest.MonkeyPatch):
             pass
         end = time.perf_counter()

-        assert end - start < 60, (
-            "Expected vLLM to gracefully shutdown in <60s "
+        assert end - start < 100, (
+            "Expected vLLM to gracefully shutdown in <100s "
             "if there is an error in the startup.")
Reference in New Issue
Block a user