Compare commits

...

98 Commits

Author SHA1 Message Date
d0d68a4c33 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-24 01:17:05 +00:00
35f3782d21 relax hybrid dp asserts
Signed-off-by: Tyler Michael Smith <tyler@neuralmagic.com>
2025-07-23 13:49:52 -04:00
5fb68091db Merge remote-tracking branch 'origin/main' into one-pod-per-node-lb
# Conflicts:
#	.buildkite/test-pipeline.yaml
2025-07-23 15:52:27 +01:00
fb0cf7e212 rename test
Signed-off-by: Nick Hill <nhill@redhat.com>
2025-07-23 14:57:34 +01:00
1c300fcf5a fix internal_dp_lb tests
Signed-off-by: Nick Hill <nhill@redhat.com>
2025-07-23 14:55:02 +01:00
6328c808b8 CI tests for hybrid DPLB mode
Signed-off-by: Nick Hill <nhill@redhat.com>
2025-07-23 12:12:42 +01:00
d95aedd533 [Tests] Add tests for headless internal DP LB
Signed-off-by: Nick Hill <nhill@redhat.com>
2025-07-23 12:07:33 +01:00
8601a22d22 fix bad merge
Signed-off-by: Nick Hill <nhill@redhat.com>
2025-07-23 12:07:33 +01:00
1bd5f2f1ad Merge remote-tracking branch 'refs/remotes/origin/main' into one-pod-per-node-lb 2025-07-23 11:04:38 +01:00
f63cc19210 fix handshake mock test
Signed-off-by: Nick Hill <nhill@redhat.com>
2025-07-23 08:08:40 +01:00
aca3ce6ba0 fix cross-node headless arg validation
Signed-off-by: Nick Hill <nhill@redhat.com>
2025-07-22 23:54:12 +01:00
75bd8ead71 fix handshake
Signed-off-by: Nick Hill <nhill@redhat.com>
2025-07-22 18:48:56 +01:00
cecf38ae82 fix assert
Signed-off-by: Nick Hill <nhill@redhat.com>
2025-07-22 18:12:07 +01:00
f27a85d435 add cross-node dp arg validation
Signed-off-by: Nick Hill <nhill@redhat.com>
2025-07-22 13:49:14 +01:00
82f9292b84 infer hybrid lb mode on secondary modes
and update some comments

Signed-off-by: Nick Hill <nhill@redhat.com>
2025-07-22 13:24:34 +01:00
36ed9f3467 fix coordinator for hybrid LB mode
Signed-off-by: Nick Hill <nhill@redhat.com>
2025-07-22 13:17:42 +01:00
60ae223986 Merge remote-tracking branch 'origin/main' into one-pod-per-node-lb
Signed-off-by: Nick Hill <nhill@redhat.com>

# Conflicts:
#	vllm/v1/engine/core_client.py
2025-07-21 19:20:59 +01:00
7a793ad562 fix data_parallel_hybrid_lb arg default value
Signed-off-by: Nick Hill <nhill@redhat.com>
2025-07-21 15:29:48 +01:00
58e4227fec Update vllm/engine/arg_utils.py
Co-authored-by: Nick Hill <nhill@redhat.com>
2025-07-21 08:24:34 -04:00
40397e378d finished validating
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-21 00:46:31 +00:00
1b481d3489 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-21 00:33:24 +00:00
e80c015d24 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-21 00:29:18 +00:00
f53166a963 update ux
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-21 00:21:35 +00:00
6feb4569fe update ux
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-21 00:21:12 +00:00
5f0663bce4 cleanup
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-21 00:08:26 +00:00
1dcd90065d updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-21 00:00:40 +00:00
e81c277e6e updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 23:55:02 +00:00
a58892880e updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 23:53:10 +00:00
3c206b1975 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 23:52:07 +00:00
ec86e797da updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 23:32:49 +00:00
d327a6bed5 cleanup
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 23:26:48 +00:00
2d32c2849f stash
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 23:10:16 +00:00
fe68027a08 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 22:45:45 +00:00
32a35f5d93 stash
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 22:43:08 +00:00
be03d841f6 stash
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 22:33:17 +00:00
91608889a4 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 22:19:37 +00:00
9f7d3217dd stash
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 21:23:02 +00:00
85cd2da6b4 seems to be working again, but LB is wrong
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 21:15:36 +00:00
a46bc0a6a3 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 21:06:47 +00:00
0018dd01c9 debug
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 20:52:40 +00:00
093b9380db updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 20:41:36 +00:00
cae7cb0223 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 20:36:47 +00:00
7127d838d7 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 20:34:58 +00:00
6491d59202 cleanup
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 20:32:02 +00:00
fc79d23d9a updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 20:24:10 +00:00
ad34f4a953 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 20:22:47 +00:00
99583c2a25 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 20:21:33 +00:00
2cf8ff64c7 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 20:17:54 +00:00
d4ab18f19d updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 20:03:24 +00:00
d9ea345d8c refactor ux
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 20:02:45 +00:00
c22990470f refactor ux
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 20:01:57 +00:00
6206a06a84 nit comments
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 19:24:17 +00:00
72d2c87f8f nit comments
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 19:21:17 +00:00
e540aa41b8 revert logger changes
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 19:11:43 +00:00
1b488f8d5a Merge branch 'main' into one-pod-per-node-lb 2025-07-20 19:07:51 +00:00
840d3812de updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 19:07:31 +00:00
3f4ae353c2 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 17:19:42 +00:00
e9e180da5f cleanup
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 17:19:13 +00:00
5ea4fa206d updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 17:17:15 +00:00
f477b50493 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 17:15:20 +00:00
876c864d3c updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 17:12:27 +00:00
d9291f998e cleanup
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 17:04:35 +00:00
c08fb6d456 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 16:57:45 +00:00
5e6114df5d Merge pull request #19 from robertgshaw2-redhat/fix-prometheus-logging
Improve code structure
2025-07-20 12:53:23 -04:00
4eae5cbeea updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 16:50:36 +00:00
1358836fa0 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 16:48:31 +00:00
02ecfa80a4 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 16:46:35 +00:00
54e405bd92 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 16:45:46 +00:00
896b0a271e updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 15:56:59 +00:00
fd0650f258 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 15:56:50 +00:00
cad9670547 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 15:23:30 +00:00
3956d8ccad updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 15:22:43 +00:00
9a2e26d049 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 14:44:54 +00:00
d39cf9380d updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 14:42:48 +00:00
e08e1e99ee cleanup prometheus logging
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 14:41:55 +00:00
de91a3cd6a convert to use only one prometheus stat logger per async llm
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 14:38:45 +00:00
a69edca369 convert to use only one prometheus stat logger per async llm
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 13:52:50 +00:00
1e5303a801 stash
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 13:37:34 +00:00
6569facd3b stash
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 13:34:38 +00:00
471fa4ae68 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 03:54:43 +00:00
dbc51d6e98 nits
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 03:48:11 +00:00
b9c0f658ca nits
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 03:47:45 +00:00
1ced153eec updatedd
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 03:47:23 +00:00
2a68433a82 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 03:45:48 +00:00
4438796b48 fix lb issues
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 03:44:38 +00:00
d2d54e9c72 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 02:46:54 +00:00
e1843b7e6c updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 02:30:23 +00:00
b142571366 cleanup
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 02:24:49 +00:00
2aa497571d updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 02:23:34 +00:00
14db6606f2 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 02:21:51 +00:00
4f5d3eabc8 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 02:21:19 +00:00
14cf3c4786 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 02:20:54 +00:00
2fd05875d4 updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 02:20:36 +00:00
48cf09be0b updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 02:19:52 +00:00
59a958362f updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 02:19:30 +00:00
aefeeed64d updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 02:18:01 +00:00
b90d33163c updated
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-20 02:15:19 +00:00
14f13ed690 added debug logging
Signed-off-by: Robert Shaw <robshaw@redhat.com>
2025-07-19 16:27:38 +00:00
12 changed files with 487 additions and 45 deletions

View File

@@ -166,6 +166,7 @@ steps:
- tests/v1/test_async_llm_dp.py
- tests/v1/test_external_lb_dp.py
- tests/v1/test_internal_lb_dp.py
- tests/v1/test_hybrid_lb_dp.py
- tests/v1/engine/test_engine_core_client.py
commands:
# test with tp=2 and external_dp=2
@@ -178,6 +179,7 @@ steps:
- TP_SIZE=2 DP_SIZE=2 pytest -v -s v1/test_async_llm_dp.py
- TP_SIZE=2 DP_SIZE=2 pytest -v -s v1/test_external_lb_dp.py
- TP_SIZE=1 DP_SIZE=4 pytest -v -s v1/test_internal_lb_dp.py
- TP_SIZE=1 DP_SIZE=4 pytest -v -s v1/test_hybrid_lb_dp.py
- pytest -v -s v1/engine/test_engine_core_client.py::test_kv_cache_events_dp
- pytest -v -s distributed/test_utils.py
- pytest -v -s compile/test_basic_correctness.py

View File

@@ -565,8 +565,8 @@ def test_engine_core_proc_instantiation_cuda_empty(
from vllm.v1.engine.utils import EngineZmqAddresses
def mock_startup_handshake(self, handshake_socket, on_head_node,
parallel_config):
def mock_startup_handshake(self, handshake_socket, local_client,
headless, parallel_config):
return EngineZmqAddresses(inputs=["tcp://127.0.0.1:5555"],
outputs=["tcp://127.0.0.1:5556"],
coordinator_input=None,

View File

@@ -0,0 +1,352 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import os
import threading
import time
from contextlib import AsyncExitStack
import openai # use the official client for correctness check
import pytest
import pytest_asyncio
from tests.utils import RemoteOpenAIServer
from tests.v1.test_utils import check_request_balancing
from vllm.platforms import Platform
MODEL_NAME = "ibm-research/PowerMoE-3b"
# Number of data parallel ranks for hybrid LB testing (4 total)
DP_SIZE = int(os.getenv("DP_SIZE", "4"))
# Default tensor parallel size to use
TP_SIZE = int(os.getenv("TP_SIZE", "1"))
# Number of nodes (2 nodes, each with 2 DP ranks)
NUM_NODES = 2
DP_SIZE_LOCAL = DP_SIZE // NUM_NODES # 2 ranks per node
class HybridLBServerManager:
"""Manages hybrid data parallel vLLM server instances where each node
runs a single logical API server that balances requests only to the
DP engines running on that same node."""
def __init__(self,
model_name: str,
dp_size: int,
api_server_count: int,
base_server_args: list,
dp_size_local: int = DP_SIZE_LOCAL,
tp_size: int = TP_SIZE):
self.model_name = model_name
self.dp_size = dp_size
self.dp_size_local = dp_size_local
self.tp_size = tp_size
self.api_server_count = api_server_count
self.base_server_args = base_server_args
self.servers: list[tuple[RemoteOpenAIServer, list[str]]] = []
self.server_threads: list[threading.Thread] = []
self.num_nodes = dp_size // dp_size_local
def __enter__(self) -> list[tuple[RemoteOpenAIServer, list[str]]]:
"""Start all server instances for hybrid LB mode."""
for node_id in range(self.num_nodes):
# Create server args for this specific node
server_args = self.base_server_args.copy()
# Calculate start rank for this node
start_rank = node_id * self.dp_size_local
# Add hybrid LB specific arguments
server_args.extend([
"--data-parallel-size",
str(self.dp_size),
"--data-parallel-size-local",
str(self.dp_size_local),
"--data-parallel-start-rank",
str(start_rank),
"--data-parallel-hybrid-lb", # Enable hybrid LB mode
"--tensor-parallel-size",
str(self.tp_size),
"--port",
str(8000 + node_id), # Different port for each node
"--api-server-count",
str(self.api_server_count),
"--data-parallel-address",
"127.0.0.1",
"--data-parallel-rpc-port",
"13345",
])
# Use a thread to start each server to allow parallel initialization
def start_server(node: int, sargs: list[str]):
try:
# Calculate GPU devices for this node
gpus_per_node = self.dp_size_local * self.tp_size
gpu_start = node * gpus_per_node
gpu_end = gpu_start + gpus_per_node
# Start the server
server = RemoteOpenAIServer(
self.model_name,
sargs,
auto_port=False,
env_dict={
"CUDA_VISIBLE_DEVICES":
",".join(
str(Platform.device_id_to_physical_device_id(
i)) for i in range(gpu_start, gpu_end))
})
server.__enter__()
print(f"Hybrid LB node {node} started successfully with "
f"{self.dp_size_local} local DP ranks and "
f"{self.api_server_count} API servers")
self.servers.append((server, sargs))
except Exception as e:
print(f"Failed to start hybrid LB node {node}: {e}")
raise
thread = threading.Thread(target=start_server,
args=(node_id, server_args))
thread.start()
self.server_threads.append(thread)
# Wait for all servers to start
for thread in self.server_threads:
thread.join()
# Give servers additional time to fully initialize and coordinate
time.sleep(3)
if len(self.servers) != self.num_nodes:
raise Exception("Servers failed to start")
return self.servers
def __exit__(self, exc_type, exc_val, exc_tb):
"""Stop all server instances."""
while self.servers:
try:
self.servers.pop()[0].__exit__(exc_type, exc_val, exc_tb)
except Exception as e:
print(f"Error stopping server: {e}")
@pytest.fixture(scope="module")
def default_server_args():
return [
# use half precision for speed and memory savings in CI environment
"--dtype",
"bfloat16",
"--max-model-len",
"2048",
"--max-num-seqs",
"128",
"--enforce-eager",
]
@pytest.fixture(scope="module", params=[1]) # Only 1 API server for now
def servers(request, default_server_args):
api_server_count = request.param
with HybridLBServerManager(MODEL_NAME, DP_SIZE, api_server_count,
default_server_args, DP_SIZE_LOCAL,
TP_SIZE) as server_list:
yield server_list
@pytest_asyncio.fixture
async def clients(servers: list[tuple[RemoteOpenAIServer, list[str]]]):
# Create a client for each node (each node has its own API endpoint)
async with AsyncExitStack() as stack:
yield [
await stack.enter_async_context(server.get_async_client())
for server, _ in servers
]
@pytest.mark.asyncio
@pytest.mark.parametrize(
"model_name",
[MODEL_NAME],
)
async def test_hybrid_lb_completion(clients: list[openai.AsyncOpenAI],
servers: list[tuple[RemoteOpenAIServer,
list[str]]],
model_name: str) -> None:
async def make_request(client: openai.AsyncOpenAI):
completion = await client.completions.create(
model=model_name,
prompt="Hello, my name is",
max_tokens=10,
temperature=1.0)
assert completion.id is not None
assert completion.choices is not None and len(completion.choices) == 1
choice = completion.choices[0]
# The exact number of tokens can vary slightly with temperature=1.0,
# so we check for a reasonable minimum length.
assert len(choice.text) >= 1
# Finish reason might not always be 'length' if the model finishes early
# or due to other reasons, especially with high temperature.
# So, we'll accept 'length' or 'stop'.
assert choice.finish_reason in ("length", "stop")
# Token counts can also vary, so we check they are positive.
assert completion.usage.completion_tokens > 0
assert completion.usage.prompt_tokens > 0
assert completion.usage.total_tokens > 0
return completion
# Test single request to each node
for i, client in enumerate(clients):
result = await make_request(client)
assert result is not None
print(
f"Hybrid LB node {i} handled single completion request successfully"
)
await asyncio.sleep(0.5)
# Send requests to all nodes - each should balance within its local DP ranks
num_requests_per_node = 25 # Total 50 requests across 2 nodes
all_tasks = []
for i, client in enumerate(clients):
tasks = [make_request(client) for _ in range(num_requests_per_node)]
all_tasks.extend(tasks)
results = await asyncio.gather(*all_tasks)
assert len(results) == num_requests_per_node * len(clients)
assert all(completion is not None for completion in results)
await asyncio.sleep(0.5)
# Second burst of requests
all_tasks = []
for i, client in enumerate(clients):
tasks = [make_request(client) for _ in range(num_requests_per_node)]
all_tasks.extend(tasks)
results = await asyncio.gather(*all_tasks)
assert len(results) == num_requests_per_node * len(clients)
assert all(completion is not None for completion in results)
_, server_args = servers[0]
api_server_count = (
server_args.count('--api-server-count')
and server_args[server_args.index('--api-server-count') + 1] or 1)
print(
f"Successfully completed hybrid LB test with {len(clients)} nodes "
f"({DP_SIZE_LOCAL} DP ranks each, API server count: {api_server_count})"
)
# Check request balancing within each node
for i, (server, _) in enumerate(servers):
print(f"Checking request balancing for node {i}")
check_request_balancing(server, DP_SIZE_LOCAL)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"model_name",
[MODEL_NAME],
)
async def test_hybrid_lb_completion_streaming(clients: list[
openai.AsyncOpenAI], servers: list[tuple[RemoteOpenAIServer, list[str]]],
model_name: str) -> None:
prompt = "What is an LLM?"
async def make_streaming_request(client: openai.AsyncOpenAI):
# Perform a non-streaming request to get the expected full output
single_completion = await client.completions.create(
model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
)
single_output = single_completion.choices[0].text
# Perform the streaming request
stream = await client.completions.create(model=model_name,
prompt=prompt,
max_tokens=5,
temperature=0.0,
stream=True)
chunks: list[str] = []
finish_reason_count = 0
last_chunk = None
async for chunk in stream:
chunks.append(chunk.choices[0].text)
if chunk.choices[0].finish_reason is not None:
finish_reason_count += 1
last_chunk = chunk # Keep track of the last chunk
# finish reason should only return in the last block for OpenAI API
assert finish_reason_count == 1, (
"Finish reason should appear exactly once.")
assert last_chunk is not None, (
"Stream should have yielded at least one chunk.")
assert last_chunk.choices[
0].finish_reason == "length", "Finish reason should be 'length'."
# Check that the combined text matches the non-streamed version.
assert "".join(
chunks
) == single_output, "Streamed output should match non-streamed output."
return True # Indicate success for this request
# Test single request to each node
for i, client in enumerate(clients):
result = await make_streaming_request(client)
assert result is not None
print(
f"Hybrid LB node {i} handled single streaming request successfully"
)
await asyncio.sleep(0.5)
# Send streaming requests to all nodes
num_requests_per_node = 25 # Total 50 requests across 2 nodes
all_tasks = []
for i, client in enumerate(clients):
tasks = [
make_streaming_request(client)
for _ in range(num_requests_per_node)
]
all_tasks.extend(tasks)
results = await asyncio.gather(*all_tasks)
assert len(results) == num_requests_per_node * len(clients)
assert all(results), "Not all streaming requests completed successfully."
await asyncio.sleep(0.5)
# Second burst of streaming requests
all_tasks = []
for i, client in enumerate(clients):
tasks = [
make_streaming_request(client)
for _ in range(num_requests_per_node)
]
all_tasks.extend(tasks)
results = await asyncio.gather(*all_tasks)
assert len(results) == num_requests_per_node * len(clients)
assert all(results), "Not all streaming requests completed successfully."
_, server_args = servers[0]
api_server_count = (
server_args.count('--api-server-count')
and server_args[server_args.index('--api-server-count') + 1] or 1)
print(f"Successfully completed hybrid LB streaming test with "
f"{len(clients)} nodes ({DP_SIZE_LOCAL} DP ranks each, "
f"API server count: {api_server_count})")
# Check request balancing within each node
for i, (server, _) in enumerate(servers):
print(f"Checking streaming request balancing for node {i}")
check_request_balancing(server, DP_SIZE_LOCAL)
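
For orientation, a minimal sketch (not part of the diff) of the two-node layout the test above constructs; it reuses the module's own constants and reproduces the start-rank, port, and GPU arithmetic from HybridLBServerManager.

# Minimal sketch, assuming the defaults above: DP_SIZE=4, NUM_NODES=2, TP_SIZE=1.
DP_SIZE, NUM_NODES, TP_SIZE = 4, 2, 1
DP_SIZE_LOCAL = DP_SIZE // NUM_NODES  # 2 DP ranks per node

for node_id in range(NUM_NODES):
    start_rank = node_id * DP_SIZE_LOCAL               # --data-parallel-start-rank
    port = 8000 + node_id                              # one API endpoint per node
    gpus_per_node = DP_SIZE_LOCAL * TP_SIZE
    gpus = list(range(node_id * gpus_per_node, (node_id + 1) * gpus_per_node))
    print(f"node {node_id}: start_rank={start_rank}, port={port}, gpus={gpus}")
# node 0: start_rank=0, port=8000, gpus=[0, 1]
# node 1: start_rank=2, port=8001, gpus=[2, 3]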

View File

@@ -1906,8 +1906,16 @@ class ParallelConfig:
"""Backend to use for data parallel, either "mp" or "ray"."""
data_parallel_external_lb: bool = False
"""Whether to use "external" DP LB mode. Applies only to online serving
and when data_parallel_size > 0. Set implicitly when
data_parallel_rank is provided explicitly to vllm serve."""
and when data_parallel_size > 0. This is useful for a "one-pod-per-rank"
wide-EP setup in Kubernetes. Set implicitly when --data-parallel-rank
is provided explicitly to vllm serve."""
data_parallel_hybrid_lb: bool = False
"""Whether to use "hybrid" DP LB mode. Applies only to online serving
and when data_parallel_size > 0. Enables running an AsyncLLM
and API server on a "per-node" basis where vLLM load balances
between local data parallel ranks, but an external LB balances
between vLLM nodes/replicas. Set explicitly in conjunction with
--data-parallel-start-rank."""
enable_expert_parallel: bool = False
"""Use expert parallelism instead of tensor parallelism for MoE layers."""
enable_eplb: bool = False
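
To make the three LB modes described in the docstrings above concrete, a hypothetical per-node flag layout for a 4-way DP deployment on two nodes (the addresses and ports are placeholders, not taken from the diff):

# Hypothetical flag sets for data_parallel_size=4 across 2 nodes.
common = [
    "--data-parallel-size", "4",
    "--data-parallel-size-local", "2",
    "--data-parallel-address", "10.0.0.1",     # placeholder head-node address
    "--data-parallel-rpc-port", "13345",
]

# Internal LB (default): node 0 serves the API and balances over all 4 ranks;
# node 1 only hosts engines.
internal_node0 = common
internal_node1 = common + ["--headless", "--data-parallel-start-rank", "2"]

# Hybrid LB: each node serves its own API and balances only over its 2 local
# ranks; an external LB spreads traffic across the two node endpoints.
hybrid_node0 = common + ["--data-parallel-hybrid-lb"]
hybrid_node1 = common + ["--data-parallel-hybrid-lb",
                         "--data-parallel-start-rank", "2"]

# External LB: one API server per rank; setting --data-parallel-rank implies
# this mode (shown here for rank 0 of 4).
external_rank0 = ["--data-parallel-size", "4", "--data-parallel-rank", "0"]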

View File

@@ -295,9 +295,11 @@ class EngineArgs:
tensor_parallel_size: int = ParallelConfig.tensor_parallel_size
data_parallel_size: int = ParallelConfig.data_parallel_size
data_parallel_rank: Optional[int] = None
data_parallel_start_rank: Optional[int] = None
data_parallel_size_local: Optional[int] = None
data_parallel_address: Optional[str] = None
data_parallel_rpc_port: Optional[int] = None
data_parallel_hybrid_lb: bool = False
data_parallel_backend: str = ParallelConfig.data_parallel_backend
enable_expert_parallel: bool = ParallelConfig.enable_expert_parallel
enable_eplb: bool = ParallelConfig.enable_eplb
@@ -607,6 +609,11 @@
type=int,
help='Data parallel rank of this instance. '
'When set, enables external load balancer mode.')
parallel_group.add_argument('--data-parallel-start-rank',
'-dpr',
type=int,
help='Starting data parallel rank '
'for secondary nodes.')
parallel_group.add_argument('--data-parallel-size-local',
'-dpl',
type=int,
@@ -628,6 +635,9 @@
default='mp',
help='Backend for data parallel, either '
'"mp" or "ray".')
parallel_group.add_argument(
"--data-parallel-hybrid-lb",
**parallel_kwargs["data_parallel_hybrid_lb"])
parallel_group.add_argument(
"--enable-expert-parallel",
**parallel_kwargs["enable_expert_parallel"])
@@ -986,6 +996,7 @@
def create_engine_config(
self,
usage_context: Optional[UsageContext] = None,
headless: bool = False,
) -> VllmConfig:
"""
Create the VllmConfig.
@@ -1074,15 +1085,41 @@
# but we should not do this here.
placement_group = ray.util.get_current_placement_group()
assert not headless or not self.data_parallel_hybrid_lb, (
"data_parallel_hybrid_lb is not applicable in "
"headless mode")
data_parallel_external_lb = self.data_parallel_rank is not None
# Local DP rank = 1, use pure-external LB.
if data_parallel_external_lb:
assert self.data_parallel_size_local in (1, None), (
"data_parallel_size_local must be 1 when data_parallel_rank "
"is set")
data_parallel_size_local = 1
# Use full external lb if we have local_size of 1.
self.data_parallel_hybrid_lb = False
elif self.data_parallel_size_local is not None:
data_parallel_size_local = self.data_parallel_size_local
if self.data_parallel_start_rank and not headless:
# Infer hybrid LB mode.
self.data_parallel_hybrid_lb = True
if self.data_parallel_hybrid_lb and data_parallel_size_local == 1:
# Use full external lb if we have local_size of 1.
data_parallel_external_lb = True
self.data_parallel_hybrid_lb = False
if data_parallel_size_local == self.data_parallel_size:
# Disable hybrid LB mode if set for a single node
self.data_parallel_hybrid_lb = False
self.data_parallel_rank = self.data_parallel_start_rank or 0
else:
assert not self.data_parallel_hybrid_lb, (
"data_parallel_size_local must be set to use "
"data_parallel_hybrid_lb.")
# Local DP size defaults to global DP size if not set.
data_parallel_size_local = self.data_parallel_size
@@ -1139,6 +1176,7 @@
data_parallel_master_ip=data_parallel_address,
data_parallel_rpc_port=data_parallel_rpc_port,
data_parallel_backend=self.data_parallel_backend,
data_parallel_hybrid_lb=self.data_parallel_hybrid_lb,
enable_expert_parallel=self.enable_expert_parallel,
enable_eplb=self.enable_eplb,
num_redundant_experts=self.num_redundant_experts,
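
A condensed sketch (not part of the diff) of the mode-inference rules that create_engine_config applies above; the parameter names mirror the EngineArgs fields:

def infer_lb_mode(dp_size, dp_rank, dp_start_rank, dp_size_local,
                  hybrid_lb, headless):
    # Setting --data-parallel-rank implies one API server per rank.
    external_lb = dp_rank is not None
    if external_lb:
        dp_size_local, hybrid_lb = 1, False
    elif dp_size_local is not None:
        if dp_start_rank and not headless:
            hybrid_lb = True                        # inferred on secondary nodes
        if hybrid_lb and dp_size_local == 1:
            external_lb, hybrid_lb = True, False    # degenerates to external LB
        if dp_size_local == dp_size:
            hybrid_lb = False                       # single node, nothing to split
        dp_rank = dp_start_rank or 0
    else:
        dp_size_local = dp_size                     # local size defaults to global
    return external_lb, hybrid_lb, dp_rank, dp_size_local

For example, infer_lb_mode(4, None, 2, 2, False, False) returns (False, True, 2, 2): hybrid LB is inferred for a secondary node starting at rank 2.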

View File

@@ -45,11 +45,6 @@ class ServeSubcommand(CLISubcommand):
if args.headless or args.api_server_count < 1:
run_headless(args)
else:
if args.data_parallel_start_rank:
raise ValueError(
"data_parallel_start_rank is only applicable "
"in headless mode. "
"Add --headless flag to enable headless mode.")
if args.api_server_count > 1:
run_multi_api_server(args)
else:
@@ -86,13 +81,14 @@ def run_headless(args: argparse.Namespace):
# Create the EngineConfig.
engine_args = vllm.AsyncEngineArgs.from_cli_args(args)
usage_context = UsageContext.OPENAI_API_SERVER
vllm_config = engine_args.create_engine_config(usage_context=usage_context)
vllm_config = engine_args.create_engine_config(usage_context=usage_context,
headless=True)
if not envs.VLLM_USE_V1:
raise ValueError("Headless mode is only supported for V1")
if engine_args.data_parallel_rank is not None:
raise ValueError("data_parallel_rank is not applicable in "
if engine_args.data_parallel_hybrid_lb:
raise ValueError("data_parallel_hybrid_lb is not applicable in "
"headless mode")
parallel_config = vllm_config.parallel_config
@@ -122,7 +118,7 @@
engine_manager = CoreEngineProcManager(
target_fn=EngineCoreProc.run_engine_core,
local_engine_count=local_engine_count,
start_index=args.data_parallel_start_rank,
start_index=vllm_config.parallel_config.data_parallel_rank,
local_start_index=0,
vllm_config=vllm_config,
local_client=False,
@@ -169,6 +165,11 @@ def run_multi_api_server(args: argparse.Namespace):
" api_server_count > 1")
model_config.disable_mm_preprocessor_cache = True
if vllm_config.parallel_config.data_parallel_hybrid_lb:
raise NotImplementedError(
"Hybrid load balancing with --api-server-count > 0"
"is not yet supported.")
executor_class = Executor.get_class(vllm_config)
log_stats = not engine_args.disable_log_stats

View File

@@ -253,13 +253,6 @@ def make_arg_parser(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
default=False,
help="Run in headless mode. See multi-node data parallel "
"documentation for more details.")
parser.add_argument(
"--data-parallel-start-rank",
"-dpr",
type=int,
default=0,
help="Starting data parallel rank for secondary nodes. "
"Requires --headless.")
parser.add_argument("--api-server-count",
"-asc",
type=int,

View File

@@ -125,7 +125,7 @@ class AsyncLLM(EngineClient):
if self.log_stats:
self.logger_manager = StatLoggerManager(
vllm_config=vllm_config,
engine_idxs=self.engine_core.engine_ranks,
engine_idxs=self.engine_core.engine_ranks_managed,
custom_stat_loggers=stat_loggers,
)
self.logger_manager.log_engine_initialized()

View File

@@ -61,11 +61,12 @@ class DPCoordinator:
host = parallel_config.data_parallel_master_ip
external_lb = parallel_config.data_parallel_external_lb
hybrid_lb = parallel_config.data_parallel_hybrid_lb
# Assume coordinator is colocated with front-end procs when not in
# external DP LB mode.
# either external or hybrid DP LB mode.
front_publish_address = get_engine_client_zmq_addr(
local_only=not external_lb, host=host)
local_only=not external_lb and not hybrid_lb, host=host)
local_only_eng = dp_size == parallel_config.data_parallel_size_local
back_publish_address = get_engine_client_zmq_addr(local_only_eng, host)

View File

@@ -467,13 +467,14 @@ class EngineCoreProc(EngineCore):
For DP>1 with internal loadbalancing this is with the shared front-end
process which may reside on a different node.
For DP>1 with external loadbalancing, two handshakes are performed:
For DP>1 with external or hybrid loadbalancing, two handshakes are
performed:
- With the rank 0 front-end process which retrieves the
DP Coordinator ZMQ addresses and DP process group address.
- With the colocated front-end process which retrieves the
client input/output socket addresses.
with the exception of the rank 0 engine itself which doesn't require
the second handshake.
with the exception of the rank 0 and colocated engines themselves which
don't require the second handshake.
Here, "front-end" process can mean the process containing the engine
core client (which is the API server process in the case the API
@@ -482,15 +483,18 @@
"""
input_ctx = zmq.Context()
is_local = local_client and client_handshake_address is None
headless = not local_client
handshake = self._perform_handshake(input_ctx, handshake_address,
identity, is_local, vllm_config,
identity, is_local, headless,
vllm_config,
vllm_config.parallel_config)
if client_handshake_address is None:
with handshake as addresses:
yield addresses
else:
assert local_client
local_handshake = self._perform_handshake(
input_ctx, client_handshake_address, identity, local_client,
input_ctx, client_handshake_address, identity, True, False,
vllm_config)
with handshake as addresses, local_handshake as client_addresses:
addresses.inputs = client_addresses.inputs
@@ -507,6 +511,7 @@
handshake_address: str,
identity: bytes,
local_client: bool,
headless: bool,
vllm_config: VllmConfig,
parallel_config_to_update: Optional[ParallelConfig] = None,
) -> Generator[EngineZmqAddresses, None, None]:
@@ -518,6 +523,7 @@
bind=False) as handshake_socket:
# Register engine with front-end.
addresses = self.startup_handshake(handshake_socket, local_client,
headless,
parallel_config_to_update)
yield addresses
@@ -531,6 +537,7 @@
msgspec.msgpack.encode({
"status": "READY",
"local": local_client,
"headless": headless,
"num_gpu_blocks": num_gpu_blocks,
"dp_stats_address": dp_stats_address,
}))
@@ -539,6 +546,7 @@
def startup_handshake(
handshake_socket: zmq.Socket,
local_client: bool,
headless: bool,
parallel_config: Optional[ParallelConfig] = None,
) -> EngineZmqAddresses:
@@ -547,6 +555,7 @@
msgspec.msgpack.encode({
"status": "HELLO",
"local": local_client,
"headless": headless,
}))
# Receive initialization message.
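
Illustrative handshake payloads (values are placeholders, not taken from the diff) showing the new headless field carried by both messages:

hello_msg = {"status": "HELLO", "local": False, "headless": True}
ready_msg = {
    "status": "READY",
    "local": False,
    "headless": True,
    "num_gpu_blocks": 8192,                     # placeholder
    "dp_stats_address": "tcp://10.0.0.1:5557",  # placeholder
}
# In external or hybrid LB mode, an engine colocated with a non-rank-0
# front-end performs this exchange twice: once with the rank-0 front-end
# (coordinator and DP process group addresses) and once with its colocated
# front-end (request input/output socket addresses).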

View File

@@ -429,18 +429,23 @@ class MPClient(EngineCoreClient):
parallel_config = vllm_config.parallel_config
dp_size = parallel_config.data_parallel_size
dp_rank = parallel_config.data_parallel_rank
external_dp_lb = parallel_config.data_parallel_external_lb
dp_local_size = parallel_config.data_parallel_size_local
offline_mode = parallel_config.data_parallel_rank_local is not None
self.engine_ranks = ([dp_rank] if
(offline_mode or external_dp_lb) else list(
range(dp_size)))
# Client manages local+remote EngineCores in pure internal LB case.
# Client manages local EngineCores in hybrid and external LB case.
local_engines_only = (parallel_config.data_parallel_hybrid_lb
or parallel_config.data_parallel_external_lb)
num_ranks = dp_local_size if local_engines_only else dp_size
self.engine_ranks_managed = [dp_rank] if offline_mode else list(
range(dp_rank, dp_rank + num_ranks))
assert parallel_config.data_parallel_size_local <= len(
self.engine_ranks)
self.engine_ranks_managed)
# ZMQ identity of each engine that this client will talk to.
self.core_engines: list[EngineIdentity] = [
index.to_bytes(2, "little") for index in self.engine_ranks
rank.to_bytes(2, "little")
for rank in self.engine_ranks_managed
]
# Wait for ready messages from each engine on the input socket.
@@ -895,6 +900,12 @@ class DPAsyncMPClient(AsyncMPClient):
return
assert self.stats_update_address is not None
assert len(self.engine_ranks_managed) > 0
# NOTE: the running and waiting counts from the Coordinator are
# global and include all EngineCores. This slice keeps just the
# counts for the cores managed by this client.
count_slice = slice(self.engine_ranks_managed[0],
self.engine_ranks_managed[-1] + 1)
async def run_engine_stats_update_task():
with make_zmq_socket(self.ctx, self.stats_update_address,
@@ -959,7 +970,8 @@
counts, wave, running = msgspec.msgpack.decode(buf)
self.current_wave = wave
self.engines_running = running
self.lb_engines = counts
self.lb_engines = counts[count_slice]
logger.info(f"{counts=} | {count_slice=}")
resources.stats_update_task = asyncio.create_task(
run_engine_stats_update_task())
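
A worked example (not part of the diff) of the new rank bookkeeping for hybrid LB with dp_size=4 and two local engines, seen from the node whose front-end owns dp_rank=2:

dp_rank, dp_local_size, dp_size = 2, 2, 4
local_engines_only = True                                  # hybrid or external LB
num_ranks = dp_local_size if local_engines_only else dp_size
engine_ranks_managed = list(range(dp_rank, dp_rank + num_ranks))        # [2, 3]
count_slice = slice(engine_ranks_managed[0], engine_ranks_managed[-1] + 1)
# The coordinator publishes counts for all 4 engines; this client keeps only
# counts[2:4] when updating lb_engines for its two local engines.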

View File

@@ -544,7 +544,8 @@ def launch_core_engines(
local_start_index = parallel_config.data_parallel_rank_local
dp_rank = parallel_config.data_parallel_rank
host = parallel_config.data_parallel_master_ip
external_dp_lb = parallel_config.data_parallel_external_lb
local_engines_only = (parallel_config.data_parallel_hybrid_lb
or parallel_config.data_parallel_external_lb)
# In offline mode there is an LLM instance per DP rank and
# one core engine per LLM, see
@@ -553,8 +554,8 @@
# client_local_only = True for cases where this front-end
# sends requests only to colocated engines.
client_local_only = offline_mode or external_dp_lb or (local_engine_count
== dp_size)
client_local_only = (offline_mode or local_engines_only
or (local_engine_count == dp_size))
# Set up input and output addresses.
addresses = EngineZmqAddresses(
@@ -598,14 +599,27 @@
yield engine_actor_manager, coordinator, addresses
return
if offline_mode or (external_dp_lb and dp_rank > 0):
if offline_mode:
assert local_engine_count == 1
engines_to_handshake = [CoreEngine(index=dp_rank, local=True)]
else:
elif dp_rank == 0:
# Rank 0 holds Coordinator, so it handshakes with all Cores
# in both external dplb and internal dplb mode.
# Note this also covers the case where we have zero local engines
# and rank 0 is headless.
engines_to_handshake = [
CoreEngine(index=i, local=(i < local_engine_count))
for i in range(dp_size)
]
else:
# Rank > 0 handshakes with just the local cores it is managing.
assert local_engines_only, (
"Attempting to launch core_engines from dp_rank > 0, but "
"found internal DPLB, which is incompatible.")
engines_to_handshake = [
CoreEngine(index=i, local=True)
for i in range(dp_rank, dp_rank + local_engine_count)
]
# Whether the started engines will handshake only with co-located
# front-end processes. In external_dp_lb mode, ranks > 0 handshake with
@@ -616,7 +630,7 @@
handshake_address = get_engine_client_zmq_addr(
handshake_local_only, host, parallel_config.data_parallel_rpc_port)
if external_dp_lb and dp_rank > 0:
if local_engines_only and dp_rank > 0:
assert not handshake_local_only
local_handshake_address = get_open_zmq_ipc_path()
client_handshake_address = local_handshake_address
@@ -631,8 +645,6 @@
# Start local engines.
if local_engine_count:
# In server mode, start_index and local_start_index will
# both be 0.
local_engine_manager = CoreEngineProcManager(
EngineCoreProc.run_engine_core,
vllm_config=vllm_config,
@@ -678,6 +690,9 @@
poller = zmq.Poller()
poller.register(handshake_socket, zmq.POLLIN)
remote_should_be_headless = not parallel_config.data_parallel_hybrid_lb \
and not parallel_config.data_parallel_external_lb
if proc_manager is not None:
for sentinel in proc_manager.sentinels():
poller.register(sentinel, zmq.POLLIN)
@@ -713,13 +728,24 @@
raise RuntimeError(f"Message from engine with unexpected data "
f"parallel rank: {eng_index}")
msg = msgspec.msgpack.decode(ready_msg_bytes)
status, local = msg["status"], msg["local"]
status, local, headless = msg["status"], msg["local"], msg["headless"]
if local != engine.local:
raise RuntimeError(f"{status} message from "
f"{'local' if local else 'remote'} "
f"engine {eng_index}, expected it to be "
f"{'local' if engine.local else 'remote'}")
# Remote engines must be headless iff we aren't in external or hybrid dp lb mode.
if not local and headless != remote_should_be_headless:
if headless:
raise RuntimeError(f"Remote engine {eng_index} must not use "
f"--headless in external or hybrid dp lb "
f"mode")
else:
raise RuntimeError(f"Remote engine {eng_index} must use "
f"--headless unless in external or hybrid "
f"dp lb mode")
if status == "HELLO" and engine.state == CoreEngineState.NEW:
# Send init message with DP config info.
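
Finally, a worked example (not part of the diff) of the launch-time decisions above for the same hybrid topology, on the secondary node (dp_rank=2, two local engines):

dp_size, local_engine_count, dp_rank = 4, 2, 2
local_engines_only, offline_mode = True, False
client_local_only = offline_mode or local_engines_only or (
    local_engine_count == dp_size)                  # True: talk to local cores only
engines_to_handshake = list(range(dp_rank, dp_rank + local_engine_count))  # ranks [2, 3]
remote_should_be_headless = False   # hybrid LB: each node runs its own API server
# Rank 0's front-end still handshakes with all 4 engines because it hosts the
# DP coordinator; ranks 2-3 additionally handshake with this colocated
# front-end over the local client_handshake_address.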