Static EPLB: fix bug, add unit test (#1186)

<!--  Thanks for sending a pull request!

BEFORE SUBMITTING, PLEASE READ
https://docs.vllm.ai/en/latest/contributing/overview.html

-->
### What this PR does / why we need it?
<!--
- Please clarify what changes you are proposing. The purpose of this
section is to outline the changes and how this PR fixes the issue.
If possible, please consider writing useful notes for better and faster
reviews in your PR.

- Please clarify why the changes are needed. For instance, the use case
and bug description.

- Fixes #
-->
1. Add a unit test for the static EPLB `ExpertLoadBalancer`.
2. Fix a bug: a multi-element `torch.Tensor` cannot be evaluated directly by an `if` statement, so the `log2phy` guards must compare against `None` (see the sketch below).
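
The old guard `if log2phy:` asks for the truth value of a tensor, which raises a `RuntimeError` for any multi-element tensor; the presence check has to be `is not None`. A minimal sketch of the failure mode (plain PyTorch; the `log2phy`/`topk_ids` names come from the diff below, the values are illustrative):

```python
import torch

log2phy = torch.tensor([2, 6, 1, 3, 7, 4, 5, 0])  # logical -> physical expert map
topk_ids = torch.tensor([[0, 3], [7, 2]])          # router-selected logical ids

try:
    if log2phy:  # the old guard
        pass
except RuntimeError as err:
    # "Boolean value of Tensor with more than one element is ambiguous"
    print(err)

if log2phy is not None:  # the fixed guard
    topk_ids = log2phy[topk_ids]  # remap logical ids to physical ids
    print(topk_ids)  # tensor([[2, 3], [0, 1]])
```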
### Does this PR introduce _any_ user-facing change?
<!--
Note that it means *any* user-facing change including all aspects such
as API, interface or other behavior changes.
Documentation-only updates are not considered user-facing changes.
-->

### How was this patch tested?
<!--
CI passed with new added/existing test.
If it was tested in a way different from regular unit tests, please
clarify how you tested step by step, ideally copy and paste-able, so
that other reviewers can test and check, and descendants can verify in
the future.
If tests were not added, please describe why they were not added and/or
why it was difficult to add.
-->
Run the newly added unit tests; CI passed with the new and existing tests.

---------

Signed-off-by: songshanhu07 <1763685535@qq.com>
Author: songshanhu07
Date: 2025-06-18 19:46:56 +08:00 (committed via GitHub)
Commit: ebb2a70dbb (parent: 2cd8ecdc4f)
4 changed files with 150 additions and 3 deletions

@@ -1,2 +1,2 @@
pytest-asyncio
pytest-mock

@@ -4,6 +4,7 @@ modelscope
 openai
 pytest >= 6.0
 pytest-asyncio
+pytest-mock
 lm-eval
 ray
 types-jsonschema

@@ -0,0 +1,146 @@
# The fused moe ops test hits an infer_schema error, so the patch below is
# applied to make the test pass.
import vllm_ascend.patch.worker.patch_common.patch_utils  # type: ignore[import]  # isort: skip # noqa

import json
from typing import List, TypedDict

import pytest
import torch

from vllm_ascend.ops.expert_load_balancer import ExpertLoadBalancer


class Device(TypedDict):
    device_id: int
    device_expert: List[int]


class Layer(TypedDict):
    layer_id: int
    device_count: int
    device_list: List[Device]


class MockData(TypedDict):
    moe_layer_count: int
    layer_list: List[Layer]


MOCK_DATA: MockData = {
    "moe_layer_count": 1,
    "layer_list": [{
        "layer_id": 0,
        "device_count": 2,
        "device_list": [{
            "device_id": 0,
            "device_expert": [7, 2, 0, 3, 5]
        }, {
            "device_id": 1,
            "device_expert": [6, 1, 4, 7, 2]
        }]
    }]
}


@pytest.fixture
def mock_expert_load_balancer(tmp_path):
    json_file = tmp_path / "expert_map.json"
    with open(json_file, 'w') as f:
        json.dump(MOCK_DATA, f)
    return ExpertLoadBalancer(str(json_file), global_expert_num=8)


def test_init(mock_expert_load_balancer):
    assert isinstance(mock_expert_load_balancer.expert_map_tensor,
                      torch.Tensor)
    assert mock_expert_load_balancer.layers_num == MOCK_DATA["moe_layer_count"]
    assert mock_expert_load_balancer.ranks_num == MOCK_DATA["layer_list"][0][
        "device_count"]


def test_generate_index_dicts(mock_expert_load_balancer):
    tensor_2d = torch.tensor([[7, 2, 0, 3, 5], [6, 1, 4, 7, 2]])
    result = mock_expert_load_balancer.generate_index_dicts(tensor_2d)
    expected_result = [{
        7: 0,
        2: 1,
        0: 2,
        3: 3,
        5: 4
    }, {
        6: 5,
        1: 6,
        4: 7,
        7: 8,
        2: 9
    }]
    assert result == expected_result


def test_generate_expert_placement_map(mock_expert_load_balancer):
    expert_placement_map = mock_expert_load_balancer.generate_expert_placement_map(
    )
    assert expert_placement_map.shape == (mock_expert_load_balancer.layers_num,
                                          mock_expert_load_balancer.ranks_num,
                                          8)
    assert torch.all(expert_placement_map >= -1)


def test_generate_log2phy_expert_map(mock_expert_load_balancer):
    layer_id = 0
    log2phy_map = mock_expert_load_balancer.generate_log2phy_expert_map(
        layer_id)
    assert log2phy_map.shape == (mock_expert_load_balancer.ranks_num, 8)
    assert torch.all(log2phy_map >= -1)


def test_get_rank_placement_map(mock_expert_load_balancer, mocker):
    mocker.patch("torch_npu.npu._lazy_init")
    mocker.patch('torch.npu.current_device', return_value='cpu')
    layer_id = 0
    rank_id = 0
    rank_local_expert_num, rank_expert_map = mock_expert_load_balancer.get_rank_placement_map(
        layer_id, rank_id)
    assert rank_local_expert_num == 5
    expected_tensor = torch.tensor([2, -1, 1, 3, -1, 4, -1, 0],
                                   dtype=torch.int32).to(
                                       rank_expert_map.device)
    assert rank_expert_map.equal(expected_tensor)

    rank_id = 1
    rank_local_expert_num, rank_expert_map = mock_expert_load_balancer.get_rank_placement_map(
        layer_id, rank_id)
    expected_tensor = torch.tensor([-1, 1, 4, -1, 2, -1, 0, 3],
                                   dtype=torch.int32).to(
                                       rank_expert_map.device)
    assert rank_expert_map.equal(expected_tensor)


def test_get_rank_log2phy_map(mock_expert_load_balancer):
    layer_id = 0
    rank_id = 0
    log2phy_map = mock_expert_load_balancer.get_rank_log2phy_map(
        layer_id, rank_id)
    expected_tensor = torch.tensor([2, 6, 1, 3, 7, 4, 5, 0],
                                   dtype=torch.int32).to(log2phy_map.device)
    assert log2phy_map.equal(expected_tensor)

    rank_id = 1
    log2phy_map = mock_expert_load_balancer.get_rank_log2phy_map(
        layer_id, rank_id)
    expected_tensor = torch.tensor([2, 6, 9, 3, 7, 4, 5, 8],
                                   dtype=torch.int32).to(log2phy_map.device)
    assert log2phy_map.equal(expected_tensor)


def test_get_global_redundant_expert_num(mock_expert_load_balancer):
    redundant_expert_num = mock_expert_load_balancer.get_global_redundant_expert_num(
    )
    expected_redundant_expert_num = \
        len(MOCK_DATA["layer_list"][0]["device_list"][0]["device_expert"]) * \
        MOCK_DATA["layer_list"][0]["device_count"] - 8
    assert redundant_expert_num == expected_redundant_expert_num
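
For orientation, a minimal usage sketch of `ExpertLoadBalancer` built from the same map layout (assuming, as the unmocked tests above suggest, that these methods run on CPU with `vllm_ascend` installed):

```python
import json

from vllm_ascend.ops.expert_load_balancer import ExpertLoadBalancer

# Same layout as MOCK_DATA in the test above.
expert_map = {
    "moe_layer_count": 1,
    "layer_list": [{
        "layer_id": 0,
        "device_count": 2,
        "device_list": [
            {"device_id": 0, "device_expert": [7, 2, 0, 3, 5]},
            {"device_id": 1, "device_expert": [6, 1, 4, 7, 2]},
        ],
    }],
}
with open("expert_map.json", "w") as f:
    json.dump(expert_map, f)

balancer = ExpertLoadBalancer("expert_map.json", global_expert_num=8)

# 2 devices x 5 physical experts = 10 slots for 8 logical experts,
# hence 2 redundant experts.
assert balancer.get_global_redundant_expert_num() == 2

# One row per rank, one column per logical expert id.
log2phy = balancer.generate_log2phy_expert_map(0)
assert log2phy.shape == (2, 8)
```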

@@ -118,7 +118,7 @@ def fused_experts_with_mc2(
     global_redundant_expert_num: int = 0,
     shared_experts: Optional[Any] = None,
 ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
-    if log2phy:
+    if log2phy is not None:
         topk_ids = log2phy[topk_ids]
     global_bs = 0
     moe_expert_num = len(expert_map) + global_redundant_expert_num
@@ -233,7 +233,7 @@ def fused_experts_with_all2all(
     log2phy: torch.Tensor = None,
     global_redundant_expert_num: int = 0,
 ):
-    if log2phy:
+    if log2phy is not None:
         topk_ids = log2phy[topk_ids]
     original_shape = hidden_states.shape
     if len(original_shape) == 3:
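
For intuition, the fixed branch applied to the layer-0/rank-1 table from the test expectations above (a sketch in plain `torch`, no NPU needed; the `topk_ids` values are illustrative):

```python
import torch

# Layer 0, rank 1 logical -> physical table from the test expectations above.
log2phy = torch.tensor([2, 6, 9, 3, 7, 4, 5, 8], dtype=torch.int32)

# Logical expert ids picked by the router for two tokens, top-2 each.
topk_ids = torch.tensor([[0, 2], [5, 7]])

if log2phy is not None:  # the fixed guard: remap only when a table exists
    topk_ids = log2phy[topk_ids]

print(topk_ids)  # tensor([[2, 9], [4, 8]], dtype=torch.int32)
```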