Mirror of https://github.com/vllm-project/vllm-ascend.git, synced 2025-10-21 14:13:50 +08:00
### What this PR does / why we need it?
vLLM Ascend plugin (vllm-ascend) is a backend plugin for running vLLM on
the Ascend NPU.
This plugin is the recommended approach for supporting the Ascend
backend within the vLLM community. It adheres to the principles outlined
in the [RFC]: Hardware pluggable, providing a hardware-pluggable
interface that decouples the Ascend NPU integration from the vLLM core.
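As context for how this pluggable interface is consumed, the sketch below shows the general shape of an out-of-tree platform plugin registering itself with vLLM through a Python entry point. The `vllm.platform_plugins` entry-point group is the mechanism proposed in the RFC; the module, hook, and class names used here (`vllm_ascend:register`, `NPUPlatform`) are illustrative assumptions rather than a definitive description of this repository's layout.

```python
# setup.py (illustrative sketch): advertise the plugin to vLLM through a
# setuptools entry point in the "vllm.platform_plugins" group.
from setuptools import setup

setup(
    name="vllm-ascend",
    entry_points={
        # "<plugin name> = <module>:<hook function>"
        "vllm.platform_plugins": ["ascend = vllm_ascend:register"],
    },
)
```

```python
# vllm_ascend/__init__.py (illustrative sketch): vLLM calls the hook at
# startup; it returns the fully-qualified name of the Platform subclass
# to load, or None if the Ascend NPU is not available on the host.
def register():
    return "vllm_ascend.platform.NPUPlatform"
```

With this wiring in place, vLLM discovers the platform without any Ascend-specific code in its own tree, which is what "decoupled" means in practice here.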
This patch also includes changes to make CI work and to use caching to
speed up the e2e tests, including:
1. Change the push (post-merge CI) and pull_request (PR CI) trigger branch
to main
2. Make mypy work by ignoring base_communicator and clearing unused deps
3. Several improvements for vllm_ascend_test:
- Use caches (pip, ms, hf) to speed up the e2e tests (25 mins --> 5 mins)
- Switch the `git clone` command to `action/checkout` to speed up checkout
- Enable `sv` for pytest for a better info dump
- Remove network host to resolve `docker: conflicting options: cannot
attach both user-defined and non-user-defined network-modes`, which is a
problem on docker 1.45 but not on 1.39.
4. Adapt MLA decode optimizations:
cabaf4eff3
### Does this PR introduce _any_ user-facing change?
Yes, this is the initial PR.
### How was this patch tested?
- This is the first PR to make the Ascend NPU work with vLLM. All code has
been tested on Ascend hardware with the vLLM V0 engine.
- CI passed
---------
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Signed-off-by: Yikun Jiang <yikunkero@gmail.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: MengqingCao <cmq0113@163.com>
Co-authored-by: wangshuai09 <391746016@qq.com>
Co-authored-by: Shanshan Shen <467638484@qq.com>
Co-authored-by: wangli <wangli858794774@gmail.com>
304 lines · 12 KiB · Python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# This file is a part of the vllm-ascend project.
# Adapted from vllm-project/vllm/blob/main/tests/models/utils.py
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import warnings
from typing import Dict, List, Optional, Sequence, Tuple, Union

import torch
from vllm.config import ModelConfig, TaskOption
from vllm.inputs import InputContext
from vllm.sequence import Logprob, PromptLogprobs, SampleLogprobs

TokensText = Tuple[List[int], str]


def check_outputs_equal(
    *,
    outputs_0_lst: Sequence[TokensText],
    outputs_1_lst: Sequence[TokensText],
    name_0: str,
    name_1: str,
):
    """
    Compare the two sequences generated by different models,
    which should be equal.
    """
    assert len(outputs_0_lst) == len(outputs_1_lst)

    for prompt_idx, (outputs_0,
                     outputs_1) in enumerate(zip(outputs_0_lst,
                                                 outputs_1_lst)):
        output_ids_0, output_str_0 = outputs_0
        output_ids_1, output_str_1 = outputs_1

        # The text and token outputs should exactly match
        fail_msg = (f"Test{prompt_idx}:"
                    f"\n{name_0}:\t{output_str_0!r}"
                    f"\n{name_1}:\t{output_str_1!r}")

        assert output_str_0 == output_str_1, fail_msg
        assert output_ids_0 == output_ids_1, fail_msg


# Representation of generated sequence as a tuple of
# * Token ID list
# * String
# * List of top sample logprobs for each sampled token
#
# Assumes prompt logprobs were not requested.
TokensTextLogprobs = Tuple[List[int], str,
                           Optional[Union[List[Dict[int, float]],
                                          SampleLogprobs]]]

# Allow for tokens to be represented as str's rather than IDs;
# tuple of
# * Token string representations list
# * String
# * Optional list of top sample logprobs for each sampled token
#
# Assumes prompt logprobs were not requested.
TextTextLogprobs = Tuple[List[str], str,
                         Optional[Union[List[Dict[str, float]],
                                        List[Dict[str, Logprob]]]]]

# Representation of generated sequence as a tuple of
# * Token ID list
# * String
# * Optional list of top sample logprobs for each sampled token
# * Optional list of top prompt logprobs for each prompt token
#
# Allows prompt logprobs to be requested.
TokensTextLogprobsPromptLogprobs = Tuple[
    List[int], str, Optional[Union[List[Dict[int, float]], SampleLogprobs]],
    Optional[Union[List[Optional[Dict[int, float]]], PromptLogprobs]]]


def check_logprobs_close(
    *,
    outputs_0_lst: Sequence[Union[TokensTextLogprobs,
                                  TokensTextLogprobsPromptLogprobs,
                                  TextTextLogprobs]],
    outputs_1_lst: Sequence[Union[TokensTextLogprobs,
                                  TokensTextLogprobsPromptLogprobs,
                                  TextTextLogprobs]],
    name_0: str,
    name_1: str,
    num_outputs_0_skip_tokens: int = 0,
    warn_on_mismatch: bool = True,
    always_check_logprobs: bool = False,
) -> None:
    """Compare the logprobs of two sequences generated by different models,
    which should be similar but not necessarily equal.

    How sample logprobs are compared:
    * `always_check_logprobs == True`: set of highest-logprob token ids
      must match between seq0 and seq1 at all sampled token offsets
    * `always_check_logprobs == False`: highest-logprob token ids are
      only compared at sampled token offsets for which generated token
      ids don't match

    Prompt logprobs must be provided either for both input sequences, or
    for neither. If prompt logprobs are provided, then highest-logprob
    prompt token ids must match between seq0 and seq1 at all prompt token
    offsets.

    Args:
      outputs_0_lst: First sequence to compare
      outputs_1_lst: Second sequence to compare
      name_0: sequence #0 name
      name_1: sequence #1 name
      num_outputs_0_skip_tokens: If > 0, specifies the number of initial
                                 sequence #0 tokens & logprobs to discard
                                 before comparison, i.e. all
                                 of sequence #1 will be compared to
                                 sequence #0 beginning at index
                                 num_outputs_0_skip_tokens
      warn_on_mismatch: Issue a warning if there is token-wise or text-wise
                        mismatch between the two sequences
      always_check_logprobs: If true, check logprobs even when tokens match
    """
    assert len(outputs_0_lst) == len(outputs_1_lst)

    # Loop through responses to each prompt.
    for prompt_idx, (outputs_0,
                     outputs_1) in enumerate(zip(outputs_0_lst,
                                                 outputs_1_lst)):
        assert len(outputs_0) == len(outputs_1)
        if len(outputs_0) == 3:
            assert len(outputs_1) == 3
            # Break out tokens, text & sample logprobs
            # (prompt logprobs were not provided)
            output_ids_0, output_str_0, logprobs_0 = outputs_0
            output_ids_1, output_str_1, logprobs_1 = outputs_1
        elif len(outputs_0) == 4:
            assert len(outputs_1) == 4
            # Break out tokens, text, sample logprobs & prompt logprobs
            (
                output_ids_0,
                output_str_0,
                logprobs_0,
                prompt_logprobs_0,
            ) = outputs_0
            (
                output_ids_1,
                output_str_1,
                logprobs_1,
                prompt_logprobs_1,
            ) = outputs_1

            # Test prompt logprobs closeness
            if (prompt_logprobs_0 is not None
                    and prompt_logprobs_1 is not None):
                # Both sequences' prompt logprobs lists are not `None`
                # (although individual list elements may be `None`);
                # for each token's logprobs:
                for idx, (logprobs_elem_0, logprobs_elem_1) in enumerate(
                        zip(prompt_logprobs_0, prompt_logprobs_1)):
                    fail_msg = (
                        f"Prompt logprobs test:"
                        f"\n{name_0}:\tPrompt index {idx}\t{logprobs_elem_0}"
                        f"\n{name_1}:\tPrompt index {idx}\t{logprobs_elem_1}")

                    if logprobs_elem_0 is None:
                        # If the seq 0 token's logprobs are `None`,
                        # the seq 1 token's logprobs must be `None`
                        assert logprobs_elem_1 is None, fail_msg
                    else:
                        # If the seq 0 token's logprobs are not `None`,
                        # the seq 1 token's logprobs must not be `None`
                        assert logprobs_elem_1 is not None, fail_msg
                        # Logprobs check: top-k token choices must be the same
                        assert (set(logprobs_elem_0.keys()) == set(
                            logprobs_elem_1.keys())), fail_msg
            else:
                # Both sequence logprobs lists must be `None`
                fail_msg = (f"Prompt logprobs test:"
                            f"\n{name_0}:\tlogprobs\t{prompt_logprobs_0}"
                            f"\n{name_1}:\tlogprobs\t{prompt_logprobs_1}")

                assert (prompt_logprobs_0 is None
                        and prompt_logprobs_1 is None), fail_msg
        else:
            raise ValueError(f"Outputs tuple must have 3 or 4 elements but "
                             f"{len(outputs_0)} elements were provided: "
                             f"{outputs_0}")

        if logprobs_0 is None:
            logprobs_0 = [None] * len(output_ids_0)
        if logprobs_1 is None:
            logprobs_1 = [None] * len(output_ids_1)

        # Skip specified number of initial sequence #0 tokens
        # & logprobs, leaving output text as-is for simplicity
        # (text mismatches may generate warnings but do not
        # cause the test to fail.)
        if num_outputs_0_skip_tokens < 0:
            raise ValueError("num_outputs_0_skip_tokens must be non-negative")
        output_ids_0 = output_ids_0[num_outputs_0_skip_tokens:]
        logprobs_0 = logprobs_0[num_outputs_0_skip_tokens:]

        # Loop through generated tokens.
        for idx, (output_id_0,
                  output_id_1) in enumerate(zip(output_ids_0, output_ids_1)):

            is_tok_mismatch = output_id_0 != output_id_1

            # If generated tokens don't match
            # or it is desired to always check logprobs,
            # then check the logprobs
            if is_tok_mismatch or always_check_logprobs:
                logprobs_elem_0 = logprobs_0[idx]
                logprobs_elem_1 = logprobs_1[idx]

                # Each predicted token must be in top N logprobs of the other
                fail_msg = (
                    f"Test{prompt_idx}:"
                    f"\nMatched tokens:\t{output_ids_0[:idx]}"
                    f"\n{name_0}:\t{output_str_0!r}\t{logprobs_elem_0}"
                    f"\n{name_1}:\t{output_str_1!r}\t{logprobs_elem_1}")

                assert logprobs_elem_0 is not None, fail_msg
                assert logprobs_elem_1 is not None, fail_msg
                assert output_id_0 in logprobs_elem_1, fail_msg
                assert output_id_1 in logprobs_elem_0, fail_msg

                if warn_on_mismatch and is_tok_mismatch:
                    with warnings.catch_warnings():
                        # This ensures that repeated warnings are shown
                        # in the output, not just the first occurrence
                        warnings.simplefilter("always")

                        warnings.warn(fail_msg, stacklevel=2)

                    # Break out since sequences will now diverge.
                    break
        else:
            if output_str_0 != output_str_1 and warn_on_mismatch:
                # The token outputs exactly match,
                # so the text outputs should exactly match as well
                fail_msg = (f"Test{prompt_idx}:"
                            f"\n{name_0}:\t{output_str_0!r}"
                            f"\n{name_1}:\t{output_str_1!r}")

                with warnings.catch_warnings():
                    # This ensures that repeated warnings are shown
                    # in the output, not just the first occurrence
                    warnings.simplefilter("always")

                    warnings.warn(fail_msg, stacklevel=2)


def build_model_context(model_name: str,
                        task: TaskOption = "auto",
                        tokenizer_name: Optional[str] = None,
                        trust_remote_code: bool = False,
                        dtype: Optional[Union[str, torch.dtype]] = None,
                        mm_processor_kwargs: Optional[Dict] = None,
                        limit_mm_per_prompt: Optional[Dict] = None):
    """Creates an InputContext for a given model.

    Args:
        model_name: Name of the model being considered.
        tokenizer_name: Name of the tokenizer being considered.
        trust_remote_code: Whether or not to allow loading remote code.
        mm_processor_kwargs: Optional processor kwargs to be leveraged
            in the input processor, mapper, dummy data creation, etc.
        limit_mm_per_prompt: Multimodal limits.

    Returns:
        InputContext for the model being considered.
    """
    if tokenizer_name is None:
        tokenizer_name = model_name
    if dtype is None:
        dtype = "half"

    model_config = ModelConfig(
        model_name,
        task=task,
        tokenizer=tokenizer_name,
        tokenizer_mode="auto",
        trust_remote_code=trust_remote_code,
        dtype=dtype,
        seed=0,
        mm_processor_kwargs=mm_processor_kwargs,
        limit_mm_per_prompt=limit_mm_per_prompt,
    )
    return InputContext(model_config)
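For reference, here is a minimal, self-contained usage sketch of the helpers above. The toy tuples stand in for the `(token_ids, text, logprobs)` results that e2e tests collect from two backends; the backend names and numeric values are placeholders, not output from a real run.

```python
# Toy comparison: the two "backends" agree on the first token and diverge on
# the second, but each sampled token appears in the other's top logprobs,
# so check_logprobs_close passes while an exact comparison would not.
outputs_ref = [([1, 2], "hello world",
                [{1: -0.1, 3: -2.0}, {2: -0.3, 4: -1.5}])]
outputs_npu = [([1, 4], "hello earth",
                [{1: -0.2, 3: -1.9}, {4: -0.4, 2: -1.2}])]

check_logprobs_close(
    outputs_0_lst=outputs_ref,
    outputs_1_lst=outputs_npu,
    name_0="reference",
    name_1="ascend-npu",
)

# check_outputs_equal requires identical token ids and text.
check_outputs_equal(
    outputs_0_lst=[([1, 2], "hello world")],
    outputs_1_lst=[([1, 2], "hello world")],
    name_0="reference",
    name_1="ascend-npu",
)
```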