mirror of
https://github.com/vllm-project/vllm.git
synced 2025-10-20 23:03:52 +08:00
Signed-off-by: taohui <taohui3@gmail.com> Signed-off-by: Tao Hui <taohui3@gmail.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> Co-authored-by: Chauncey <chaunceyjiang@gmail.com>
77 lines
2.4 KiB
Python
77 lines
2.4 KiB
Python
# SPDX-License-Identifier: Apache-2.0
|
|
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
|
|
|
import pytest
|
|
from transformers import AutoTokenizer
|
|
|
|
from vllm.entrypoints.openai.protocol import ChatCompletionRequest, DeltaMessage
|
|
from vllm.reasoning import (
|
|
DeepSeekR1ReasoningParser,
|
|
DeepSeekV3ReasoningParser,
|
|
IdentityReasoningParser,
|
|
)
|
|
|
|
# Hugging Face model id whose tokenizer is loaded for these parser tests.
REASONING_MODEL_NAME = "deepseek-ai/DeepSeek-V3.1"
|
|
|
|
|
|
@pytest.fixture(scope="module")
def tokenizer():
    """Load the DeepSeek tokenizer once and share it across this module's tests."""
    loaded = AutoTokenizer.from_pretrained(REASONING_MODEL_NAME)
    return loaded
|
|
|
|
|
|
@pytest.mark.parametrize(
    "thinking,expected_parser_type",
    [(True, DeepSeekR1ReasoningParser), (False, IdentityReasoningParser)],
)
def test_parser_selection(tokenizer, thinking, expected_parser_type):
    """The V3 parser delegates to an R1 parser when thinking is enabled,
    and to the identity parser when it is disabled."""
    template_kwargs = {"thinking": thinking}
    v3_parser = DeepSeekV3ReasoningParser(
        tokenizer, chat_template_kwargs=template_kwargs
    )
    delegate = v3_parser._parser
    assert isinstance(delegate, expected_parser_type)
|
|
|
|
|
|
def test_identity_reasoning_parser_basic(tokenizer):
    """IdentityReasoningParser passes model output through as plain content:
    reasoning is always "ended", all ids are content ids, and no reasoning
    text is ever extracted."""
    parser = IdentityReasoningParser(tokenizer)

    text = "This is some output"
    token_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))

    # Reasoning is always considered finished for the identity parser.
    assert parser.is_reasoning_end(token_ids) is True

    # Every token id is treated as content.
    assert parser.extract_content_ids(token_ids) == token_ids

    # Non-streaming extraction: no reasoning, the whole text is content.
    request = ChatCompletionRequest(model="test-model", messages=[], temperature=1.0)
    reasoning, content = parser.extract_reasoning_content(text, request)
    assert reasoning is None
    assert content == text

    # Streaming: a non-empty delta yields a DeltaMessage carrying the delta text.
    delta = parser.extract_reasoning_content_streaming(
        previous_text="",
        current_text="Hello world",
        delta_text="Hello world",
        previous_token_ids=[],
        current_token_ids=token_ids,
        delta_token_ids=token_ids,
    )
    assert isinstance(delta, DeltaMessage)
    assert delta.content == "Hello world"

    # Streaming: an empty delta yields None.
    empty_delta = parser.extract_reasoning_content_streaming(
        previous_text="Hello world",
        current_text="Hello world",
        delta_text="",
        previous_token_ids=token_ids,
        current_token_ids=token_ids,
        delta_token_ids=[],
    )
    assert empty_delta is None
|