#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

name: 'e2e test'

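# Run nightly at 23:00 UTC, and on pull requests to main or *-dev branches that
# touch Python sources, top-level .txt files, pytest.ini, or this workflow;
# docs-only changes are skipped.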
on:
  schedule:
    - cron: '0 23 * * *'
  pull_request:
    branches:
      - 'main'
      - '*-dev'
    paths:
      - '*.txt'
      - '**/*.py'
      - '.github/workflows/vllm_ascend_test.yaml'
      - '!docs/**'
      - 'pytest.ini'

# Non-login, non-interactive bash shells do not source ~/.profile or ~/.bashrc,
# so steps that rely on the ascend-toolkit environment variables must be
# declared with "shell: bash -el {0}" to pick them up.
defaults:
  run:
    shell: bash -el {0}

jobs:
  test:
    strategy:
      max-parallel: 2
      matrix:
        os: [linux-arm64-npu-1, linux-arm64-npu-4]
        vllm_version: [main, v0.9.0]
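    # 4-NPU runners are scarce: a PR's npu-4 jobs all share one concurrency
    # group, so each PR holds at most one 4-NPU runner at a time. Any other job
    # (single-NPU, or a scheduled run with no PR number) gets its own group,
    # and cancel-in-progress: false queues new runs instead of killing old ones.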
    concurrency:
      group: >
        ${{
        matrix.os == 'linux-arm64-npu-4'
        && github.event.pull_request.number
        && format('pr-{0}-limit-npu-4', github.event.pull_request.number)
        || format('job-{0}-{1}-{2}', matrix.os, matrix.vllm_version, github.event.pull_request.number)
        }}
      cancel-in-progress: false
    name: vLLM Ascend test
    runs-on: ${{ matrix.os }}
    container:
      # TODO(yikun): Remove m.daocloud.io prefix when infra proxy ready
      image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-910b-ubuntu22.04-py3.10
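      # Downloads go through hf-mirror.com (a Hugging Face Hub mirror) so the
      # runners can fetch models; HF_TOKEN unlocks gated repos, and vLLM logging
      # is capped at ERROR to keep test output readable.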
      env:
        HF_ENDPOINT: https://hf-mirror.com
        HF_TOKEN: ${{ secrets.HF_TOKEN }}
        VLLM_LOGGING_LEVEL: ERROR
    steps:
      - name: Check npu and CANN info
        run: |
          npu-smi info
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info

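      # Point apt and pip at the Tsinghua mirrors and rewrite GitHub URLs to go
      # through the gh-proxy.test.osinfra.cn proxy, presumably because the
      # runners sit behind a restricted network.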
      - name: Config mirrors
        run: |
          sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
          pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
          apt-get update -y
          apt-get install -y git
          git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/

      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v4

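      # packages.txt lists vllm-ascend's system-level dependencies; gcc/g++,
      # cmake and libnuma-dev are needed on top of that, presumably to build the
      # project's native extensions.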
      - name: Install system dependencies
        run: |
          apt-get -y install $(cat packages.txt)
          apt-get -y install gcc g++ cmake libnuma-dev

      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm
          ref: ${{ matrix.vllm_version }}
          path: ./vllm-empty

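      # VLLM_TARGET_DEVICE=empty installs vLLM as a device-agnostic Python
      # package without compiling platform kernels; the NPU backend comes from
      # vllm-ascend, installed next.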
      - name: Install vllm-project/vllm from source
        working-directory: ./vllm-empty
        run: |
          VLLM_TARGET_DEVICE=empty pip install -e .

      - name: Install vllm-project/vllm-ascend
        run: |
          pip install -r requirements-dev.txt
          pip install -v -e .

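      # VLLM_USE_V1=1 selects vLLM's V1 engine. Workers are spawned rather than
      # forked, which avoids inheriting an already-initialized device runtime in
      # child processes (a common requirement on accelerators; the rationale is
      # not stated in this file).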
      - name: Run vllm-project/vllm-ascend test for V1 Engine
        env:
          VLLM_USE_V1: 1
          VLLM_WORKER_MULTIPROC_METHOD: spawn
        run: |
          if [[ "${{ matrix.os }}" == "linux-arm64-npu-1" ]]; then
            VLLM_USE_MODELSCOPE=True pytest -sv tests/singlecard/test_offline_inference.py
            # AscendScheduler doesn't work, fix it later
            # pytest -sv tests/singlecard/test_scheduler.py
            # guided decoding doesn't work, fix it later
            # pytest -sv tests/singlecard/test_guided_decoding.py
            pytest -sv tests/singlecard/ --ignore=tests/singlecard/test_offline_inference.py --ignore=tests/singlecard/test_scheduler.py --ignore=tests/singlecard/test_guided_decoding.py
          else
            pytest -sv tests/multicard/test_ilama_lora_tp2.py
            VLLM_USE_MODELSCOPE=True pytest -sv tests/multicard/ --ignore=tests/multicard/test_ilama_lora_tp2.py
          fi

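      # The same suites again on the legacy V0 engine (VLLM_USE_V1=0), so both
      # engine code paths stay covered.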
      - name: Run vllm-project/vllm-ascend test on V0 engine
        env:
          VLLM_USE_V1: 0
        run: |
          if [[ "${{ matrix.os }}" == "linux-arm64-npu-1" ]]; then
            VLLM_USE_MODELSCOPE=True pytest -sv tests/singlecard/test_offline_inference.py
            # AscendScheduler doesn't work, fix it later
            # pytest -sv tests/singlecard/test_scheduler.py
            # guided decoding doesn't work, fix it later
            # pytest -sv tests/singlecard/test_guided_decoding.py
            pytest -sv tests/singlecard/ --ignore=tests/singlecard/test_offline_inference.py --ignore=tests/singlecard/test_scheduler.py --ignore=tests/singlecard/test_guided_decoding.py
          else
            pytest -sv tests/multicard/test_ilama_lora_tp2.py
            # FIXME: running the whole of test_offline_inference_distributed.py
            # with VLLM_USE_MODELSCOPE=True raises an error, so run its cases
            # individually for now.
            VLLM_USE_MODELSCOPE=True pytest -sv tests/multicard/test_offline_inference_distributed.py::test_models_distributed_QwQ
            VLLM_USE_MODELSCOPE=True pytest -sv tests/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek
            VLLM_USE_MODELSCOPE=True pytest -sv tests/multicard/ --ignore=tests/multicard/test_ilama_lora_tp2.py --ignore=tests/multicard/test_offline_inference_distributed.py
          fi
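
# Local reproduction sketch (not part of CI; assumes the same CANN image with an
# NPU attached and vLLM plus vllm-ascend installed as in the steps above):
#   VLLM_USE_V1=1 VLLM_WORKER_MULTIPROC_METHOD=spawn VLLM_USE_MODELSCOPE=True \
#     pytest -sv tests/singlecard/test_offline_inference.py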