Mirror of https://github.com/pytorch/pytorch.git (synced 2025-11-01 22:14:53 +08:00)

Compare commits: ciflow/tru ... gh/yangw-d (12 commits)
| SHA1 |
|---|
| c06dad3bd0 |
| 8be7774a04 |
| 77975bdd55 |
| 20820419b2 |
| 7136e6b927 |
| 68d0b8ddf9 |
| b26d64cec4 |
| 0138304ecb |
| 81430f9533 |
| 9138f8e75c |
| 16ff13f37b |
| 1d49893009 |
.github/ci_configs/CONFIG_TEMPLATE.yaml (vendored, new file)
@@ -0,0 +1,5 @@
# A template for a CI config file. This file is used as an example of how to define a CI config file.
build:
  build_target: fake_build_target
  fake_env: ${ENV_VAR1}
  fake_env2: ${ENV_VAR2}
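For reference, a minimal sketch of how the `${ENV_VAR}` placeholders above get resolved. The template is read through `read_yaml_file` from `scripts/lumen_cli/cli/lib/common/file_utils.py` (added later in this diff), which expands environment variables and drops unresolved placeholders; the variable values below are made up, and the snippet assumes `pip install -e scripts/lumen_cli` was run from the pytorch repo root:

```python
# Minimal sketch, not part of the diff; ENV_VAR values are hypothetical.
import os

from cli.lib.common.file_utils import read_yaml_file

os.environ["ENV_VAR1"] = "value1"
os.environ["ENV_VAR2"] = "value2"

cfg = read_yaml_file(".github/ci_configs/CONFIG_TEMPLATE.yaml")
# -> {'build': {'build_target': 'fake_build_target',
#               'fake_env': 'value1', 'fake_env2': 'value2'}}
```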
.github/ci_configs/vllm.yaml (vendored, new file)
@@ -0,0 +1,24 @@
external_build:
  name: vllm-sm80-external-build
  build_target: vllm
  torch_whl_dir: dist
  work_directory: vllm
  artifact_dir: shared
  # if set, overrides the base image used in the vllm build
  base_image: ${DOCKER_IMAGE} # replaced with the env var DOCKER_IMAGE
  # if set, replaces vllm's docker/Dockerfile.nightly_torch with this local dockerfile
  dockerfile_path: "./.github/docker/Dockerfile.tmp_vllm"
test:
  - preset: # this is a test preset; the preset steps run before each test group
  - name: Basic Correctness Test # name of the test
    id: vllm_basic_correctness_test # unique id for the test config; must be unique in the CI config yml file
    env_vars: # global env vars
      - VLLM_WORKER_MULTIPROC_METHOD=spawn
    preset: # this can be bash, python, etc.: anything you want to run to set up the proper test env
    run:
      - test: pytest -v -s basic_correctness/test_cumem.py
      - test: pytest -v -s basic_correctness/test_basic_correctness.py
      - test: pytest -v -s basic_correctness/test_cpu_offload.py
      - test: pytest -v -s basic_correctness/test_preemption.py
        env_vars:
          - VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT=1 # test-level
.github/docker/Dockerfile.tmp_vllm (vendored, new file)
@@ -0,0 +1,414 @@
# TODO(elainwy): remove this file after the torch nightly dockerfile is in sync in the vllm repo
# The vLLM Dockerfile is used to construct a vLLM image against torch nightly and torch main that can be directly used for testing

ARG CUDA_VERSION=12.8.1
ARG PYTHON_VERSION=3.12

# BUILD_BASE_IMAGE: used to set up python, build xformers, and build the vllm wheels. It can be replaced
# with a different base image from the local machine; by default, it uses the torch-nightly-base stage from this docker image.
ARG BUILD_BASE_IMAGE=torch-nightly-base

# FINAL_BASE_IMAGE: used to set up the vllm-installed environment and build flashinfer;
# by default, it uses the official devel-ubuntu22.04 image.
ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04


#################### TORCH NIGHTLY BASE IMAGE ####################
# A base image for building vLLM with devel ubuntu 22.04; this is mainly used to build vllm in the vllm buildkite CI
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 AS torch-nightly-base
ARG CUDA_VERSION=12.8.1
ARG PYTHON_VERSION=3.12
ARG TARGETPLATFORM
ENV DEBIAN_FRONTEND=noninteractive

RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \
    echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment

# Install Python and other dependencies if they are not already present
RUN if ! command -v python3 >/dev/null || ! python3 --version | grep -q "${PYTHON_VERSION}"; then \
        echo "Installing Python ${PYTHON_VERSION}..." && \
        echo 'tzdata tzdata/Areas select America' | debconf-set-selections && \
        echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections && \
        apt-get update -y && \
        apt-get install -y ccache software-properties-common git curl sudo && \
        for i in 1 2 3; do \
            add-apt-repository -y ppa:deadsnakes/ppa && break || \
            { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
        done && \
        apt-get update -y && \
        apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv && \
        update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 && \
        update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} && \
        ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config && \
        curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}; \
    else \
        echo "Python ${PYTHON_VERSION} already present, skipping setup."; \
    fi \
    && python3 --version && python3 -m pip --version

# Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519
# as it was causing spam when compiling the CUTLASS kernels
# Ensure gcc >= 10 to avoid CUTLASS issues (bug 92519)
RUN current_gcc_version=$(gcc -dumpversion | cut -f1 -d.) && \
    if [ "$current_gcc_version" -lt 10 ]; then \
        echo "GCC version is $current_gcc_version, installing gcc-10..."; \
        apt-get update && \
        apt-get install -y gcc-10 g++-10 && \
        update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 && \
        update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100; \
    else \
        echo "GCC version is $current_gcc_version, no need to install gcc-10."; \
    fi && \
    gcc --version && g++ --version

# install uv for faster pip installs
RUN --mount=type=cache,target=/root/.cache/uv \
    python3 -m pip install uv==0.8.4

ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"

#################### TORCH NIGHTLY BASE IMAGE ####################


#################### BASE BUILD IMAGE ####################
# A base image for building vLLM with torch nightly or torch wheels;
# prepare the basic build environment
FROM ${BUILD_BASE_IMAGE} AS base
USER root

# Workaround for https://github.com/openai/triton/issues/2507 and
# https://github.com/pytorch/pytorch/issues/107960 -- hopefully
# this won't be needed for future versions of this docker image
# or future versions of triton.
RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/

# Install uv for faster pip installs if it is not already present
RUN --mount=type=cache,target=/root/.cache/uv \
    if ! python3 -m uv --version >/dev/null 2>&1; then \
        python3 -m pip install uv==0.8.4; \
    fi
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"

WORKDIR /workspace

# install build and runtime dependencies
COPY requirements/common.txt requirements/common.txt
COPY use_existing_torch.py use_existing_torch.py
COPY pyproject.toml pyproject.toml

# install build and runtime dependencies without stable torch version
RUN python3 use_existing_torch.py

# default mount file as placeholder; this just avoids the mount error
# change to a different vllm folder if this one does not exist anymore
ARG TORCH_WHEELS_PATH="./requirements"
ARG PINNED_TORCH_VERSION

# Install torch, torchaudio and torchvision based on the input:
# if TORCH_WHEELS_PATH is the default "./requirements", pull the nightly versions using pip;
# otherwise, use the wheels from TORCH_WHEELS_PATH on the host machine
RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \
    --mount=type=cache,target=/root/.cache/uv \
    if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \
        torch_whl=$(find /dist -maxdepth 1 -name 'torch-*.whl' -print -quit); \
        vision_whl=$(find /dist/vision -name 'torchvision*.whl' | head -n1 | xargs); \
        audio_whl=$(find /dist/audio -name 'torchaudio*.whl' | head -n1 | xargs); \
        uv pip install --system "${torch_whl}[opt-einsum]"; \
        uv pip install --system "${vision_whl}"; \
        uv pip install --system "${audio_whl}"; \
    elif [ -n "$PINNED_TORCH_VERSION" ]; then \
        echo "[INFO] Installing pinned torch nightly version: $PINNED_TORCH_VERSION"; \
        uv pip install --system "$PINNED_TORCH_VERSION" --index-url https://download.pytorch.org/whl/nightly/cu128; \
    else \
        echo "[INFO] Installing the latest torch nightly"; \
        uv pip install --system torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu128; \
    fi

# Install numba 0.61.2 for the cuda environment
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system numba==0.61.2

# Install common dependencies from vllm's common.txt
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/common.txt


# Must be set before installing xformers, so the correct version of xformers is installed.
ARG torch_cuda_arch_list='8.0;8.6;8.9;9.0'
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
ARG max_jobs=16
ENV MAX_JOBS=${max_jobs}

# Build xformers with cuda and torch nightly/wheel,
# following official xformers guidance: https://github.com/facebookresearch/xformers#build
ARG XFORMERS_COMMIT=f2de641ef670510cadab099ce6954031f52f191c
ENV CCACHE_DIR=/root/.cache/ccache
RUN --mount=type=cache,target=/root/.cache/ccache \
    --mount=type=cache,target=/root/.cache/uv \
    echo 'git clone xformers...' \
    && git clone https://github.com/facebookresearch/xformers.git --recursive \
    && cd xformers \
    && git checkout ${XFORMERS_COMMIT} \
    && git submodule update --init --recursive \
    && echo 'finished git clone xformers...' \
    && rm -rf build \
    && python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose \
    && cd .. \
    && rm -rf xformers
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system xformers-dist/*.whl --verbose

# The build can take a long time, and the torch nightly version fetched from the URL can differ in the next docker stage.
# Track the nightly torch version used in the build, so when we set up the runtime environment we can make sure the version is the same.
RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio' > torch_build_versions.txt
RUN cat torch_build_versions.txt

RUN pip freeze | grep -E 'torch|xformers|torchvision|torchaudio'

#################### BASE BUILD IMAGE ####################


#################### WHEEL BUILD IMAGE ####################
# Image used to build the vllm wheel
FROM base AS build
ARG TARGETPLATFORM

ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"

COPY . .

RUN python3 use_existing_torch.py

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/build.txt

ARG GIT_REPO_CHECK=0
RUN --mount=type=bind,source=.git,target=.git \
    if [ "$GIT_REPO_CHECK" != "0" ]; then bash tools/check_repo.sh; fi

# Max jobs used by Ninja to build extensions
ARG max_jobs=16
ENV MAX_JOBS=${max_jobs}
ARG nvcc_threads=2
ENV NVCC_THREADS=$nvcc_threads
ARG torch_cuda_arch_list='8.0;8.6;8.9;9.0'
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}

ARG USE_SCCACHE
ARG SCCACHE_BUCKET_NAME=vllm-build-sccache
ARG SCCACHE_REGION_NAME=us-west-2
ARG SCCACHE_S3_NO_CREDENTIALS=0

# if USE_SCCACHE is set, use sccache to speed up compilation
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=.git,target=.git \
    if [ "$USE_SCCACHE" = "1" ]; then \
        echo "Installing sccache..." \
        && curl -L -o sccache.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz \
        && tar -xzf sccache.tar.gz \
        && sudo mv sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/bin/sccache \
        && rm -rf sccache.tar.gz sccache-v0.8.1-x86_64-unknown-linux-musl \
        && export SCCACHE_BUCKET=${SCCACHE_BUCKET_NAME} \
        && export SCCACHE_REGION=${SCCACHE_REGION_NAME} \
        && export SCCACHE_S3_NO_CREDENTIALS=${SCCACHE_S3_NO_CREDENTIALS} \
        && export SCCACHE_IDLE_TIMEOUT=0 \
        && export CMAKE_BUILD_TYPE=Release \
        && sccache --show-stats \
        && python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38 \
        && sccache --show-stats; \
    fi

ENV CCACHE_DIR=/root/.cache/ccache
RUN --mount=type=cache,target=/root/.cache/ccache \
    --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=.git,target=.git \
    if [ "$USE_SCCACHE" != "1" ]; then \
        # Clean any existing CMake artifacts
        rm -rf .deps && \
        mkdir -p .deps && \
        python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38; \
    fi

RUN echo "[DEBUG] Listing current directory:" && \
    ls -al && \
    echo "[DEBUG] Showing torch_build_versions.txt content:" && \
    cat torch_build_versions.txt

#################### WHEEL BUILD IMAGE ####################


################### VLLM INSTALLED IMAGE ####################
# Set up a clean environment for vLLM for tests and the api server, using ubuntu22.04 with AOT flashinfer
FROM ${FINAL_BASE_IMAGE} AS vllm-base
USER root
# environment preparation starts here
WORKDIR /workspace

RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \
    echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment

# Install Python and other dependencies if they are not already present
RUN if ! command -v python3 >/dev/null || ! python3 --version | grep -q "${PYTHON_VERSION}"; then \
        echo "Installing Python ${PYTHON_VERSION}..." && \
        echo 'tzdata tzdata/Areas select America' | debconf-set-selections && \
        echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections && \
        apt-get update -y && \
        apt-get install -y ccache software-properties-common git curl sudo && \
        for i in 1 2 3; do \
            add-apt-repository -y ppa:deadsnakes/ppa && break || \
            { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
        done && \
        apt-get update -y && \
        apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv && \
        update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 && \
        update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} && \
        ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config && \
        curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}; \
    else \
        echo "Python ${PYTHON_VERSION} already present, skipping setup."; \
    fi \
    && python3 --version && python3 -m pip --version


# Get the torch versions and wheels used in previous stages for consistency
COPY --from=base /workspace/torch_build_versions.txt ./torch_build_versions.txt
COPY --from=base /workspace/xformers-dist /wheels/xformers
COPY --from=build /workspace/vllm-dist /wheels/vllm
RUN echo "[DEBUG] Listing current directory before torch install step:" && \
    ls -al && \
    echo "[DEBUG] Showing torch_build_versions.txt content:" && \
    cat torch_build_versions.txt

# Workaround for https://github.com/openai/triton/issues/2507 and
# https://github.com/pytorch/pytorch/issues/107960 -- hopefully
# this won't be needed for future versions of this docker image
# or future versions of triton.
RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/


# Install uv for faster pip installs if it is not already present
RUN --mount=type=cache,target=/root/.cache/uv \
    if ! python3 -m uv --version > /dev/null 2>&1; then \
        python3 -m pip install uv==0.8.4; \
    fi
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"

# Default mount file as placeholder; this just avoids the mount error
ARG TORCH_WHEELS_PATH="./requirements"
# Install torch, torchaudio and torchvision:
# if TORCH_WHEELS_PATH is the default "./requirements", pull the nightly versions using pip, pinned by torch_build_versions.txt;
# otherwise, use the wheels from TORCH_WHEELS_PATH on the host machine
RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \
    --mount=type=cache,target=/root/.cache/uv \
    if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \
        torch_whl=$(find /dist -maxdepth 1 -name 'torch-*.whl' -print -quit); \
        vision_whl=$(find /dist/vision -name 'torchvision*.whl' | head -n1 | xargs); \
        audio_whl=$(find /dist/audio -name 'torchaudio*.whl' | head -n1 | xargs); \
        echo "Found: '${torch_whl}' '${audio_whl}' '${vision_whl}'"; \
        uv pip install --system "${torch_whl}[opt-einsum]"; \
        uv pip install --system "${vision_whl}"; \
        uv pip install --system "${audio_whl}"; \
    else \
        echo "[INFO] Installing torch versions from torch_build_versions.txt"; \
        uv pip install --system $(cat torch_build_versions.txt | xargs) --index-url https://download.pytorch.org/whl/nightly/cu128; \
    fi

# Install the vllm wheel from the previous stage
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system /wheels/vllm/*.whl --verbose

# Install the xformers wheel from the previous stage
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system /wheels/xformers/*.whl --verbose


# Build flashinfer from source.
ARG torch_cuda_arch_list='8.0;8.9;9.0a'
# install the package needed to build flashinfer
# see issue: https://github.com/flashinfer-ai/flashinfer/issues/738

RUN pip install build==1.3.0
RUN pip freeze | grep -E 'setuptools|packaging|build'

ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
# Build flashinfer for torch nightly from source (takes around 10 mins)
ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git"
# Keep this in sync with https://github.com/vllm-project/vllm/blob/main/requirements/cuda.txt
ARG FLASHINFER_GIT_REF="v0.2.9rc2"
RUN --mount=type=cache,target=/root/.cache/uv \
    git clone --depth 1 --recursive --shallow-submodules \
        --branch ${FLASHINFER_GIT_REF} \
        ${FLASHINFER_GIT_REPO} flashinfer \
    && echo "Building FlashInfer with AOT for arches: ${torch_cuda_arch_list}" \
    && cd flashinfer \
    && python3 -m flashinfer.aot \
    && python3 -m build --no-isolation --wheel --outdir ../wheels/flashinfer \
    && cd .. \
    && rm -rf flashinfer

# install flashinfer python
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system wheels/flashinfer/*.whl --verbose

# Logging to confirm the torch versions
RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer'
################### VLLM INSTALLED IMAGE ####################


#################### UNITTEST IMAGE #############################
FROM vllm-base AS test

ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"

COPY tests/ tests/
COPY examples examples
COPY benchmarks benchmarks
COPY ./vllm/collect_env.py .
COPY requirements/common.txt requirements/common.txt
COPY use_existing_torch.py use_existing_torch.py
COPY pyproject.toml pyproject.toml
# Install build and runtime dependencies without stable torch version
COPY requirements/nightly_torch_test.txt requirements/nightly_torch_test.txt

RUN python3 use_existing_torch.py

# install packages
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/common.txt
# enable fast downloads from hf (for testing)
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system hf_transfer
ENV HF_HUB_ENABLE_HF_TRANSFER=1

# install development dependencies (for testing)
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -e tests/vllm_test_utils

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/nightly_torch_test.txt

# Workaround for #17068
# pinned commit for v2.2.4
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system --no-build-isolation "git+https://github.com/state-spaces/mamba@95d8aba8a8c75aedcaa6143713b11e745e7cd0d9#egg=mamba-ssm"

# Logging to confirm the torch versions
RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer'

# Logging to confirm all the packages are installed
RUN pip freeze

#################### UNITTEST IMAGE #############################

#################### EXPORT STAGE ####################
FROM scratch AS export-wheels

# Just copy the wheels we prepared in previous stages
COPY --from=base /workspace/xformers-dist /wheels/xformers
COPY --from=build /workspace/vllm-dist /wheels/vllm
COPY --from=vllm-base /workspace/wheels/flashinfer /wheels/flashinfer-python
.github/pytorch-probot.yml (vendored)
@@ -26,6 +26,7 @@ ciflow_push_tags:
- ciflow/trunk
- ciflow/unstable
- ciflow/xpu
- ciflow/vllm
- ciflow/torchbench
- ciflow/op-benchmark
- ciflow/pull
.github/workflows/_linux-external-build-main.yml (vendored, new file)
@@ -0,0 +1,323 @@
name: linux-external-build

on:
  workflow_call:
    inputs:
      build-environment:
        required: true
        type: string
        description: Top-level label for what's being built/tested.
      build-target:
        required: true
        type: string
        description: target library to build
      ci-config:
        required: false
        type: string
        description: CI config to use for the build and test.
      use-gha:
        required: false
        type: string
        default: ""
        description: If set to any value, upload to GHA. Otherwise upload to S3.
      build-generates-artifacts:
        required: false
        type: boolean
        default: true
        description: If set, upload generated build artifacts.
      artifacts-folder-name:
        required: false
        type: string
        description: must be different from build-environment
        default: ""
      docker-image:
        required: true
        type: string
        description: Docker image to run in or replace the external base image.
      cuda-arch-list:
        required: false
        type: string
        default: "8.9"
        description: |
          List of CUDA architectures the CI build should target.
      max_jobs:
        required: false
        type: number
        description: Maximum number of jobs to run the external build
        default: 16
      runner_prefix:
        required: false
        default: ""
        type: string
        description: Prefix for runner label
      runner:
        required: false
        type: string
        default: "linux.2xlarge"
        description: |
          Label of the runner this job should run on.
      s3-bucket:
        description: S3 bucket to download artifact
        required: false
        type: string
        default: "gha-artifacts"
      aws-role-to-assume:
        description: Role to assume for downloading artifacts
        required: false
        type: string
        default: ""
      disable-monitor:
        description: |
          Disable utilization monitoring for the build job
        required: false
        type: boolean
        default: false
      monitor-log-interval:
        description: |
          Set the interval for the monitor script to log utilization.
        required: false
        type: number
        default: 5
      monitor-data-collect-interval:
        description: |
          Set the interval for the monitor script to collect data.
        required: false
        type: number
        default: 1
      build-additional-packages:
        description: |
          If set, the build job will also build these packages and save their
          wheels as artifacts
        required: false
        type: string
        default: ""
    secrets:
      HUGGING_FACE_HUB_TOKEN:
        required: false
        description: |
          HF auth token to avoid rate limits when downloading models or datasets from the hub
      SCRIBE_GRAPHQL_ACCESS_TOKEN:
        required: false
        description: |
          FB app token to write to the scribe endpoint

jobs:
  build-external-lib:
    environment: ${{ github.ref == 'refs/heads/main' && 'scribe-protected' || startsWith(github.ref, 'refs/heads/release/') && 'scribe-protected' || contains(github.event.pull_request.labels.*.name, 'ci-scribe') && 'scribe-pr' || '' }}
    # Don't run on forked repos
    if: github.repository_owner == 'pytorch'
    runs-on: ${{ inputs.runner_prefix }}${{ inputs.runner }}
    timeout-minutes: 240
    steps:
      - name: Setup SSH (Click me for login details)
        uses: pytorch/test-infra/.github/actions/setup-ssh@main
        with:
          github-secret: ${{ secrets.GITHUB_TOKEN }}
          instructions: |
            The build is done inside the container; to start an interactive session run:
              docker exec -it $(docker container ps --format '{{.ID}}') bash

      # [pytorch repo ref]
      # Use a pytorch/pytorch reference instead of a reference to the local
      # checkout because when we run this action we don't *have* a local
      # checkout. In other cases you should prefer a local checkout.
      - name: Checkout PyTorch
        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
        with:
          no-sudo: true

      - name: Get workflow job id
        id: get-job-id
        uses: ./.github/actions/get-workflow-job-id
        if: always()
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: configure aws credentials
        uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
        if: ${{ inputs.aws-role-to-assume != '' }}
        with:
          role-to-assume: ${{ inputs.aws-role-to-assume }}
          role-session-name: gha-linux-build
          aws-region: us-east-1

      - name: Setup Linux
        uses: ./.github/actions/setup-linux
        if: inputs.build-environment != 'linux-s390x-binary-manywheel'

      - name: Login to Amazon ECR
        if: ${{ inputs.aws-role-to-assume != '' }}
        id: login-ecr
        continue-on-error: true
        uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1

      - name: Parse ref
        id: parse-ref
        run: .github/scripts/parse_ref.py

      - name: Start monitoring script
        id: monitor-script
        if: ${{ !inputs.disable-monitor }}
        shell: bash
        continue-on-error: true
        env:
          JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
          JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
          WORKFLOW_NAME: ${{ github.workflow }}
          WORKFLOW_RUN_ID: ${{ github.run_id }}
          MONITOR_LOG_INTERVAL: ${{ inputs.monitor-log-interval }}
          MONITOR_DATA_COLLECT_INTERVAL: ${{ inputs.monitor-data-collect-interval }}
        run: |
          mkdir -p ../../usage_logs
          python3 -m pip install psutil==5.9.8 dataclasses_json==0.6.7
          python3 -m tools.stats.monitor \
            --log-interval "$MONITOR_LOG_INTERVAL" \
            --data-collect-interval "$MONITOR_DATA_COLLECT_INTERVAL" \
            > "../../usage_logs/usage_log_build_${JOB_ID}.txt" 2>&1 &
          echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"

      - name: Calculate docker image
        id: calculate-docker-image
        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
        if: inputs.build-environment != 'linux-s390x-binary-manywheel'
        with:
          docker-image-name: ${{ inputs.docker-image }}

      - name: Use the following to pull a public copy of the image
        id: print-ghcr-mirror
        if: inputs.build-environment != 'linux-s390x-binary-manywheel'
        env:
          ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
        shell: bash
        run: |
          tag=${ECR_DOCKER_IMAGE##*:}
          echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"

      - name: Pull docker image
        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
        with:
          docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}

      - name: Download pytorch build artifacts
        uses: ./.github/actions/download-build-artifacts
        with:
          name: ${{ inputs.build-environment }}
          s3-bucket: ${{ inputs.s3-bucket }}
          use-gha: ${{ inputs.use-gha }}

      - name: Download TD artifacts
        continue-on-error: true
        uses: ./.github/actions/download-td-artifacts

      - name: Calculate max jobs
        id: set-max-jobs
        run: |
          if [[ -n "${{ inputs.max_jobs }}" ]]; then
            echo "Using input max_jobs: ${{ inputs.max_jobs }}"
            echo "MAX_JOBS=${{ inputs.max_jobs }}" >> "$GITHUB_OUTPUT"
          else
            DEFAULT_JOBS=$(nproc --ignore=6)
            echo "Falling back to nproc: $DEFAULT_JOBS"
            echo "MAX_JOBS=$DEFAULT_JOBS" >> "$GITHUB_OUTPUT"
          fi

      - name: Build external project
        id: build
        env:
          BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
          BRANCH: ${{ steps.parse-ref.outputs.branch }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
          # Do not set SCCACHE_S3_KEY_PREFIX to share the cache between all build jobs
          SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
          SCCACHE_REGION: us-east-1
          PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
          TORCH_CUDA_ARCH_LIST: ${{ inputs.cuda-arch-list }}
          OUR_GITHUB_JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
          HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
          MAX_JOBS: ${{ steps.set-max-jobs.outputs.MAX_JOBS }}
          DOCKER_IMAGE: ${{ inputs.docker-image }}
          BUILD_TARGET: ${{ inputs.build-target }}
          CI_CONFIG: ${{ inputs.ci-config }}
        run: |
          set -euo pipefail
          python3 --version
          docker images
          START_TIME=$(date +%s)
          (
            cd scripts/lumen_cli
            python3 -m pip install -e .
          )
          python3 -m cli.run --config "$CI_CONFIG" build external "$BUILD_TARGET"
          END_TIME=$(date +%s)
          echo "build_time=$((END_TIME - START_TIME))" >> "$GITHUB_OUTPUT"

      - name: Archive artifacts into zip
        if: ${{ inputs.build-generates-artifacts && steps.build.outcome && steps.build.outcome != 'skipped' }}
        run: |
          zip -1 -r artifacts.zip shared/

      # By default this uploads the artifacts to <github_org>/<github_repo>/<workflow_id>/<name>-<target>-additional-build/
      # to avoid overriding the pytorch build artifacts
      - name: Store external build artifacts on S3
        if: ${{ inputs.build-generates-artifacts }}
        uses: seemethere/upload-artifact-s3@baba72d0712b404f646cebe0730933554ebce96a # v5.1.0
        with:
          name: ${{ inputs.artifacts-folder-name || format('{0}-{1}-additional-build', inputs.build-environment, inputs.build-target) }}
          retention-days: 14
          if-no-files-found: warn
          path: artifacts.zip
          s3-bucket: ${{ inputs.s3-bucket }}

      - name: Stop monitoring script
        if: ${{ always() && steps.monitor-script.outputs.monitor-script-pid }}
        shell: bash
        continue-on-error: true
        env:
          MONITOR_SCRIPT_PID: ${{ steps.monitor-script.outputs.monitor-script-pid }}
        run: |
          kill "$MONITOR_SCRIPT_PID"

      - name: Copy logs
        shell: bash
        if: ${{ always() && steps.build.outcome != 'skipped' && !inputs.disable-monitor && inputs.build-environment != 'linux-s390x-binary-manywheel' }}
        continue-on-error: true
        run: |
          rm -f ./usage_logs
          mkdir -p ./usage_logs
          cp ../../usage_logs/usage_log_build_*.txt ./usage_logs/

      - name: Upload raw usage log to S3
        if: ${{ always() && steps.build.outcome != 'skipped' && !inputs.disable-monitor && inputs.build-environment != 'linux-s390x-binary-manywheel' }}
        uses: seemethere/upload-artifact-s3@v5
        with:
          s3-prefix: |
            ${{ github.repository }}/${{ github.run_id }}/${{ github.run_attempt }}/artifact
          retention-days: 14
          if-no-files-found: warn
          path: usage_logs/usage_log_build_*.txt

      - name: Upload utilization stats
        if: ${{ always() && steps.build.outcome != 'skipped' && !inputs.disable-monitor && inputs.build-environment != 'linux-s390x-binary-manywheel' }}
        continue-on-error: true
        uses: ./.github/actions/upload-utilization-stats
        with:
          job_id: ${{ steps.get-job-id.outputs.job-id }}
          job_name: ${{ steps.get-job-id.outputs.job-name }}
          workflow_name: ${{ github.workflow }}
          workflow_run_id: ${{ github.run_id }}
          workflow_attempt: ${{ github.run_attempt }}
          artifact_prefix: usage_log_build_${{ steps.get-job-id.outputs.job-id }}

      - name: Teardown Linux
        uses: pytorch/test-infra/.github/actions/teardown-linux@main
        if: always() && inputs.build-environment != 'linux-s390x-binary-manywheel'

      - name: Cleanup docker
        if: always() && inputs.build-environment == 'linux-s390x-binary-manywheel'
        shell: bash
        run: |
          # on s390x, stop the container for a clean worker stop
          docker stop -a || true
          docker kill -a || true
.github/workflows/tools-unit-tests.yml (vendored, new file)
@@ -0,0 +1,42 @@
name: test-scripts-and-ci-tools

on:
  push:
    branches:
      - main
    paths:
      - scripts/lumen_cli/**
      - .github/workflows/tools-unit-tests.yml
  pull_request:
    paths:
      - scripts/lumen_cli/**
      - .github/workflows/tools-unit-tests.yml

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
  cancel-in-progress: true

jobs:
  torch-cli-unit-tests:
    permissions:
      contents: read
      pull-requests: write
    if: ${{ github.repository_owner == 'pytorch' }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout pytorch
        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
        with:
          submodules: true
          fetch-depth: 0

      - name: Run tests
        continue-on-error: true
        run: |
          set -ex
          python3 -m venv /tmp/venv
          source /tmp/venv/bin/activate
          python -m pip install --upgrade pip
          pip install pytest==7.3.2
          pip install -e scripts/lumen_cli
          pytest -v -s scripts/lumen_cli/tests
.github/workflows/vllm.yml (vendored, new file)
@@ -0,0 +1,61 @@
name: vllm-test

on:
  push:
    tags:
      - ciflow/vllm/*
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
  cancel-in-progress: true

permissions:
  id-token: write
  contents: read

jobs:
  get-label-type:
    name: get-label-type
    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
    if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
    with:
      triggering_actor: ${{ github.triggering_actor }}
      issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
      curr_branch: ${{ github.head_ref || github.ref_name }}
      curr_ref_type: ${{ github.ref_type }}
      opt_out_experiments: lf

  torch-build-sm89:
    name: sm89-vllm-test
    uses: ./.github/workflows/_linux-build.yml
    needs: get-label-type
    with:
      build-additional-packages: "vision audio torchao"
      build-environment: linux-jammy-cuda12.8-py3.12-gcc11-sm89
      docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc11-vllm
      cuda-arch-list: '8.9'
      test-matrix: |
        { include: [
          { config: "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_basic_models_test", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu" },
        ]}
    secrets: inherit

  vllm-build-sm89:
    name: sm89-vllm-build
    uses: ./.github/workflows/_linux-external-build-main.yml
    needs: [
      get-label-type,
      torch-build-sm89
    ]
    with:
      build-additional-packages: "vision audio"
      build-environment: linux-jammy-cuda12.8-py3.12-gcc11-sm89
      build-target: vllm
      ci-config: ".github/ci_configs/vllm.yaml"
      runner_prefix: ${{ needs.get-label-type.outputs.label-type }}
      docker-image: ${{ needs.torch-build-sm89.outputs.docker-image }}
      cuda-arch-list: '8.9'
      runner: linux.24xlarge.memory
    secrets: inherit
scripts/lumen_cli/README.md (new file)
@@ -0,0 +1,37 @@
# 🔧 Lumos_cli
A Python CLI tool for building and testing PyTorch-based components, using a YAML configuration file for structured, repeatable workflows.

## Features
- **Build**
  - external projects (e.g. vLLM)

## 📦 Installation
At the root of the pytorch repo:
```bash
pip install -e scripts/lumen_cli
```

## Run the CLI tool
The CLI tool must be used at the root of the pytorch repo. For example, to run the external build for vllm:
```bash
python -m cli.run build external vllm
```
This runs the build steps with the default behaviour for the vllm project.

With a config file (we store the CI configs in .github/ci_configs):
```bash
python3 -m cli.run --config ".github/ci_configs/CONFIG_TEMPLATE.yaml" build external vllm
```
This runs the build steps defined in the config file for the vllm project.

To see help messages, run:
```bash
python3 -m cli.run --help
```

## Add customized external build logic
To add a new external build target:
1. Create the build function in the cli/lib folder.
2. Register your target and its main build function in EXTERNAL_BUILD_TARGET_DISPATCH in `cli/build_cli/register_build.py` (see the sketch after this list).
3. [optional] Create your CI config file in .github/ci_configs/${EXTERNAL_PACKAGE_NAME}.yaml.
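A minimal sketch of step 2. The `foo` target, `FooBuildRunner`, and the `cli.lib.core.foo` module are hypothetical; `EXTERNAL_BUILD_TARGET_DISPATCH` and `VllmBuildRunner` are the real names from `cli/build_cli/register_build.py`:

```python
# Hypothetical registration of a new external target "foo".
# A real runner would implement run(), as VllmBuildRunner does.
from cli.lib.core.vllm import VllmBuildRunner
from cli.lib.core.foo import FooBuildRunner  # hypothetical new module in cli/lib

EXTERNAL_BUILD_TARGET_DISPATCH = {
    "vllm": lambda args: VllmBuildRunner(config_path=args.config),
    "foo": lambda args: FooBuildRunner(config_path=args.config),  # new target
}
```

After this, `python3 -m cli.run build external foo` would dispatch to the new runner.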
scripts/lumen_cli/cli/build_cli/__init__.py (new file, empty)
scripts/lumen_cli/cli/build_cli/register_build.py (new file)
@@ -0,0 +1,43 @@
import logging

from cli.lib.core.vllm import VllmBuildRunner


logger = logging.getLogger(__name__)


def register_build_commands(subparsers):
    """
    Register build commands; this is a subcommand of lumos_cli.
    """
    build_parser = subparsers.add_parser("build", help="Build related commands")
    build_subparsers = build_parser.add_subparsers(dest="build_command")

    register_build_external_commands(build_subparsers)


def register_build_external_commands(subparsers):
    """
    Register build external commands; this is a subcommand of build.
    """
    external_parser = subparsers.add_parser("external", help="Build external targets")
    external_parser.add_argument(
        "target", help="Name of the external target to build (e.g., vllm)"
    )
    external_parser.set_defaults(func=run_build_external)


# Mappings to build external targets;
# add new external build targets here
EXTERNAL_BUILD_TARGET_DISPATCH = {
    "vllm": lambda args: VllmBuildRunner(config_path=args.config),
}


def run_build_external(args):
    target = args.target
    logger.info(f"Running external build for target: {target} (config: {args.config})")
    if target not in EXTERNAL_BUILD_TARGET_DISPATCH:
        raise ValueError(f"Unknown build target: {target}")
    EXTERNAL_BUILD_TARGET_DISPATCH[target](args).run()
scripts/lumen_cli/cli/lib/__init__.py (new file, empty)
scripts/lumen_cli/cli/lib/common/file_utils.py (new file)
@@ -0,0 +1,170 @@
import logging
import os
import re
import shutil
import subprocess
from pathlib import Path

import yaml
from cli.lib.common.utils import run_cmd


logger = logging.getLogger(__name__)


def force_create_dir(path: str):
    """
    Ensure that the given directory path is freshly created.

    If the directory already exists, it is removed along with all its contents,
    then a new, empty directory is created at the same path.
    """
    remove_dir(path)
    ensure_dir_exists(path)


def ensure_dir_exists(path: str):
    """
    Ensure the directory exists. Create it if it doesn't exist.
    """
    if not os.path.exists(path):
        logger.info(f"Creating directory: {path}")
        os.makedirs(path, exist_ok=True)
    else:
        logger.info(f"Directory already exists: {path}")


def remove_dir(path: str):
    """
    Remove a directory if it exists.
    """
    if os.path.exists(path):
        logger.info(f"Removing directory: {path}")
        shutil.rmtree(path)
    else:
        logger.info(f"Directory not found (skipped): {path}")


def get_abs_path(path: str):
    """
    Get the absolute path of the given path.
    """
    if not path:
        return ""
    return os.path.abspath(path)


def get_existing_abs_path(path: str) -> str:
    """
    Get and validate the absolute path of the given path.
    Raises an exception if the path does not exist.
    """
    path = get_abs_path(path)
    if not is_path_exist(path):
        raise FileNotFoundError(f"Path does not exist: {path}")
    return path


def is_path_exist(path: str) -> bool:
    """
    Check if a path exists.
    """
    if not path:
        return False
    return os.path.exists(path)


def read_yaml_file(file_path: str) -> dict:
    """
    Read a YAML file with environment variable substitution.

    Supports replacing environment variables in the form $VAR or ${VAR}.
    Logs any missing variables and removes unresolved placeholders.

    Args:
        file_path (str): local path to the YAML file

    Returns:
        dict: parsed YAML content as a dictionary.

    Raises:
        FileNotFoundError: if the file does not exist.
        ValueError: if the YAML content is invalid or not a dictionary.
        RuntimeError: for other unexpected errors during parsing.
    """
    p = get_abs_path(file_path)

    if not os.path.exists(p):
        raise FileNotFoundError(f"YAML file not found: {file_path}")

    try:
        with open(p, "r", encoding="utf-8") as f:
            raw_content = f.read()

        # Pattern matching all $VAR and ${VAR} occurrences
        pattern = re.compile(r"\$(\w+)|\$\{([^}]+)\}")
        missing_vars = set()

        # Expand any variables that are set in the environment
        expanded_content = os.path.expandvars(raw_content)

        # Collect placeholders that survived expansion (i.e. unset variables)
        for match in pattern.finditer(expanded_content):
            if match.group(1):
                missing_vars.add(match.group(1))
            else:
                missing_vars.add(match.group(2))
        if missing_vars:
            logger.warning(f"Missing environment variables: {', '.join(missing_vars)}")

        # Remove any remaining unresolved $VAR or ${VAR} placeholders
        cleaned = re.sub(r"\$(\w+)|\$\{[^}]+\}", "", expanded_content)
        data = yaml.safe_load(cleaned)
        if data is None:
            return {}
        if not isinstance(data, dict):
            raise ValueError(
                f"YAML content must be a dictionary, got {type(data).__name__}"
            )
        return data
    except yaml.YAMLError as e:
        raise ValueError(f"Failed to parse YAML file '{file_path}': {e}") from e
    except ValueError as e:
        raise ValueError(f"Failed to parse YAML file '{file_path}': {e}") from e
    except Exception as e:
        raise RuntimeError(
            f"Unexpected error while reading YAML file '{file_path}': {e}"
        ) from e


def local_image_exists(image_name: str):
    """
    Check if a local Docker image exists.
    Image name format: <image_name>:<tag>
    """
    try:
        run_cmd(f"docker image inspect {image_name}", log_cmd=False)
        return True
    except subprocess.CalledProcessError:
        return False


def clone_external_repo(target: str, repo: str, cwd: str):
    logger.info(f"cloning {target}....")
    commit = get_post_build_pinned_commit(target)

    # delete the directory if it exists
    remove_dir(cwd)

    # Clone the repo & check out the pinned commit
    run_cmd(f"git clone {repo}")
    run_cmd(f"git checkout {commit}", cwd=cwd)
    run_cmd("git submodule update --init --recursive", cwd=cwd)


def get_post_build_pinned_commit(name: str, prefix=".github/ci_commit_pins") -> str:
    path = Path(prefix) / f"{name}.txt"
    if not path.exists():
        raise FileNotFoundError(f"Pin file not found: {path}")
    return path.read_text(encoding="utf-8").strip()
scripts/lumen_cli/cli/lib/common/logger.py (new file)
@@ -0,0 +1,10 @@
import logging
import sys


def setup_logging(level: int = logging.INFO):
    logging.basicConfig(
        level=level,
        format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
        stream=sys.stdout,
    )
scripts/lumen_cli/cli/lib/common/utils.py (new file)
@@ -0,0 +1,117 @@
import logging
import os
import shlex
import subprocess
import sys
from dataclasses import fields
from textwrap import indent
from typing import Optional


logger = logging.getLogger(__name__)


def generate_dataclass_help(cls) -> str:
    """Auto-generate help text for dataclass default values."""
    lines = []
    for field in fields(cls):
        default = field.default
        if default is not None and default != "":
            lines.append(f"{field.name:<22} = {repr(default)}")
        else:
            lines.append(f"{field.name:<22} = ''")
    return indent("\n".join(lines), " ")


def run_shell(
    cmd: str,
    log_cmd: bool = True,
    cwd: Optional[str] = None,
    env: Optional[dict] = None,
):
    """
    Run a shell command using /bin/bash.

    Args:
        cmd (str): The command string to execute.
        log_cmd (bool): Whether to log the command before execution.
        cwd (Optional[str]): Working directory to run the command in.
        env (Optional[dict]): Environment variables to set during execution.

    Raises:
        subprocess.CalledProcessError: If the command fails.
    """
    if log_cmd:
        logger.info(f"[shell] {cmd}")
    try:
        subprocess.run(
            cmd,
            shell=True,
            executable="/bin/bash",
            stdout=sys.stdout,
            stderr=sys.stderr,
            check=True,
            env=env,
            cwd=cwd,
        )
    except subprocess.CalledProcessError as e:
        logger.error(
            f"[shell] Command failed.\n"
            f"Command: {cmd}\n"
            f"Exit code: {e.returncode}\n"
            f"STDOUT:\n{getattr(e, 'stdout', '')}\n"
            f"STDERR:\n{getattr(e, 'stderr', '')}"
        )
        raise


def run_cmd(
    cmd: str,
    log_cmd: bool = True,
    cwd: Optional[str] = None,
    env: Optional[dict] = None,
):
    """
    Run a command using subprocess with shell=False (i.e., direct exec).
    This only works for commands that are not shell builtins. It is recommended
    to use this method rather than run_shell().

    Args:
        cmd (str): The command string to execute (will be split using shlex).
        log_cmd (bool): Whether to log the command before execution.
        cwd (Optional[str]): Working directory to run the command in.
        env (Optional[dict]): Environment variables to set during execution.

    Raises:
        subprocess.CalledProcessError: If the command fails.
    """
    args = shlex.split(cmd)

    if log_cmd:
        logger.info(f"[cmd] {' '.join(args)}")
    try:
        subprocess.run(
            args,
            shell=False,
            stdout=sys.stdout,
            stderr=sys.stderr,
            check=True,
            env=env,
            cwd=cwd,
        )
    except subprocess.CalledProcessError as e:
        logger.error(
            f"[cmd] Command failed.\n"
            f"Command: {cmd}\n"
            f"Exit code: {e.returncode}\n"
            f"STDOUT:\n{getattr(e, 'stdout', '')}\n"
            f"STDERR:\n{getattr(e, 'stderr', '')}"
        )
        raise


def get_env(name: str, default: str = "") -> str:
    """
    Get an environment variable with a default fallback.
    """
    return os.environ.get(name, default)
193
scripts/lumen_cli/cli/lib/core/vllm.py
Normal file
193
scripts/lumen_cli/cli/lib/core/vllm.py
Normal file
@ -0,0 +1,193 @@
|
||||
import logging
|
||||
import os
|
||||
import textwrap
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
from cli.lib.type.build import BuildRunner, LinuxExternalBuildBaseConfig
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
from cli.lib.common.file_utils import (
|
||||
clone_external_repo,
|
||||
ensure_dir_exists,
|
||||
force_create_dir,
|
||||
get_abs_path,
|
||||
is_path_exist,
|
||||
local_image_exists,
|
||||
)
|
||||
from cli.lib.common.utils import get_env, run_cmd
|
||||
|
||||
|
||||
# default path for docker build artifacts
|
||||
_DEFAULT_RESULT_PATH = "./shared"
|
||||
|
||||
# temp folder in vllm to cp torch whls in vllm work directory for docker build
|
||||
_VLLM_TEMP_FOLDER = "tmp"
|
||||
|
||||
|
||||
@dataclass
|
||||
class VllmBuildConfig(LinuxExternalBuildBaseConfig):
|
||||
"""
|
||||
Configuration specific to vLLM build jobs.
|
||||
"""
|
||||
|
||||
artifact_dir: str = ""
|
||||
torch_whl_dir: str = ""
|
||||
base_image: str = ""
|
||||
    dockerfile_path: str = ""
    target: str = field(default_factory=lambda: get_env("TARGET", "export-wheels"))
    tag_name: str = field(default_factory=lambda: get_env("TAG", "vllm-wheels"))


class VllmBuildRunner(BuildRunner):
    def __init__(self, config_path: str = ""):
        super().__init__(config_path)
        self.cfg = VllmBuildConfig()
        self.work_directory = "vllm"

    def prepare(self):
        """
        Prepare the vllm build environment:
        - clone the vllm repo at the pinned commit
        - create the result dir if it does not exist
        - copy torch wheels into the vllm work directory if provided
        - copy a user-provided dockerfile into the vllm work directory if provided
        """
        clone_vllm()
        cfg = self._to_vllm_build_config()
        self.cfg = cfg
        logger.info(f"set up vllm build config: {self.cfg}")

        ensure_dir_exists(self.cfg.artifact_dir)
        self.cp_dockerfile_if_exist()
        self.cp_torch_whls_if_exist()

    def run(self):
        """
        Main entry point for the vllm build:
        1. prepare the vllm build environment
        2. assemble the docker build command args
        3. run docker build
        """
        self.prepare()
        logger.info(f"Running vllm build: {self.cfg}")
        torch_arg = _get_torch_wheel_path_arg(self.cfg.torch_whl_dir)
        base_arg, final_base_img, pull_flag = _get_base_image_args(self.cfg.base_image)
        cmd = _generate_docker_build_cmd(
            self.cfg, torch_arg, base_arg, final_base_img, pull_flag
        )
        logger.info(f"Running docker build: \n{cmd}")
        run_cmd(cmd, cwd="vllm", env=os.environ.copy())

    def _to_vllm_build_config(self):
        external_build_config = self.get_external_build_config()
        base_image = external_build_config.get("base_image", "")
        artifact_dir = self.get_result_path(
            external_build_config.get("artifact_dir", "")
        )
        abs_whl_dir = get_abs_path(external_build_config.get("torch_whl_dir", ""))
        dockerfile_path = get_abs_path(external_build_config.get("dockerfile_path", ""))
        config = VllmBuildConfig(
            artifact_dir=artifact_dir,
            torch_whl_dir=abs_whl_dir,
            base_image=base_image,
            dockerfile_path=dockerfile_path,
        )
        return config

    def cp_torch_whls_if_exist(self):
        if not self.cfg.torch_whl_dir:
            logger.info(
                "torch whl dir not provided, using the default settings to build vllm against torch nightly"
            )
            return
        if not is_path_exist(self.cfg.torch_whl_dir):
            raise ValueError(
                f"torch whl dir is provided: {self.cfg.torch_whl_dir}, but it does not exist"
            )
        tmp_dir = f"./{self.work_directory}/{_VLLM_TEMP_FOLDER}"
        force_create_dir(tmp_dir)
        run_cmd(f"cp -a {self.cfg.torch_whl_dir}/. {tmp_dir}", log_cmd=True)

    def cp_dockerfile_if_exist(self):
        if self.cfg.dockerfile_path:
            logger.info(f"using user-provided dockerfile {self.cfg.dockerfile_path}")
            run_cmd(
                f"cp {self.cfg.dockerfile_path} ./vllm/docker/Dockerfile.nightly_torch",
            )
        else:
            logger.info("using vllm's default Dockerfile.nightly_torch for the build")

    def get_result_path(self, path):
        """
        Get the absolute path of the result dir, falling back to the default.
        """
        if not path:
            path = _DEFAULT_RESULT_PATH
        abs_path = get_abs_path(path)
        return abs_path


def _get_torch_wheel_path_arg(torch_whl_dir: str) -> str:
    if not torch_whl_dir:
        return ""
    return f"--build-arg TORCH_WHEELS_PATH={_VLLM_TEMP_FOLDER}"


def _get_base_image_args(base_image: str) -> tuple[str, str, str]:
    """
    Returns:
        - base_image_arg: docker buildx arg string for the build base image
        - final_base_image_arg: docker buildx arg string for the final base image
        - pull_flag: --pull=false when the image exists locally, empty otherwise
    """
    pull_flag = ""
    if not base_image:
        return "", "", ""

    base_image_arg = f"--build-arg BUILD_BASE_IMAGE={base_image}"
    final_base_image_arg = f"--build-arg FINAL_BASE_IMAGE={base_image}"
    if local_image_exists(base_image):
        logger.info(f"Found local image: {base_image}")
        pull_flag = "--pull=false"
        return base_image_arg, final_base_image_arg, pull_flag
    logger.info(
        f"Local image not found: {base_image}, will try to pull from remote"
    )
    return base_image_arg, final_base_image_arg, ""
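For illustration, a hedged sketch of what the helper above yields; the image name below is a hypothetical example, not part of this diff:

# Hypothetical example: if "my-registry/cuda-base:dev" is cached locally,
# _get_base_image_args("my-registry/cuda-base:dev") returns
#   ("--build-arg BUILD_BASE_IMAGE=my-registry/cuda-base:dev",
#    "--build-arg FINAL_BASE_IMAGE=my-registry/cuda-base:dev",
#    "--pull=false")
# and the same tuple with an empty pull flag when the image must be pulled.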
def _generate_docker_build_cmd(
    cfg: VllmBuildConfig,
    torch_arg: str,
    base_image_arg: str,
    final_base_image_arg: str,
    pull_flag: str,
) -> str:
    return textwrap.dedent(
        f"""
        docker buildx build \
            --output type=local,dest={cfg.artifact_dir} \
            -f docker/Dockerfile.nightly_torch \
            {pull_flag} \
            {torch_arg} \
            {base_image_arg} \
            {final_base_image_arg} \
            --build-arg max_jobs={cfg.max_jobs} \
            --build-arg CUDA_VERSION={cfg.cuda} \
            --build-arg PYTHON_VERSION={cfg.py} \
            --build-arg USE_SCCACHE={int(bool(cfg.sccache_bucket and cfg.sccache_region))} \
            --build-arg SCCACHE_BUCKET_NAME={cfg.sccache_bucket} \
            --build-arg SCCACHE_REGION_NAME={cfg.sccache_region} \
            --build-arg torch_cuda_arch_list='{cfg.torch_cuda_arch_list}' \
            --target {cfg.target} \
            -t {cfg.tag_name} \
            --progress=plain .
        """
    ).strip()


def clone_vllm():
    clone_external_repo(
        target="vllm", repo="https://github.com/vllm-project/vllm.git", cwd="vllm"
    )
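A minimal usage sketch for the runner above (hedged: it assumes the lumen_cli package is installed and the command runs from the repo root, using the vllm CI config added elsewhere in this PR):

from cli.lib.core.vllm import VllmBuildRunner

# Reads the external_build section of the config, clones vllm, stages the
# torch wheels and dockerfile, then runs docker buildx.
runner = VllmBuildRunner(config_path=".github/ci_configs/vllm.yaml")
runner.run()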
42
scripts/lumen_cli/cli/run.py
Normal file
@ -0,0 +1,42 @@
# run.py

import argparse
import logging

from cli.build_cli.register_build import register_build_commands
from cli.lib.common.logger import setup_logging


logger = logging.getLogger(__name__)


def main():
    # Define the top-level parser
    parser = argparse.ArgumentParser(description="Lumos CLI")
    subparsers = parser.add_subparsers(dest="command", required=True)

    # Add top-level args
    parser.add_argument(
        "--config", required=False, help="Path to the config file for build and test"
    )
    parser.add_argument(
        "--log-level", default="INFO", help="Log level (DEBUG, INFO, WARNING, ERROR)"
    )

    # Register second-level subcommands
    register_build_commands(subparsers)

    # Parse args after all options are registered
    args = parser.parse_args()

    # Set up global logging
    setup_logging(getattr(logging, args.log_level.upper(), logging.INFO))
    logger.debug("Parsed args: %s", args)

    if hasattr(args, "func"):
        args.func(args)
    else:
        parser.print_help()


if __name__ == "__main__":
    main()
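A hedged invocation sketch for the entry point above, mirroring the argv shape the tests in this PR exercise (the config path is an example):

import sys
from cli.run import main

# Equivalent to: python -m cli.run --config <cfg> build external vllm
sys.argv = ["cli.run", "--config", ".github/ci_configs/vllm.yaml", "build", "external", "vllm"]
main()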
19
scripts/lumen_cli/pyproject.toml
Normal file
@ -0,0 +1,19 @@
[project]
name = "torch-ci"
version = "0.1.0"
dependencies = [
    "pyyaml>=6.0"
]

[tool.setuptools]
packages = ["cli", "cli.build_cli", "cli.lib"]

[tool.setuptools.package-dir]
cli = "cli"

[tool.ruff.lint]
# Enable preview mode for linting
preview = true

# Preview rules such as RUF048 can now be selected
extend-select = ["RUF048"]
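A hedged note on consuming this packaging config: the tests below import `cli.*` as an installed package, which a standard editable install provides:

# From the repository root (standard pip behaviour, not a command defined in
# this PR):
#   pip install -e scripts/lumen_cli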
75
scripts/lumen_cli/tests/test_app.py
Normal file
@ -0,0 +1,75 @@
# tests/test_app.py
import io
import sys
import unittest
from contextlib import redirect_stderr, redirect_stdout
from unittest.mock import MagicMock, patch

from cli.run import main

from utils import create_temp_yaml


class TestArgparseCLI(unittest.TestCase):
    @patch("cli.build_cli.register_build.VllmBuildRunner")
    def test_cli_run_build_external(self, mock_runner_cls):
        mock_runner = MagicMock()
        mock_runner_cls.return_value = mock_runner

        test_args = ["cli.run", "build", "external", "vllm"]
        with patch.object(sys, "argv", test_args):
            stdout = io.StringIO()
            with redirect_stdout(stdout), redirect_stderr(io.StringIO()):
                main()

        mock_runner_cls.assert_called_once_with(config_path=None)
        mock_runner.run.assert_called_once()

        output = stdout.getvalue()
        self.assertIn("Running external build for target: vllm", output)

    @patch("cli.build_cli.register_build.VllmBuildRunner")
    def test_cli_with_fake_config_build_vllm(self, mock_runner_cls):
        mock_runner = MagicMock()
        mock_runner_cls.return_value = mock_runner

        config_path = create_temp_yaml({"some": "config"})
        test_args = ["cli.run", "--config", config_path, "build", "external", "vllm"]

        with patch.object(sys, "argv", test_args):
            stdout = io.StringIO()
            with redirect_stdout(stdout), redirect_stderr(io.StringIO()):
                try:
                    main()
                except SystemExit as e:
                    self.fail(f"Exited unexpectedly: {e}")

        mock_runner_cls.assert_called_once_with(config_path=config_path)
        mock_runner.run.assert_called_once()

        output = stdout.getvalue()
        self.assertIn("Running external build for target: vllm", output)

    def test_build_help(self):
        test_args = ["cli.run", "build", "--help"]

        with patch.object(sys, "argv", test_args):
            stdout = io.StringIO()
            stderr = io.StringIO()

            # --help always raises SystemExit(0)
            with self.assertRaises(SystemExit) as cm:
                with redirect_stdout(stdout), redirect_stderr(stderr):
                    main()

            self.assertEqual(cm.exception.code, 0)

            output = stdout.getvalue()
            self.assertIn("usage", output)
            self.assertIn(
                "external", output
            )  # assuming "external" is a subcommand of build


if __name__ == "__main__":
    unittest.main()
53
scripts/lumen_cli/tests/test_build_external.py
Normal file
@ -0,0 +1,53 @@
import unittest
from unittest import mock
from unittest.mock import MagicMock, patch

from cli.lib.core.vllm import VllmBuildRunner


class TestVllmBuildRunner(unittest.TestCase):
    def setUp(self):
        self.runner = VllmBuildRunner()

    @patch("cli.lib.core.vllm.clone_external_repo")
    @patch("cli.lib.core.vllm.get_abs_path", side_effect=lambda x: f"/abs/{x}")
    @patch("cli.lib.core.vllm.ensure_dir_exists")
    @patch("cli.lib.core.vllm.VllmBuildRunner.cp_torch_whls_if_exist")
    @patch("cli.lib.core.vllm.VllmBuildRunner.cp_dockerfile_if_exist")
    def test_prepare_success(
        self, mock_cp_dockerfile, mock_cp_whls, mock_ensure_dir, mock_abs, mock_clone
    ):
        # simulate config
        self.runner.get_external_build_config = MagicMock(
            return_value={
                "artifact_dir": "artifacts",
                "torch_whl_dir": "whls",
                "dockerfile_path": "docker/Dockerfile.nightly_torch",
                "base_image": "base",
            }
        )
        self.runner.prepare()
        mock_clone.assert_called_once()
        mock_ensure_dir.assert_called_once_with("/abs/artifacts")
        mock_cp_dockerfile.assert_called_once()
        mock_cp_whls.assert_called_once()

    @patch("cli.lib.core.vllm.run_cmd")
    @patch("cli.lib.core.vllm._generate_docker_build_cmd", return_value="echo build")
    @patch("cli.lib.core.vllm._get_torch_wheel_path_arg", return_value="--torch")
    @patch.object(VllmBuildRunner, "prepare")
    def test_run(self, mock_prepare, mock_torch_arg, mock_generate_cmd, mock_run_cmd):
        self.runner.cfg = self.runner._to_vllm_build_config()
        self.runner.run()
        mock_prepare.assert_called_once()
        mock_generate_cmd.assert_called_once()
        mock_run_cmd.assert_called_once_with("echo build", cwd="vllm", env=mock.ANY)

    @patch("cli.lib.core.vllm.is_path_exist", return_value=True)
    @patch("cli.lib.core.vllm.run_cmd")
    @patch("cli.lib.core.vllm.force_create_dir")
    def test_cp_torch_whls(self, mock_force_create, mock_run, mock_exist):
        self.runner.cfg.torch_whl_dir = "some/path"
        self.runner.cp_torch_whls_if_exist()
        mock_force_create.assert_called_once_with("./vllm/tmp")
        mock_run.assert_called_once_with("cp -a some/path/. ./vllm/tmp", log_cmd=True)
68
scripts/lumen_cli/tests/test_utils.py
Normal file
@ -0,0 +1,68 @@
import os
import unittest
from unittest.mock import patch

from cli.lib.common.file_utils import read_yaml_file

from utils import create_temp_yaml


class TestReadYamlFile(unittest.TestCase):
    def setUp(self):
        os.environ.pop("EXISTING_VAR", None)
        os.environ.pop("MISSING_VAR", None)
        os.environ.pop("ANOTHER_MISSING", None)

    def test_file_not_found(self):
        with self.assertRaises(FileNotFoundError):
            read_yaml_file("/nonexistent/file.yaml")

    def test_valid_yaml_with_existing_env_var(self):
        os.environ["EXISTING_VAR"] = "yes"
        content = {
            "key1": "$EXISTING_VAR",
            "key2": "literal",
        }
        path = create_temp_yaml(content)
        result = read_yaml_file(path)
        self.assertEqual(result["key1"], True)
        self.assertEqual(result["key2"], "literal")

    def test_missing_env_vars_warned_and_removed(self):
        content = {
            "key1": "$MISSING_VAR",
            "key2": "${ANOTHER_MISSING}",
        }
        path = create_temp_yaml(content)

        with patch("cli.lib.common.file_utils.logger") as mock_logger:
            result = read_yaml_file(path)

        self.assertIsNone(result["key1"])
        self.assertIsNone(result["key2"])

        mock_logger.warning.assert_called_once()
        warning_msg = mock_logger.warning.call_args[0][0]
        self.assertIn("Missing environment variables", warning_msg)

    def test_yaml_content_not_dict(self):
        path = create_temp_yaml('["item1", "item2"]')

        with self.assertRaises(ValueError):
            read_yaml_file(path)

    def test_empty_yaml_returns_empty_dict(self):
        path = create_temp_yaml({})
        result = read_yaml_file(path)
        self.assertEqual(result, {})

    def test_invalid_yaml_syntax(self):
        content = "{ invalid: yaml: ["
        path = create_temp_yaml(content)
        with self.assertRaises(ValueError) as cm:
            read_yaml_file(path)
        self.assertIn("Failed to parse YAML file", str(cm.exception))


if __name__ == "__main__":
    unittest.main()
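The tests above pin down the contract of `read_yaml_file` without showing its body, so here is a minimal sketch of the behaviour they imply (hedged: the real implementation in `cli/lib/common/file_utils.py` may differ):

import logging
import os
import re

import yaml

logger = logging.getLogger(__name__)
_ENV_RE = re.compile(r"\$\{?(\w+)\}?")


def read_yaml_file_sketch(path: str) -> dict:
    with open(path) as f:  # raises FileNotFoundError for missing paths
        text = f.read()
    missing = [name for name in _ENV_RE.findall(text) if name not in os.environ]
    if missing:
        logger.warning("Missing environment variables: %s", sorted(set(missing)))
    # Expand $VAR / ${VAR}; missing vars become empty values, which YAML
    # parses as null, matching test_missing_env_vars_warned_and_removed.
    text = _ENV_RE.sub(lambda m: os.environ.get(m.group(1), ""), text)
    try:
        data = yaml.safe_load(text)
    except yaml.YAMLError as e:
        raise ValueError(f"Failed to parse YAML file: {path}") from e
    if data is None:
        return {}
    if not isinstance(data, dict):
        raise ValueError(f"YAML content of {path} is not a dict")
    return data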
13
scripts/lumen_cli/tests/utils.py
Normal file
@ -0,0 +1,13 @@
import tempfile

import yaml


def create_temp_yaml(content: str | dict) -> str:
    tmp_file = tempfile.NamedTemporaryFile(mode="w+", suffix=".yaml", delete=False)
    if isinstance(content, dict):
        yaml.dump(content, tmp_file)
    else:
        tmp_file.write(content)
    tmp_file.flush()
    return tmp_file.name