Compare commits

...

11 Commits

Author SHA1 Message Date
c0d981fb92 try 2025-08-14 07:24:10 -07:00
f5f3fd155f try 2025-08-14 07:23:24 -07:00
161048c20a add test 2025-08-14 00:40:58 -07:00
d8bc95af0b add test 2025-08-14 00:27:33 -07:00
4e4e1f81b3 add test 2025-08-13 22:31:19 -07:00
24580f98c2 add test 2025-08-13 22:30:02 -07:00
de4c6b3f3f add test
ghstack-source-id: b11f5e19e1b622610668017d89feac01750675cf
Pull-Request: https://github.com/pytorch/pytorch/pull/160583
2025-08-13 22:29:59 -07:00
ef6573629e setup test ci
ghstack-source-id: 913ccdea2bf59332313cf34c2a85b7d01d886ff6
Pull-Request: https://github.com/pytorch/pytorch/pull/160361
2025-08-13 22:29:38 -07:00
36e115eca5 vllm build workflow
ghstack-source-id: 8acb25f33d5232b80db025a8b840cf3fa5f30266
Pull-Request: https://github.com/pytorch/pytorch/pull/160116
2025-08-13 22:29:38 -07:00
dad3e16379 vllmbuild
ghstack-source-id: 61c87bcac7a2b2f47c2a03b8a1da8999dd648acd
Pull-Request: https://github.com/pytorch/pytorch/pull/160089
2025-08-13 22:29:38 -07:00
95490a1ad7 [ghstack] setup torch_cli build
ghstack-source-id: 33be5fbc9eb5f6e6519afc6153f727020d77461d
Pull-Request: https://github.com/pytorch/pytorch/pull/160043
2025-08-13 22:29:38 -07:00
32 changed files with 3021 additions and 1 deletion

.ci/lumen_cli/README.md

@ -0,0 +1,31 @@
# 🔧 Lumen_cli
A Python CLI tool for building and testing PyTorch-based components, using a YAML configuration file for structured, repeatable workflows.
## Features
- **Build**
- external projects (e.g. vLLM)
## 📦 Installation
Run the following command at the root of the PyTorch repo:
```bash
pip install -e .ci/lumen_cli
```
## Run the CLI tool
The CLI tool must be run from the root of the PyTorch repo. For example, to build external vLLM:
```bash
python -m cli.run build external vllm
```
This runs the build steps with the default behaviour for the vLLM project.
To see the help messages, run:
```bash
python3 -m cli.run --help
```
## Add customized external build logic
To add a new external build target (see the sketch below):
1. Create the build function in the `cli/lib` folder.
2. Register your target and its main build function in EXTERNAL_BUILD_TARGET_DISPATCH in `cli/build_cli/register_build.py`.
3. [optional] Create your CI config file in `.github/ci_configs/${EXTERNAL_PACKAGE_NAME}.yaml`.
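
A minimal sketch of what steps 1 and 2 might look like, assuming a hypothetical `example` target and an `ExampleBuildRunner` that follows the `BaseRunner` interface from `cli/lib/common/cli_helper.py` (all names here are illustrative, not part of the repo):
```python
# Hypothetical: cli/lib/core/example.py
from cli.lib.common.cli_helper import BaseRunner, TargetSpec


class ExampleBuildRunner(BaseRunner):
    """Build the hypothetical 'example' external project."""

    def run(self) -> None:
        # Build steps go here; self.args is the parsed argparse.Namespace.
        print("building example with args:", self.args)


# Then register the target in cli/build_cli/register_build.py:
_TARGETS: dict[str, TargetSpec] = {
    "example": {
        "runner": ExampleBuildRunner,
        "help": "Build the example project.",
    },
}
```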


@ -0,0 +1,37 @@
import argparse
import logging
from cli.lib.common.cli_helper import register_targets, RichHelp, TargetSpec
from cli.lib.core.vllm import VllmBuildRunner
logger = logging.getLogger(__name__)
# Maps each target to its argparse configuration and runner;
# each entry becomes a `python -m cli.run build external {target}` subcommand
_TARGETS: dict[str, TargetSpec] = {
"vllm": {
"runner": VllmBuildRunner,
"help": "Build vLLM using docker buildx.",
}
# add yours ...
}
def register_build_commands(subparsers: argparse._SubParsersAction) -> None:
build_parser = subparsers.add_parser(
"build",
help="Build related commands",
formatter_class=RichHelp,
)
build_subparsers = build_parser.add_subparsers(dest="build_command", required=True)
overview = "\n".join(
f" {name:12} {spec.get('help', '')}" for name, spec in _TARGETS.items()
)
external_parser = build_subparsers.add_parser(
"external",
help="Build external targets",
description="Build third-party targets.\n\nAvailable targets:\n" + overview,
formatter_class=RichHelp,
)
register_targets(external_parser, _TARGETS)


@ -0,0 +1,71 @@
"""
Argparse utility helpers for CLI tasks.
"""
import argparse
from abc import ABC, abstractmethod
try:
from typing import Any, Callable, Required, TypedDict # Python 3.11+
except ImportError:
from typing import Any, Callable, TypedDict
from typing_extensions import Required # Fallback for Python <3.11
class BaseRunner(ABC):
def __init__(self, args: Any) -> None:
self.args = args
@abstractmethod
def run(self) -> None:
"""runs main logics, required"""
# Pretty help: keep newlines + show defaults
class RichHelp(
argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter
):
pass
class TargetSpec(TypedDict, total=False):
"""CLI subcommand specification with bA."""
runner: Required[type[BaseRunner]]
help: str
description: str
add_arguments: Callable[[argparse.ArgumentParser], None]
def register_targets(
parser: argparse.ArgumentParser,
target_specs: dict[str, TargetSpec],
common_args: Callable[[argparse.ArgumentParser], None] = lambda _: None,
) -> None:
"""Register target subcommands."""
targets = parser.add_subparsers(
dest="target",
required=True,
metavar="{" + ",".join(target_specs.keys()) + "}",
)
for name, spec in target_specs.items():
desc = spec.get("description") or spec["runner"].__doc__ or ""
p = targets.add_parser(
name,
help=spec.get("help", ""),
description=desc.strip(),
formatter_class=RichHelp,
)
p.set_defaults(
func=lambda args, cls=spec["runner"]: cls(args).run(),
_runner_class=spec["runner"],
)
if "add_arguments" in spec and callable(spec["add_arguments"]):
spec["add_arguments"](p)
if common_args:
common_args(p)

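A minimal usage sketch of `register_targets`, assuming a toy `EchoRunner` (illustrative only; the real wiring lives in `cli/build_cli/register_build.py` and `cli/test_cli/register_test.py`):
```python
import argparse

from cli.lib.common.cli_helper import BaseRunner, register_targets, RichHelp, TargetSpec


class EchoRunner(BaseRunner):
    """Echo the parsed namespace."""

    def run(self) -> None:
        print(self.args)


specs: dict[str, TargetSpec] = {"echo": {"runner": EchoRunner, "help": "Echo args."}}
parser = argparse.ArgumentParser(prog="demo", formatter_class=RichHelp)
register_targets(parser, specs)

ns = parser.parse_args(["echo"])
ns.func(ns)  # constructs EchoRunner(ns) and calls .run()
```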

@ -0,0 +1,42 @@
"""
Docker utility helpers for CLI tasks.
"""
import logging
from typing import Optional
import docker
from docker.errors import APIError, NotFound
logger = logging.getLogger(__name__)
# lazy singleton so we don't reconnect every call
_docker_client: Optional[docker.DockerClient] = None
def _get_client() -> docker.DockerClient:
global _docker_client
if _docker_client is None:
_docker_client = docker.from_env()
return _docker_client
def local_image_exists(
image_name: str, client: Optional[docker.DockerClient] = None
) -> bool:
"""Return True if a local Docker image exists."""
if not image_name:
return False
client = client or _get_client()
try:
client.images.get(image_name)
return True
except (NotFound, APIError) as e:
logger.error(
"Error when checking Docker image '%s': %s",
image_name,
e.explanation if hasattr(e, "explanation") else str(e),
)
return False

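A short usage sketch (assumes a reachable local Docker daemon; the image name is illustrative):
```python
from cli.lib.common.docker_helper import local_image_exists

# Uses the lazily cached client; pass client=... to supply your own.
if local_image_exists("pytorch/pytorch:latest"):
    print("base image is available locally")
else:
    print("image missing locally; a build would need to pull it")
```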

@ -0,0 +1,110 @@
"""
Environment variable and dataclass utility helpers for CLI tasks.
"""
import os
from dataclasses import field, fields, is_dataclass, MISSING
from pathlib import Path
from textwrap import indent
from typing import Optional, Union
from cli.lib.common.utils import str2bool
def get_env(name: str, default: str = "") -> str:
"""Get environment variable with default fallback."""
return os.environ.get(name) or default
def env_path_optional(
name: str,
default: Optional[Union[str, Path]] = None,
resolve: bool = True,
) -> Optional[Path]:
"""Get environment variable as optional Path."""
val = get_env(name) or default
if not val:
return None
path = Path(val)
return path.resolve() if resolve else path
def env_path(
name: str,
default: Optional[Union[str, Path]] = None,
resolve: bool = True,
) -> Path:
"""Get environment variable as Path, raise if missing."""
path = env_path_optional(name, default, resolve)
if not path:
raise ValueError(f"Missing path value for {name}")
return path
def env_bool(
name: str,
default: bool = False,
) -> bool:
val = get_env(name)
if not val:
return default
return str2bool(val)
def env_bool_field(
name: str,
default: bool = False,
):
return field(default_factory=lambda: env_bool(name, default))
def env_path_field(
name: str,
default: Union[str, Path] = "",
*,
resolve: bool = True,
) -> Path:
return field(default_factory=lambda: env_path(name, default, resolve=resolve))
def env_str_field(
name: str,
default: str = "",
) -> str:
return field(default_factory=lambda: get_env(name, default))
def generate_dataclass_help(cls) -> str:
"""Auto-generate help text for dataclass fields."""
if not is_dataclass(cls):
raise TypeError(f"{cls} is not a dataclass")
def get_value(f):
if f.default is not MISSING:
return f.default
if f.default_factory is not MISSING:
try:
return f.default_factory()
except Exception as e:
return f"<error: {e}>"
return "<required>"
lines = [f"{f.name:<22} = {repr(get_value(f))}" for f in fields(cls)]
return indent("\n".join(lines), " ")
def with_params_help(params_cls: type, title: str = "Parameter defaults"):
"""
Class decorator that appends a help table generated from another dataclass
(e.g., VllmParameters) to the decorated class's docstring.
"""
if not is_dataclass(params_cls):
raise TypeError(f"{params_cls} must be a dataclass")
def _decorator(cls: type) -> type:
block = generate_dataclass_help(params_cls)
cls.__doc__ = (cls.__doc__ or "") + f"\n\n{title}:\n{block}"
return cls
return _decorator

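A minimal sketch of how these field helpers compose in a dataclass (the env var names are hypothetical):
```python
from dataclasses import dataclass
from pathlib import Path

from cli.lib.common.envs_helper import (
    env_bool_field,
    env_path_field,
    env_str_field,
    with_params_help,
)


@dataclass
class DemoParams:
    use_cache: bool = env_bool_field("DEMO_USE_CACHE", False)
    out_dir: Path = env_path_field("DEMO_OUT_DIR", "out")
    tag: str = env_str_field("DEMO_TAG", "latest")


@with_params_help(DemoParams)
class DemoRunner:
    """Runs the demo build."""


# Env vars are read at instantiation time, not import time:
params = DemoParams()
print(DemoRunner.__doc__)  # ends with the generated "Parameter defaults" table
```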

@ -0,0 +1,84 @@
"""
Git utility helpers for CLI tasks.
"""
import logging
from pathlib import Path
from cli.lib.common.path_helper import remove_dir
from cli.lib.common.utils import run_command
from git import GitCommandError, RemoteProgress, Repo
logger = logging.getLogger(__name__)
class PrintProgress(RemoteProgress):
"""Simple progress logger for git operations."""
def __init__(self, interval: int = 5):
super().__init__()
self._last_percent = -1
self._interval = interval
def update(self, op_code, cur, max=None, message=""):
msg = self._cur_line or message
if max and cur:
percent = int(cur / max * 100)
if percent != self._last_percent and percent % self._interval == 0:
self._last_percent = percent
logger.info("Progress: %d%% - %s", percent, msg)
elif msg:
logger.info(msg)
def clone_external_repo(target: str, repo: str, dst: str = "", update_submodules=False):
"""Clone repository with pinned commit and optional submodules."""
dst = dst or target
try:
logger.info("Cloning %s to %s", target, dst)
# Clone and fetch
remove_dir(dst)
r = Repo.clone_from(repo, dst, progress=PrintProgress())
r.git.fetch("--all", "--tags")
# Checkout pinned commit
commit = get_post_build_pinned_commit(target)
logger.info("Checking out pinned commit %s", commit)
r.git.checkout(commit)
# Update submodules if requested
if update_submodules and r.submodules:
logger.info("Updating %d submodule(s)", len(r.submodules))
for sm in r.submodules:
sm.update(init=True, recursive=True, progress=PrintProgress())
logger.info("Successfully cloned %s", target)
return r
except GitCommandError as e:
logger.error("Git operation failed: %s", e)
raise
def clone_vllm_pure(commit: str):
"""
    Clone vLLM and check out the pinned commit.
    """
    print("cloning vllm...", flush=True)
cwd = "vllm"
# delete the directory if it exists
remove_dir(cwd)
# Clone the repo & checkout commit
run_command("git clone https://github.com/vllm-project/vllm.git")
run_command(f"git checkout {commit}", cwd=cwd)
run_command("git submodule update --init --recursive", cwd=cwd)
def get_post_build_pinned_commit(name: str, prefix=".github/ci_commit_pins") -> str:
path = Path(prefix) / f"{name}.txt"
if not path.exists():
raise FileNotFoundError(f"Pin file not found: {path}")
return path.read_text(encoding="utf-8").strip()

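The pin-file mechanism reads a plain-text commit SHA from `.github/ci_commit_pins/{name}.txt`. A short sketch of how the pieces fit together (the SHA shown is a placeholder):
```python
from cli.lib.common.git_helper import clone_external_repo, get_post_build_pinned_commit

# .github/ci_commit_pins/vllm.txt holds a single commit SHA, e.g.:
# 0123456789abcdef0123456789abcdef01234567
print(get_post_build_pinned_commit("vllm"))

# clone_external_repo looks up the same pin internally, then clones,
# fetches tags, checks out the pinned commit, and inits submodules.
repo = clone_external_repo(
    target="vllm",
    repo="https://github.com/vllm-project/vllm.git",
    dst="vllm",
    update_submodules=True,
)
```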

@ -0,0 +1,14 @@
"""
Logging utility helpers for CLI tasks.
"""
import logging
import sys
def setup_logging(level: int = logging.INFO):
logging.basicConfig(
level=level,
format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
stream=sys.stdout,
)


@ -0,0 +1,62 @@
"""Path utility helpers for CLI tasks."""
import logging
import shutil
from pathlib import Path
from typing import Union
logger = logging.getLogger(__name__)
def get_path(path: Union[str, Path], resolve: bool = False) -> Path:
"""Convert to Path object, optionally resolving to absolute path."""
if not path:
raise ValueError("Path cannot be None or empty")
result = Path(path)
return result.resolve() if resolve else result
def ensure_dir_exists(path: Union[str, Path]) -> Path:
"""Create directory if it doesn't exist."""
path_obj = get_path(path)
path_obj.mkdir(parents=True, exist_ok=True)
return path_obj
def remove_dir(path: Union[str, Path, None]) -> None:
"""Remove directory if it exists."""
if not path:
return
path_obj = get_path(path)
if path_obj.exists():
shutil.rmtree(path_obj)
def force_create_dir(path: Union[str, Path]) -> Path:
"""Remove directory if exists, then create fresh empty directory."""
remove_dir(path)
return ensure_dir_exists(path)
def copy(src: Union[str, Path], dst: Union[str, Path]) -> None:
"""Copy file or directory from src to dst."""
src_path = get_path(src, resolve=True)
dst_path = get_path(dst, resolve=True)
if not src_path.exists():
raise FileNotFoundError(f"Source does not exist: {src_path}")
dst_path.parent.mkdir(parents=True, exist_ok=True)
if src_path.is_file():
shutil.copy2(src_path, dst_path)
elif src_path.is_dir():
shutil.copytree(src_path, dst_path, dirs_exist_ok=True)
else:
raise ValueError(f"Unsupported path type: {src_path}")
def is_path_exist(path: Union[str, Path, None]) -> bool:
"""Check if path exists."""
return bool(path and get_path(path).exists())


@ -0,0 +1,69 @@
import glob
import logging
import shlex
import shutil
import sys
from collections.abc import Iterable
from typing import Optional, Union
from cli.lib.common.utils import run_command
logger = logging.getLogger(__name__)
def pip_install_packages(
packages: Iterable[str] = (),
env=None,
*,
requirements: Optional[str] = None,
constraints: Optional[str] = None,
prefer_uv: bool = False,
) -> None:
use_uv = prefer_uv and shutil.which("uv") is not None
base = (
[sys.executable, "-m", "uv", "pip", "install"]
if use_uv
else [sys.executable, "-m", "pip", "install"]
)
if use_uv:
logger.info("Installing packages using uv pip")
cmd = base[:]
if requirements:
cmd += ["-r", requirements]
if constraints:
cmd += ["-c", constraints]
cmd += list(packages)
logger.info("pip installing packages: %s", " ".join(map(shlex.quote, cmd)))
run_command(" ".join(map(shlex.quote, cmd)), env=env)
logger.info("Done installing packages")
def pip_install_first_match(pattern: str, extras: Optional[str] = None, pref_uv=False):
"""
Install the first local whl that matches the given glob pattern.
Args:
pattern (str): Glob pattern for the wheel file(s).
        extras (str | None): Optional extras (e.g., "opt_einsum") to install with the wheel.
        pref_uv (bool): Prefer uv over pip when uv is available.
"""
matches = sorted(glob.glob(pattern))
if not matches:
raise FileNotFoundError(f"No files match: {pattern}")
wheel = matches[0]
target = f"{wheel}[{extras}]" if extras else wheel
logger.info("Installing wheel: %s", target)
pip_install_packages([target], prefer_uv=pref_uv)
def run_python(args: Union[str, list[str]], env=None):
"""
Run the python in the current environment.
"""
if isinstance(args, str):
args = shlex.split(args)
cmd = [sys.executable] + args
run_command(" ".join(map(shlex.quote, cmd)), env=env)

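A short usage sketch (wheel paths and extras are illustrative):
```python
from cli.lib.common.pip_helper import (
    pip_install_first_match,
    pip_install_packages,
    run_python,
)

# Install the first torch wheel found under ./dist, with an extra.
pip_install_first_match("dist/torch*.whl", extras="opt-einsum")

# Install from a requirements file, preferring uv when it is available.
pip_install_packages(requirements="requirements/test.txt", prefer_uv=True)

# Run a script with the current interpreter.
run_python("use_existing_torch.py")
```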

@ -0,0 +1,117 @@
"""
General utility helpers for CLI tasks.
"""
import logging
import os
import shlex
import subprocess
import sys
from contextlib import contextmanager
from typing import Optional
logger = logging.getLogger(__name__)
def run_command(
cmd: str,
use_shell: bool = False,
log_cmd: bool = True,
cwd: Optional[str] = None,
env: Optional[dict] = None,
check: bool = True,
) -> int:
"""Run a command with optional shell execution."""
if use_shell:
args = cmd
log_prefix = "[shell]"
executable = "/bin/bash"
else:
args = shlex.split(cmd)
log_prefix = "[cmd]"
executable = None
if log_cmd:
display_cmd = cmd if use_shell else " ".join(args)
logger.info("%s %s", log_prefix, display_cmd)
run_env = {**os.environ, **(env or {})}
proc = subprocess.run(
args,
shell=use_shell,
executable=executable,
stdout=sys.stdout,
stderr=sys.stderr,
cwd=cwd,
env=run_env,
check=False,
)
if check and proc.returncode != 0:
logger.error(
"%s Command failed (exit %s): %s", log_prefix, proc.returncode, cmd
)
raise subprocess.CalledProcessError(
proc.returncode, args if not use_shell else cmd
)
return proc.returncode
def str2bool(value: Optional[str]) -> bool:
"""Convert environment variables to boolean values."""
if not value:
return False
if not isinstance(value, str):
raise ValueError(
f"Expected a string value for boolean conversion, got {type(value)}"
)
value = value.strip().lower()
true_value_set = {"1", "true", "t", "yes", "y", "on", "enable", "enabled", "found"}
false_value_set = {"0", "false", "f", "no", "n", "off", "disable"}
if value in true_value_set:
return True
if value in false_value_set:
return False
raise ValueError(f"Invalid string value for boolean conversion: {value}")
@contextmanager
def temp_environ(updates: dict[str, str]):
"""
Temporarily set environment variables and restore them after the block.
Args:
updates: Dict of environment variables to set.
"""
missing = object()
old: dict[str, str | object] = {k: os.environ.get(k, missing) for k in updates}
try:
os.environ.update(updates)
yield
finally:
for k, v in old.items():
if v is missing:
os.environ.pop(k, None)
else:
os.environ[k] = v # type: ignore[arg-type]
@contextmanager
def working_directory(path: str):
"""
Temporarily change the working directory inside a context.
"""
if not path:
# No-op context
yield
return
prev_cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(prev_cwd)

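A short sketch showing how these helpers compose (the command and paths are illustrative):
```python
from cli.lib.common.utils import run_command, temp_environ, working_directory

# Run a test step with scoped env vars from inside the tests directory.
with temp_environ({"VLLM_WORKER_MULTIPROC_METHOD": "spawn"}), working_directory("tests"):
    code = run_command("pytest -v -s test_regression.py", check=False)
    if code != 0:
        print("step failed")
# The environment and working directory are restored here.
```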

@ -0,0 +1,650 @@
import logging
import os
import re
import subprocess
import sys
import textwrap
from collections.abc import Iterable
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Any, Optional
from cli.lib.common.cli_helper import BaseRunner
from cli.lib.common.docker_helper import local_image_exists
from cli.lib.common.envs_helper import (
env_bool_field,
env_path_field,
env_str_field,
get_env,
with_params_help,
)
from cli.lib.common.git_helper import clone_vllm_pure, get_post_build_pinned_commit
from cli.lib.common.path_helper import (
copy,
ensure_dir_exists,
force_create_dir,
get_path,
is_path_exist,
remove_dir,
)
from cli.lib.common.pip_helper import (
pip_install_first_match,
pip_install_packages,
run_python,
)
from cli.lib.common.utils import run_command, temp_environ, working_directory
logger = logging.getLogger(__name__)
# Default path for docker build artifacts
_DEFAULT_RESULT_PATH = "./shared"
# Temp folder inside the vllm workspace where torch wheels are copied for the docker build
_VLLM_TEMP_FOLDER = "tmp"
@dataclass
class VllmBuildParameters:
"""
Parameters defining the vllm external input configurations.
Combine with VllmDockerBuildArgs to define the vllm build environment
"""
# USE_TORCH_WHEEL: when true, use local Torch wheels; requires TORCH_WHEELS_PATH.
    # Otherwise, the docker build pulls torch nightly during the build
# TORCH_WHEELS_PATH: directory containing local torch wheels when use_torch_whl is True
use_torch_whl: bool = env_bool_field("USE_TORCH_WHEEL", True)
torch_whls_path: Path = env_path_field("TORCH_WHEELS_PATH", "./dist")
# USE_LOCAL_BASE_IMAGE: when true, use an existing local Docker base image; requires BASE_IMAGE
# Otherwise, pull dockerfile's default image remotely
# BASE_IMAGE: name:tag (only needed when use_local_base_image is True)
use_local_base_image: bool = env_bool_field("USE_LOCAL_BASE_IMAGE", True)
base_image: str = env_str_field("BASE_IMAGE")
    # USE_LOCAL_DOCKERFILE: when true ("1"), use a local Dockerfile; requires DOCKERFILE_PATH.
    # Otherwise, use vllm's default Dockerfile.nightly_torch for the build.
    # DOCKERFILE_PATH: path to the Dockerfile used when use_local_dockerfile is True
use_local_dockerfile: bool = env_bool_field("USE_LOCAL_DOCKERFILE", True)
dockerfile_path: Path = env_path_field(
"DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile.tmp_vllm"
)
# OUTPUT_DIR: where docker buildx (local exporter) will write artifacts
output_dir: Path = env_path_field("OUTPUT_DIR", "shared")
# --- Build args ----------------------------------------------------------
target_stage: str = env_str_field("TARGET_STAGE", "export-wheels")
tag_name: str = env_str_field("TAG", "vllm-wheels")
cuda_version: str = env_str_field("CUDA_VERSION", "12.8.1")
python_version: str = env_str_field("PYTHON_VERSION", "3.12")
max_jobs: str = env_str_field("MAX_JOBS", "64")
sccache_bucket: str = env_str_field("SCCACHE_BUCKET")
sccache_region: str = env_str_field("SCCACHE_REGION")
torch_cuda_arch_list: str = env_str_field("TORCH_CUDA_ARCH_LIST", "8.9")
def __post_init__(self):
checks = [
(
self.use_torch_whl, # flag
True, # trigger_value
"torch_whls_path", # resource
is_path_exist, # check_func
"TORCH_WHEELS_PATH is not provided, but USE_TORCH_WHEEL is set to 1",
),
(
self.use_local_base_image,
True,
"base_image",
local_image_exists,
f"BASE_IMAGE {self.base_image} does not found, but USE_LOCAL_BASE_IMAGE is set to 1",
),
(
self.use_local_dockerfile,
True,
"dockerfile_path",
is_path_exist,
" DOCKERFILE_PATH path does not found, but USE_LOCAL_DOCKERFILE is set to 1",
),
]
for flag, trigger_value, attr_name, check_func, error_msg in checks:
value = getattr(self, attr_name)
if flag == trigger_value:
if not value or not check_func(value):
raise ValueError(error_msg)
else:
logger.info("flag %s is not set", flag)
if not self.output_dir:
raise ValueError("missing required output_dir")
@with_params_help(VllmBuildParameters)
class VllmBuildRunner(BaseRunner):
"""
Build vLLM using docker buildx.
Environment variable options:
"USE_TORCH_WHEEL": "1: use local wheels; 0: pull nightly from pypi",
"TORCH_WHEELS_PATH": "Path to local wheels (when USE_TORCH_WHEEL=1)",
"USE_LOCAL_BASE_IMAGE": "1: use local base image; 0: default image",
"BASE_IMAGE": "name:tag to indicate base image the dockerfile depends on (when USE_LOCAL_BASE_IMAGE=1)",
"USE_LOCAL_DOCKERFILE": "1: use local Dockerfile; 0: vllm repo default dockerfile.torch_nightly",
"DOCKERFILE_PATH": "Path to Dockerfile (when USE_LOCAL_DOCKERFILE=1)",
"OUTPUT_DIR": "e.g. './shared'",
"TORCH_CUDA_ARCH_LIST": "e.g. '8.0' or '8.0;9.0'",
"CUDA_VERSION": "e.g. '12.8.1'",
"PYTHON_VERSION": "e.g. '3.12'",
"MAX_JOBS": "e.g. '64'",
"SCCACHE_BUCKET": "e.g. 'my-bucket'",
"SCCACHE_REGION": "e.g. 'us-west-2'",
"""
def __init__(self, args=None):
self.work_directory = "vllm"
def run(self):
"""
        Main entry point for the vllm build:
1. prepare vllm build environment
2. prepare the docker build command args
3. run docker build
"""
inputs = VllmBuildParameters()
logger.info("Running vllm build with inputs: %s", inputs)
clone_vllm()
self.cp_dockerfile_if_exist(inputs)
        # copy torch wheels from the repo root into the vllm workspace, if present
self.cp_torch_whls_if_exist(inputs)
ensure_dir_exists(inputs.output_dir)
cmd = self._generate_docker_build_cmd(inputs)
logger.info("Running docker build: \n %s", cmd)
run_command(cmd, cwd="vllm", env=os.environ.copy())
def cp_torch_whls_if_exist(self, inputs: VllmBuildParameters) -> str:
if not inputs.use_torch_whl:
return ""
tmp_dir = f"./{self.work_directory}/{_VLLM_TEMP_FOLDER}"
tmp_path = Path(tmp_dir)
force_create_dir(tmp_path)
copy(inputs.torch_whls_path, tmp_dir)
return tmp_dir
def cp_dockerfile_if_exist(self, inputs: VllmBuildParameters):
if not inputs.use_local_dockerfile:
logger.info("using vllm default dockerfile.torch_nightly for build")
return
dockerfile_path = get_path(inputs.dockerfile_path, resolve=True)
vllm_torch_dockerfile = Path(
f"./{self.work_directory}/docker/Dockerfile.nightly_torch"
)
copy(dockerfile_path, vllm_torch_dockerfile)
def get_result_path(self, path):
"""
Get the absolute path of the result path
"""
if not path:
path = _DEFAULT_RESULT_PATH
abs_path = get_path(path, resolve=True)
return abs_path
def _get_torch_wheel_path_arg(self, torch_whl_dir: Optional[Path]) -> str:
if not torch_whl_dir:
return ""
return f"--build-arg TORCH_WHEELS_PATH={_VLLM_TEMP_FOLDER}"
def _get_base_image_args(self, inputs: VllmBuildParameters) -> tuple[str, str, str]:
"""
Returns:
- base_image_arg: docker buildx arg string for base image
- final_base_image_arg: docker buildx arg string for vllm-base stage
- pull_flag: --pull=true or --pull=false depending on whether the image exists locally
"""
if not inputs.use_local_base_image:
return "", "", ""
base_image = inputs.base_image
# set both base image and final base image to the same local image
base_image_arg = f"--build-arg BUILD_BASE_IMAGE={base_image}"
final_base_image_arg = f"--build-arg FINAL_BASE_IMAGE={base_image}"
if local_image_exists(base_image):
pull_flag = "--pull=false"
return base_image_arg, final_base_image_arg, pull_flag
        logger.info(
            "Local image %s not found; will try to pull from remote", base_image
        )
return base_image_arg, final_base_image_arg, ""
def _generate_docker_build_cmd(
self,
inputs: VllmBuildParameters,
) -> str:
base_image_arg, final_base_image_arg, pull_flag = self._get_base_image_args(
inputs
)
torch_arg = self._get_torch_wheel_path_arg(inputs.torch_whls_path)
return textwrap.dedent(
f"""
docker buildx build \
--output type=local,dest={inputs.output_dir} \
-f docker/Dockerfile.nightly_torch \
{pull_flag} \
{torch_arg} \
{base_image_arg} \
{final_base_image_arg} \
--build-arg max_jobs={inputs.max_jobs} \
--build-arg CUDA_VERSION={inputs.cuda_version} \
--build-arg PYTHON_VERSION={inputs.python_version} \
--build-arg USE_SCCACHE={int(bool(inputs.sccache_bucket and inputs.sccache_region))} \
--build-arg SCCACHE_BUCKET_NAME={inputs.sccache_bucket} \
--build-arg SCCACHE_REGION_NAME={inputs.sccache_region} \
--build-arg torch_cuda_arch_list='{inputs.torch_cuda_arch_list}' \
--target {inputs.target_stage} \
-t {inputs.tag_name} \
--progress=plain .
"""
).strip()
@dataclass
class VllmTestParameters:
"""
Parameters defining the vllm external test input
!!!DO NOT ADD SECRETS IN THIS CLASS!!!
    You can put an environment variable name in VllmTestParameters if it's not the same as the secret one;
    fetch secrets directly from env variables at runtime.
"""
torch_whls_path: Path = env_path_field("TORCH_WHEELS_PATH", "./dist")
vllm_whls_path: Path = env_path_field("VLLM_WHEELS_PATH", "./shared")
torch_cuda_arch_list: str = env_str_field("TORCH_CUDA_ARCH_LIST", "8.9")
def __post_init__(self):
if not self.torch_whls_path.exists():
raise ValueError("missing torch_whls_path")
if not self.vllm_whls_path.exists():
raise ValueError("missing vllm_whls_path")
class TestInputType(Enum):
TEST_PLAN = "test_plan"
UNKNOWN = "unknown"
class VllmTestRunner(BaseRunner):
def __init__(self, args: Any):
self.work_directory = "vllm"
self.test_plan = ""
        self.test_type = TestInputType.UNKNOWN
if args.test_plan:
self.test_plan = args.test_plan
            self.test_type = TestInputType.TEST_PLAN
        # Matches the structure of the artifacts.zip from the torch build
self.TORCH_WHL_PATH_REGEX = "torch*.whl"
self.TORCH_WHL_EXTRA = "opt-einsum"
self.TORCH_ADDITIONAL_WHLS_REGEX = [
"vision/torchvision*.whl",
"audio/torchaudio*.whl",
]
# Match the structure of the artifacts.zip from vllm external build
self.VLLM_TEST_WHLS_REGEX = [
"wheels/xformers/xformers*.whl",
"wheels/vllm/vllm*.whl",
"wheels/flashinfer-python/flashinfer*.whl",
]
def prepare(self):
"""
        Prepare the test environment for vllm: clone the vllm repo, install all wheels and test dependencies, and set env vars.
"""
params = VllmTestParameters()
logger.info("Display VllmTestParameters %s", params)
self._set_envs(params)
clone_vllm(dst=self.work_directory)
with working_directory(self.work_directory):
remove_dir(Path("vllm"))
self._install_wheels(params)
self._install_dependencies()
# verify the torches are not overridden by test dependencies
self.check_versions()
def run(self):
"""
        Main entry point for the vllm test run.
"""
self.prepare()
with working_directory(self.work_directory):
            if self.test_type == TestInputType.TEST_PLAN:
self.run_test_plan(self.test_plan)
else:
raise ValueError(f"Unknown test type {self.test_type}")
def _install_wheels(self, params: VllmTestParameters):
logger.info("Running vllm test with inputs: %s", params)
logger.info("Installing torch wheel")
# torch_p = f"{str(params.torch_whls_path)}/{self.TORCH_WHL_PATH_REGEX}"
# pip_install_first_match(torch_p, self.TORCH_WHL_EXTRA)
logger.info("Installing other torch-related wheels")
torch_whls_path = [
f"{str(params.torch_whls_path)}/{whl_path}"
for whl_path in self.TORCH_ADDITIONAL_WHLS_REGEX
]
for torch_whl in torch_whls_path:
pip_install_first_match(torch_whl)
logger.info("Done. Installed torch and other torch-related wheels ")
logger.info("Installing vllm wheels")
vllm_whls_path = [
f"{str(params.vllm_whls_path)}/{whl_path}"
for whl_path in self.VLLM_TEST_WHLS_REGEX
]
for vllm_whl in vllm_whls_path:
pip_install_first_match(vllm_whl)
logger.info("Done. Installed vllm wheels")
def _install_test_dependencies(self):
"""
        Install test dependencies for the vllm test.
        This method replaces the torch dependencies in vllm's requirements/test.in
        with the local torch wheel info, then generates test.txt via `uv pip compile`,
        using the existing requirements/test.txt (generated from test.in against
        stable torch) as a soft constraint to pin package versions.
"""
# TODO(elainewy): move this as part of vllm build, to generate the test.txt file
logger.info("generate test.txt from requirements/test.in with local torch whls")
preprocess_test_in()
copy(
Path("requirements/test.txt"),
Path("snapshot_constraint.txt"),
)
run_command(
f"{sys.executable} -m uv pip compile requirements/test.in "
"-o test.txt "
"--index-strategy unsafe-best-match "
"--constraint snapshot_constraint.txt "
"--torch-backend cu128"
)
logger.info("install requirements from test.txt")
pip_install_packages(requirements="test.txt", prefer_uv=True)
logger.info("Done. install requirements from test.txt")
        # install mamba from source since it currently does not install cleanly with pip
# TODO(elainewy): move this as part of vllm build
pip_install_packages(
packages=[
"--no-build-isolation",
"git+https://github.com/state-spaces/mamba@v2.2.4",
],
prefer_uv=True,
)
logger.info("Done. installed requirements from test.txt")
def _install_dependencies(self):
pip_install_packages(packages=["-e", "tests/vllm_test_utils"], prefer_uv=True)
pip_install_packages(packages=["hf_transfer"], prefer_uv=True)
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
# using script from vllm repo to remove all torch packages from requirements txt
run_python("use_existing_torch.py")
# install common packages
for requirements in ["requirements/common.txt", "requirements/build.txt"]:
pip_install_packages(
requirements=requirements,
prefer_uv=True,
)
# install test packages
self._install_test_dependencies()
def check_versions(self):
"""
check installed packages version
"""
logger.info("double check installed packages")
patterns = ["torch", "xformers", "torchvision", "torchaudio", "vllm"]
for pkg in patterns:
try:
module = __import__(pkg)
version = getattr(module, "__version__", None)
version = version if version else "Unknown version"
logger.info("%s: %s", pkg, version)
except ImportError:
logger.info(" %s: Not installed", pkg)
logger.info("Done. checked installed packages")
def _set_envs(self, inputs: VllmTestParameters):
os.environ["TORCH_CUDA_ARCH_LIST"] = inputs.torch_cuda_arch_list
if not self.validate_cuda(get_env("TORCH_CUDA_ARCH_LIST")):
logger.warning(
"Missing supported TORCH_CUDA_ARCH_LIST. "
"Currently support TORCH_CUDA_ARCH_LIST env var "
"with supported arch [8.0, 8.9, 9.0]"
)
os.environ["HF_TOKEN"] = os.getenv("VLLM_TEST_HUGGING_FACE_TOKEN", "")
if not get_env("HF_TOKEN"):
raise ValueError(
"missing required HF_TOKEN, please set VLLM_TEST_HUGGING_FACE_TOKEN env var"
)
if not get_env("TORCH_CUDA_ARCH_LIST"):
raise ValueError(
"missing required TORCH_CUDA_ARCH_LIST, please set TORCH_CUDA_ARCH_LIST env var"
)
def run_test_plan(self, test_plan: str):
"""
        Run a list of tests based on the given test plan. Currently this is
        only used to run vllm tests.
"""
logger.info("run vllm tests.....")
tests_map = sample_test_plans()
if test_plan not in tests_map:
raise RuntimeError(
f"test {test_plan} not found, please add it to test plan pool"
)
tests = tests_map[test_plan]
logger.info("Running tests: %s", tests["title"])
pkgs = tests.get("package_install", [])
if pkgs:
logger.info("Installing packages: %s", pkgs)
pip_install_packages(packages=pkgs, prefer_uv=True)
with (
temp_environ(tests.get("env_var", {})),
working_directory(tests.get("working_directory", "tests")),
):
failures = []
for step in tests["steps"]:
with temp_environ(step.get("env_var", {})):
code = run_command(cmd=step["command"], check=False)
if code != 0:
failures.append(step)
if failures:
logger.error("Failed tests: %s", failures)
raise RuntimeError(f"{len(failures)} pytest runs failed: {failures}")
logger.info("Done. All tests passed")
def validate_cuda(self, value: str) -> bool:
VALID_VALUES = {"8.0", "8.9", "9.0"}
return all(v in VALID_VALUES for v in value.split())
def clone_vllm(dst: str = "vllm"):
clone_vllm_pure(get_post_build_pinned_commit(dst))
"""
clone_external_repo(
target="vllm",
repo="https://github.com/vllm-project/vllm.git",
dst=dst,
update_submodules=True,
)
"""
def preprocess_test_in(
target_file: str = "requirements/test.in", additional_packages: Iterable[str] = ()
):
"""
    This modifies target_file in place; by default it points to vllm's requirements/test.in.
    It removes torch packages from target_file and replaces them with the local torch wheels.
"""
additional_package_to_move = list(additional_packages or ())
pkgs_to_remove = [
"torch",
"torchvision",
"torchaudio",
"xformers",
"mamba_ssm",
] + additional_package_to_move
# Read current requirements
target_path = Path(target_file)
lines = target_path.read_text().splitlines()
# Remove lines starting with the package names (==, @, >=) — case-insensitive
pattern = re.compile(rf"^({'|'.join(pkgs_to_remove)})\s*(==|@|>=)", re.IGNORECASE)
kept_lines = [line for line in lines if not pattern.match(line)]
# Get local torch/vision/audio installs from pip freeze
# this is hacky, but it works
pip_freeze = subprocess.check_output(["pip", "freeze"], text=True)
header_lines = [
line
for line in pip_freeze.splitlines()
if re.match(
r"^(torch|torchvision|torchaudio)\s*@\s*file://", line, re.IGNORECASE
)
]
# Write back: header_lines + blank + kept_lines
out = "\n".join(header_lines + [""] + kept_lines) + "\n"
target_path.write_text(out)
logger.info("[INFO] Updated %s", target_file)
def sample_test_plans():
"""
    Simple sample test plans to unblock vllm CI development, mimicking
    https://github.com/vllm-project/vllm/blob/main/.buildkite/test-pipeline.yaml
"""
# TODO(elainewy): Read from yaml file to handle the env and tests for vllm
# TODO(elainewy): implement logics to handle package_install
return {
# test plan:
# required id, title, and steps
# optional: env_var, package_install, working_directory
        # by default the working_directory is "tests/", but it can be changed per test; for instance,
# vllm sample test happens in samples/
"vllm_basic_correctness_test": {
"title": "Basic Correctness Test",
"id": "vllm_basic_correctness_test",
"env_var": {
"VLLM_WORKER_MULTIPROC_METHOD": "spawn",
},
# test step:
# required: command
# available fields: env_var (env_var only set within the scope of the test step), package_install(pip package)
"steps": [
{
"command": "pytest -v -s basic_correctness/test_cumem.py",
},
{
"command": "pytest -v -s basic_correctness/test_basic_correctness.py",
},
{
"command": "pytest -v -s basic_correctness/test_cpu_offload.py",
},
{
"command": "pytest -v -s basic_correctness/test_preemption.py",
"env_var": {
"VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT": "1",
},
},
],
},
"vllm_basic_models_test": {
"title": "Basic models test",
"id": "vllm_basic_models_test",
"steps": [
{"command": "pytest -v -s models/test_transformers.py"},
{"command": "pytest -v -s models/test_registry.py"},
{"command": "pytest -v -s models/test_utils.py"},
{"command": "pytest -v -s models/test_vision.py"},
{"command": "pytest -v -s models/test_initialization.py"},
],
},
"vllm_entrypoints_test": {
"title": "Entrypoints Test ",
"id": "vllm_entrypoints_test",
"env_var": {
"VLLM_WORKER_MULTIPROC_METHOD": "spawn",
},
"steps": [
{
"command": " ".join(
[
"pytest",
"-v",
"-s",
"entrypoints/llm",
"--ignore=entrypoints/llm/test_lazy_outlines.py",
"--ignore=entrypoints/llm/test_generate.py",
"--ignore=entrypoints/llm/test_generate_multiple_loras.py",
"--ignore=entrypoints/llm/test_collective_rpc.py",
]
)
},
{"command": "pytest -v -s entrypoints/llm/test_lazy_outlines.py"},
{"command": "pytest -v -s entrypoints/llm/test_generate.py "},
{
"command": "pytest -v -s entrypoints/llm/test_generate_multiple_loras.py"
},
{
"env_var": {"VLLM_USE_V1": "0"},
"command": "pytest -v -s entrypoints/offline_mode",
},
],
},
"vllm_regression_test": {
"title": "Regression Test",
"id": "vllm_regression_test",
"package_install": ["modelscope"],
"steps": [
{"command": "pytest -v -s test_regression.py"},
],
},
}

.ci/lumen_cli/cli/run.py

@ -0,0 +1,40 @@
# cli/run.py
import argparse
import logging
from cli.build_cli.register_build import register_build_commands
from cli.lib.common.logger import setup_logging
from cli.test_cli.register_test import register_test_commands
logger = logging.getLogger(__name__)
def main():
# Define top-level parser
parser = argparse.ArgumentParser(description="Lumos CLI")
subparsers = parser.add_subparsers(dest="command", required=True)
parser.add_argument(
"--log-level", default="INFO", help="Log level (DEBUG, INFO, WARNING, ERROR)"
)
# registers second-level subcommands
register_build_commands(subparsers)
register_test_commands(subparsers)
# parse args after all options are registered
args = parser.parse_args()
# setup global logging
setup_logging(getattr(logging, args.log_level.upper(), logging.INFO))
logger.debug("Parsed args: %s", args)
if hasattr(args, "func"):
args.func(args)
else:
parser.print_help()
if __name__ == "__main__":
main()


@ -0,0 +1,51 @@
import argparse
import logging
from cli.lib.common.cli_helper import register_targets, RichHelp, TargetSpec
from cli.lib.core.vllm import VllmTestRunner
logger = logging.getLogger(__name__)
# Maps each target to its argparse configuration and runner;
# each entry becomes a `python -m cli.run test external {target}` subcommand
_TARGETS: dict[str, TargetSpec] = {
"vllm": {
"runner": VllmTestRunner,
"help": "test vLLM unittests",
}
# add yours ...
}
def common_args(parser: argparse.ArgumentParser) -> None:
"""
Add common CLI arguments to the given parser.
"""
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-tp",
"--test-plan",
type=str,
help="a pre-defined test plan to run, e.g. 'basic_correctness_test'",
)
# TODO(elainewy):add another common option that user can trigger a specific test with test config
def register_test_commands(subparsers: argparse._SubParsersAction) -> None:
build_parser = subparsers.add_parser(
"test",
help="test related commands",
formatter_class=RichHelp,
)
build_subparsers = build_parser.add_subparsers(dest="test_command", required=True)
overview = "\n".join(
f" {name:12} {spec.get('help', '')}" for name, spec in _TARGETS.items()
)
external_parser = build_subparsers.add_parser(
"external",
help="Test external targets",
description="Test third-party targets.\n\nAvailable targets:\n" + overview,
formatter_class=RichHelp,
)
register_targets(external_parser, _TARGETS, common_args=common_args)

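A small sketch of how the registered test command parses (the plan name comes from `sample_test_plans` in `cli/lib/core/vllm.py`; the runner call is commented out to avoid actually running tests):
```python
import argparse

from cli.test_cli.register_test import register_test_commands

parser = argparse.ArgumentParser(prog="cli.run")
subparsers = parser.add_subparsers(dest="command", required=True)
register_test_commands(subparsers)

ns = parser.parse_args(
    ["test", "external", "vllm", "--test-plan", "vllm_basic_correctness_test"]
)
print(ns.test_plan)  # "vllm_basic_correctness_test"
# ns.func(ns) would construct VllmTestRunner(ns) and call .run()
```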

@ -0,0 +1,23 @@
[project]
name = "lumen-ci"
version = "0.1.0"
dependencies = [
"pyyaml==6.0.2",
"GitPython==3.1.45",
"docker==7.1.0",
"pytest==7.3.2",
"uv==0.8.4"
]
[tool.setuptools]
packages = ["cli"]
[tool.setuptools.package-dir]
cli = "cli"
[tool.ruff.lint]
# Enable preview mode for linting
preview = true
# Now you can select your preview rules, like RUF048
extend-select = ["RUF048"]


@ -0,0 +1,47 @@
# tests/test_cli.py
import io
import sys
import unittest
from contextlib import redirect_stderr, redirect_stdout
from unittest.mock import patch
from cli.run import main
class TestArgparseCLI(unittest.TestCase):
@patch("cli.build_cli.register_build.VllmBuildRunner.run", return_value=None)
@patch("cli.build_cli.register_build.VllmBuildRunner.__init__", return_value=None)
def test_cli_run_build_external(self, mock_init, mock_run):
from cli.run import main # import after patches if needed
test_args = ["cli.run", "build", "external", "vllm"]
with patch.object(sys, "argv", test_args):
# argparse may call sys.exit on error; capture to avoid test aborts
try:
main()
except SystemExit:
pass
mock_init.assert_called_once() # got constructed
mock_run.assert_called_once_with() # run() called
def test_build_help(self):
test_args = ["cli.run", "build", "--help"]
with patch.object(sys, "argv", test_args):
stdout = io.StringIO()
stderr = io.StringIO()
# --help always raises SystemExit(0)
with self.assertRaises(SystemExit) as cm:
with redirect_stdout(stdout), redirect_stderr(stderr):
main()
self.assertEqual(cm.exception.code, 0)
output = stdout.getvalue()
self.assertIn("usage", output)
self.assertIn("external", output)
if __name__ == "__main__":
unittest.main()


@ -0,0 +1,115 @@
import argparse
import io
import unittest
from contextlib import redirect_stderr
from unittest.mock import patch
from cli.lib.common.cli_helper import BaseRunner, register_targets, RichHelp, TargetSpec
# ---- Dummy runners for unit tests ----
class FooRunner(BaseRunner):
"""Foo description from docstring."""
def run(self) -> None: # replaced by mock
pass
class BarRunner(BaseRunner):
def run(self) -> None: # replaced by mock
pass
def add_foo_args(p: argparse.ArgumentParser) -> None:
p.add_argument("--x", type=int, required=True, help="x value")
def common_args(p: argparse.ArgumentParser) -> None:
p.add_argument("--verbose", action="store_true", help="verbose flag")
def build_parser(specs: dict[str, TargetSpec]) -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(prog="app", formatter_class=RichHelp)
register_targets(
parser=parser,
target_specs=specs,
common_args=common_args,
)
return parser
def get_subparser(
parser: argparse.ArgumentParser, name: str
) -> argparse.ArgumentParser:
subparsers_action = next(
a
for a in parser._subparsers._group_actions # type: ignore[attr-defined]
if isinstance(a, argparse._SubParsersAction)
)
return subparsers_action.choices[name]
class TestRegisterTargets(unittest.TestCase):
def test_metavar_lists_targets(self):
specs: dict[str, TargetSpec] = {
"foo": {"runner": FooRunner, "add_arguments": add_foo_args},
"bar": {"runner": BarRunner},
}
parser = build_parser(specs)
subparsers_action = next(
a
for a in parser._subparsers._group_actions # type: ignore[attr-defined]
if isinstance(a, argparse._SubParsersAction)
)
self.assertEqual(subparsers_action.metavar, "{foo,bar}")
def test_add_arguments_and_common_args_present(self):
specs: dict[str, TargetSpec] = {
"foo": {"runner": FooRunner, "add_arguments": add_foo_args},
}
parser = build_parser(specs)
foo = get_subparser(parser, "foo")
help_text = foo.format_help()
self.assertIn("--x", help_text)
self.assertIn("--verbose", help_text)
def test_runner_constructed_with_ns_and_run_called(self):
specs: dict[str, TargetSpec] = {
"foo": {"runner": FooRunner, "add_arguments": add_foo_args},
}
parser = build_parser(specs)
with (
patch.object(FooRunner, "__init__", return_value=None) as mock_init,
patch.object(FooRunner, "run", return_value=None) as mock_run,
):
ns = parser.parse_args(["foo", "--x", "3", "--verbose"])
ns.func(ns) # set by register_targets
# __init__ received the Namespace
self.assertEqual(mock_init.call_count, 1)
(called_ns,), _ = mock_init.call_args
self.assertIsInstance(called_ns, argparse.Namespace)
# run() called with no args
mock_run.assert_called_once_with()
def test_runner_docstring_used_as_description_when_missing(self):
specs: dict[str, TargetSpec] = {
"foo": {"runner": FooRunner, "add_arguments": add_foo_args},
}
parser = build_parser(specs)
foo = get_subparser(parser, "foo")
help_text = foo.format_help()
self.assertIn("Foo description from docstring.", help_text)
def test_missing_target_raises_systemexit_with_usage(self):
specs: dict[str, TargetSpec] = {"foo": {"runner": FooRunner}}
parser = build_parser(specs)
buf = io.StringIO()
with self.assertRaises(SystemExit), redirect_stderr(buf):
parser.parse_args([])
err = buf.getvalue()
self.assertIn("usage:", err)
if __name__ == "__main__":
unittest.main()


@ -0,0 +1,75 @@
import unittest
from unittest import mock
from unittest.mock import MagicMock
import docker.errors as derr
from cli.lib.common.docker_helper import _get_client, local_image_exists
class TestDockerImageHelpers(unittest.TestCase):
def setUp(self):
# Reset the singleton in the target module
patcher = mock.patch("cli.lib.common.docker_helper._docker_client", None)
self.addCleanup(patcher.stop)
patcher.start()
def test_local_image_exists_true(self):
# Mock a docker client whose images.get returns an object (no exception)
mock_client = MagicMock()
mock_client.images.get.return_value = object()
ok = local_image_exists("repo:tag", client=mock_client)
self.assertTrue(ok)
def test_local_image_exists_not_found_false(self):
mock_client = MagicMock()
# Raise docker.errors.NotFound
mock_client.images.get.side_effect = derr.NotFound("nope")
ok = local_image_exists("missing:latest", client=mock_client)
self.assertFalse(ok)
def test_local_image_exists_api_error_false(self):
mock_client = MagicMock()
mock_client.images.get.side_effect = derr.APIError("boom", None)
ok = local_image_exists("broken:tag", client=mock_client)
self.assertFalse(ok)
def test_local_image_exists_uses_lazy_singleton(self):
# Patch docker.from_env used by _get_client()
with mock.patch(
"cli.lib.common.docker_helper.docker.from_env"
) as mock_from_env:
mock_docker_client = MagicMock()
mock_from_env.return_value = mock_docker_client
# First call should create and cache the client
c1 = _get_client()
self.assertIs(c1, mock_docker_client)
mock_from_env.assert_called_once()
# Second call should reuse cached client (no extra from_env calls)
c2 = _get_client()
self.assertIs(c2, mock_docker_client)
mock_from_env.assert_called_once() # still once
def test_local_image_exists_without_client_param_calls_get_client_once(self):
# Ensure _get_client is called and cached; local_image_exists should reuse it
with mock.patch("cli.lib.common.docker_helper._get_client") as mock_get_client:
mock_client = MagicMock()
mock_get_client.return_value = mock_client
# 1st call
local_image_exists("repo:tag")
# 2nd call
local_image_exists("repo:tag2")
            # local_image_exists should call _get_client each time,
            # but _get_client itself caches docker.from_env.
self.assertEqual(mock_get_client.call_count, 2)
self.assertEqual(mock_client.images.get.call_count, 2)
mock_client.images.get.assert_any_call("repo:tag")
mock_client.images.get.assert_any_call("repo:tag2")
if __name__ == "__main__":
unittest.main()


@ -0,0 +1,149 @@
import os
import unittest
from dataclasses import dataclass
from pathlib import Path
from unittest.mock import patch
import cli.lib.common.envs_helper as m
class TestEnvHelpers(unittest.TestCase):
def setUp(self):
# Keep a copy of the original environment to restore later
self._env_backup = dict(os.environ)
def tearDown(self):
# Restore environment to original state
os.environ.clear()
os.environ.update(self._env_backup)
# -------- get_env --------
def test_get_env_unset_returns_default(self):
with patch.dict(os.environ, {}, clear=True):
self.assertEqual(m.get_env("FOO", "default"), "default")
def test_get_env_empty_returns_default(self):
with patch.dict(os.environ, {"FOO": ""}, clear=True):
self.assertEqual(m.get_env("FOO", "default"), "default")
def test_get_env_set_returns_value(self):
with patch.dict(os.environ, {"FOO": "bar"}, clear=True):
self.assertEqual(m.get_env("FOO", "default"), "bar")
def test_get_env_not_exist_returns_default(self):
with patch.dict(os.environ, {"FOO": "bar"}, clear=True):
self.assertEqual(m.get_env("TEST_NOT_EXIST", "default"), "default")
def test_get_env_not_exist_without_default(self):
with patch.dict(os.environ, {"FOO": "bar"}, clear=True):
self.assertEqual(m.get_env("TEST_NOT_EXIST"), "")
# -------- env_bool --------
def test_env_bool_uses_default_when_unset(self):
with patch.dict(os.environ, {}, clear=True):
self.assertTrue(m.env_bool("FLAG", default=True))
self.assertFalse(m.env_bool("FLAG", default=False))
def test_env_bool_uses_str2bool_when_set(self):
# Patch str2bool used by env_bool so we don't depend on its exact behavior
def fake_str2bool(s: str) -> bool:
return s.lower() in {"1", "true", "yes", "on", "y"}
with (
patch.dict(os.environ, {"FLAG": "yEs"}, clear=True),
patch.object(m, "str2bool", fake_str2bool),
):
self.assertTrue(m.env_bool("FLAG", default=False))
# -------- env_path_optional / env_path --------
def test_env_path_optional_unset_returns_none_by_default(self):
with patch.dict(os.environ, {}, clear=True):
self.assertIsNone(m.env_path_optional("P"))
def test_env_path_optional_unset_returns_none_when_env_var_is_empty(self):
with patch.dict(os.environ, {"P": ""}, clear=True):
self.assertIsNone(m.env_path_optional("P"))
def test_env_path_optional_unset_returns_default_str(self):
# default as string; resolve=True by default -> absolute path
default_str = "x/y"
with patch.dict(os.environ, {}, clear=True):
p = m.env_path_optional("P", default=default_str)
self.assertIsInstance(p, Path)
self.assertIsNotNone(p)
if p:
self.assertTrue(p.is_absolute())
self.assertEqual(p.parts[-2:], ("x", "y"))
def test_env_path_optional_unset_returns_default_path_no_resolve(self):
d = Path("z")
with patch.dict(os.environ, {}, clear=True):
p = m.env_path_optional("P", default=d, resolve=False)
self.assertEqual(p, d)
def test_env_path_optional_respects_resolve_true(self):
with patch.dict(os.environ, {"P": "a/b"}, clear=True):
p = m.env_path_optional("P", resolve=True)
self.assertIsInstance(p, Path)
if p:
self.assertTrue(p.is_absolute())
def test_env_path_optional_respects_resolve_false(self):
with patch.dict(os.environ, {"P": "rel/dir"}, clear=True):
p = m.env_path_optional("P", resolve=False)
self.assertEqual(p, Path("rel/dir"))
if p:
self.assertFalse(p.is_absolute())
def test_env_path_raises_when_missing_and_default_none(self):
with patch.dict(os.environ, {}, clear=True):
with self.assertRaises(ValueError):
m.env_path("P", None, resolve=True)
def test_env_path_returns_path_when_present(self):
tmp = Path("./b").resolve()
with patch.dict(os.environ, {"P": str(tmp)}, clear=True):
p = m.env_path("P", None, resolve=True)
self.assertEqual(p, tmp)
# -------- dataclass field helpers --------
def test_dataclass_fields_read_env_at_instantiation(self):
@dataclass
class Cfg:
flag: bool = m.env_bool_field("FLAG", default=False)
out: Path = m.env_path_field("OUT", default="ab", resolve=True)
name: str = m.env_str_field("NAME", default="anon")
# First instantiation
with patch.dict(
os.environ, {"FLAG": "true", "OUT": "outdir", "NAME": "alice"}, clear=True
):
cfg1 = Cfg()
self.assertTrue(cfg1.flag)
self.assertIsInstance(cfg1.out, Path)
self.assertTrue(cfg1.out.is_absolute())
self.assertEqual(cfg1.name, "alice")
cfg1.name = "bob" # change instance value
self.assertEqual(cfg1.name, "bob") # change is reflected
# Change env; new instance should reflect new values
with patch.dict(os.environ, {"FLAG": "false", "NAME": ""}, clear=True):
cfg2 = Cfg()
self.assertFalse(cfg2.flag) # str2bool("false") -> False
self.assertTrue("ab" in str(cfg2.out))
self.assertIsInstance(cfg2.out, Path)
self.assertTrue(cfg2.out.is_absolute())
self.assertEqual(cfg2.name, "anon") # empty -> fallback to default
def test_dataclass_path_field_with_default_value(self):
@dataclass
class C2:
out: Path = m.env_path_field("OUT", default="some/dir", resolve=False)
with patch.dict(os.environ, {}, clear=True):
c = C2()
self.assertEqual(c.out, Path("some/dir"))
if __name__ == "__main__":
unittest.main()


@ -0,0 +1,122 @@
# test_path_utils.py
# Run: pytest -q
import os
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from cli.lib.common.path_helper import (
copy,
ensure_dir_exists,
force_create_dir,
get_path,
is_path_exist,
remove_dir,
)
class TestPathHelper(unittest.TestCase):
def setUp(self):
self.tmpdir = TemporaryDirectory()
self.tmp_path = Path(self.tmpdir.name)
def tearDown(self):
self.tmpdir.cleanup()
# -------- get_path --------
def test_get_path_returns_path_for_str(self):
# Use relative path to avoid absolute-ness
rel_str = "sub/f.txt"
os.chdir(self.tmp_path)
p = get_path(rel_str, resolve=False)
self.assertIsInstance(p, Path)
self.assertFalse(p.is_absolute())
self.assertEqual(str(p), rel_str)
def test_get_path_resolves(self):
rel_str = "sub/f.txt"
p = get_path(str(self.tmp_path / rel_str), resolve=True)
self.assertTrue(p.is_absolute())
self.assertTrue(str(p).endswith(rel_str))
def test_get_path_with_path_input(self):
p_in = self.tmp_path / "sub/f.txt"
p_out = get_path(p_in, resolve=False)
self.assertTrue(str(p_out) == str(p_in))
def test_get_path_with_none_raises(self):
with self.assertRaises(ValueError):
get_path(None) # type: ignore[arg-type]
def test_get_path_invalid_type_raises(self):
with self.assertRaises(TypeError):
get_path(123) # type: ignore[arg-type]
# -------- ensure_dir_exists / force_create_dir / remove_dir --------
def test_ensure_dir_exists_creates_and_is_idempotent(self):
d = self.tmp_path / "made"
ensure_dir_exists(d)
self.assertTrue(d.exists() and d.is_dir())
ensure_dir_exists(d)
def test_force_create_dir_clears_existing(self):
d = self.tmp_path / "fresh"
(d / "inner").mkdir(parents=True)
(d / "inner" / "f.txt").write_text("x")
force_create_dir(d)
self.assertTrue(d.exists())
self.assertEqual(list(d.iterdir()), [])
def test_remove_dir_none_is_noop(self):
remove_dir(None) # type: ignore[arg-type]
def test_remove_dir_nonexistent_is_noop(self):
ghost = self.tmp_path / "ghost"
remove_dir(ghost)
def test_remove_dir_accepts_str(self):
d = self.tmp_path / "to_rm"
d.mkdir()
remove_dir(str(d))
self.assertFalse(d.exists())
# -------- copy --------
def test_copy_file_to_file(self):
src = self.tmp_path / "src.txt"
dst = self.tmp_path / "out" / "dst.txt"
src.write_text("hello")
copy(src, dst)
self.assertEqual(dst.read_text(), "hello")
def test_copy_dir_to_new_dir(self):
src = self.tmp_path / "srcdir"
(src / "a").mkdir(parents=True)
(src / "a" / "f.txt").write_text("content")
dst = self.tmp_path / "destdir"
copy(src, dst)
self.assertEqual((dst / "a" / "f.txt").read_text(), "content")
def test_copy_dir_into_existing_dir_overwrite_true_merges(self):
src = self.tmp_path / "srcdir"
dst = self.tmp_path / "destdir"
(src / "x").mkdir(parents=True)
(src / "x" / "new.txt").write_text("new")
dst.mkdir()
(dst / "existing.txt").write_text("old")
copy(src, dst)
self.assertEqual((dst / "existing.txt").read_text(), "old")
self.assertEqual((dst / "x" / "new.txt").read_text(), "new")
def test_is_str_path_exist(self):
p = self.tmp_path / "x.txt"
p.write_text("1")
self.assertTrue(is_path_exist(str(p)))
self.assertTrue(is_path_exist(p))
self.assertFalse(is_path_exist(str(self.tmp_path / "missing")))
self.assertFalse(is_path_exist(self.tmp_path / "missing"))
self.assertFalse(is_path_exist(""))
if __name__ == "__main__":
unittest.main()


@ -0,0 +1,181 @@
import os
import tempfile
import unittest
from pathlib import Path
from unittest.mock import MagicMock, patch
import cli.lib.core.vllm as vllm
class TestVllmBuildParameters(unittest.TestCase):
@patch("cli.lib.core.vllm.local_image_exists", return_value=True)
@patch("cli.lib.core.vllm.is_path_exist", return_value=True)
@patch(
"cli.lib.common.envs_helper.env_path_optional",
side_effect=lambda name, default=None, resolve=True: {
"DOCKERFILE_PATH": Path("/abs/vllm/Dockerfile"),
"TORCH_WHEELS_PATH": Path("/abs/dist"),
"OUTPUT_DIR": Path("/abs/shared"),
}.get(name, Path(default) if default is not None else None),
)
@patch.dict(
os.environ,
{
"USE_TORCH_WHEEL": "1",
"USE_LOCAL_BASE_IMAGE": "1",
"USE_LOCAL_DOCKERFILE": "1",
"BASE_IMAGE": "my/image:tag",
"DOCKERFILE_PATH": "vllm/Dockerfile",
"TORCH_WHEELS_PATH": "dist",
"OUTPUT_DIR": "shared",
},
clear=True,
)
def test_params_success_normalizes_and_validates(
self, mock_env_path, mock_is_path, mock_local_img
):
params = vllm.VllmBuildParameters()
self.assertEqual(params.torch_whls_path, Path("/abs/dist"))
self.assertEqual(params.dockerfile_path, Path("/abs/vllm/Dockerfile"))
self.assertEqual(params.output_dir, Path("/abs/shared"))
self.assertEqual(params.base_image, "my/image:tag")
@patch("cli.lib.core.vllm.is_path_exist", return_value=False)
@patch.dict(
os.environ, {"USE_TORCH_WHEEL": "1", "TORCH_WHEELS_PATH": "dist"}, clear=True
)
def test_params_missing_torch_whls_raises(self, _is_path):
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
with self.assertRaises(ValueError) as cm:
vllm.VllmBuildParameters(
use_local_base_image=False,
use_local_dockerfile=False,
)
err = cm.exception
self.assertIn("TORCH_WHEELS_PATH", str(err))
@patch("cli.lib.core.vllm.local_image_exists", return_value=False)
@patch.dict(
os.environ, {"USE_LOCAL_BASE_IMAGE": "1", "BASE_IMAGE": "img:tag"}, clear=True
)
def test_params_missing_local_base_image_raises(self, _local_img):
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
with self.assertRaises(ValueError) as cm:
vllm.VllmBuildParameters(
use_torch_whl=False,
use_local_dockerfile=False,
)
err = cm.exception
self.assertIn("BASE_IMAGE", str(err))
@patch("cli.lib.core.vllm.is_path_exist", return_value=False)
@patch.dict(
os.environ,
{"USE_LOCAL_DOCKERFILE": "1", "DOCKERFILE_PATH": "Dockerfile"},
clear=True,
)
def test_params_missing_dockerfile_raises(self, _is_path):
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
with self.assertRaises(ValueError) as cm:
vllm.VllmBuildParameters(
use_torch_whl=False,
use_local_base_image=False,
)
err = cm.exception
self.assertIn("DOCKERFILE_PATH", str(err))
@patch("cli.lib.core.vllm.is_path_exist", return_value=False)
@patch.dict(
os.environ,
{"OUTPUT_DIR": ""},
clear=True,
)
def test_params_missing_output_dir(self, _is_path):
with self.assertRaises(FileNotFoundError):
vllm.VllmBuildParameters()
class TestBuildCmdAndRun(unittest.TestCase):
@patch("cli.lib.core.vllm.local_image_exists", return_value=True)
def test_generate_docker_build_cmd_includes_bits(self, _exists):
runner = vllm.VllmBuildRunner()
# Craft inputs that simulate a prepared build
inputs = MagicMock()
inputs.output_dir = Path("/abs/out")
inputs.use_local_base_image = True
inputs.base_image = "img:tag"
inputs.torch_whls_path = Path("./vllm/tmp")
inputs.max_jobs = 64
inputs.cuda_version = "12.8.1"
inputs.python_version = "3.12"
inputs.sccache_bucket = "my-bucket"
inputs.sccache_region = "us-west-2"
inputs.torch_cuda_arch_list = "8.0;9.0"
inputs.target_stage = "export-wheels"
inputs.tag_name = "vllm-wheels"
cmd = runner._generate_docker_build_cmd(inputs)
squashed = " ".join(cmd.split()) # normalize whitespace for matching
self.assertIn("--output type=local,dest=/abs/out", squashed)
self.assertIn("-f docker/Dockerfile.nightly_torch", squashed)
self.assertIn("--pull=false", squashed)
self.assertIn("--build-arg TORCH_WHEELS_PATH=tmp", squashed)
self.assertIn("--build-arg BUILD_BASE_IMAGE=img:tag", squashed)
self.assertIn("--build-arg FINAL_BASE_IMAGE=img:tag", squashed)
self.assertIn("--build-arg max_jobs=64", squashed)
self.assertIn("--build-arg CUDA_VERSION=12.8.1", squashed)
self.assertIn("--build-arg PYTHON_VERSION=3.12", squashed)
self.assertIn("--build-arg USE_SCCACHE=1", squashed)
self.assertIn("--build-arg SCCACHE_BUCKET_NAME=my-bucket", squashed)
self.assertIn("--build-arg SCCACHE_REGION_NAME=us-west-2", squashed)
self.assertIn("--build-arg torch_cuda_arch_list='8.0;9.0'", squashed)
self.assertIn("--target export-wheels", squashed)
self.assertIn("-t vllm-wheels", squashed)
@patch("cli.lib.core.vllm.run_command")
@patch("cli.lib.core.vllm.ensure_dir_exists")
@patch("cli.lib.core.vllm.clone_vllm")
@patch.object(
vllm.VllmBuildRunner,
"_generate_docker_build_cmd",
return_value="docker buildx ...",
)
@patch.dict(
os.environ,
{
# Make __post_init__ validations pass cheaply
"USE_TORCH_WHEEL": "0",
"USE_LOCAL_BASE_IMAGE": "0",
"USE_LOCAL_DOCKERFILE": "0",
"OUTPUT_DIR": "shared",
},
clear=True,
)
def test_run_calls_clone_prepare_and_build(
self, mock_gen, mock_clone, mock_ensure, mock_run
):
# Stub parameters instance so we avoid FS/Docker accesses in run()
params = MagicMock()
params.output_dir = Path("shared")
params.use_local_dockerfile = False
params.use_torch_whl = False
with patch("cli.lib.core.vllm.VllmBuildParameters", return_value=params):
runner = vllm.VllmBuildRunner()
runner.run()
mock_clone.assert_called_once()
mock_ensure.assert_called_once_with(Path("shared"))
mock_gen.assert_called_once_with(params)
mock_run.assert_called_once()
# ensure we run in vllm workdir
_, kwargs = mock_run.call_args
assert kwargs.get("cwd") == "vllm"
if __name__ == "__main__":
unittest.main()

@ -1639,6 +1639,16 @@ elif [[ "${TEST_CONFIG}" == *xla* ]]; then
install_torchvision
build_xla
test_xla
elif [[ "$TEST_CONFIG" == *vllm* ]]; then
(cd .ci/lumen_cli && python -m pip install -e .)
if [[ "$BUILD_ENVIRONMENT" == *sm80* ]]; then
export TORCH_CUDA_ARCH_LIST="8.0"
elif [[ "$BUILD_ENVIRONMENT" == *sm90* ]]; then
export TORCH_CUDA_ARCH_LIST="9.0"
else
export TORCH_CUDA_ARCH_LIST="8.9"
fi
python -m cli.run test external vllm --test-plan "$TEST_CONFIG"
elif [[ "${TEST_CONFIG}" == *executorch* ]]; then
test_executorch
elif [[ "$TEST_CONFIG" == 'jit_legacy' ]]; then

@ -1 +1 @@
6a39ba85fe0f2fff9494b5eccea717c93510c230
53d7c39271aeb0568afcae337396a972e1848586

@ -0,0 +1,414 @@
# TODO(elainwy): remove this file after the torch nightly dockerfile is in sync in vllm repo
# The vLLM Dockerfile is used to construct vLLM image against torch nightly and torch main that can be directly used for testing
ARG CUDA_VERSION=12.8.1
ARG PYTHON_VERSION=3.12
# BUILD_BASE_IMAGE: used to set up the Python environment and build the xformers and vllm wheels. It can be replaced with a different base image from the local machine;
# by default, it uses the torch-nightly-base stage from this docker image
ARG BUILD_BASE_IMAGE=torch-nightly-base
# FINAL_BASE_IMAGE: used to set up the vllm-installed environment and build flashinfer;
# by default, it uses devel-ubuntu22.04 official image.
ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
#################### TORCH NIGHTLY BASE IMAGE ####################
# A base image for building vLLM on devel-ubuntu22.04; this is mainly used to build vllm in the vllm buildkite CI
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 AS torch-nightly-base
ARG CUDA_VERSION=12.8.1
ARG PYTHON_VERSION=3.12
ARG TARGETPLATFORM
ENV DEBIAN_FRONTEND=noninteractive
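# Record a dotless Python version string (e.g. 3.12 -> 312) for later build steps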
RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \
echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment
# Install Python and other dependencies if they are not already present
RUN if ! command -v python3 >/dev/null || ! python3 --version | grep -q "${PYTHON_VERSION}"; then \
echo "Installing Python ${PYTHON_VERSION}..." && \
echo 'tzdata tzdata/Areas select America' | debconf-set-selections && \
echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections && \
apt-get update -y && \
apt-get install -y ccache software-properties-common git curl sudo && \
for i in 1 2 3; do \
add-apt-repository -y ppa:deadsnakes/ppa && break || \
{ echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
done && \
apt-get update -y && \
apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv && \
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 && \
update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} && \
ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config && \
curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}; \
else \
echo "Python ${PYTHON_VERSION} already present, skipping setup."; \
fi \
&& python3 --version && python3 -m pip --version
# Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519,
# which was causing warning spam when compiling the CUTLASS kernels
RUN current_gcc_version=$(gcc -dumpversion | cut -f1 -d.) && \
if [ "$current_gcc_version" -lt 10 ]; then \
echo "GCC version is $current_gcc_version, installing gcc-10..."; \
apt-get update && \
apt-get install -y gcc-10 g++-10 && \
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 && \
update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100; \
else \
echo "GCC version is $current_gcc_version, no need to install gcc-10."; \
fi && \
gcc --version && g++ --version
# install uv for faster pip installs
RUN --mount=type=cache,target=/root/.cache/uv \
python3 -m pip install uv==0.8.4
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
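# "unsafe-best-match" lets uv consider every configured index and pick the best
# matching version, which is needed when mixing PyPI with the PyTorch nightly index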
#################### TORCH NIGHTLY BASE IMAGE ####################
#################### BASE BUILD IMAGE ####################
# A base image for building vLLM with torch nightly or torch wheels
# prepare basic build environment
FROM ${BUILD_BASE_IMAGE} AS base
USER root
# Re-declare the global CUDA_VERSION arg so it is visible inside this stage
ARG CUDA_VERSION=12.8.1
# Workaround for https://github.com/openai/triton/issues/2507 and
# https://github.com/pytorch/pytorch/issues/107960 -- hopefully
# this won't be needed for future versions of this docker image
# or future versions of triton.
RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/
# Install uv for faster pip installs if it is not already present
RUN --mount=type=cache,target=/root/.cache/uv \
if ! python3 -m uv --version >/dev/null 2>&1; then \
python3 -m pip install uv==0.8.4; \
fi
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
WORKDIR /workspace
# install build and runtime dependencies
COPY requirements/common.txt requirements/common.txt
COPY use_existing_torch.py use_existing_torch.py
COPY pyproject.toml pyproject.toml
# install build and runtime dependencies without stable torch version
RUN python3 use_existing_torch.py
# Default mount path used as a placeholder; this just avoids a mount error.
# Change to a different vllm folder if this path no longer exists.
ARG TORCH_WHEELS_PATH="./requirements"
ARG PINNED_TORCH_VERSION
# Install torch, torchaudio and torchvision based on the input
# if TORCH_WHEELS_PATH is the default "./requirements", it will pull the nightly versions using pip
# otherwise, it will use the whls from TORCH_WHEELS_PATH from the host machine
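# Expected host layout when TORCH_WHEELS_PATH is set: torch-*.whl at the root,
# the torchvision wheel under ./vision, and the torchaudio wheel under ./audio
# (see the find commands below)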
RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \
--mount=type=cache,target=/root/.cache/uv \
if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \
torch_whl=$(find /dist -maxdepth 1 -name 'torch-*.whl' -print -quit); \
vision_whl=$(find /dist/vision -name 'torchvision*.whl' | head -n1 | xargs); \
audio_whl=$(find /dist/audio -name 'torchaudio*.whl' | head -n1 | xargs); \
uv pip install --system "${torch_whl}[opt-einsum]"; \
uv pip install --system "${vision_whl}"; \
uv pip install --system "${audio_whl}"; \
elif [ -n "$PINNED_TORCH_VERSION" ]; then \
echo "[INFO] Installing pinned torch nightly version: $PINNED_TORCH_VERSION"; \
uv pip install --system "$PINNED_TORCH_VERSION" --index-url https://download.pytorch.org/whl/nightly/cu128; \
else \
echo "[INFO] Installing torch nightly with latest one"; \
uv pip install --system torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu128; \
fi
# Install numba 0.61.2 for cuda environment
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system numba==0.61.2
# Install common dependencies from vllm common.txt
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system -r requirements/common.txt
# Must be set before installing xformers, so the correct version of xformers is built.
ARG torch_cuda_arch_list='8.0;8.6;8.9;9.0'
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
ARG max_jobs=16
ENV MAX_JOBS=${max_jobs}
# Build xformers with cuda and torch nightly/wheel
# following official xformers guidance: https://github.com/facebookresearch/xformers#build
ARG XFORMERS_COMMIT=f2de641ef670510cadab099ce6954031f52f191c
ENV CCACHE_DIR=/root/.cache/ccache
RUN --mount=type=cache,target=/root/.cache/ccache \
--mount=type=cache,target=/root/.cache/uv \
echo 'git clone xformers...' \
&& git clone https://github.com/facebookresearch/xformers.git --recursive \
&& cd xformers \
&& git checkout ${XFORMERS_COMMIT} \
&& git submodule update --init --recursive \
&& echo 'finish git clone xformers...' \
&& rm -rf build \
&& python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose \
&& cd .. \
&& rm -rf xformers
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system xformers-dist/*.whl --verbose
# The build can take a long time, and the torch nightly version fetched from the URL can change by the next docker stage.
# Track the nightly torch version used in this build so the runtime environment can be pinned to the same version.
RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio' > torch_build_versions.txt
RUN cat torch_build_versions.txt
RUN pip freeze | grep -E 'torch|xformers|torchvision|torchaudio'
#################### BASE BUILD IMAGE ####################
#################### WHEEL BUILD IMAGE ####################
# Image used to build vllm wheel
FROM base AS build
ARG TARGETPLATFORM
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
COPY . .
RUN python3 use_existing_torch.py
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system -r requirements/build.txt
ARG GIT_REPO_CHECK=0
RUN --mount=type=bind,source=.git,target=.git \
if [ "$GIT_REPO_CHECK" != "0" ]; then bash tools/check_repo.sh ; fi
# Max jobs used by Ninja to build extensions
ARG max_jobs=16
ENV MAX_JOBS=${max_jobs}
ARG nvcc_threads=2
ENV NVCC_THREADS=$nvcc_threads
ARG torch_cuda_arch_list='8.0;8.6;8.9;9.0'
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
ARG USE_SCCACHE
ARG SCCACHE_BUCKET_NAME=vllm-build-sccache
ARG SCCACHE_REGION_NAME=us-west-2
ARG SCCACHE_S3_NO_CREDENTIALS=0
# if USE_SCCACHE is set, use sccache to speed up compilation
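# --py-limited-api=cp38 produces an abi3 wheel that is usable on CPython >= 3.8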
RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=.git,target=.git \
if [ "$USE_SCCACHE" = "1" ]; then \
echo "Installing sccache..." \
&& curl -L -o sccache.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz \
&& tar -xzf sccache.tar.gz \
&& sudo mv sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/bin/sccache \
&& rm -rf sccache.tar.gz sccache-v0.8.1-x86_64-unknown-linux-musl \
&& export SCCACHE_BUCKET=${SCCACHE_BUCKET_NAME} \
&& export SCCACHE_REGION=${SCCACHE_REGION_NAME} \
&& export SCCACHE_S3_NO_CREDENTIALS=${SCCACHE_S3_NO_CREDENTIALS} \
&& export SCCACHE_IDLE_TIMEOUT=0 \
&& export CMAKE_BUILD_TYPE=Release \
&& sccache --show-stats \
&& python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38 \
&& sccache --show-stats; \
fi
ENV CCACHE_DIR=/root/.cache/ccache
RUN --mount=type=cache,target=/root/.cache/ccache \
--mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=.git,target=.git \
if [ "$USE_SCCACHE" != "1" ]; then \
# Clean any existing CMake artifacts
rm -rf .deps && \
mkdir -p .deps && \
python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38; \
fi
RUN echo "[DEBUG] Listing current directory:" && \
ls -al && \
echo "[DEBUG] Showing torch_build_versions.txt content:" && \
cat torch_build_versions.txt
#################### WHEEL BUILD IMAGE ####################
################### VLLM INSTALLED IMAGE ####################
# Set up a clean environment for vLLM tests and the API server, using ubuntu22.04 with AOT flashinfer
FROM ${FINAL_BASE_IMAGE} AS vllm-base
USER root
# Re-declare global build args so they are visible inside this stage
ARG CUDA_VERSION=12.8.1
ARG PYTHON_VERSION=3.12
# prepare the runtime environment
WORKDIR /workspace
RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \
echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment
# Install Python and other dependencies if they are not already present
RUN if ! command -v python3 >/dev/null || ! python3 --version | grep -q "${PYTHON_VERSION}"; then \
echo "Installing Python ${PYTHON_VERSION}..." && \
echo 'tzdata tzdata/Areas select America' | debconf-set-selections && \
echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections && \
apt-get update -y && \
apt-get install -y ccache software-properties-common git curl sudo && \
for i in 1 2 3; do \
add-apt-repository -y ppa:deadsnakes/ppa && break || \
{ echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
done && \
apt-get update -y && \
apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv && \
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 && \
update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} && \
ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config && \
curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}; \
else \
echo "Python ${PYTHON_VERSION} already present, skipping setup."; \
fi \
&& python3 --version && python3 -m pip --version
# Get the torch versions and whls used in previous stages for consistency
COPY --from=base /workspace/torch_build_versions.txt ./torch_build_versions.txt
COPY --from=base /workspace/xformers-dist /wheels/xformers
COPY --from=build /workspace/vllm-dist /wheels/vllm
RUN echo "[DEBUG] Listing current directory before torch install step:" && \
ls -al && \
echo "[DEBUG] Showing torch_build_versions.txt content:" && \
cat torch_build_versions.txt
# Workaround for https://github.com/openai/triton/issues/2507 and
# https://github.com/pytorch/pytorch/issues/107960 -- hopefully
# this won't be needed for future versions of this docker image
# or future versions of triton.
RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/
# Install uv for faster pip installs if it is not already present
RUN --mount=type=cache,target=/root/.cache/uv \
if ! python3 -m uv --version > /dev/null 2>&1; then \
python3 -m pip install uv==0.8.4; \
fi
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Default mount path used as a placeholder; this just avoids a mount error
ARG TORCH_WHEELS_PATH="./requirements"
# Install torch, torchaudio and torchvision
# if TORCH_WHEELS_PATH is the default "./requirements", it will pip-install the nightly versions pinned in torch_build_versions.txt
# otherwise, it will use the whls from TORCH_WHEELS_PATH from the host machine
RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \
--mount=type=cache,target=/root/.cache/uv \
if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \
torch_whl=$(find /dist -maxdepth 1 -name 'torch-*.whl' -print -quit); \
vision_whl=$(find /dist/vision -name 'torchvision*.whl' | head -n1 | xargs); \
audio_whl=$(find /dist/audio -name 'torchaudio*.whl' | head -n1 | xargs); \
echo "Found: '${torch_whl}' '${audio_whl}' '${vision_whl}'"; \
uv pip install --system "${torch_whl}[opt-einsum]"; \
uv pip install --system "${vision_whl}"; \
uv pip install --system "${audio_whl}"; \
else \
echo "[INFO] Installing torch versions from torch_build_versions.txt"; \
uv pip install --system $(cat torch_build_versions.txt | xargs) --index-url https://download.pytorch.org/whl/nightly/cu128; \
fi
# Install the vllm wheel from previous stage
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system /wheels/vllm/*.whl --verbose
# Install xformers wheel from previous stage
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system /wheels/xformers/*.whl --verbose
# Build flashinfer from source.
ARG torch_cuda_arch_list='8.0;8.9;9.0a'
# Install packages needed to build flashinfer
# see issue: https://github.com/flashinfer-ai/flashinfer/issues/738
RUN pip install build==1.3.0
RUN pip freeze | grep -E 'setuptools|packaging|build'
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
# Build flashinfer from source against torch nightly (takes around 10 mins)
ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git"
# Keep this in sync with https://github.com/vllm-project/vllm/blob/main/requirements/cuda.txt
ARG FLASHINFER_GIT_REF="v0.2.9rc2"
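# flashinfer.aot pre-compiles the kernels for the arches in TORCH_CUDA_ARCH_LIST
# ahead of time, so no JIT compilation is needed at runtime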
RUN --mount=type=cache,target=/root/.cache/uv \
git clone --depth 1 --recursive --shallow-submodules \
--branch ${FLASHINFER_GIT_REF} \
${FLASHINFER_GIT_REPO} flashinfer \
&& echo "Building FlashInfer with AOT for arches: ${torch_cuda_arch_list}" \
&& cd flashinfer \
&& python3 -m flashinfer.aot \
&& python3 -m build --no-isolation --wheel --outdir ../wheels/flashinfer \
&& cd .. \
&& rm -rf flashinfer
# install flashinfer python
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system wheels/flashinfer/*.whl --verbose
# Logging to confirm the torch versions
RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer'
################### VLLM INSTALLED IMAGE ####################
#################### UNITTEST IMAGE #############################
FROM vllm-base AS test
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
COPY tests/ tests/
COPY examples examples
COPY benchmarks benchmarks
COPY ./vllm/collect_env.py .
COPY requirements/common.txt requirements/common.txt
COPY use_existing_torch.py use_existing_torch.py
COPY pyproject.toml pyproject.toml
# Install build and runtime dependencies without stable torch version
COPY requirements/nightly_torch_test.txt requirements/nightly_torch_test.txt
RUN python3 use_existing_torch.py
# Install common runtime and test dependencies
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system -r requirements/common.txt
# enable fast downloads from hf (for testing)
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system hf_transfer
ENV HF_HUB_ENABLE_HF_TRANSFER=1
# install development dependencies (for testing)
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system -e tests/vllm_test_utils
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system -r requirements/nightly_torch_test.txt
# Workaround for #17068
# pinned commit for v2.2.4
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system --no-build-isolation "git+https://github.com/state-spaces/mamba@95d8aba8a8c75aedcaa6143713b11e745e7cd0d9#egg=mamba-ssm"
# Logging to confirm the torch versions
RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer'
# Logging to confirm all the packages are installed
RUN pip freeze
#################### UNITTEST IMAGE #############################
#################### EXPORT STAGE ####################
FROM scratch AS export-wheels
# Just copy the wheels we prepared in previous stages
COPY --from=base /workspace/xformers-dist /wheels/xformers
COPY --from=build /workspace/vllm-dist /wheels/vllm
COPY --from=vllm-base /workspace/wheels/flashinfer /wheels/flashinfer-python

@ -26,6 +26,7 @@ ciflow_push_tags:
- ciflow/trunk
- ciflow/unstable
- ciflow/xpu
- ciflow/vllm
- ciflow/torchbench
- ciflow/op-benchmark
- ciflow/pull

@ -0,0 +1,292 @@
name: linux-external-build
on:
workflow_call:
inputs:
build-environment:
required: true
type: string
description: Top-level label for what's being built/tested.
build-target:
required: true
type: string
description: Target library to build.
build-generates-artifacts:
required: false
type: boolean
default: true
description: If set, upload generated build artifacts.
artifacts-folder-name:
required: false
type: string
description: Must be different from build-environment.
default: ""
docker-image:
required: true
type: string
description: Docker image to run in or replace the external base image.
cuda-arch-list:
required: false
type: string
default: "8.9"
description: |
List of CUDA architectures the CI build should target.
runner_prefix:
required: false
default: ""
type: string
description: Prefix for runner label
runner:
required: false
type: string
default: "linux.2xlarge"
description: |
Label of the runner this job should run on.
s3-bucket:
description: S3 bucket to download artifact
required: false
type: string
default: "gha-artifacts"
use-gha:
required: false
type: string
default: ""
description: If set to any value, use GHA to download the artifact. Otherwise use S3.
aws-role-to-assume:
description: Role to assume for downloading artifacts
required: false
type: string
default: ""
disable-monitor:
description: |
Disable utilization monitoring for build job
required: false
type: boolean
default: false
monitor-log-interval:
description: |
Set the interval for the monitor script to log utilization.
required: false
type: number
default: 5
monitor-data-collect-interval:
description: |
Set the interval for the monitor script to collect data.
required: false
type: number
default: 1
secrets:
HUGGING_FACE_HUB_TOKEN:
required: false
description: |
HF Auth token to avoid rate limits when downloading models or datasets from hub
SCRIBE_GRAPHQL_ACCESS_TOKEN:
required: false
description: |
FB app token to write to scribe endpoint
jobs:
build-external-lib:
environment: ${{ github.ref == 'refs/heads/main' && 'scribe-protected' || startsWith(github.ref, 'refs/heads/release/') && 'scribe-protected' || contains(github.event.pull_request.labels.*.name, 'ci-scribe') && 'scribe-pr' || '' }}
# Don't run on forked repos
if: github.repository_owner == 'pytorch'
runs-on: ${{ inputs.runner_prefix}}${{ inputs.runner }}
timeout-minutes: 240
steps:
- name: Setup SSH (Click me for login details)
uses: pytorch/test-infra/.github/actions/setup-ssh@main
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
instructions: |
Build is done inside the container, to start an interactive session run:
docker exec -it $(docker container ps --format '{{.ID}}') bash
# [pytorch repo ref]
# Use a pytorch/pytorch reference instead of a reference to the local
# checkout because when we run this action we don't *have* a local
# checkout. In other cases you should prefer a local checkout.
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
no-sudo: true
- name: Get workflow job id
id: get-job-id
uses: ./.github/actions/get-workflow-job-id
if: always()
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: configure aws credentials
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
if: ${{ inputs.aws-role-to-assume != ''}}
with:
role-to-assume: ${{ inputs.aws-role-to-assume }}
role-session-name: gha-linux-build
aws-region: us-east-1
- name: Setup Linux
uses: ./.github/actions/setup-linux
- name: Login to Amazon ECR
if: ${{ inputs.aws-role-to-assume != ''}}
id: login-ecr
continue-on-error: true
uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
- name: Parse ref
id: parse-ref
run: .github/scripts/parse_ref.py
- name: Start monitoring script
id: monitor-script
if: ${{ !inputs.disable-monitor }}
shell: bash
continue-on-error: true
env:
JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
WORKFLOW_NAME: ${{ github.workflow }}
WORKFLOW_RUN_ID: ${{github.run_id}}
MONITOR_LOG_INTERVAL: ${{ inputs.monitor-log-interval }}
MONITOR_DATA_COLLECT_INTERVAL: ${{ inputs.monitor-data-collect-interval }}
run: |
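# Start the monitor in the background and record its PID so the
# "Stop monitoring script" step can kill it later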
mkdir -p ../../usage_logs
python3 -m pip install psutil==5.9.8 dataclasses_json==0.6.7
python3 -m tools.stats.monitor \
--log-interval "$MONITOR_LOG_INTERVAL" \
--data-collect-interval "$MONITOR_DATA_COLLECT_INTERVAL" \
> "../../usage_logs/usage_log_build_${JOB_ID}.txt" 2>&1 &
echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"
- name: Calculate docker image
id: calculate-docker-image
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: ${{ inputs.docker-image }}
- name: Use following to pull public copy of the image
id: print-ghcr-mirror
env:
ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
shell: bash
run: |
tag=${ECR_DOCKER_IMAGE##*:}
echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"
- name: Pull docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
- name: Download pytorch build artifacts
uses: ./.github/actions/download-build-artifacts
with:
name: ${{ inputs.build-environment }}
s3-bucket: ${{ inputs.s3-bucket }}
use-gha: ${{ inputs.use-gha }}
- name: Download TD artifacts
continue-on-error: true
uses: ./.github/actions/download-td-artifacts
- name: Build external project
id: build
env:
BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
BRANCH: ${{ steps.parse-ref.outputs.branch }}
PR_NUMBER: ${{ github.event.pull_request.number }}
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
# Leave SCCACHE_S3_KEY_PREFIX unset so the cache is shared between all build jobs
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
SCCACHE_REGION: us-east-1
PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
TORCH_CUDA_ARCH_LIST: ${{ inputs.cuda-arch-list }}
OUR_GITHUB_JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
BASE_IMAGE: ${{ inputs.docker-image }}
BUILD_TARGET: ${{ inputs.build-target }}
run: |
set -euo pipefail
python3 --version
docker images
START_TIME=$(date +%s)
(
cd .ci/lumen_cli
python3 -m pip install -e .
)
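# Leave 6 cores free for the host: nproc --ignore=6 reports (total cores - 6)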
MAX_JOBS="$(nproc --ignore=6)"
export MAX_JOBS
python3 -m cli.run build external "$BUILD_TARGET"
END_TIME=$(date +%s)
echo "build_time=$((END_TIME - START_TIME))" >> "$GITHUB_OUTPUT"
- name: Archive artifacts into zip
if: ${{ inputs.build-generates-artifacts && steps.build.outcome && steps.build.outcome != 'skipped'}}
run: |
zip -1 -r artifacts.zip shared/
# By default it will upload the artifacts to <github_org>/<github_repo>/<workflow_id>/<name>-<target>-additional-build/
# to avoid overriding the pytorch build artifacts
- name: Store External Build Artifacts on S3
if: ${{ inputs.build-generates-artifacts }}
uses: seemethere/upload-artifact-s3@baba72d0712b404f646cebe0730933554ebce96a # v5.1.0
with:
name: ${{ inputs.artifacts-folder-name || format('{0}-{1}-additional-build', inputs.build-environment, inputs.build-target) }}
retention-days: 14
if-no-files-found: warn
path: artifacts.zip
s3-bucket: ${{ inputs.s3-bucket }}
- name: Stop monitoring script
if: ${{ always() && steps.monitor-script.outputs.monitor-script-pid }}
shell: bash
continue-on-error: true
env:
MONITOR_SCRIPT_PID: ${{ steps.monitor-script.outputs.monitor-script-pid }}
run: |
kill "$MONITOR_SCRIPT_PID"
- name: Copy logs
shell: bash
if: ${{ always() && steps.build.outcome != 'skipped' && !inputs.disable-monitor}}
continue-on-error: true
run: |
rm -rf ./usage_logs
mkdir -p ./usage_logs
cp ../../usage_logs/usage_log_build_*.txt ./usage_logs/
- name: Upload raw usage log to s3
if: ${{ always() && steps.build.outcome != 'skipped' && !inputs.disable-monitor}}
uses: seemethere/upload-artifact-s3@v5
with:
s3-prefix: |
${{ github.repository }}/${{ github.run_id }}/${{ github.run_attempt }}/artifact
retention-days: 14
if-no-files-found: warn
path: usage_logs/usage_log_build_*.txt
- name: Upload utilization stats
if: ${{ always() && steps.build.outcome != 'skipped' && !inputs.disable-monitor }}
continue-on-error: true
uses: ./.github/actions/upload-utilization-stats
with:
job_id: ${{ steps.get-job-id.outputs.job-id }}
job_name: ${{ steps.get-job-id.outputs.job-name }}
workflow_name: ${{ github.workflow }}
workflow_run_id: ${{github.run_id}}
workflow_attempt: ${{github.run_attempt}}
artifact_prefix: usage_log_build_${{ steps.get-job-id.outputs.job-id }}
- name: Teardown Linux
uses: pytorch/test-infra/.github/actions/teardown-linux@main
if: always()
- name: Cleanup docker
if: always()
shell: bash
run: |
docker stop $(docker ps -q) || true
docker kill $(docker ps -q) || true

@ -47,6 +47,12 @@ on:
required: false
type: string
default: ""
additional-artifact-name:
description: |
Additional artifacts that need to be downloaded for testing
required: false
type: string
default: ""
disable-monitor:
description: |
[Experimental] Disable utilization monitoring for tests.
@ -72,6 +78,10 @@ on:
required: false
description: |
HF Auth token to avoid rate limits when downloading models or datasets from hub
VLLM_TEST_HUGGING_FACE_TOKEN:
required: false
description: |
HF auth token used by the vllm tests
SCRIBE_GRAPHQL_ACCESS_TOKEN:
required: false
description: |
@ -216,6 +226,14 @@ jobs:
s3-bucket: ${{ inputs.s3-bucket }}
use-gha: ${{ inputs.use-gha }}
- name: Download additional build artifacts
if: ${{ inputs.additional-artifact-name != ''}}
uses: ./.github/actions/download-build-artifacts
with:
name: ${{ inputs.additional-artifact-name }}
s3-bucket: ${{ inputs.s3-bucket }}
use-gha: ${{ inputs.use-gha }}
- name: Download TD artifacts
continue-on-error: true
uses: ./.github/actions/download-td-artifacts
@ -286,6 +304,7 @@ jobs:
PYTORCH_TEST_CUDA_MEM_LEAK_CHECK: ${{ matrix.mem_leak_check && '1' || '0' }}
PYTORCH_TEST_RERUN_DISABLED_TESTS: ${{ matrix.rerun_disabled_tests && '1' || '0' }}
DASHBOARD_TAG: ${{ inputs.dashboard-tag }}
VLLM_TEST_HUGGING_FACE_TOKEN: ${{ secrets.VLLM_TEST_HUGGING_FACE_TOKEN }}
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
ARTIFACTS_FILE_SUFFIX: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }}
@ -362,6 +381,7 @@ jobs:
-e PYTORCH_TEST_RERUN_DISABLED_TESTS \
-e SKIP_SCCACHE_INITIALIZATION=1 \
-e HUGGING_FACE_HUB_TOKEN \
-e VLLM_TEST_HUGGING_FACE_TOKEN \
-e SCRIBE_GRAPHQL_ACCESS_TOKEN \
-e DASHBOARD_TAG \
-e ARTIFACTS_FILE_SUFFIX \

70
.github/workflows/tools-unit-tests.yml vendored Normal file

@ -0,0 +1,70 @@
name: test-scripts-and-ci-tools
on:
push:
branches:
- main
paths:
- .ci/lumen_cli/**
- .github/workflows/tools-unit-tests.yml
pull_request:
paths:
- .ci/lumen_cli/**
- .github/workflows/tools-unit-tests.yml
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
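# Only the latest run per PR (or per sha on push) is kept; older in-flight runs are cancelled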
jobs:
lumen-cli-unit-tests-python312:
permissions:
contents: read
pull-requests: write
if: ${{ github.repository_owner == 'pytorch' }}
runs-on: ubuntu-latest
steps:
- name: Checkout pytorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
submodules: true
fetch-depth: 0
- name: Setup Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.12'
cache: pip
- name: Run tests
continue-on-error: true
run: |
set -ex
python3 -m venv /tmp/venv
source /tmp/venv/bin/activate
pip install -e .ci/lumen_cli/
pytest -v -s .ci/lumen_cli/tests/*
lumen-cli-compatible-python39:
permissions:
contents: read
pull-requests: write
if: ${{ github.repository_owner == 'pytorch' }}
runs-on: ubuntu-latest
steps:
- name: Checkout pytorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
submodules: true
fetch-depth: 0
- name: Setup Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.9'
cache: 'pip'
- name: Run tests
continue-on-error: true
run: |
set -ex
python3 -m venv /tmp/venv
source /tmp/venv/bin/activate
pip install -e .ci/lumen_cli/
pytest -v -s .ci/lumen_cli/tests/*

123
.github/workflows/vllm.yml vendored Normal file

@ -0,0 +1,123 @@
name: vllm-test
on:
push:
tags:
- ciflow/vllm/*
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
permissions:
id-token: write
contents: read
jobs:
get-label-type:
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
curr_branch: ${{ github.head_ref || github.ref_name }}
curr_ref_type: ${{ github.ref_type }}
opt_out_experiments: lf
torch-build-sm89:
name: ci-vllm-test-sm89
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
build-additional-packages: "vision audio torchao"
build-environment: linux-jammy-cuda12.8-py3.12-gcc11-sm89
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc11-vllm
cuda-arch-list: '8.9'
test-matrix: |
{ include: [
{ config: "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_basic_models_test", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_regression_test", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu" },
{ config: "vllm_entrypoints_test", shard: 1, num_shards: 1,runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu" },
]}
secrets: inherit
torch-build-sm80:
name: ci-vllm-test-sm80
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
build-additional-packages: "vision audio torchao"
build-environment: linux-jammy-cuda12.8-py3.12-gcc11-sm80
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc11-vllm
cuda-arch-list: '8.0'
test-matrix: |
{ include: [
{ config: "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "linux.aws.a100" },
{ config: "vllm_basic_models_test", shard: 1, num_shards: 1, runner: "linux.aws.a100" },
{ config: "vllm_regression_test", shard: 1, num_shards: 1, runner: "linux.aws.a100" },
{ config: "vllm_entrypoints_test", shard: 1, num_shards: 1, runner: "linux.aws.a100" },
]}
secrets: inherit
vllm-build-sm89:
name: ci-vllm-build-sm89
uses: ./.github/workflows/_linux-external-build-main.yml
needs: [
get-label-type,
torch-build-sm89
]
with:
build-environment: linux-jammy-cuda12.8-py3.12-gcc11-sm89
build-target: vllm
runner_prefix: ${{ needs.get-label-type.outputs.label-type }}
docker-image: ${{ needs.torch-build-sm89.outputs.docker-image }}
cuda-arch-list: '8.9'
runner: linux.24xlarge.memory
secrets: inherit
vllm-test-sm89:
name: ci-vllm-test-sm89
uses: ./.github/workflows/_linux-test.yml
needs: [
torch-build-sm89,
vllm-build-sm89
]
with:
build-environment: linux-jammy-cuda12.8-py3.12-gcc11-sm89
docker-image: ${{ needs.torch-build-sm89.outputs.docker-image }}
test-matrix: ${{ needs.torch-build-sm89.outputs.test-matrix }}
additional-artifact-name: linux-jammy-cuda12.8-py3.12-gcc11-sm89-vllm-additional-build
secrets: inherit
vllm-build-sm80:
name: ci-vllm-build-sm80
uses: ./.github/workflows/_linux-external-build-main.yml
needs: [
get-label-type,
torch-build-sm80
]
with:
build-environment: linux-jammy-cuda12.8-py3.12-gcc11-sm80
build-target: vllm
runner_prefix: ${{ needs.get-label-type.outputs.label-type }}
docker-image: ${{ needs.torch-build-sm80.outputs.docker-image }}
runner: linux.24xlarge.memory
cuda-arch-list: '8.0'
secrets: inherit
vllm-test-sm80:
name: ci-vllm-test-sm80
uses: ./.github/workflows/_linux-test.yml
needs: [
torch-build-sm80,
vllm-build-sm80
]
with:
build-environment: linux-jammy-cuda12.8-py3.12-gcc11-sm80
docker-image: ${{ needs.torch-build-sm80.outputs.docker-image }}
test-matrix: ${{ needs.torch-build-sm80.outputs.test-matrix }}
additional-artifact-name: linux-jammy-cuda12.8-py3.12-gcc11-sm80-vllm-additional-build
secrets: inherit