Mirror of https://github.com/huggingface/kernels.git, synced 2025-10-20 20:46:42 +08:00
CI: pure GitHub actions (no Docker) (#12)
.github/workflows/docker-build-matrix.yml (vendored, deleted, 119 lines)
@@ -1,119 +0,0 @@
name: Docker Build Matrix

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
    types: [opened, synchronize, reopened] # trigger on PRs
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Build Docker Image
    runs-on:
      group: aws-g6-24xlarge
    permissions:
      contents: read
      packages: write
    strategy:
      max-parallel: 4
      matrix:
        # python: ["3.10", "3.11", "3.12"]
        # ubuntu: ["18.04", "20.04", "22.04"]
        # cuda: ["11.8.0", "12.1.0", "12.2.0", "12.4.0", "12.6.0"]
        # torch: ["2.4.0", "2.5.0"]
        include:
          - ubuntu: "18.04"
            cuda: "11.8.0"
            torch: "2.4.0"
            python: "3.10"
          - ubuntu: "22.04"
            cuda: "12.4.0"
            torch: "2.5.1"
            python: "3.12"

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Generate Docker metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ghcr.io/${{ github.repository }}/hf_kernels
          tags: |
            type=raw,value=${{ matrix.cuda }}-${{ matrix.torch }}-python${{ matrix.python }}-ubuntu${{ matrix.ubuntu }}
            type=sha,prefix=${{ matrix.cuda }}-${{ matrix.torch }}-python${{ matrix.python }}-ubuntu${{ matrix.ubuntu }}-

      - name: Build Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          file: docker/Dockerfile
          platforms: linux/amd64
          build-args: |
            PYTHON_VERSION=${{ matrix.python }}
            UBUNTU_VERSION=${{ matrix.ubuntu }}
            CUDA_VERSION=${{ matrix.cuda }}
            TORCH_VERSION=${{ matrix.torch }}
          push: false
          load: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha,name=hf-kernels-cache-${{ matrix.ubuntu }}-${{ matrix.python }}-${{ matrix.cuda }}-${{ matrix.torch }}
          cache-to: type=gha,name=hf-kernels-cache-${{ matrix.ubuntu }}-${{ matrix.python }}-${{ matrix.cuda }}-${{ matrix.torch }}

      - name: Save Docker image
        run: |
          IMAGE_TAG="${{ steps.meta.outputs.tags }}"
          # Get the first tag if multiple tags are present
          FIRST_TAG=$(echo "$IMAGE_TAG" | head -n 1)
          docker save -o /tmp/docker-image-${{ matrix.cuda }}-${{ matrix.torch }}-python${{ matrix.python }}-ubuntu${{ matrix.ubuntu }}.tar "$FIRST_TAG"

      # Note: recommended to upload images via artifacts to share across jobs
      # https://docs.docker.com/build/ci/github-actions/share-image-jobs/
      - name: Upload Docker image artifact
        uses: actions/upload-artifact@v4
        with:
          name: docker-image-${{ matrix.cuda }}-${{ matrix.torch }}-python${{ matrix.python }}-ubuntu${{ matrix.ubuntu }}
          path: /tmp/docker-image-${{ matrix.cuda }}-${{ matrix.torch }}-python${{ matrix.python }}-ubuntu${{ matrix.ubuntu }}.tar
          retention-days: 1

  test:
    needs: build
    name: Test Docker Images
    runs-on:
      group: aws-g6-12xlarge-plus
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Download all Docker images
        uses: actions/download-artifact@v4
        with:
          pattern: docker-image-*
          path: /tmp
          merge-multiple: true

      - name: Load and test Docker images
        run: |
          for image_tar in /tmp/docker-image-*.tar; do
            echo "Processing image $image_tar"
            # Extract the version tag from the filename without the 'docker-image-' prefix
            docker_tag=$(basename $image_tar .tar | sed 's/^docker-image-//')
            echo "Loading image with tag $docker_tag"
            docker load -i $image_tar
            echo "Loaded image $docker_tag"
            docker run --gpus all \
              -v /home/runner/_work/hf-kernels/hf-kernels/tests:/workspace/tests \
              ghcr.io/huggingface/hf-kernels/hf_kernels:$docker_tag
            echo "Tested image $docker_tag"
          done
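For reference, the deleted matrix boils down to the following local sequence. This is only a sketch: the image tag and mount path are illustrative, and it assumes the old docker/Dockerfile is still present in the checkout.

# Build one matrix combination locally (values taken from the second include entry).
docker build -f docker/Dockerfile \
  --build-arg PYTHON_VERSION=3.12 \
  --build-arg UBUNTU_VERSION=22.04 \
  --build-arg CUDA_VERSION=12.4.0 \
  --build-arg TORCH_VERSION=2.5.1 \
  -t hf_kernels:12.4.0-2.5.1-python3.12-ubuntu22.04 .
# Hand the image over the same way the workflow passed it between the build and test jobs.
docker save -o /tmp/docker-image-12.4.0-2.5.1-python3.12-ubuntu22.04.tar hf_kernels:12.4.0-2.5.1-python3.12-ubuntu22.04
docker load -i /tmp/docker-image-12.4.0-2.5.1-python3.12-ubuntu22.04.tar
# The image's CMD runs pytest, so this executes the test suite against the mounted tests directory.
docker run --gpus all -v "$PWD/tests:/workspace/tests" hf_kernels:12.4.0-2.5.1-python3.12-ubuntu22.04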
.github/workflows/test.yml (vendored, new file, 42 lines)
@@ -0,0 +1,42 @@
name: Test hf-kernels

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
    types: [opened, synchronize, reopened] # trigger on PRs
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Run tests
    runs-on:
      group: aws-g6-24xlarge
    permissions:
      contents: read
      packages: write
    strategy:
      max-parallel: 4
      matrix:
        python: ["3.10", "3.12"]
        torch: ["2.4.0", "2.5.0"]

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Install uv and set the python version
        uses: astral-sh/setup-uv@v5
        with:
          python-version: ${{ matrix.python }}

      - name: Install the project
        run: uv sync --all-extras --dev

      - name: Run tests
        run: uv run pytest tests
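The new workflow maps directly onto a local run. A minimal sketch, assuming uv is not yet installed (the install one-liner is the same one the old Dockerfile used):

curl -LsSf https://astral.sh/uv/install.sh | sh   # what astral-sh/setup-uv provides on the runner
uv python install 3.12                            # pick either matrix value, "3.10" or "3.12"
uv sync --all-extras --dev                        # same command as the "Install the project" step
uv run pytest tests                               # same command as the "Run tests" step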
docker/Dockerfile (deleted, 84 lines)
@@ -1,84 +0,0 @@
# syntax=docker/dockerfile:1.4
ARG PYTHON_VERSION=3.10
ARG CUDA_VERSION=12.4.0
ARG UBUNTU_VERSION=20.04
ARG TORCH_VERSION=2.5.0

FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION} as base

# Set environment variables
ENV DEBIAN_FRONTEND=noninteractive \
    PYTHONUNBUFFERED=1 \
    PATH="/root/.local/bin:/root/.cargo/bin:${PATH}" \
    NVIDIA_VISIBLE_DEVICES=all \
    NVIDIA_DRIVER_CAPABILITIES=compute,utility

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    git-lfs \
    curl \
    python3 \
    python3-pip \
    && rm -rf /var/lib/apt/lists/* \
    && git lfs install

# Install uv package manager
RUN curl -LsSf https://astral.sh/uv/install.sh | sh

# Set working directory
WORKDIR /app

# Need to re-declare ARG after FROM for use in RUN
ARG CUDA_VERSION
ARG TORCH_VERSION
ARG PYTHON_VERSION

RUN echo "Building with CUDA_VERSION=${CUDA_VERSION}, TORCH_VERSION=${TORCH_VERSION}, PYTHON_VERSION=${PYTHON_VERSION}"

# Install requested Python version
RUN uv python install "${PYTHON_VERSION}"

# Initialize uv and create virtual env
RUN uv init --app kernel-test --python "${PYTHON_VERSION}"

# Move into the app
WORKDIR /app/kernel-test

# Install PyTorch with the appropriate CUDA version

# NOTE: `markupsafe` must be installed first to avoid a conflict with the torch package.
# See: https://github.com/astral-sh/uv/issues/9647

RUN CUDA_MAJOR_MINOR=$(echo ${CUDA_VERSION} | cut -d'.' -f1,2) && \
    case ${CUDA_MAJOR_MINOR} in \
        "11.8") CUDA_TAG="cu118" ;; \
        "12.1") CUDA_TAG="cu121" ;; \
        "12.2") CUDA_TAG="cu122" ;; \
        "12.4") CUDA_TAG="cu124" ;; \
        *) CUDA_TAG="" ;; \
    esac && \
    if [ -n "${CUDA_TAG}" ]; then \
        echo "Installing PyTorch ${TORCH_VERSION} with CUDA ${CUDA_TAG}" && \
        uv add markupsafe --default-index "https://pypi.org/simple" && \
        uv add "torch==${TORCH_VERSION}" --index-url "https://download.pytorch.org/whl/${CUDA_TAG}"; \
    else \
        echo "Installing PyTorch ${TORCH_VERSION} without CUDA-specific index" && \
        uv add "torch==${TORCH_VERSION}"; \
    fi

# add pytest for runtime tests
RUN uv add pytest pytest-benchmark huggingface_hub

# Copy application files
COPY src ./hf_kernels/src
COPY pyproject.toml ./hf_kernels/pyproject.toml
COPY README.md ./hf_kernels/README.md
COPY examples ./examples
COPY tests ./tests

# Install the kernel library
RUN uv pip install ./hf_kernels

# Run tests and benchmarks
CMD [".venv/bin/pytest", "tests", "-v"]
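For one concrete matrix combination, the torch-install RUN block above reduces to roughly the following. The values CUDA_VERSION=12.4.0 and TORCH_VERSION=2.5.1 are illustrative, and the commands are assumed to run inside the uv project created by `uv init`.

CUDA_MAJOR_MINOR=$(echo 12.4.0 | cut -d'.' -f1,2)             # -> "12.4", mapped to CUDA_TAG="cu124" by the case statement
uv add markupsafe --default-index "https://pypi.org/simple"   # work around the uv/torch metadata conflict noted above
uv add "torch==2.5.1" --index-url "https://download.pytorch.org/whl/cu124"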
pyproject.toml
@@ -9,6 +9,24 @@ authors = [
    { name = "Nicolas Patry", email = "nicolas@huggingface.co" },
]
readme = "README.md"
requires-python = ">= 3.8"
dependencies = [
    "huggingface-hub>=0.26.3",
    "packaging>=24.2",
    "tomli>=2.0.1; python_version<'3.11'",
    "torch>=2.4",
]

[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"

[dependency-groups]
dev = [
    "pytest >=8",
    # Whatever version is compatible with pytest.
    "pytest-benchmark",
]

[project.scripts]
hf-kernels = "hf_kernels.cli:main"
@@ -16,12 +34,6 @@ hf-kernels = "hf_kernels.cli:main"
[project.entry-points."egg_info.writers"]
"hf-kernels.lock" = "hf_kernels.lockfile:write_egg_lockfile"

[dependencies]
python = "^3.9"
huggingface-hub = "^0.26.3"
packaging = "^24.2"
tomli = { version = "^2.0.1", python = "<3.11" }

#[build-system]
#requires = ["torch", "huggingface_hub", "numpy", "tomli;python_version<='3.10'"]
#build-backend = "hf_kernels.build"
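A quick local smoke test of this packaging setup might look like the sketch below. It assumes a checkout of the repository root; the import name is inferred from the hf_kernels.cli entry point above.

uv sync --all-extras --dev              # resolves the [project] dependencies plus the dev group (pytest, pytest-benchmark)
uv run python -c "import hf_kernels"    # package built via the setuptools backend
uv run pytest tests                     # same invocation as .github/workflows/test.yml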