Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-24 15:44:58 +08:00

Compare commits: 6 commits (lucaskabel ... benchmarki)

| Author | SHA1 | Date |
|---|---|---|
| | 0f49e915a9 | |
| | 2f1217f944 | |
| | e0bf01e87b | |
| | 3b5ae0e9fc | |
| | 5f5f654a3e | |
| | 21931cbbc6 | |
.bazelrc (2 changes)
@@ -2,7 +2,7 @@ build --cxxopt=--std=c++17
build --copt=-I.
# Bazel does not support including its cc_library targets as system
# headers. We work around this for generated code
# (e.g. torch/headeronly/macros/cmake_macros.h) by making the generated directory a
# (e.g. c10/macros/cmake_macros.h) by making the generated directory a
# system include path.
build --copt=-isystem --copt bazel-out/k8-fastbuild/bin
build --copt=-isystem --copt bazel-out/darwin-fastbuild/bin

@@ -3,8 +3,10 @@ set -eux -o pipefail

GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}

if [[ "$GPU_ARCH_VERSION" == *"12.9"* ]]; then
export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
if [[ "$GPU_ARCH_VERSION" == *"12.6"* ]]; then
export TORCH_CUDA_ARCH_LIST="9.0"
elif [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]; then
export TORCH_CUDA_ARCH_LIST="9.0;10.0;12.0"
fi

SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"

@@ -25,7 +27,6 @@ if [ "$DESIRED_CUDA" = "cpu" ]; then
USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn
else
echo "BASE_CUDA_VERSION is set to: $DESIRED_CUDA"
export USE_SYSTEM_NCCL=1
#USE_PRIORITIZED_TEXT_FOR_LD for enable linker script optimization https://github.com/pytorch/pytorch/pull/121975/files
USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn --enable-cuda
fi

@@ -79,7 +79,6 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
os.system(f"unzip {wheel_path} -d {folder}/tmp")
libs_to_copy = [
"/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12",
"/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so",
"/usr/local/cuda/lib64/libcudnn.so.9",
"/usr/local/cuda/lib64/libcublas.so.12",
"/usr/local/cuda/lib64/libcublasLt.so.12",
@@ -89,7 +88,7 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
"/usr/local/cuda/lib64/libcusparseLt.so.0",
"/usr/local/cuda/lib64/libcusolver.so.11",
"/usr/local/cuda/lib64/libcurand.so.10",
"/usr/local/cuda/lib64/libnccl.so.2",
"/usr/local/cuda/lib64/libnvToolsExt.so.1",
"/usr/local/cuda/lib64/libnvJitLink.so.12",
"/usr/local/cuda/lib64/libnvrtc.so.12",
"/usr/local/cuda/lib64/libcudnn_adv.so.9",
@@ -109,9 +108,9 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
"/usr/local/lib/libnvpl_blas_core.so.0",
]

if "129" in desired_cuda:
if "128" in desired_cuda:
libs_to_copy += [
"/usr/local/cuda/lib64/libnvrtc-builtins.so.12.9",
"/usr/local/cuda/lib64/libnvrtc-builtins.so.12.8",
"/usr/local/cuda/lib64/libcufile.so.0",
"/usr/local/cuda/lib64/libcufile_rdma.so.1",
]

@@ -438,7 +438,9 @@ def build_torchvision(
)
build_vars += f"BUILD_VERSION={version}.dev{build_date}"
elif build_version is not None:
build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-', maxsplit=1)[0]}"
build_vars += (
f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
)
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"

@@ -493,7 +495,9 @@ def build_torchdata(
)
build_vars += f"BUILD_VERSION={version}.dev{build_date}"
elif build_version is not None:
build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-', maxsplit=1)[0]}"
build_vars += (
f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
)
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"

@@ -549,7 +553,9 @@ def build_torchtext(
)
build_vars += f"BUILD_VERSION={version}.dev{build_date}"
elif build_version is not None:
build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-', maxsplit=1)[0]}"
build_vars += (
f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
)
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"

@@ -607,7 +613,9 @@ def build_torchaudio(
)
build_vars += f"BUILD_VERSION={version}.dev{build_date}"
elif build_version is not None:
build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-', maxsplit=1)[0]}"
build_vars += (
f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
)
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"

@@ -5,7 +5,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

if [[ ${BUILD_ENVIRONMENT} == *onnx* ]]; then
pip install click mock tabulate networkx==2.0
pip -q install "file:///var/lib/jenkins/workspace/third_party/onnx#egg=onnx"
pip -q install --user "file:///var/lib/jenkins/workspace/third_party/onnx#egg=onnx"
fi

# Skip tests in environments where they are not built/applicable
@@ -147,8 +147,8 @@ export DNNL_MAX_CPU_ISA=AVX2
if [[ "${SHARD_NUMBER:-1}" == "1" ]]; then
# TODO(sdym@meta.com) remove this when the linked issue resolved.
# py is temporary until https://github.com/Teemu/pytest-sugar/issues/241 is fixed
pip install py==1.11.0
pip install pytest-sugar
pip install --user py==1.11.0
pip install --user pytest-sugar
# NB: Warnings are disabled because they make it harder to see what
# the actual erroring test is
"$PYTHON" \

@@ -36,104 +36,3 @@ See `build.sh` for valid build environments (it's the giant switch).
# Set flags (see build.sh) and build image
sudo bash -c 'TRITON=1 ./build.sh pytorch-linux-bionic-py3.8-gcc9 -t myimage:latest'
```

## [Guidance] Adding a New Base Docker Image

### Background

The base Docker images in directory `.ci/docker/` are built by the `docker-builds.yml` workflow. Those images are used throughout the PyTorch CI/CD pipeline. You should only create or modify a base Docker image if you need specific environment changes or dependencies before building PyTorch on CI.

1. **Automatic Rebuilding**:
   - The Docker image building process is triggered automatically when changes are made to files in the `.ci/docker/*` directory
   - This ensures all images stay up-to-date with the latest dependencies and configurations

2. **Image Reuse in PyTorch Build Workflows** (example: linux-build):
   - The images generated by `docker-builds.yml` are reused in `_linux-build.yml` through the `calculate-docker-image` step
   - The `_linux-build.yml` workflow:
     - Pulls the Docker image determined by the `calculate-docker-image` step
     - Runs a Docker container with that image
     - Executes `.ci/pytorch/build.sh` inside the container to build PyTorch

3. **Usage in Test Workflows** (example: linux-test):
   - The same Docker images are also used in `_linux-test.yml` for running tests
   - The `_linux-test.yml` workflow follows a similar pattern:
     - It uses the `calculate-docker-image` step to determine which Docker image to use
     - It pulls the Docker image and runs a container with that image
     - It installs the wheels from the artifacts generated by PyTorch build jobs
     - It executes test scripts (like `.ci/pytorch/test.sh` or `.ci/pytorch/multigpu-test.sh`) inside the container (see the sketch after this list)

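The build and test flows in items 2 and 3 boil down to a handful of Docker commands. The sketch below is illustrative only, not the actual workflow YAML: the registry path, image tag, and wheel location are made-up placeholders, while `calculate-docker-image`, `.ci/pytorch/build.sh`, and `.ci/pytorch/test.sh` are the pieces named above.

```bash
# Hedged sketch of what a _linux-build.yml / _linux-test.yml job does with a pre-built image.
# DOCKER_IMAGE is a placeholder; the real tag comes from the calculate-docker-image step.
DOCKER_IMAGE="ghcr.io/pytorch/ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11-SOMEHASH"

# Pull the image produced by docker-builds.yml
docker pull "${DOCKER_IMAGE}"

# Build job: compile PyTorch inside the container using the repo's build script
docker run --rm -v "$(pwd):/var/lib/jenkins/workspace" -w /var/lib/jenkins/workspace \
    "${DOCKER_IMAGE}" .ci/pytorch/build.sh

# Test job: install the wheels produced by the build job (location illustrative),
# then run the test script inside the same image
docker run --rm -v "$(pwd):/var/lib/jenkins/workspace" -w /var/lib/jenkins/workspace \
    "${DOCKER_IMAGE}" bash -c "pip install dist/*.whl && .ci/pytorch/test.sh"
```
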
### Understanding File Purposes

#### `.ci/docker/build.sh` vs `.ci/pytorch/build.sh`
- **`.ci/docker/build.sh`**:
  - Used for building base Docker images
  - Executed by the `docker-builds.yml` workflow to pre-build Docker images for CI
  - Contains configurations for different Docker build environments

- **`.ci/pytorch/build.sh`**:
  - Used for building PyTorch inside a Docker container
  - Called by workflows like `_linux-build.yml` after the Docker container is started
  - Builds PyTorch wheels and other artifacts

#### `.ci/docker/ci_commit_pins/` vs `.github/ci_commit_pins`
- **`.ci/docker/ci_commit_pins/`**:
  - Used for pinning dependency versions during base Docker image building
  - Ensures consistent environments for building PyTorch
  - Changes here trigger base Docker image rebuilds

- **`.github/ci_commit_pins`**:
  - Used for pinning dependency versions during PyTorch building and tests
  - Ensures consistent dependencies for PyTorch across different builds
  - Used by build scripts running inside Docker containers

### Step-by-Step Guide for Adding a New Base Docker Image

#### 1. Add Pinned Commits (If Applicable)

We use pinned commits for build stability. The `nightly.yml` workflow checks and updates pinned commits for certain repository dependencies daily.

If your new Docker image needs a library installed from a specific pinned commit or built from source:

1. Add the repository you want to track in `nightly.yml` and `merge-rules.yml`
2. Add the initial pinned commit in `.ci/docker/ci_commit_pins/`. The text filename should match the one defined in step 1 (see the sketch below)

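As a concrete illustration of these two steps, the snippet below uses `mylib` as a placeholder dependency name with a dummy SHA and clone URL; a pin file is just a plain-text commit hash, and `get_pinned_commit` is the helper the repo's install scripts use to read such pins (it appears elsewhere in this diff). The build command is an assumption.

```bash
# Sketch only: "mylib", the SHA, and the URL are placeholders.
# The pin file is a plain text file containing a single commit SHA.
echo "0123456789abcdef0123456789abcdef01234567" > .ci/docker/ci_commit_pins/mylib.txt

# An install script can then build exactly that commit:
commit=$(get_pinned_commit mylib)            # reads the pin file added above
git clone https://github.com/example/mylib   # placeholder URL
pushd mylib
git checkout "${commit}"
python install.py                            # placeholder build/install step
popd
```
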
#### 2. Configure the Base Docker Image
1. **Add new Base Docker image configuration** (if applicable):

   Add the configuration in `.ci/docker/build.sh`. For example:
   ```bash
   pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc11-new1)
     CUDA_VERSION=12.8.1
     ANACONDA_PYTHON_VERSION=3.12
     GCC_VERSION=11
     VISION=yes
     KATEX=yes
     UCX_COMMIT=${_UCX_COMMIT}
     UCC_COMMIT=${_UCC_COMMIT}
     TRITON=yes
     NEW_ARG_1=yes
     ;;
   ```

2. **Add build arguments to Docker build command**:

   If you're introducing a new argument to the Docker build, make sure to add it in the Docker build step in `.ci/docker/build.sh`:
   ```bash
   docker build \
       ....
       --build-arg "NEW_ARG_1=${NEW_ARG_1}"
   ```

3. **Update Dockerfile logic**:

   Update the Dockerfile to use the new argument. For example, in `ubuntu/Dockerfile`:
   ```dockerfile
   ARG NEW_ARG_1
   # Set up environment for NEW_ARG_1
   RUN if [ -n "${NEW_ARG_1}" ]; then bash ./do_something.sh; fi
   ```

4. **Add the Docker configuration** in `.github/workflows/docker-builds.yml`:

   The `docker-builds.yml` workflow pre-builds the Docker images whenever changes occur in the `.ci/docker/` directory. This includes the pinned commit updates.

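After these four steps, the new image can be sanity-checked locally with the same `build.sh` entry point shown at the top of this README before relying on the CI pre-build; `myimage:latest` is an arbitrary local tag, and the configuration name reuses the hypothetical `-new1` example above.

```bash
# Run from the .ci/docker directory (where build.sh lives); tag name is arbitrary.
sudo bash -c './build.sh pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc11-new1 -t myimage:latest'
```
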
@@ -1,4 +1,4 @@
ARG CUDA_VERSION=12.6
ARG CUDA_VERSION=12.4
ARG BASE_TARGET=cuda${CUDA_VERSION}
ARG ROCM_IMAGE=rocm/dev-almalinux-8:6.3-complete
FROM amd64/almalinux:8.10-20250519 as base
@@ -52,6 +52,10 @@ ENV CUDA_VERSION=${CUDA_VERSION}
# Make things in our path by default
ENV PATH=/usr/local/cuda-${CUDA_VERSION}/bin:$PATH

FROM cuda as cuda11.8
RUN bash ./install_cuda.sh 11.8
ENV DESIRED_CUDA=11.8

FROM cuda as cuda12.6
RUN bash ./install_cuda.sh 12.6
ENV DESIRED_CUDA=12.6
@@ -60,10 +64,6 @@ FROM cuda as cuda12.8
RUN bash ./install_cuda.sh 12.8
ENV DESIRED_CUDA=12.8

FROM cuda as cuda12.9
RUN bash ./install_cuda.sh 12.9
ENV DESIRED_CUDA=12.9

FROM ${ROCM_IMAGE} as rocm
ENV PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
ADD ./common/install_mkl.sh install_mkl.sh
@@ -78,8 +78,7 @@ RUN bash ./install_mnist.sh
FROM base as all_cuda
COPY --from=cuda11.8 /usr/local/cuda-11.8 /usr/local/cuda-11.8
COPY --from=cuda12.6 /usr/local/cuda-12.6 /usr/local/cuda-12.6
COPY --from=cuda12.8 /usr/local/cuda-12.8 /usr/local/cuda-12.8
COPY --from=cuda12.9 /usr/local/cuda-12.9 /usr/local/cuda-12.9
COPY --from=cuda12.4 /usr/local/cuda-12.8 /usr/local/cuda-12.8

# Final step
FROM ${BASE_TARGET} as final

@ -50,23 +50,30 @@ if [[ "$image" == *xla* ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ "$image" == *-jammy* ]]; then
|
||||
if [[ "$image" == *-focal* ]]; then
|
||||
UBUNTU_VERSION=20.04
|
||||
elif [[ "$image" == *-jammy* ]]; then
|
||||
UBUNTU_VERSION=22.04
|
||||
elif [[ "$image" == *-noble* ]]; then
|
||||
UBUNTU_VERSION=24.04
|
||||
elif [[ "$image" == *ubuntu* ]]; then
|
||||
extract_version_from_image_name ubuntu UBUNTU_VERSION
|
||||
elif [[ "$image" == *centos* ]]; then
|
||||
extract_version_from_image_name centos CENTOS_VERSION
|
||||
fi
|
||||
|
||||
if [ -n "${UBUNTU_VERSION}" ]; then
|
||||
OS="ubuntu"
|
||||
elif [ -n "${CENTOS_VERSION}" ]; then
|
||||
OS="centos"
|
||||
else
|
||||
echo "Unable to derive operating system base..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DOCKERFILE="${OS}/Dockerfile"
|
||||
if [[ "$image" == *rocm* ]]; then
|
||||
# When using ubuntu - 22.04, start from Ubuntu docker image, instead of nvidia/cuda docker image.
|
||||
if [[ "$image" == *cuda* && "$UBUNTU_VERSION" != "22.04" ]]; then
|
||||
DOCKERFILE="${OS}-cuda/Dockerfile"
|
||||
elif [[ "$image" == *rocm* ]]; then
|
||||
DOCKERFILE="${OS}-rocm/Dockerfile"
|
||||
elif [[ "$image" == *xpu* ]]; then
|
||||
DOCKERFILE="${OS}-xpu/Dockerfile"
|
||||
@ -91,18 +98,9 @@ tag=$(echo $image | awk -F':' '{print $2}')
|
||||
# configuration, so we hardcode everything here rather than do it
|
||||
# from scratch
|
||||
case "$tag" in
|
||||
pytorch-linux-jammy-cuda12.4-cudnn9-py3-gcc11)
|
||||
CUDA_VERSION=12.4
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
GCC_VERSION=11
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
UCX_COMMIT=${_UCX_COMMIT}
|
||||
UCC_COMMIT=${_UCC_COMMIT}
|
||||
TRITON=yes
|
||||
;;
|
||||
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11)
|
||||
CUDA_VERSION=12.8.1
|
||||
pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc11)
|
||||
CUDA_VERSION=12.6.3
|
||||
CUDNN_VERSION=9
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
GCC_VERSION=11
|
||||
VISION=yes
|
||||
@ -112,7 +110,8 @@ case "$tag" in
|
||||
TRITON=yes
|
||||
;;
|
||||
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks)
|
||||
CUDA_VERSION=12.8.1
|
||||
CUDA_VERSION=12.8
|
||||
CUDNN_VERSION=9
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
GCC_VERSION=9
|
||||
VISION=yes
|
||||
@ -122,8 +121,32 @@ case "$tag" in
|
||||
TRITON=yes
|
||||
INDUCTOR_BENCHMARKS=yes
|
||||
;;
|
||||
pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc9-inductor-benchmarks)
|
||||
CUDA_VERSION=12.8.1
|
||||
pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc9)
|
||||
CUDA_VERSION=12.6.3
|
||||
CUDNN_VERSION=9
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
GCC_VERSION=9
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
UCX_COMMIT=${_UCX_COMMIT}
|
||||
UCC_COMMIT=${_UCC_COMMIT}
|
||||
TRITON=yes
|
||||
;;
|
||||
pytorch-linux-jammy-cuda12.6-cudnn9-py3-gcc9-inductor-benchmarks)
|
||||
CUDA_VERSION=12.6
|
||||
CUDNN_VERSION=9
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
GCC_VERSION=9
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
UCX_COMMIT=${_UCX_COMMIT}
|
||||
UCC_COMMIT=${_UCC_COMMIT}
|
||||
TRITON=yes
|
||||
INDUCTOR_BENCHMARKS=yes
|
||||
;;
|
||||
pytorch-linux-jammy-cuda12.6-cudnn9-py3.12-gcc9-inductor-benchmarks)
|
||||
CUDA_VERSION=12.6
|
||||
CUDNN_VERSION=9
|
||||
ANACONDA_PYTHON_VERSION=3.12
|
||||
GCC_VERSION=9
|
||||
VISION=yes
|
||||
@ -133,8 +156,9 @@ case "$tag" in
|
||||
TRITON=yes
|
||||
INDUCTOR_BENCHMARKS=yes
|
||||
;;
|
||||
pytorch-linux-jammy-cuda12.8-cudnn9-py3.13-gcc9-inductor-benchmarks)
|
||||
CUDA_VERSION=12.8.1
|
||||
pytorch-linux-jammy-cuda12.6-cudnn9-py3.13-gcc9-inductor-benchmarks)
|
||||
CUDA_VERSION=12.6
|
||||
CUDNN_VERSION=9
|
||||
ANACONDA_PYTHON_VERSION=3.13
|
||||
GCC_VERSION=9
|
||||
VISION=yes
|
||||
@ -144,18 +168,9 @@ case "$tag" in
|
||||
TRITON=yes
|
||||
INDUCTOR_BENCHMARKS=yes
|
||||
;;
|
||||
pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc11-vllm)
|
||||
CUDA_VERSION=12.8.1
|
||||
ANACONDA_PYTHON_VERSION=3.12
|
||||
GCC_VERSION=11
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
UCX_COMMIT=${_UCX_COMMIT}
|
||||
UCC_COMMIT=${_UCC_COMMIT}
|
||||
TRITON=yes
|
||||
;;
|
||||
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9)
|
||||
CUDA_VERSION=12.8.1
|
||||
pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9)
|
||||
CUDA_VERSION=11.8.0
|
||||
CUDNN_VERSION=9
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
GCC_VERSION=9
|
||||
VISION=yes
|
||||
@ -164,24 +179,44 @@ case "$tag" in
|
||||
UCC_COMMIT=${_UCC_COMMIT}
|
||||
TRITON=yes
|
||||
;;
|
||||
pytorch-linux-jammy-py3-clang12-onnx)
|
||||
pytorch-linux-focal-py3-clang10-onnx)
|
||||
ANACONDA_PYTHON_VERSION=3.9
|
||||
CLANG_VERSION=12
|
||||
CLANG_VERSION=10
|
||||
VISION=yes
|
||||
ONNX=yes
|
||||
;;
|
||||
pytorch-linux-jammy-py3.9-clang12)
|
||||
pytorch-linux-focal-py3.9-clang10)
|
||||
ANACONDA_PYTHON_VERSION=3.9
|
||||
CLANG_VERSION=12
|
||||
CLANG_VERSION=10
|
||||
VISION=yes
|
||||
TRITON=yes
|
||||
;;
|
||||
pytorch-linux-jammy-rocm-n-py3 | pytorch-linux-jammy-rocm-n-py3-benchmarks | pytorch-linux-noble-rocm-n-py3)
|
||||
if [[ $tag =~ "jammy" ]]; then
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
else
|
||||
ANACONDA_PYTHON_VERSION=3.12
|
||||
fi
|
||||
pytorch-linux-focal-py3.11-clang10)
|
||||
ANACONDA_PYTHON_VERSION=3.11
|
||||
CLANG_VERSION=10
|
||||
VISION=yes
|
||||
TRITON=yes
|
||||
;;
|
||||
pytorch-linux-focal-py3.9-gcc9)
|
||||
ANACONDA_PYTHON_VERSION=3.9
|
||||
GCC_VERSION=9
|
||||
VISION=yes
|
||||
TRITON=yes
|
||||
;;
|
||||
pytorch-linux-jammy-rocm-n-1-py3)
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
GCC_VERSION=11
|
||||
VISION=yes
|
||||
ROCM_VERSION=6.3
|
||||
NINJA_VERSION=1.9.0
|
||||
TRITON=yes
|
||||
KATEX=yes
|
||||
UCX_COMMIT=${_UCX_COMMIT}
|
||||
UCC_COMMIT=${_UCC_COMMIT}
|
||||
INDUCTOR_BENCHMARKS=yes
|
||||
;;
|
||||
pytorch-linux-jammy-rocm-n-py3)
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
GCC_VERSION=11
|
||||
VISION=yes
|
||||
ROCM_VERSION=6.4
|
||||
@ -190,21 +225,7 @@ case "$tag" in
|
||||
KATEX=yes
|
||||
UCX_COMMIT=${_UCX_COMMIT}
|
||||
UCC_COMMIT=${_UCC_COMMIT}
|
||||
if [[ $tag =~ "benchmarks" ]]; then
|
||||
INDUCTOR_BENCHMARKS=yes
|
||||
fi
|
||||
;;
|
||||
pytorch-linux-noble-rocm-alpha-py3)
|
||||
ANACONDA_PYTHON_VERSION=3.12
|
||||
GCC_VERSION=11
|
||||
VISION=yes
|
||||
ROCM_VERSION=7.0
|
||||
NINJA_VERSION=1.9.0
|
||||
TRITON=yes
|
||||
KATEX=yes
|
||||
UCX_COMMIT=${_UCX_COMMIT}
|
||||
UCC_COMMIT=${_UCC_COMMIT}
|
||||
PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950"
|
||||
INDUCTOR_BENCHMARKS=yes
|
||||
;;
|
||||
pytorch-linux-jammy-xpu-2025.0-py3)
|
||||
ANACONDA_PYTHON_VERSION=3.9
|
||||
@ -222,7 +243,7 @@ case "$tag" in
|
||||
NINJA_VERSION=1.9.0
|
||||
TRITON=yes
|
||||
;;
|
||||
pytorch-linux-jammy-py3.9-gcc11-inductor-benchmarks)
|
||||
pytorch-linux-jammy-py3.9-gcc11-inductor-benchmarks)
|
||||
ANACONDA_PYTHON_VERSION=3.9
|
||||
GCC_VERSION=11
|
||||
VISION=yes
|
||||
@ -231,13 +252,25 @@ case "$tag" in
|
||||
DOCS=yes
|
||||
INDUCTOR_BENCHMARKS=yes
|
||||
;;
|
||||
pytorch-linux-jammy-cuda12.8-cudnn9-py3.9-clang12)
|
||||
pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-clang12)
|
||||
ANACONDA_PYTHON_VERSION=3.9
|
||||
CUDA_VERSION=12.8.1
|
||||
CUDA_VERSION=11.8
|
||||
CUDNN_VERSION=9
|
||||
CLANG_VERSION=12
|
||||
VISION=yes
|
||||
TRITON=yes
|
||||
;;
|
||||
pytorch-linux-jammy-py3-clang12-asan)
|
||||
ANACONDA_PYTHON_VERSION=3.9
|
||||
CLANG_VERSION=12
|
||||
VISION=yes
|
||||
TRITON=yes
|
||||
;;
|
||||
pytorch-linux-jammy-py3-clang15-asan)
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
CLANG_VERSION=15
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-jammy-py3-clang18-asan)
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
CLANG_VERSION=18
|
||||
@ -270,23 +303,21 @@ case "$tag" in
|
||||
GCC_VERSION=11
|
||||
TRITON_CPU=yes
|
||||
;;
|
||||
pytorch-linux-jammy-linter)
|
||||
pytorch-linux-focal-linter)
|
||||
# TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627.
|
||||
# We will need to update mypy version eventually, but that's for another day. The task
|
||||
# would be to upgrade mypy to 1.0.0 with Python 3.11
|
||||
PYTHON_VERSION=3.9
|
||||
;;
|
||||
pytorch-linux-jammy-cuda12.8-cudnn9-py3.9-linter)
|
||||
pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter)
|
||||
PYTHON_VERSION=3.9
|
||||
CUDA_VERSION=12.8.1
|
||||
CUDA_VERSION=11.8
|
||||
;;
|
||||
pytorch-linux-jammy-aarch64-py3.10-gcc11)
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
GCC_VERSION=11
|
||||
ACL=yes
|
||||
VISION=yes
|
||||
CONDA_CMAKE=yes
|
||||
OPENBLAS=yes
|
||||
# snadampal: skipping llvm src build install because the current version
|
||||
# from pytorch/llvm:9.0.1 is x86 specific
|
||||
SKIP_LLVM_SRC_BUILD_INSTALL=yes
|
||||
@ -296,8 +327,6 @@ case "$tag" in
|
||||
GCC_VERSION=11
|
||||
ACL=yes
|
||||
VISION=yes
|
||||
CONDA_CMAKE=yes
|
||||
OPENBLAS=yes
|
||||
# snadampal: skipping llvm src build install because the current version
|
||||
# from pytorch/llvm:9.0.1 is x86 specific
|
||||
SKIP_LLVM_SRC_BUILD_INSTALL=yes
|
||||
@ -312,6 +341,7 @@ case "$tag" in
|
||||
fi
|
||||
if [[ "$image" == *cuda* ]]; then
|
||||
extract_version_from_image_name cuda CUDA_VERSION
|
||||
extract_version_from_image_name cudnn CUDNN_VERSION
|
||||
fi
|
||||
if [[ "$image" == *rocm* ]]; then
|
||||
extract_version_from_image_name rocm ROCM_VERSION
|
||||
@ -340,6 +370,14 @@ esac
|
||||
|
||||
tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
#when using cudnn version 8 install it separately from cuda
|
||||
if [[ "$image" == *cuda* && ${OS} == "ubuntu" ]]; then
|
||||
IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
|
||||
if [[ ${CUDNN_VERSION} == 9 ]]; then
|
||||
IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
|
||||
fi
|
||||
fi
|
||||
|
||||
no_cache_flag=""
|
||||
progress_flag=""
|
||||
# Do not use cache and progress=plain when in CI
|
||||
@ -356,6 +394,7 @@ docker build \
|
||||
--build-arg "LLVMDEV=${LLVMDEV:-}" \
|
||||
--build-arg "VISION=${VISION:-}" \
|
||||
--build-arg "UBUNTU_VERSION=${UBUNTU_VERSION}" \
|
||||
--build-arg "CENTOS_VERSION=${CENTOS_VERSION}" \
|
||||
--build-arg "DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}" \
|
||||
--build-arg "GLIBC_VERSION=${GLIBC_VERSION}" \
|
||||
--build-arg "CLANG_VERSION=${CLANG_VERSION}" \
|
||||
@ -363,6 +402,9 @@ docker build \
|
||||
--build-arg "PYTHON_VERSION=${PYTHON_VERSION}" \
|
||||
--build-arg "GCC_VERSION=${GCC_VERSION}" \
|
||||
--build-arg "CUDA_VERSION=${CUDA_VERSION}" \
|
||||
--build-arg "CUDNN_VERSION=${CUDNN_VERSION}" \
|
||||
--build-arg "TENSORRT_VERSION=${TENSORRT_VERSION}" \
|
||||
--build-arg "GRADLE_VERSION=${GRADLE_VERSION}" \
|
||||
--build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \
|
||||
--build-arg "KATEX=${KATEX:-}" \
|
||||
--build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \
|
||||
@ -380,7 +422,6 @@ docker build \
|
||||
--build-arg "XPU_VERSION=${XPU_VERSION}" \
|
||||
--build-arg "UNINSTALL_DILL=${UNINSTALL_DILL}" \
|
||||
--build-arg "ACL=${ACL:-}" \
|
||||
--build-arg "OPENBLAS=${OPENBLAS:-}" \
|
||||
--build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \
|
||||
--build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \
|
||||
-f $(dirname ${DOCKERFILE})/Dockerfile \
|
||||
|
||||
@ -39,7 +39,6 @@ RUN bash ./install_user.sh && rm install_user.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
ARG BUILD_ENVIRONMENT
|
||||
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
|
||||
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
|
||||
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
|
||||
@ -1 +1 @@
|
||||
56392aa978594cc155fa8af48cd949f5b5f1823a
|
||||
b173722085b3f555d6ba4533d6bbaddfd7c71144
|
||||
|
||||
@ -1 +1 @@
|
||||
v2.27.5-1
|
||||
v2.26.5-1
|
||||
|
||||
@ -1 +1 @@
|
||||
ae324eeac8e102a2b40370e341460f3791353398
|
||||
b0e26b7359c147b8aa0af686c20510fb9b15990a
|
||||
|
||||
@ -1 +1 @@
|
||||
f7888497a1eb9e98d4c07537f0d0bcfe180d1363
|
||||
c8757738a7418249896224430ce84888e8ecdd79
|
||||
|
||||
@ -23,10 +23,6 @@ conda_install() {
|
||||
as_jenkins conda install -q -n py_$ANACONDA_PYTHON_VERSION -y python="$ANACONDA_PYTHON_VERSION" $*
|
||||
}
|
||||
|
||||
conda_install_through_forge() {
|
||||
as_jenkins conda install -c conda-forge -q -n py_$ANACONDA_PYTHON_VERSION -y python="$ANACONDA_PYTHON_VERSION" $*
|
||||
}
|
||||
|
||||
conda_run() {
|
||||
as_jenkins conda run -n py_$ANACONDA_PYTHON_VERSION --no-capture-output $*
|
||||
}
|
||||
|
||||
@ -15,9 +15,6 @@ install_ubuntu() {
|
||||
elif [[ "$UBUNTU_VERSION" == "22.04"* ]]; then
|
||||
cmake3="cmake=3.22*"
|
||||
maybe_libiomp_dev=""
|
||||
elif [[ "$UBUNTU_VERSION" == "24.04"* ]]; then
|
||||
cmake3="cmake=3.28*"
|
||||
maybe_libiomp_dev=""
|
||||
else
|
||||
cmake3="cmake=3.5*"
|
||||
maybe_libiomp_dev="libiomp-dev"
|
||||
@ -33,6 +30,18 @@ install_ubuntu() {
|
||||
maybe_libomp_dev=""
|
||||
fi
|
||||
|
||||
# HACK: UCC testing relies on libnccl library from NVIDIA repo, and version 2.16 crashes
|
||||
# See https://github.com/pytorch/pytorch/pull/105260#issuecomment-1673399729
|
||||
# TODO: Eliminate this hack, we should not relay on apt-get installation
|
||||
# See https://github.com/pytorch/pytorch/issues/144768
|
||||
if [[ "$UBUNTU_VERSION" == "20.04"* && "$CUDA_VERSION" == "11.8"* ]]; then
|
||||
maybe_libnccl_dev="libnccl2=2.15.5-1+cuda11.8 libnccl-dev=2.15.5-1+cuda11.8 --allow-downgrades --allow-change-held-packages"
|
||||
elif [[ "$UBUNTU_VERSION" == "20.04"* && "$CUDA_VERSION" == "12.4"* ]]; then
|
||||
maybe_libnccl_dev="libnccl2=2.26.2-1+cuda12.4 libnccl-dev=2.26.2-1+cuda12.4 --allow-downgrades --allow-change-held-packages"
|
||||
else
|
||||
maybe_libnccl_dev=""
|
||||
fi
|
||||
|
||||
# Install common dependencies
|
||||
apt-get update
|
||||
# TODO: Some of these may not be necessary
|
||||
@ -61,6 +70,7 @@ install_ubuntu() {
|
||||
libasound2-dev \
|
||||
libsndfile-dev \
|
||||
${maybe_libomp_dev} \
|
||||
${maybe_libnccl_dev} \
|
||||
software-properties-common \
|
||||
wget \
|
||||
sudo \
|
||||
|
||||
@ -4,8 +4,12 @@ set -ex
|
||||
|
||||
# Optionally install conda
|
||||
if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
BASE_URL="https://github.com/conda-forge/miniforge/releases/latest/download" # @lint-ignore
|
||||
CONDA_FILE="Miniforge3-Linux-$(uname -m).sh"
|
||||
BASE_URL="https://repo.anaconda.com/miniconda"
|
||||
CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
|
||||
if [[ $(uname -m) == "aarch64" ]] || [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
|
||||
BASE_URL="https://github.com/conda-forge/miniforge/releases/latest/download" # @lint-ignore
|
||||
CONDA_FILE="Miniforge3-Linux-$(uname -m).sh"
|
||||
fi
|
||||
|
||||
MAJOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 1)
|
||||
MINOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 2)
|
||||
@ -17,6 +21,7 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
mkdir -p /opt/conda
|
||||
chown jenkins:jenkins /opt/conda
|
||||
|
||||
@ -59,16 +64,11 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
# which is provided in libstdcxx 12 and up.
|
||||
conda_install libstdcxx-ng=12.3.0 --update-deps -c conda-forge
|
||||
|
||||
# Miniforge installer doesn't install sqlite by default
|
||||
if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
|
||||
conda_install sqlite
|
||||
fi
|
||||
|
||||
# Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
|
||||
if [[ $(uname -m) != "aarch64" ]]; then
|
||||
pip_install mkl==2024.2.0
|
||||
pip_install mkl-static==2024.2.0
|
||||
pip_install mkl-include==2024.2.0
|
||||
if [[ $(uname -m) == "aarch64" ]]; then
|
||||
conda_install "openblas==0.3.29=*openmp*"
|
||||
else
|
||||
conda_install "mkl=2021.4.0 mkl-include=2021.4.0"
|
||||
fi
|
||||
|
||||
# Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
|
||||
@ -82,10 +82,6 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
conda_run ${SCRIPT_FOLDER}/install_magma_conda.sh $(cut -f1-2 -d'.' <<< ${CUDA_VERSION})
|
||||
fi
|
||||
|
||||
if [[ "$UBUNTU_VERSION" == "24.04"* ]] ; then
|
||||
conda_install_through_forge libstdcxx-ng=14
|
||||
fi
|
||||
|
||||
# Install some other packages, including those needed for Python test reporting
|
||||
pip_install -r /opt/conda/requirements-ci.txt
|
||||
|
||||
|
||||
@ -3,10 +3,11 @@
|
||||
set -uex -o pipefail
|
||||
|
||||
PYTHON_DOWNLOAD_URL=https://www.python.org/ftp/python
|
||||
PYTHON_DOWNLOAD_GITHUB_BRANCH=https://github.com/python/cpython/archive/refs/heads # @lint-ignore
|
||||
GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py
|
||||
|
||||
# Python versions to be installed in /opt/$VERSION_NO
|
||||
CPYTHON_VERSIONS=${CPYTHON_VERSIONS:-"3.9.0 3.10.1 3.11.0 3.12.0 3.13.0 3.13.0t 3.14.0 3.14.0t"}
|
||||
CPYTHON_VERSIONS=${CPYTHON_VERSIONS:-"3.9.0 3.10.1 3.11.0 3.12.0 3.13.0 3.13.0t"}
|
||||
|
||||
function check_var {
|
||||
if [ -z "$1" ]; then
|
||||
@ -23,8 +24,9 @@ function do_cpython_build {
|
||||
tar -xzf Python-$py_ver.tgz
|
||||
|
||||
local additional_flags=""
|
||||
if [[ "$py_ver" == *"t" ]]; then
|
||||
if [ "$py_ver" == "3.13.0t" ]; then
|
||||
additional_flags=" --disable-gil"
|
||||
mv cpython-3.13/ cpython-3.13t/
|
||||
fi
|
||||
|
||||
pushd $py_folder
|
||||
@ -66,29 +68,32 @@ function do_cpython_build {
|
||||
ln -s pip3 ${prefix}/bin/pip
|
||||
fi
|
||||
# install setuptools since python 3.12 is required to use distutils
|
||||
# packaging is needed to create symlink since wheel no longer provides needed information
|
||||
${prefix}/bin/pip install packaging==25.0 wheel==0.45.1 setuptools==80.9.0
|
||||
local abi_tag=$(${prefix}/bin/python -c "from packaging.tags import interpreter_name, interpreter_version; import sysconfig ; from sysconfig import get_config_var; print('{0}{1}-{0}{1}{2}'.format(interpreter_name(), interpreter_version(), 't' if sysconfig.get_config_var('Py_GIL_DISABLED') else ''))")
|
||||
${prefix}/bin/pip install wheel==0.34.2 setuptools==68.2.2
|
||||
local abi_tag=$(${prefix}/bin/python -c "from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag; print('{0}{1}-{2}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag()))")
|
||||
ln -sf ${prefix} /opt/python/${abi_tag}
|
||||
}
|
||||
|
||||
function build_cpython {
|
||||
local py_ver=$1
|
||||
check_var $py_ver
|
||||
local py_suffix=$py_ver
|
||||
local py_folder=$py_ver
|
||||
check_var $PYTHON_DOWNLOAD_URL
|
||||
local py_ver_folder=$py_ver
|
||||
|
||||
# Special handling for nogil
|
||||
if [[ "${py_ver}" == *"t" ]]; then
|
||||
py_suffix=${py_ver::-1}
|
||||
py_folder=$py_suffix
|
||||
if [ "$py_ver" = "3.13.0t" ]; then
|
||||
PY_VER_SHORT="3.13"
|
||||
PYT_VER_SHORT="3.13t"
|
||||
check_var $PYTHON_DOWNLOAD_GITHUB_BRANCH
|
||||
wget $PYTHON_DOWNLOAD_GITHUB_BRANCH/$PY_VER_SHORT.tar.gz -O Python-$py_ver.tgz
|
||||
do_cpython_build $py_ver cpython-$PYT_VER_SHORT
|
||||
elif [ "$py_ver" = "3.13.0" ]; then
|
||||
PY_VER_SHORT="3.13"
|
||||
check_var $PYTHON_DOWNLOAD_GITHUB_BRANCH
|
||||
wget $PYTHON_DOWNLOAD_GITHUB_BRANCH/$PY_VER_SHORT.tar.gz -O Python-$py_ver.tgz
|
||||
do_cpython_build $py_ver cpython-$PY_VER_SHORT
|
||||
else
|
||||
wget -q $PYTHON_DOWNLOAD_URL/$py_ver_folder/Python-$py_ver.tgz
|
||||
do_cpython_build $py_ver Python-$py_ver
|
||||
fi
|
||||
# Only b3 is available now
|
||||
if [ "$py_suffix" == "3.14.0" ]; then
|
||||
py_suffix="3.14.0b3"
|
||||
fi
|
||||
wget -q $PYTHON_DOWNLOAD_URL/$py_folder/Python-$py_suffix.tgz -O Python-$py_ver.tgz
|
||||
do_cpython_build $py_ver Python-$py_suffix
|
||||
|
||||
rm -f Python-$py_ver.tgz
|
||||
}
|
||||
|
||||
@ -10,8 +10,6 @@ else
|
||||
arch_path='sbsa'
|
||||
fi
|
||||
|
||||
NVSHMEM_VERSION=3.3.9
|
||||
|
||||
function install_cuda {
|
||||
version=$1
|
||||
runfile=$2
|
||||
@ -42,40 +40,18 @@ function install_cudnn {
|
||||
rm -rf tmp_cudnn
|
||||
}
|
||||
|
||||
function install_nvshmem {
|
||||
cuda_major_version=$1 # e.g. "12"
|
||||
nvshmem_version=$2 # e.g. "3.3.9"
|
||||
function install_118 {
|
||||
CUDNN_VERSION=9.1.0.70
|
||||
echo "Installing CUDA 11.8 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.4.0"
|
||||
install_cuda 11.8.0 cuda_11.8.0_520.61.05_linux
|
||||
|
||||
case "${arch_path}" in
|
||||
sbsa)
|
||||
dl_arch="aarch64"
|
||||
;;
|
||||
x86_64)
|
||||
dl_arch="x64"
|
||||
;;
|
||||
*)
|
||||
dl_arch="${arch}"
|
||||
;;
|
||||
esac
|
||||
install_cudnn 11 $CUDNN_VERSION
|
||||
|
||||
tmpdir="tmp_nvshmem"
|
||||
mkdir -p "${tmpdir}" && cd "${tmpdir}"
|
||||
CUDA_VERSION=11.8 bash install_nccl.sh
|
||||
|
||||
# nvSHMEM license: https://docs.nvidia.com/nvshmem/api/sla.html
|
||||
filename="libnvshmem_cuda${cuda_major_version}-linux-${arch_path}-${nvshmem_version}"
|
||||
url="https://developer.download.nvidia.com/compute/redist/nvshmem/${nvshmem_version}/builds/cuda${cuda_major_version}/txz/agnostic/${dl_arch}/${filename}.tar.gz"
|
||||
CUDA_VERSION=11.8 bash install_cusparselt.sh
|
||||
|
||||
# download, unpack, install
|
||||
wget -q "${url}"
|
||||
tar xf "${filename}.tar.gz"
|
||||
cp -a "libnvshmem/include/"* /usr/local/cuda/include/
|
||||
cp -a "libnvshmem/lib/"* /usr/local/cuda/lib64/
|
||||
|
||||
# cleanup
|
||||
cd ..
|
||||
rm -rf "${tmpdir}"
|
||||
|
||||
echo "nvSHMEM ${nvshmem_version} for CUDA ${cuda_major_version} (${arch_path}) installed."
|
||||
ldconfig
|
||||
}
|
||||
|
||||
function install_124 {
|
||||
@ -93,14 +69,12 @@ function install_124 {
|
||||
}
|
||||
|
||||
function install_126 {
|
||||
CUDNN_VERSION=9.10.2.21
|
||||
echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
|
||||
CUDNN_VERSION=9.5.1.17
|
||||
echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.3"
|
||||
install_cuda 12.6.3 cuda_12.6.3_560.35.05_linux
|
||||
|
||||
install_cudnn 12 $CUDNN_VERSION
|
||||
|
||||
install_nvshmem 12 $NVSHMEM_VERSION
|
||||
|
||||
CUDA_VERSION=12.6 bash install_nccl.sh
|
||||
|
||||
CUDA_VERSION=12.6 bash install_cusparselt.sh
|
||||
@ -108,22 +82,35 @@ function install_126 {
|
||||
ldconfig
|
||||
}
|
||||
|
||||
function install_129 {
|
||||
CUDNN_VERSION=9.10.2.21
|
||||
echo "Installing CUDA 12.9.1 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
|
||||
# install CUDA 12.9.1 in the same container
|
||||
install_cuda 12.9.1 cuda_12.9.1_575.57.08_linux
|
||||
function prune_118 {
|
||||
echo "Pruning CUDA 11.8 and cuDNN"
|
||||
#####################################################################################
|
||||
# CUDA 11.8 prune static libs
|
||||
#####################################################################################
|
||||
export NVPRUNE="/usr/local/cuda-11.8/bin/nvprune"
|
||||
export CUDA_LIB_DIR="/usr/local/cuda-11.8/lib64"
|
||||
|
||||
# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
|
||||
install_cudnn 12 $CUDNN_VERSION
|
||||
export GENCODE="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
|
||||
export GENCODE_CUDNN="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
|
||||
|
||||
install_nvshmem 12 $NVSHMEM_VERSION
|
||||
if [[ -n "$OVERRIDE_GENCODE" ]]; then
|
||||
export GENCODE=$OVERRIDE_GENCODE
|
||||
fi
|
||||
|
||||
CUDA_VERSION=12.9 bash install_nccl.sh
|
||||
# all CUDA libs except CuDNN and CuBLAS (cudnn and cublas need arch 3.7 included)
|
||||
ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis" \
|
||||
| xargs -I {} bash -c \
|
||||
"echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"
|
||||
|
||||
CUDA_VERSION=12.9 bash install_cusparselt.sh
|
||||
# prune CuDNN and CuBLAS
|
||||
$NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
|
||||
$NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a
|
||||
|
||||
ldconfig
|
||||
#####################################################################################
|
||||
# CUDA 11.8 prune visual tools
|
||||
#####################################################################################
|
||||
export CUDA_BASE="/usr/local/cuda-11.8/"
|
||||
rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2022.3.0 $CUDA_BASE/nsight-systems-2022.4.2/
|
||||
}
|
||||
|
||||
function prune_124 {
|
||||
@ -196,15 +183,13 @@ function prune_126 {
|
||||
|
||||
function install_128 {
|
||||
CUDNN_VERSION=9.8.0.87
|
||||
echo "Installing CUDA 12.8.1 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
|
||||
echo "Installing CUDA 12.8.1 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.3"
|
||||
# install CUDA 12.8.1 in the same container
|
||||
install_cuda 12.8.1 cuda_12.8.1_570.124.06_linux
|
||||
|
||||
# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
|
||||
install_cudnn 12 $CUDNN_VERSION
|
||||
|
||||
install_nvshmem 12 $NVSHMEM_VERSION
|
||||
|
||||
CUDA_VERSION=12.8 bash install_nccl.sh
|
||||
|
||||
CUDA_VERSION=12.8 bash install_cusparselt.sh
|
||||
@ -216,13 +201,13 @@ function install_128 {
|
||||
while test $# -gt 0
|
||||
do
|
||||
case "$1" in
|
||||
11.8) install_118; prune_118
|
||||
;;
|
||||
12.4) install_124; prune_124
|
||||
;;
|
||||
12.6|12.6.*) install_126; prune_126
|
||||
12.6) install_126; prune_126
|
||||
;;
|
||||
12.8|12.8.*) install_128;
|
||||
;;
|
||||
12.9|12.9.*) install_129;
|
||||
12.8) install_128;
|
||||
;;
|
||||
*) echo "bad argument $1"; exit 1
|
||||
;;
|
||||
|
||||
.ci/docker/common/install_cudnn.sh (new file, 26 lines)

@@ -0,0 +1,26 @@
#!/bin/bash

if [[ -n "${CUDNN_VERSION}" ]]; then
# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
mkdir tmp_cudnn
pushd tmp_cudnn
if [[ ${CUDA_VERSION:0:4} == "12.8" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.8.0.87_cuda12-archive"
elif [[ ${CUDA_VERSION:0:4} == "12.6" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.5.1.17_cuda12-archive"
elif [[ ${CUDA_VERSION:0:2} == "12" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda12-archive"
elif [[ ${CUDA_VERSION:0:2} == "11" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda11-archive"
else
print "Unsupported CUDA version ${CUDA_VERSION}"
exit 1
fi
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz
tar xf ${CUDNN_NAME}.tar.xz
cp -a ${CUDNN_NAME}/include/* /usr/local/cuda/include/
cp -a ${CUDNN_NAME}/lib/* /usr/local/cuda/lib64/
popd
rm -rf tmp_cudnn
ldconfig
fi

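The new `install_cudnn.sh` above is driven entirely by environment variables. A minimal invocation would look roughly like the following; the version values are examples, and the script must run as root during the image build since it writes under `/usr/local/cuda`.

```bash
# Example only: CUDA_VERSION 12.8.x selects the cudnn 9.8.0.87 cuda12 archive branch of the script.
CUDNN_VERSION=9 CUDA_VERSION=12.8.1 bash .ci/docker/common/install_cudnn.sh
```
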
@ -5,13 +5,13 @@ set -ex
|
||||
# cuSPARSELt license: https://docs.nvidia.com/cuda/cusparselt/license.html
|
||||
mkdir tmp_cusparselt && cd tmp_cusparselt
|
||||
|
||||
if [[ ${CUDA_VERSION:0:4} =~ ^12\.[5-9]$ ]]; then
|
||||
if [[ ${CUDA_VERSION:0:4} =~ ^12\.[5-8]$ ]]; then
|
||||
arch_path='sbsa'
|
||||
export TARGETARCH=${TARGETARCH:-$(uname -m)}
|
||||
if [ ${TARGETARCH} = 'amd64' ] || [ "${TARGETARCH}" = 'x86_64' ]; then
|
||||
arch_path='x86_64'
|
||||
fi
|
||||
CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.7.1.0-archive"
|
||||
CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.6.3.2-archive"
|
||||
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz
|
||||
elif [[ ${CUDA_VERSION:0:4} == "12.4" ]]; then
|
||||
arch_path='sbsa'
|
||||
@ -21,6 +21,9 @@ elif [[ ${CUDA_VERSION:0:4} == "12.4" ]]; then
|
||||
fi
|
||||
CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.6.2.3-archive"
|
||||
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz
|
||||
elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then
|
||||
CUSPARSELT_NAME="libcusparse_lt-linux-x86_64-0.4.0.7-archive"
|
||||
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/${CUSPARSELT_NAME}.tar.xz
|
||||
else
|
||||
echo "Not sure which libcusparselt version to install for this ${CUDA_VERSION}"
|
||||
fi
|
||||
|
||||
@ -15,37 +15,11 @@ function install_timm() {
|
||||
commit=$(get_pinned_commit timm)
|
||||
|
||||
pip_install "git+https://github.com/huggingface/pytorch-image-models@${commit}"
|
||||
}
|
||||
|
||||
function install_torchbench() {
|
||||
local commit
|
||||
commit=$(get_pinned_commit torchbench)
|
||||
git clone https://github.com/pytorch/benchmark torchbench
|
||||
pushd torchbench
|
||||
git checkout "$commit"
|
||||
|
||||
python install.py --continue_on_fail
|
||||
|
||||
# TODO (huydhn): transformers-4.44.2 added by https://github.com/pytorch/benchmark/pull/2488
|
||||
# is regressing speedup metric. This needs to be investigated further
|
||||
pip install transformers==4.38.1
|
||||
|
||||
echo "Print all dependencies after TorchBench is installed"
|
||||
python -mpip freeze
|
||||
popd
|
||||
|
||||
chown -R jenkins torchbench
|
||||
# Clean up
|
||||
conda_run pip uninstall -y torch torchvision triton
|
||||
}
|
||||
|
||||
# Pango is needed for weasyprint which is needed for doctr
|
||||
conda_install pango
|
||||
|
||||
# Stable packages are ok here, just to satisfy TorchBench check
|
||||
pip_install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu128
|
||||
|
||||
install_torchbench
|
||||
install_huggingface
|
||||
install_timm
|
||||
|
||||
# Clean up
|
||||
conda_run pip uninstall -y torch torchvision torchaudio triton
|
||||
|
||||
@ -8,6 +8,16 @@ retry () {
|
||||
"$@" || (sleep 10 && "$@") || (sleep 20 && "$@") || (sleep 40 && "$@")
|
||||
}
|
||||
|
||||
# A bunch of custom pip dependencies for ONNX
|
||||
pip_install \
|
||||
beartype==0.15.0 \
|
||||
filelock==3.9.0 \
|
||||
flatbuffers==2.0 \
|
||||
mock==5.0.1 \
|
||||
ninja==1.10.2 \
|
||||
networkx==2.5 \
|
||||
numpy==1.24.2
|
||||
|
||||
# ONNXRuntime should be installed before installing
|
||||
# onnx-weekly. Otherwise, onnx-weekly could be
|
||||
# overwritten by onnx.
|
||||
@ -19,8 +29,11 @@ pip_install \
|
||||
transformers==4.36.2
|
||||
|
||||
pip_install coloredlogs packaging
|
||||
|
||||
pip_install onnxruntime==1.18.1
|
||||
pip_install onnxscript==0.3.1
|
||||
pip_install onnxscript==0.2.6 --no-deps
|
||||
# required by onnxscript
|
||||
pip_install ml_dtypes
|
||||
|
||||
# Cache the transformers model to be used later by ONNX tests. We need to run the transformers
|
||||
# package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/
|
||||
|
||||
@ -4,9 +4,9 @@
|
||||
set -ex
|
||||
|
||||
cd /
|
||||
git clone https://github.com/OpenMathLib/OpenBLAS.git -b "${OPENBLAS_VERSION:-v0.3.30}" --depth 1 --shallow-submodules
|
||||
git clone https://github.com/OpenMathLib/OpenBLAS.git -b v0.3.29 --depth 1 --shallow-submodules
|
||||
|
||||
|
||||
OPENBLAS_CHECKOUT_DIR="OpenBLAS"
|
||||
OPENBLAS_BUILD_FLAGS="
|
||||
NUM_THREADS=128
|
||||
USE_OPENMP=1
|
||||
@ -14,8 +14,9 @@ NO_SHARED=0
|
||||
DYNAMIC_ARCH=1
|
||||
TARGET=ARMV8
|
||||
CFLAGS=-O3
|
||||
BUILD_BFLOAT16=1
|
||||
"
|
||||
|
||||
OPENBLAS_CHECKOUT_DIR="OpenBLAS"
|
||||
|
||||
make -j8 ${OPENBLAS_BUILD_FLAGS} -C ${OPENBLAS_CHECKOUT_DIR}
|
||||
make -j8 ${OPENBLAS_BUILD_FLAGS} install -C ${OPENBLAS_CHECKOUT_DIR}
|
||||
|
||||
@ -8,11 +8,9 @@ ver() {
|
||||
|
||||
install_ubuntu() {
|
||||
apt-get update
|
||||
# gpg-agent is not available by default
|
||||
apt-get install -y --no-install-recommends gpg-agent
|
||||
if [[ $(ver $UBUNTU_VERSION) -ge $(ver 22.04) ]]; then
|
||||
echo -e 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' \
|
||||
| sudo tee /etc/apt/preferences.d/rocm-pin-600
|
||||
if [[ $UBUNTU_VERSION == 20.04 ]]; then
|
||||
# gpg-agent is not available by default on 20.04
|
||||
apt-get install -y --no-install-recommends gpg-agent
|
||||
fi
|
||||
apt-get install -y kmod
|
||||
apt-get install -y wget
|
||||
@ -28,27 +26,13 @@ Pin: release o=repo.radeon.com
|
||||
Pin-Priority: 600
|
||||
EOF
|
||||
|
||||
# we want the patch version of 6.4 instead
|
||||
if [[ $(ver $ROCM_VERSION) -eq $(ver 6.4) ]]; then
|
||||
ROCM_VERSION="${ROCM_VERSION}.2"
|
||||
fi
|
||||
|
||||
# Default url values
|
||||
rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}"
|
||||
amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu"
|
||||
|
||||
# Special case for ROCM_VERSION == 7.0
|
||||
if [[ $(ver "$ROCM_VERSION") -eq $(ver 7.0) ]]; then
|
||||
rocm_baseurl="https://repo.radeon.com/rocm/apt/7.0_alpha2"
|
||||
amdgpu_baseurl="https://repo.radeon.com/amdgpu/30.10_alpha2/ubuntu"
|
||||
fi
|
||||
|
||||
# Add amdgpu repository
|
||||
UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'`
|
||||
echo "deb [arch=amd64] ${amdgpu_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list
|
||||
echo "deb [arch=amd64] https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list
|
||||
|
||||
# Add rocm repository
|
||||
wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add -
|
||||
local rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}"
|
||||
echo "deb [arch=amd64] ${rocm_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/rocm.list
|
||||
apt-get update --allow-insecure-repositories
|
||||
|
||||
@ -82,33 +66,25 @@ EOF
|
||||
done
|
||||
|
||||
# ROCm 6.3 had a regression where initializing static code objects had significant overhead
|
||||
# CI no longer builds for ROCm 6.3, but
|
||||
# ROCm 6.4 did not yet fix the regression, also HIP branch names are different
|
||||
if [[ $(ver $ROCM_VERSION) -ge $(ver 6.4) ]] && [[ $(ver $ROCM_VERSION) -lt $(ver 7.0) ]]; then
|
||||
if [[ $(ver $ROCM_VERSION) -eq $(ver 6.4.2) ]]; then
|
||||
HIP_TAG=rocm-6.4.2
|
||||
CLR_HASH=74d78ba3ac4bac235d02bcb48511c30b5cfdd457 # branch release/rocm-rel-6.4.2-statco-hotfix
|
||||
elif [[ $(ver $ROCM_VERSION) -eq $(ver 6.4.1) ]]; then
|
||||
HIP_TAG=rocm-6.4.1
|
||||
CLR_HASH=efe6c35790b9206923bfeed1209902feff37f386 # branch release/rocm-rel-6.4.1-statco-hotfix
|
||||
if [[ $(ver $ROCM_VERSION) -eq $(ver 6.3) ]] || [[ $(ver $ROCM_VERSION) -eq $(ver 6.4) ]]; then
|
||||
if [[ $(ver $ROCM_VERSION) -eq $(ver 6.3) ]]; then
|
||||
HIP_BRANCH=rocm-6.3.x
|
||||
VER_STR=6.3
|
||||
elif [[ $(ver $ROCM_VERSION) -eq $(ver 6.4) ]]; then
|
||||
HIP_TAG=rocm-6.4.0
|
||||
CLR_HASH=600f5b0d2baed94d5121e2174a9de0851b040b0c # branch release/rocm-rel-6.4-statco-hotfix
|
||||
HIP_BRANCH=release/rocm-rel-6.4
|
||||
VER_STR=6.4
|
||||
fi
|
||||
# clr build needs CppHeaderParser but can only find it using conda's python
|
||||
python -m pip install CppHeaderParser
|
||||
git clone https://github.com/ROCm/HIP -b $HIP_TAG
|
||||
/opt/conda/bin/python -m pip install CppHeaderParser
|
||||
git clone https://github.com/ROCm/HIP -b $HIP_BRANCH
|
||||
HIP_COMMON_DIR=$(readlink -f HIP)
|
||||
git clone https://github.com/jeffdaily/clr
|
||||
pushd clr
|
||||
git checkout $CLR_HASH
|
||||
popd
|
||||
git clone https://github.com/jeffdaily/clr -b release/rocm-rel-${VER_STR}-statco-hotfix
|
||||
mkdir -p clr/build
|
||||
pushd clr/build
|
||||
# Need to point CMake to the correct python installation to find CppHeaderParser
|
||||
cmake .. -DPython3_EXECUTABLE=/opt/conda/envs/py_${ANACONDA_PYTHON_VERSION}/bin/python3 -DCLR_BUILD_HIP=ON -DHIP_COMMON_DIR=$HIP_COMMON_DIR
|
||||
cmake .. -DCLR_BUILD_HIP=ON -DHIP_COMMON_DIR=$HIP_COMMON_DIR
|
||||
make -j
|
||||
cp hipamd/lib/libamdhip64.so.6.4.* /opt/rocm/lib/libamdhip64.so.6.4.*
|
||||
cp hipamd/lib/libamdhip64.so.${VER_STR}.* /opt/rocm/lib/libamdhip64.so.${VER_STR}.*
|
||||
popd
|
||||
rm -rf HIP clr
|
||||
fi
|
||||
|
||||
@ -5,12 +5,7 @@ set -eou pipefail
|
||||
|
||||
function do_install() {
|
||||
rocm_version=$1
|
||||
if [[ ${rocm_version} =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
# chop off any patch version
|
||||
rocm_version="${rocm_version%.*}"
|
||||
fi
|
||||
|
||||
rocm_version_nodot=${rocm_version//./}
|
||||
rocm_version_nodot=${1//./}
|
||||
|
||||
# Version 2.7.2 + ROCm related updates
|
||||
MAGMA_VERSION=a1625ff4d9bc362906bd01f805dbbe12612953f6
|
||||
|
||||
@ -51,12 +51,7 @@ as_jenkins git clone --recursive ${TRITON_REPO} triton
|
||||
cd triton
|
||||
as_jenkins git checkout ${TRITON_PINNED_COMMIT}
|
||||
as_jenkins git submodule update --init --recursive
|
||||
|
||||
# Old versions of python have setup.py in ./python; newer versions have it in ./
|
||||
if [ ! -f setup.py ]; then
|
||||
cd python
|
||||
fi
|
||||
|
||||
cd python
|
||||
pip_install pybind11==2.13.6
|
||||
|
||||
# TODO: remove patch setup.py once we have a proper fix for https://github.com/triton-lang/triton/issues/4527
|
||||
@ -98,10 +93,3 @@ fi
|
||||
if [ -n "${NUMPY_VERSION}" ]; then
|
||||
pip_install "numpy==${NUMPY_VERSION}"
|
||||
fi
|
||||
|
||||
# IMPORTANT: helion needs to be installed without dependencies.
|
||||
# It depends on torch and triton. We don't want to install
|
||||
# triton and torch from production on Docker CI images
|
||||
if [[ "$ANACONDA_PYTHON_VERSION" != 3.9* ]]; then
|
||||
pip_install helion --no-deps
|
||||
fi
|
||||
|
||||
@ -34,27 +34,18 @@ function install_ubuntu() {
|
||||
|
||||
# The xpu-smi packages
|
||||
apt-get install -y flex bison xpu-smi
|
||||
|
||||
if [[ "${XPU_DRIVER_TYPE,,}" == "lts" ]]; then
|
||||
# Compute and Media Runtimes
|
||||
apt-get install -y \
|
||||
intel-opencl-icd intel-level-zero-gpu level-zero \
|
||||
intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 \
|
||||
libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
|
||||
libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
|
||||
mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo
|
||||
# Development Packages
|
||||
apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev level-zero-dev
|
||||
else # rolling driver
|
||||
apt-get install -y \
|
||||
intel-opencl-icd libze-intel-gpu1 libze1 \
|
||||
intel-media-va-driver-non-free libmfx-gen1 libvpl2 \
|
||||
libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
|
||||
libglapi-mesa libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
|
||||
mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo intel-ocloc
|
||||
apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev libze-dev
|
||||
# Compute and Media Runtimes
|
||||
apt-get install -y \
|
||||
intel-opencl-icd intel-level-zero-gpu level-zero \
|
||||
intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 \
|
||||
libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
|
||||
libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
|
||||
mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo
|
||||
if [[ "${XPU_DRIVER_TYPE,,}" == "rolling" ]]; then
|
||||
apt-get install -y intel-ocloc
|
||||
fi
|
||||
|
||||
# Development Packages
|
||||
apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev level-zero-dev
|
||||
# Install Intel Support Packages
|
||||
apt-get install -y ${XPU_PACKAGES}
|
||||
|
||||
@ -65,10 +56,14 @@ function install_ubuntu() {
|
||||
|
||||
function install_rhel() {
|
||||
. /etc/os-release
|
||||
|
||||
if [[ ! " 8.8 8.10 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then
|
||||
echo "RHEL version ${VERSION_ID} not supported"
|
||||
exit
|
||||
if [[ "${ID}" == "rhel" ]]; then
|
||||
if [[ ! " 8.8 8.9 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then
|
||||
echo "RHEL version ${VERSION_ID} not supported"
|
||||
exit
|
||||
fi
|
||||
elif [[ "${ID}" == "almalinux" ]]; then
|
||||
# Workaround for almalinux8 which used by quay.io/pypa/manylinux_2_28_x86_64
|
||||
VERSION_ID="8.8"
|
||||
fi
|
||||
|
||||
dnf install -y 'dnf-command(config-manager)'
|
||||
@ -139,11 +134,11 @@ function install_sles() {
|
||||
|
||||
}
|
||||
|
||||
# Default use GPU driver rolling releases
|
||||
XPU_DRIVER_VERSION=""
|
||||
if [[ "${XPU_DRIVER_TYPE,,}" == "lts" ]]; then
|
||||
# Use GPU driver LTS releases
|
||||
XPU_DRIVER_VERSION="/lts/2350"
|
||||
# Default use GPU driver LTS releases
|
||||
XPU_DRIVER_VERSION="/lts/2350"
|
||||
if [[ "${XPU_DRIVER_TYPE,,}" == "rolling" ]]; then
|
||||
# Use GPU driver rolling releases
|
||||
XPU_DRIVER_VERSION=""
|
||||
fi
|
||||
|
||||
# Default use Intel® oneAPI Deep Learning Essentials 2025.0
|
||||
|
||||
@ -54,6 +54,16 @@ COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
|
||||
COPY ./common/install_cusparselt.sh install_cusparselt.sh
|
||||
ENV CUDA_HOME /usr/local/cuda
|
||||
|
||||
FROM cuda as cuda11.8
|
||||
RUN bash ./install_cuda.sh 11.8
|
||||
RUN bash ./install_magma.sh 11.8
|
||||
RUN ln -sf /usr/local/cuda-11.8 /usr/local/cuda
|
||||
|
||||
FROM cuda as cuda12.4
|
||||
RUN bash ./install_cuda.sh 12.4
|
||||
RUN bash ./install_magma.sh 12.4
|
||||
RUN ln -sf /usr/local/cuda-12.4 /usr/local/cuda
|
||||
|
||||
FROM cuda as cuda12.6
|
||||
RUN bash ./install_cuda.sh 12.6
|
||||
RUN bash ./install_magma.sh 12.6
|
||||
@ -64,11 +74,6 @@ RUN bash ./install_cuda.sh 12.8
|
||||
RUN bash ./install_magma.sh 12.8
|
||||
RUN ln -sf /usr/local/cuda-12.8 /usr/local/cuda
|
||||
|
||||
FROM cuda as cuda12.9
|
||||
RUN bash ./install_cuda.sh 12.9
|
||||
RUN bash ./install_magma.sh 12.9
|
||||
RUN ln -sf /usr/local/cuda-12.9 /usr/local/cuda
|
||||
|
||||
FROM cpu as rocm
|
||||
ARG ROCM_VERSION
|
||||
ARG PYTORCH_ROCM_ARCH
|
||||
|
||||
@ -39,10 +39,6 @@ case ${DOCKER_TAG_PREFIX} in
DOCKER_GPU_BUILD_ARG=""
;;
rocm*)
# we want the patch version of 6.4 instead
if [[ $(ver $GPU_ARCH_VERSION) -eq $(ver 6.4) ]]; then
GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
fi
BASE_TARGET=rocm
GPU_IMAGE=rocm/dev-ubuntu-22.04:${GPU_ARCH_VERSION}-complete
PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"

@ -27,7 +27,5 @@ COPY ./common/install_linter.sh install_linter.sh
RUN bash ./install_linter.sh
RUN rm install_linter.sh

RUN chown -R jenkins:jenkins /var/lib/jenkins/ci_env

USER jenkins
CMD ["bash"]

@ -26,7 +26,7 @@ ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh && rm install_openssl.sh

# remove unnecessary python versions
# remove unncessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
@ -103,7 +103,6 @@ ENV SSL_CERT_FILE=/opt/_internal/certs.pem
# Install LLVM version
COPY --from=openssl /opt/openssl /opt/openssl
COPY --from=base /opt/python /opt/python
COPY --from=base /usr/local/lib/ /usr/local/lib/
COPY --from=base /opt/_internal /opt/_internal
COPY --from=base /usr/local/bin/auditwheel /usr/local/bin/auditwheel
COPY --from=intel /opt/intel /opt/intel

@ -2,7 +2,7 @@ FROM quay.io/pypa/manylinux_2_28_aarch64 as base

ARG GCCTOOLSET_VERSION=13

# Language variables
# Language variabes
ENV LC_ALL=en_US.UTF-8
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US.UTF-8
@ -58,13 +58,12 @@ RUN git config --global --add safe.directory "*"

FROM base as openblas
# Install openblas
ARG OPENBLAS_VERSION
ADD ./common/install_openblas.sh install_openblas.sh
RUN bash ./install_openblas.sh && rm install_openblas.sh

FROM base as final

# remove unnecessary python versions
# remove unncessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6

@ -60,7 +60,7 @@ RUN bash ./install_openssl.sh && rm install_openssl.sh
ENV SSL_CERT_FILE=/opt/_internal/certs.pem

FROM openssl as final
# remove unnecessary python versions
# remove unncessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6

@ -120,19 +120,15 @@ RUN python3 -mpip install cmake==3.28.0
# so just build it from upstream repository.
# h5py is dependency of onnxruntime_training.
# h5py==3.11.0 builds with hdf5-devel 1.10.5 from repository.
# h5py 3.11.0 doesn't build with numpy >= 2.3.0.
# install newest flatbuffers version first:
# for some reason old version is getting pulled in otherwise.
# packaging package is required for onnxruntime wheel build.
RUN pip3 install flatbuffers && \
pip3 install cython 'pkgconfig>=1.5.5' 'setuptools>=77' 'numpy<2.3.0' && \
pip3 install --no-build-isolation h5py==3.11.0 && \
pip3 install h5py==3.11.0 && \
pip3 install packaging && \
git clone https://github.com/microsoft/onnxruntime && \
cd onnxruntime && git checkout v1.21.0 && \
git submodule update --init --recursive && \
wget https://github.com/microsoft/onnxruntime/commit/f57db79743c4d1a3553aa05cf95bcd10966030e6.patch && \
patch -p1 < f57db79743c4d1a3553aa05cf95bcd10966030e6.patch && \
./build.sh --config Release --parallel 0 --enable_pybind \
--build_wheel --enable_training --enable_training_apis \
--enable_training_ops --skip_tests --allow_running_as_root \

@ -27,7 +27,6 @@ fi

MANY_LINUX_VERSION=${MANY_LINUX_VERSION:-}
DOCKERFILE_SUFFIX=${DOCKERFILE_SUFFIX:-}
OPENBLAS_VERSION=${OPENBLAS_VERSION:-}

case ${image} in
manylinux2_28-builder:cpu)
@ -41,7 +40,6 @@ case ${image} in
GPU_IMAGE=arm64v8/almalinux:8
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=13 --build-arg NINJA_VERSION=1.12.1"
MANY_LINUX_VERSION="2_28_aarch64"
OPENBLAS_VERSION="v0.3.30"
;;
manylinuxcxx11-abi-builder:cpu-cxx11-abi)
TARGET=final
@ -75,10 +73,6 @@ case ${image} in
DOCKERFILE_SUFFIX="_cuda_aarch64"
;;
manylinux2_28-builder:rocm*)
# we want the patch version of 6.4 instead
if [[ $(ver $GPU_ARCH_VERSION) -eq $(ver 6.4) ]]; then
GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
fi
TARGET=rocm_final
MANY_LINUX_VERSION="2_28"
DEVTOOLSET_VERSION="11"
@ -115,7 +109,6 @@ tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
DOCKER_BUILDKIT=1 docker build \
${DOCKER_GPU_BUILD_ARG} \
--build-arg "GPU_IMAGE=${GPU_IMAGE}" \
--build-arg "OPENBLAS_VERSION=${OPENBLAS_VERSION}" \
--target "${TARGET}" \
-t "${tmp_tag}" \
$@ \

@ -16,7 +16,6 @@ click
#test that import:

coremltools==5.0b5 ; python_version < "3.12"
coremltools==8.3 ; python_version == "3.12"
#Description: Apple framework for ML integration
#Pinned versions: 5.0b5
#test that import:
@ -42,15 +41,18 @@ fbscribelogger==0.1.7
#Pinned versions: 0.1.6
#test that import:

flatbuffers==24.12.23
flatbuffers==2.0 ; platform_machine != "s390x"
#Description: cross platform serialization library
#Pinned versions: 24.12.23
#Pinned versions: 2.0
#test that import:

flatbuffers ; platform_machine == "s390x"
#Description: cross platform serialization library; Newer version is required on s390x for new python version

hypothesis==5.35.1
# Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136
#Description: advanced library for generating parametrized tests
#Pinned versions: 5.35.1
#Pinned versions: 3.44.6, 4.53.2
#test that import: test_xnnpack_integration.py, test_pruning_op.py, test_nn.py

junitparser==2.1.1
@ -63,12 +65,10 @@ lark==0.12.0
#Pinned versions: 0.12.0
#test that import:

librosa>=0.6.2 ; python_version < "3.11" and platform_machine != "s390x"
librosa==0.10.2 ; python_version == "3.12" and platform_machine != "s390x"
librosa>=0.6.2 ; python_version < "3.11"
#Description: A python package for music and audio analysis
#Pinned versions: >=0.6.2
#test that import: test_spectral_ops.py
#librosa depends on numba; disable it for s390x while numba is disabled too

#mkl #this breaks linux-bionic-rocm4.5-py3.7
#Description: Intel oneAPI Math Kernel Library
@ -93,10 +93,10 @@ librosa==0.10.2 ; python_version == "3.12" and platform_machine != "s390x"
#Pinned versions:
#test that import:

mypy==1.16.0
mypy==1.15.0
# Pin MyPy version because new errors are likely to appear with each release
#Description: linter
#Pinned versions: 1.16.0
#Pinned versions: 1.14.0
#test that import: test_typing.py, test_type_hints.py

networkx==2.8.8
@ -111,15 +111,13 @@ ninja==1.11.1.3
#Pinned versions: 1.11.1.3
#test that import: run_test.py, test_cpp_extensions_aot.py,test_determination.py

numba==0.49.0 ; python_version < "3.9" and platform_machine != "s390x"
numba==0.55.2 ; python_version == "3.9" and platform_machine != "s390x"
numba==0.55.2 ; python_version == "3.10" and platform_machine != "s390x"
numba==0.60.0 ; python_version == "3.12" and platform_machine != "s390x"
numba==0.49.0 ; python_version < "3.9"
numba==0.55.2 ; python_version == "3.9"
numba==0.55.2 ; python_version == "3.10"
#Description: Just-In-Time Compiler for Numerical Functions
#Pinned versions: 0.54.1, 0.49.0, <=0.49.1
#test that import: test_numba_integration.py
#For numba issue see https://github.com/pytorch/pytorch/issues/51511
#Need release > 0.61.2 for s390x due to https://github.com/numba/numba/pull/10073

#numpy
#Description: Provides N-dimensional arrays and linear algebra
@ -223,9 +221,9 @@ pygments==2.15.0
#Pinned versions: 2.12.0
#test that import: the doctests

#pyyaml
#PyYAML
#Description: data serialization format
#Pinned versions: 6.0.2
#Pinned versions:
#test that import:

#requests
@ -235,7 +233,7 @@ pygments==2.15.0

#rich
#Description: rich text and beautiful formatting in the terminal
#Pinned versions: 14.1.0
#Pinned versions: 10.9.0
#test that import:

scikit-image==0.19.3 ; python_version < "3.10"
@ -309,7 +307,7 @@ pytest-cpp==2.3.0
#Pinned versions: 2.3.0
#test that import:

z3-solver==4.15.1.0 ; platform_machine != "s390x"
z3-solver==4.12.6.0
#Description: The Z3 Theorem Prover Project
#Pinned versions:
#test that import:
@ -344,7 +342,7 @@ onnx==1.18.0
#Pinned versions:
#test that import:

onnxscript==0.3.1
onnxscript==0.2.6
#Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal
#Pinned versions:
#test that import:
@ -363,11 +361,12 @@ pwlf==2.2.1
#Pinned versions: 2.2.1
#test that import: test_sac_estimator.py


# To build PyTorch itself
pyyaml
astunparse
PyYAML
pyzstd
setuptools>=70.1.0
six
setuptools

scons==4.5.2 ; platform_machine == "aarch64"

@ -383,16 +382,3 @@ dataclasses_json==0.6.7

cmake==4.0.0
#Description: required for building

tlparse==0.3.30
#Description: required for log parsing

cuda-bindings>=12.0,<13.0 ; platform_machine != "s390x"
#Description: required for testing CUDAGraph::raw_cuda_graph(). See https://nvidia.github.io/cuda-python/cuda-bindings/latest/support.html for how this version was chosen. Note "Any fix in the latest bindings would be backported to the prior major version" means that only the newest version of cuda-bindings will get fixes. Depending on the latest version of 12.x is okay because all 12.y versions will be supported via "CUDA minor version compatibility". Pytorch builds against 13.z versions of cuda toolkit work with 12.x versions of cuda-bindings as well because newer drivers work with old toolkits.
#test that import: test_cuda.py

setuptools-git-versioning==2.1.0
scikit-build==0.18.1
pyre-extensions==0.0.32
tabulate==0.9.0
#Description: These package are needed to build FBGEMM and torchrec on PyTorch CI

@ -1,11 +1,11 @@
sphinx==5.3.0
#Description: This is used to generate PyTorch docs
#Pinned versions: 5.3.0
-e git+https://github.com/pytorch/pytorch_sphinx_theme.git@722b7e6f9ca512fcc526ad07d62b3d28c50bb6cd#egg=pytorch_sphinx_theme2
-e git+https://github.com/pytorch/pytorch_sphinx_theme.git@pytorch_sphinx_theme2#egg=pytorch_sphinx_theme2

# TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering
# but it doesn't seem to work and hangs around idly. The initial thought that it is probably
# something related to Docker setup. We can investigate this later.
# but it doesn't seem to work and hangs around idly. The initial thought is probably
# something related to Docker setup. We can investigate this later

sphinxcontrib.katex==0.8.6
#Description: This is used to generate PyTorch docs
@ -19,10 +19,9 @@ sphinx_sitemap==2.6.0
#Description: This is used to generate sitemap for PyTorch docs
#Pinned versions: 2.6.0

matplotlib==3.5.3 ; python_version < "3.13"
matplotlib==3.6.3 ; python_version >= "3.13"
matplotlib==3.5.3
#Description: This is used to generate PyTorch docs
#Pinned versions: 3.6.3 if python > 3.12. Otherwise 3.5.3.
#Pinned versions: 3.5.3

tensorboard==2.13.0 ; python_version < "3.13"
tensorboard==2.18.0 ; python_version >= "3.13"
@ -50,8 +49,8 @@ IPython==8.12.0
#Pinned versions: 8.12.0

myst-nb==0.17.2
#Description: This is used to generate PyTorch functorch and torch.compile docs.
#Pinned versions: 0.17.2
#Description: This is used to generate PyTorch functorch docs
#Pinned versions: 0.13.2

# The following are required to build torch.distributed.elastic.rendezvous.etcd* docs
python-etcd==0.4.5

@ -1 +1 @@
3.4.0
3.3.1

@ -1 +0,0 @@
3.4.0
170
.ci/docker/ubuntu-cuda/Dockerfile
Normal file
@ -0,0 +1,170 @@
ARG UBUNTU_VERSION
ARG CUDA_VERSION
ARG IMAGE_NAME

FROM ${IMAGE_NAME} as base

ARG UBUNTU_VERSION
ARG CUDA_VERSION

ENV DEBIAN_FRONTEND noninteractive

# Install common dependencies (so that this step can be cached separately)
COPY ./common/install_base.sh install_base.sh
RUN bash ./install_base.sh && rm install_base.sh

# Install user
COPY ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh

# Install katex
ARG KATEX
COPY ./common/install_docs_reqs.sh install_docs_reqs.sh
RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh

# Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
COPY ./common/install_conda.sh install_conda.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ./common/install_magma_conda.sh install_magma_conda.sh
RUN bash ./install_conda.sh && rm install_conda.sh install_magma_conda.sh common_utils.sh /opt/conda/requirements-ci.txt

# Install gcc
ARG GCC_VERSION
COPY ./common/install_gcc.sh install_gcc.sh
RUN bash ./install_gcc.sh && rm install_gcc.sh

# Install clang
ARG CLANG_VERSION
COPY ./common/install_clang.sh install_clang.sh
RUN bash ./install_clang.sh && rm install_clang.sh

# (optional) Install vision packages like OpenCV
ARG VISION
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
RUN rm install_vision.sh cache_vision_models.sh common_utils.sh
ENV INSTALLED_VISION ${VISION}

# (optional) Install UCC
ARG UCX_COMMIT
ARG UCC_COMMIT
ENV UCX_COMMIT $UCX_COMMIT
ENV UCC_COMMIT $UCC_COMMIT
ENV UCX_HOME /usr
ENV UCC_HOME /usr
ADD ./common/install_ucc.sh install_ucc.sh
RUN if [ -n "${UCX_COMMIT}" ] && [ -n "${UCC_COMMIT}" ]; then bash ./install_ucc.sh; fi
RUN rm install_ucc.sh

COPY ./common/install_openssl.sh install_openssl.sh
ENV OPENSSL_ROOT_DIR /opt/openssl
RUN bash ./install_openssl.sh
ENV OPENSSL_DIR /opt/openssl

ARG INDUCTOR_BENCHMARKS
ARG ANACONDA_PYTHON_VERSION
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/huggingface.txt huggingface.txt
COPY ci_commit_pins/timm.txt timm.txt
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt

ARG TRITON

FROM base as triton-builder
# Install triton, this needs to be done before sccache because the latter will
# try to reach out to S3, which docker build runners don't have access
COPY ./common/install_triton.sh install_triton.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/triton.txt triton.txt
COPY triton_version.txt triton_version.txt
RUN bash ./install_triton.sh

FROM base as final
COPY --from=triton-builder /opt/triton /opt/triton
RUN if [ -n "${TRITON}" ]; then pip install /opt/triton/*.whl; chown -R jenkins:jenkins /opt/conda; fi
RUN rm -rf /opt/triton

ARG HALIDE
# Build and install halide
COPY ./common/install_halide.sh install_halide.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/halide.txt halide.txt
RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi
RUN rm install_halide.sh common_utils.sh halide.txt

# Install ccache/sccache (do this last, so we get priority in PATH)
COPY ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
# See https://github.com/pytorch/pytorch/issues/82174
# TODO(sdym@fb.com):
# check if this is needed after full off Xenial migration
ENV CARGO_NET_GIT_FETCH_WITH_CLI true
RUN bash ./install_cache.sh && rm install_cache.sh
ENV CMAKE_CUDA_COMPILER_LAUNCHER=/opt/cache/bin/sccache

# Add jni.h for java host build
COPY ./common/install_jni.sh install_jni.sh
COPY ./java/jni.h jni.h
RUN bash ./install_jni.sh && rm install_jni.sh

# Install Open MPI for CUDA
COPY ./common/install_openmpi.sh install_openmpi.sh
RUN if [ -n "${CUDA_VERSION}" ]; then bash install_openmpi.sh; fi
RUN rm install_openmpi.sh

# Include BUILD_ENVIRONMENT environment variable in image
ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}

# AWS specific CUDA build guidance
ENV TORCH_CUDA_ARCH_LIST Maxwell
ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all"
ENV CUDA_PATH /usr/local/cuda

# Install LLVM dev version (Defined in the pytorch/builder github repository)
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm

# Install CUDNN
ARG CUDNN_VERSION
ARG CUDA_VERSION
COPY ./common/install_cudnn.sh install_cudnn.sh
RUN if [ -n "${CUDNN_VERSION}" ]; then bash install_cudnn.sh; fi
RUN rm install_cudnn.sh

# Install CUSPARSELT
ARG CUDA_VERSION
COPY ./common/install_cusparselt.sh install_cusparselt.sh
RUN bash install_cusparselt.sh
RUN rm install_cusparselt.sh

# Install NCCL
ARG CUDA_VERSION
COPY ./common/install_nccl.sh install_nccl.sh
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
RUN bash install_nccl.sh
RUN rm install_nccl.sh /ci_commit_pins/nccl-cu*
ENV USE_SYSTEM_NCCL=1
ENV NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
ENV NCCL_LIB_DIR="/usr/local/cuda/lib64/"

# Install CUDSS
ARG CUDA_VERSION
COPY ./common/install_cudss.sh install_cudss.sh
RUN bash install_cudss.sh
RUN rm install_cudss.sh

# Delete /usr/local/cuda-11.X/cuda-11.X symlinks
RUN if [ -h /usr/local/cuda-11.6/cuda-11.6 ]; then rm /usr/local/cuda-11.6/cuda-11.6; fi
RUN if [ -h /usr/local/cuda-11.7/cuda-11.7 ]; then rm /usr/local/cuda-11.7/cuda-11.7; fi
RUN if [ -h /usr/local/cuda-12.1/cuda-12.1 ]; then rm /usr/local/cuda-12.1/cuda-12.1; fi
RUN if [ -h /usr/local/cuda-12.4/cuda-12.4 ]; then rm /usr/local/cuda-12.4/cuda-12.4; fi

USER jenkins
CMD ["bash"]
@ -25,7 +25,6 @@ RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
ARG BUILD_ENVIRONMENT
|
||||
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
|
||||
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
|
||||
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
@ -98,9 +97,8 @@ COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/huggingface.txt huggingface.txt
|
||||
COPY ci_commit_pins/timm.txt timm.txt
|
||||
COPY ci_commit_pins/torchbench.txt torchbench.txt
|
||||
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
|
||||
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt torchbench.txt
|
||||
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt
|
||||
|
||||
# (optional) Install non-default Ninja version
|
||||
ARG NINJA_VERSION
|
||||
|
||||
@ -72,7 +72,7 @@ ARG TRITON
|
||||
COPY ./common/install_triton.sh install_triton.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/triton-xpu.txt triton-xpu.txt
|
||||
COPY triton_xpu_version.txt triton_version.txt
|
||||
COPY triton_version.txt triton_version.txt
|
||||
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
|
||||
RUN rm install_triton.sh common_utils.sh triton-xpu.txt triton_version.txt
|
||||
|
||||
|
||||
@ -98,9 +98,8 @@ COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/huggingface.txt huggingface.txt
|
||||
COPY ci_commit_pins/timm.txt timm.txt
|
||||
COPY ci_commit_pins/torchbench.txt torchbench.txt
|
||||
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
|
||||
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt torchbench.txt
|
||||
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt
|
||||
|
||||
ARG TRITON
|
||||
ARG TRITON_CPU
|
||||
@ -148,12 +147,6 @@ RUN if [ -n "${ACL}" ]; then bash ./install_acl.sh; fi
|
||||
RUN rm install_acl.sh
|
||||
ENV INSTALLED_ACL ${ACL}
|
||||
|
||||
ARG OPENBLAS
|
||||
COPY ./common/install_openblas.sh install_openblas.sh
|
||||
RUN if [ -n "${OPENBLAS}" ]; then bash ./install_openblas.sh; fi
|
||||
RUN rm install_openblas.sh
|
||||
ENV INSTALLED_OPENBLAS ${OPENBLAS}
|
||||
|
||||
# Install ccache/sccache (do this last, so we get priority in PATH)
|
||||
ARG SKIP_SCCACHE_INSTALL
|
||||
COPY ./common/install_cache.sh install_cache.sh
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
SHELL=/usr/bin/env bash
|
||||
|
||||
DOCKER_CMD ?= docker
|
||||
DESIRED_CUDA ?= 12.8
|
||||
DESIRED_CUDA ?= 11.8
|
||||
DESIRED_CUDA_SHORT = $(subst .,,$(DESIRED_CUDA))
|
||||
PACKAGE_NAME = magma-cuda
|
||||
CUDA_ARCH_LIST ?= -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90
|
||||
@ -16,21 +16,15 @@ DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \
|
||||
magma/build_magma.sh
|
||||
|
||||
.PHONY: all
|
||||
all: magma-cuda129
|
||||
all: magma-cuda128
|
||||
all: magma-cuda126
|
||||
all: magma-cuda118
|
||||
|
||||
.PHONY:
|
||||
clean:
|
||||
$(RM) -r magma-*
|
||||
$(RM) -r output
|
||||
|
||||
.PHONY: magma-cuda129
|
||||
magma-cuda129: DESIRED_CUDA := 12.9
|
||||
magma-cuda129: CUDA_ARCH_LIST += -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120
|
||||
magma-cuda129:
|
||||
$(DOCKER_RUN)
|
||||
|
||||
.PHONY: magma-cuda128
|
||||
magma-cuda128: DESIRED_CUDA := 12.8
|
||||
magma-cuda128: CUDA_ARCH_LIST += -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120
|
||||
@ -41,3 +35,9 @@ magma-cuda128:
|
||||
magma-cuda126: DESIRED_CUDA := 12.6
|
||||
magma-cuda126:
|
||||
$(DOCKER_RUN)
|
||||
|
||||
.PHONY: magma-cuda118
|
||||
magma-cuda118: DESIRED_CUDA := 11.8
|
||||
magma-cuda118: CUDA_ARCH_LIST += -gencode arch=compute_37,code=sm_37
|
||||
magma-cuda118:
|
||||
$(DOCKER_RUN)
|
||||
|
||||
@ -31,6 +31,7 @@ elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then
|
||||
# Comment out nvidia repositories to prevent them from getting apt-get updated, see https://github.com/pytorch/pytorch/issues/74968
|
||||
# shellcheck disable=SC2046
|
||||
sed -i 's/.*nvidia.*/# &/' $(find /etc/apt/ -type f -name "*.list")
|
||||
|
||||
retry apt-get update
|
||||
retry apt-get -y install zip openssl
|
||||
else
|
||||
@ -97,7 +98,6 @@ if [[ -z "$PYTORCH_ROOT" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
pushd "$PYTORCH_ROOT"
|
||||
retry pip install -qUr requirements-build.txt
|
||||
python setup.py clean
|
||||
retry pip install -qr requirements.txt
|
||||
case ${DESIRED_PYTHON} in
|
||||
@ -138,11 +138,28 @@ fi
|
||||
|
||||
echo "Calling setup.py bdist at $(date)"
|
||||
|
||||
time CMAKE_ARGS=${CMAKE_ARGS[@]} \
|
||||
EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \
|
||||
if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
|
||||
echo "Calling setup.py bdist_wheel for split build (BUILD_LIBTORCH_WHL)"
|
||||
time EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \
|
||||
BUILD_LIBTORCH_WHL=1 BUILD_PYTHON_ONLY=0 \
|
||||
BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \
|
||||
USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \
|
||||
python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR
|
||||
echo "Finished setup.py bdist_wheel for split build (BUILD_LIBTORCH_WHL)"
|
||||
echo "Calling setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)"
|
||||
time EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \
|
||||
BUILD_LIBTORCH_WHL=0 BUILD_PYTHON_ONLY=1 \
|
||||
BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \
|
||||
USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \
|
||||
python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR --cmake
|
||||
echo "Finished setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)"
|
||||
else
|
||||
time CMAKE_ARGS=${CMAKE_ARGS[@]} \
|
||||
EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \
|
||||
BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \
|
||||
USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \
|
||||
python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR
|
||||
fi
|
||||
echo "Finished setup.py bdist at $(date)"
|
||||
|
||||
# Build libtorch packages
|
||||
@ -255,6 +272,10 @@ ls /tmp/$WHEELHOUSE_DIR
|
||||
mkdir -p "/$WHEELHOUSE_DIR"
|
||||
mv /tmp/$WHEELHOUSE_DIR/torch*linux*.whl /$WHEELHOUSE_DIR/
|
||||
|
||||
if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
|
||||
mv /tmp/$WHEELHOUSE_DIR/torch_no_python*.whl /$WHEELHOUSE_DIR/ || true
|
||||
fi
|
||||
|
||||
if [[ -n "$BUILD_PYTHONLESS" ]]; then
|
||||
mkdir -p /$LIBTORCH_HOUSE_DIR
|
||||
mv /tmp/$LIBTORCH_HOUSE_DIR/*.zip /$LIBTORCH_HOUSE_DIR
|
||||
@ -431,8 +452,16 @@ if [[ -z "$BUILD_PYTHONLESS" ]]; then
|
||||
pushd $PYTORCH_ROOT/test
|
||||
|
||||
# Install the wheel for this Python version
|
||||
if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
|
||||
pip uninstall -y "$TORCH_NO_PYTHON_PACKAGE_NAME" || true
|
||||
fi
|
||||
|
||||
pip uninstall -y "$TORCH_PACKAGE_NAME"
|
||||
|
||||
if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
|
||||
pip install "$TORCH_NO_PYTHON_PACKAGE_NAME" --no-index -f /$WHEELHOUSE_DIR --no-dependencies -v
|
||||
fi
|
||||
|
||||
pip install "$TORCH_PACKAGE_NAME" --no-index -f /$WHEELHOUSE_DIR --no-dependencies -v
|
||||
|
||||
# Print info on the libraries installed in this wheel
|
||||
|
||||
@ -15,9 +15,6 @@ export INSTALL_TEST=0 # dont install test binaries into site-packages
|
||||
export USE_CUPTI_SO=0
|
||||
export USE_CUSPARSELT=${USE_CUSPARSELT:-1} # Enable if not disabled by libtorch build
|
||||
export USE_CUFILE=${USE_CUFILE:-1}
|
||||
export USE_SYSTEM_NCCL=1
|
||||
export NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
|
||||
export NCCL_LIB_DIR="/usr/local/cuda/lib64/"
|
||||
|
||||
# Keep an array of cmake variables to add to
|
||||
if [[ -z "$CMAKE_ARGS" ]]; then
|
||||
@ -51,23 +48,20 @@ else
|
||||
fi
|
||||
|
||||
cuda_version_nodot=$(echo $CUDA_VERSION | tr -d '.')
|
||||
EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")
|
||||
|
||||
TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0;7.5;8.0;8.6"
|
||||
case ${CUDA_VERSION} in
|
||||
#removing sm_50-sm_60 as these architectures are deprecated in CUDA 12.8/9 and will be removed in future releases
|
||||
#however we would like to keep sm_70 architecture see: https://github.com/pytorch/pytorch/issues/157517
|
||||
12.8)
|
||||
TORCH_CUDA_ARCH_LIST="7.0;7.5;8.0;8.6;9.0;10.0;12.0"
|
||||
;;
|
||||
12.9)
|
||||
TORCH_CUDA_ARCH_LIST="7.0;7.5;8.0;8.6;9.0;10.0;12.0+PTX"
|
||||
# WAR to resolve the ld error in libtorch build with CUDA 12.9
|
||||
if [[ "$PACKAGE_TYPE" == "libtorch" ]]; then
|
||||
TORCH_CUDA_ARCH_LIST="7.5;8.0;9.0;10.0;12.0+PTX"
|
||||
fi
|
||||
TORCH_CUDA_ARCH_LIST="7.5;8.0;8.6;9.0;10.0;12.0+PTX" #removing sm_50-sm_70 as these architectures are deprecated in CUDA 12.8 and will be removed in future releases
|
||||
EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")
|
||||
;;
|
||||
12.6)
|
||||
TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0;7.5;8.0;8.6;9.0"
|
||||
TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};9.0"
|
||||
EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")
|
||||
;;
|
||||
11.8)
|
||||
TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};3.7;9.0"
|
||||
EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")
|
||||
;;
|
||||
*)
|
||||
echo "unknown cuda version $CUDA_VERSION"
|
||||
@ -110,11 +104,12 @@ DEPS_SONAME=(
|
||||
)
|
||||
|
||||
|
||||
# CUDA_VERSION 12.6, 12.8, 12.9
|
||||
# CUDA_VERSION 12.6, 12.8
|
||||
if [[ $CUDA_VERSION == 12* ]]; then
|
||||
export USE_STATIC_CUDNN=0
|
||||
# Try parallelizing nvcc as well
|
||||
export TORCH_NVCC_FLAGS="-Xfatbin -compress-all --threads 2"
|
||||
|
||||
if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then
|
||||
echo "Bundling with cudnn and cublas."
|
||||
DEPS_LIST+=(
|
||||
@ -130,12 +125,11 @@ if [[ $CUDA_VERSION == 12* ]]; then
|
||||
"/usr/local/cuda/lib64/libcublasLt.so.12"
|
||||
"/usr/local/cuda/lib64/libcusparseLt.so.0"
|
||||
"/usr/local/cuda/lib64/libcudart.so.12"
|
||||
"/usr/local/cuda/lib64/libnvToolsExt.so.1"
|
||||
"/usr/local/cuda/lib64/libnvrtc.so.12"
|
||||
"/usr/local/cuda/lib64/libnvrtc-builtins.so"
|
||||
"/usr/local/cuda/lib64/libcufile.so.0"
|
||||
"/usr/local/cuda/lib64/libcufile_rdma.so.1"
|
||||
"/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12"
|
||||
"/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so"
|
||||
)
|
||||
DEPS_SONAME+=(
|
||||
"libcudnn_adv.so.9"
|
||||
@ -150,18 +144,12 @@ if [[ $CUDA_VERSION == 12* ]]; then
|
||||
"libcublasLt.so.12"
|
||||
"libcusparseLt.so.0"
|
||||
"libcudart.so.12"
|
||||
"libnvToolsExt.so.1"
|
||||
"libnvrtc.so.12"
|
||||
"libnvrtc-builtins.so"
|
||||
"libcufile.so.0"
|
||||
"libcufile_rdma.so.1"
|
||||
"libcupti.so.12"
|
||||
"libnvperf_host.so"
|
||||
)
|
||||
# Add libnvToolsExt only if CUDA version is not 12.9
|
||||
if [[ $CUDA_VERSION != 12.9* ]]; then
|
||||
DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1")
|
||||
DEPS_SONAME+=("libnvToolsExt.so.1")
|
||||
fi
|
||||
else
|
||||
echo "Using nvidia libs from pypi."
|
||||
CUDA_RPATHS=(
|
||||
@ -174,10 +162,8 @@ if [[ $CUDA_VERSION == 12* ]]; then
|
||||
'$ORIGIN/../../nvidia/curand/lib'
|
||||
'$ORIGIN/../../nvidia/cusolver/lib'
|
||||
'$ORIGIN/../../nvidia/cusparse/lib'
|
||||
'$ORIGIN/../../nvidia/cusparselt/lib'
|
||||
'$ORIGIN/../../cusparselt/lib'
|
||||
'$ORIGIN/../../nvidia/nccl/lib'
|
||||
'$ORIGIN/../../nvidia/nvshmem/lib'
|
||||
'$ORIGIN/../../nvidia/nvtx/lib'
|
||||
'$ORIGIN/../../nvidia/cufile/lib'
|
||||
)
|
||||
@ -186,9 +172,94 @@ if [[ $CUDA_VERSION == 12* ]]; then
|
||||
export LIB_SO_RPATH=$CUDA_RPATHS':$ORIGIN'
|
||||
export FORCE_RPATH="--force-rpath"
|
||||
export USE_STATIC_NCCL=0
|
||||
export USE_SYSTEM_NCCL=1
|
||||
export ATEN_STATIC_CUDA=0
|
||||
export USE_CUDA_STATIC_LINK=0
|
||||
export USE_CUPTI_SO=1
|
||||
export NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
|
||||
export NCCL_LIB_DIR="/usr/local/cuda/lib64/"
|
||||
fi
|
||||
elif [[ $CUDA_VERSION == "11.8" ]]; then
|
||||
export USE_STATIC_CUDNN=0
|
||||
# Turn USE_CUFILE off for CUDA 11.8 since nvidia-cufile-cu11 and 1.9.0.20 are
|
||||
# not available in PYPI
|
||||
export USE_CUFILE=0
|
||||
# Try parallelizing nvcc as well
|
||||
export TORCH_NVCC_FLAGS="-Xfatbin -compress-all --threads 2"
|
||||
# Bundle ptxas into the wheel, see https://github.com/pytorch/pytorch/pull/119750
|
||||
export BUILD_BUNDLE_PTXAS=1
|
||||
|
||||
# CUDA 11.8 have to ship the libcusparseLt.so.0 with the binary
|
||||
# since nvidia-cusparselt-cu11 is not available in PYPI
|
||||
if [[ $USE_CUSPARSELT == "1" ]]; then
|
||||
DEPS_SONAME+=(
|
||||
"libcusparseLt.so.0"
|
||||
)
|
||||
DEPS_LIST+=(
|
||||
"/usr/local/cuda/lib64/libcusparseLt.so.0"
|
||||
)
|
||||
fi
|
||||
|
||||
if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then
|
||||
echo "Bundling with cudnn and cublas."
|
||||
DEPS_LIST+=(
|
||||
"/usr/local/cuda/lib64/libcudnn_adv.so.9"
|
||||
"/usr/local/cuda/lib64/libcudnn_cnn.so.9"
|
||||
"/usr/local/cuda/lib64/libcudnn_graph.so.9"
|
||||
"/usr/local/cuda/lib64/libcudnn_ops.so.9"
|
||||
"/usr/local/cuda/lib64/libcudnn_engines_runtime_compiled.so.9"
|
||||
"/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9"
|
||||
"/usr/local/cuda/lib64/libcudnn_heuristic.so.9"
|
||||
"/usr/local/cuda/lib64/libcudnn.so.9"
|
||||
"/usr/local/cuda/lib64/libcublas.so.11"
|
||||
"/usr/local/cuda/lib64/libcublasLt.so.11"
|
||||
"/usr/local/cuda/lib64/libcudart.so.11.0"
|
||||
"/usr/local/cuda/lib64/libnvToolsExt.so.1"
|
||||
"/usr/local/cuda/lib64/libnvrtc.so.11.2" # this is not a mistake, it links to more specific cuda version
|
||||
"/usr/local/cuda/lib64/libnvrtc-builtins.so.11.8"
|
||||
)
|
||||
DEPS_SONAME+=(
|
||||
"libcudnn_adv.so.9"
|
||||
"libcudnn_cnn.so.9"
|
||||
"libcudnn_graph.so.9"
|
||||
"libcudnn_ops.so.9"
|
||||
"libcudnn_engines_runtime_compiled.so.9"
|
||||
"libcudnn_engines_precompiled.so.9"
|
||||
"libcudnn_heuristic.so.9"
|
||||
"libcudnn.so.9"
|
||||
"libcublas.so.11"
|
||||
"libcublasLt.so.11"
|
||||
"libcudart.so.11.0"
|
||||
"libnvToolsExt.so.1"
|
||||
"libnvrtc.so.11.2"
|
||||
"libnvrtc-builtins.so.11.8"
|
||||
)
|
||||
else
|
||||
echo "Using nvidia libs from pypi."
|
||||
CUDA_RPATHS=(
|
||||
'$ORIGIN/../../nvidia/cublas/lib'
|
||||
'$ORIGIN/../../nvidia/cuda_cupti/lib'
|
||||
'$ORIGIN/../../nvidia/cuda_nvrtc/lib'
|
||||
'$ORIGIN/../../nvidia/cuda_runtime/lib'
|
||||
'$ORIGIN/../../nvidia/cudnn/lib'
|
||||
'$ORIGIN/../../nvidia/cufft/lib'
|
||||
'$ORIGIN/../../nvidia/curand/lib'
|
||||
'$ORIGIN/../../nvidia/cusolver/lib'
|
||||
'$ORIGIN/../../nvidia/cusparse/lib'
|
||||
'$ORIGIN/../../nvidia/nccl/lib'
|
||||
'$ORIGIN/../../nvidia/nvtx/lib'
|
||||
)
|
||||
CUDA_RPATHS=$(IFS=: ; echo "${CUDA_RPATHS[*]}")
|
||||
export C_SO_RPATH=$CUDA_RPATHS':$ORIGIN:$ORIGIN/lib'
|
||||
export LIB_SO_RPATH=$CUDA_RPATHS':$ORIGIN'
|
||||
export FORCE_RPATH="--force-rpath"
|
||||
export USE_STATIC_NCCL=0
|
||||
export USE_SYSTEM_NCCL=1
|
||||
export ATEN_STATIC_CUDA=0
|
||||
export USE_CUDA_STATIC_LINK=0
|
||||
export USE_CUPTI_SO=1
|
||||
export NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
|
||||
export NCCL_LIB_DIR="/usr/local/cuda/lib64/"
|
||||
fi
|
||||
else
|
||||
echo "Unknown cuda version $CUDA_VERSION"
|
||||
|
||||
@ -92,7 +92,6 @@ if [[ -z "$PYTORCH_ROOT" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
pushd "$PYTORCH_ROOT"
|
||||
retry pip install -qUr requirements-build.txt
|
||||
python setup.py clean
|
||||
retry pip install -qr requirements.txt
|
||||
retry pip install -q numpy==2.0.1
|
||||
@ -104,7 +103,7 @@ if [[ "$DESIRED_CUDA" == *"rocm"* ]]; then
|
||||
export ROCclr_DIR=/opt/rocm/rocclr/lib/cmake/rocclr
|
||||
fi
|
||||
|
||||
echo "Calling 'python -m pip install .' at $(date)"
|
||||
echo "Calling setup.py install at $(date)"
|
||||
|
||||
if [[ $LIBTORCH_VARIANT = *"static"* ]]; then
|
||||
STATIC_CMAKE_FLAG="-DTORCH_STATIC=1"
|
||||
@ -120,7 +119,7 @@ fi
|
||||
# TODO: Remove this flag once https://github.com/pytorch/pytorch/issues/55952 is closed
|
||||
CFLAGS='-Wno-deprecated-declarations' \
|
||||
BUILD_LIBTORCH_CPU_WITH_DEBUG=1 \
|
||||
python -m pip install --no-build-isolation -v .
|
||||
python setup.py install
|
||||
|
||||
mkdir -p libtorch/{lib,bin,include,share}
|
||||
|
||||
|
||||
@ -95,7 +95,6 @@ ROCM_SO_FILES=(
|
||||
"libroctracer64.so"
|
||||
"libroctx64.so"
|
||||
"libhipblaslt.so"
|
||||
"libhipsparselt.so"
|
||||
"libhiprtc.so"
|
||||
)
|
||||
|
||||
@ -187,28 +186,20 @@ do
|
||||
OS_SO_FILES[${#OS_SO_FILES[@]}]=$file_name # Append lib to array
|
||||
done
|
||||
|
||||
ARCH=$(echo $PYTORCH_ROCM_ARCH | sed 's/;/|/g') # Replace ; separated arch list to bar for grep
|
||||
|
||||
# rocBLAS library files
|
||||
ROCBLAS_LIB_SRC=$ROCM_HOME/lib/rocblas/library
|
||||
ROCBLAS_LIB_DST=lib/rocblas/library
|
||||
ROCBLAS_ARCH_SPECIFIC_FILES=$(ls $ROCBLAS_LIB_SRC | grep -E $ARCH)
|
||||
ROCBLAS_OTHER_FILES=$(ls $ROCBLAS_LIB_SRC | grep -v gfx)
|
||||
ROCBLAS_LIB_FILES=($ROCBLAS_ARCH_SPECIFIC_FILES $ROCBLAS_OTHER_FILES)
|
||||
ARCH=$(echo $PYTORCH_ROCM_ARCH | sed 's/;/|/g') # Replace ; seperated arch list to bar for grep
|
||||
ARCH_SPECIFIC_FILES=$(ls $ROCBLAS_LIB_SRC | grep -E $ARCH)
|
||||
OTHER_FILES=$(ls $ROCBLAS_LIB_SRC | grep -v gfx)
|
||||
ROCBLAS_LIB_FILES=($ARCH_SPECIFIC_FILES $OTHER_FILES)
|
||||
|
||||
# hipblaslt library files
|
||||
HIPBLASLT_LIB_SRC=$ROCM_HOME/lib/hipblaslt/library
|
||||
HIPBLASLT_LIB_DST=lib/hipblaslt/library
|
||||
HIPBLASLT_ARCH_SPECIFIC_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -E $ARCH)
|
||||
HIPBLASLT_OTHER_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -v gfx)
|
||||
HIPBLASLT_LIB_FILES=($HIPBLASLT_ARCH_SPECIFIC_FILES $HIPBLASLT_OTHER_FILES)
|
||||
|
||||
# hipsparselt library files
|
||||
HIPSPARSELT_LIB_SRC=$ROCM_HOME/lib/hipsparselt/library
|
||||
HIPSPARSELT_LIB_DST=lib/hipsparselt/library
|
||||
HIPSPARSELT_ARCH_SPECIFIC_FILES=$(ls $HIPSPARSELT_LIB_SRC | grep -E $ARCH)
|
||||
#HIPSPARSELT_OTHER_FILES=$(ls $HIPSPARSELT_LIB_SRC | grep -v gfx)
|
||||
HIPSPARSELT_LIB_FILES=($HIPSPARSELT_ARCH_SPECIFIC_FILES $HIPSPARSELT_OTHER_FILES)
|
||||
ARCH_SPECIFIC_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -E $ARCH)
|
||||
OTHER_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -v gfx)
|
||||
HIPBLASLT_LIB_FILES=($ARCH_SPECIFIC_FILES $OTHER_FILES)
|
||||
|
||||
# ROCm library files
|
||||
ROCM_SO_PATHS=()
|
||||
@ -243,14 +234,12 @@ DEPS_SONAME=(
|
||||
DEPS_AUX_SRCLIST=(
|
||||
"${ROCBLAS_LIB_FILES[@]/#/$ROCBLAS_LIB_SRC/}"
|
||||
"${HIPBLASLT_LIB_FILES[@]/#/$HIPBLASLT_LIB_SRC/}"
|
||||
"${HIPSPARSELT_LIB_FILES[@]/#/$HIPSPARSELT_LIB_SRC/}"
|
||||
"/opt/amdgpu/share/libdrm/amdgpu.ids"
|
||||
)
|
||||
|
||||
DEPS_AUX_DSTLIST=(
|
||||
"${ROCBLAS_LIB_FILES[@]/#/$ROCBLAS_LIB_DST/}"
|
||||
"${HIPBLASLT_LIB_FILES[@]/#/$HIPBLASLT_LIB_DST/}"
|
||||
"${HIPSPARSELT_LIB_FILES[@]/#/$HIPSPARSELT_LIB_DST/}"
|
||||
"share/libdrm/amdgpu.ids"
|
||||
)
|
||||
|
||||
|
||||
@ -19,7 +19,7 @@ git config --global --add safe.directory /var/lib/jenkins/workspace
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *onnx* ]]; then
|
||||
# TODO: This can be removed later once vision is also part of the Docker image
|
||||
pip install -q --no-use-pep517 "git+https://github.com/pytorch/vision.git@$(cat .github/ci_commit_pins/vision.txt)"
|
||||
pip install -q --user --no-use-pep517 "git+https://github.com/pytorch/vision.git@$(cat .github/ci_commit_pins/vision.txt)"
|
||||
# JIT C++ extensions require ninja, so put it into PATH.
|
||||
export PATH="/var/lib/jenkins/.local/bin:$PATH"
|
||||
# NB: ONNX test is fast (~15m) so it's ok to retry it few more times to avoid any flaky issue, we
|
||||
|
||||
34
.ci/pytorch/build-mobile.sh
Executable file
@ -0,0 +1,34 @@
|
||||
#!/usr/bin/env bash
|
||||
# DO NOT ADD 'set -x' not to reveal CircleCI secret context environment variables
|
||||
set -eu -o pipefail
|
||||
|
||||
# This script uses linux host toolchain + mobile build options in order to
|
||||
# build & test mobile libtorch without having to setup Android/iOS
|
||||
# toolchain/simulator.
|
||||
|
||||
# shellcheck source=./common.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
|
||||
# shellcheck source=./common-build.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh"
|
||||
|
||||
# Install torch & torchvision - used to download & trace test model.
|
||||
# Ideally we should use the libtorch built on the PR so that backward
|
||||
# incompatible changes won't break this script - but it will significantly slow
|
||||
# down mobile CI jobs.
|
||||
# Here we install nightly instead of stable so that we have an option to
|
||||
# temporarily skip mobile CI jobs on BC-breaking PRs until they are in nightly.
|
||||
retry pip install --pre torch torchvision \
|
||||
-f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html \
|
||||
--progress-bar off
|
||||
|
||||
# Run end-to-end process of building mobile library, linking into the predictor
|
||||
# binary, and running forward pass with a real model.
|
||||
if [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-static* ]]; then
|
||||
TEST_CUSTOM_BUILD_STATIC=1 test/mobile/custom_build/build.sh
|
||||
elif [[ "$BUILD_ENVIRONMENT" == *-mobile-lightweight-dispatch* ]]; then
|
||||
test/mobile/lightweight_dispatch/build.sh
|
||||
else
|
||||
TEST_DEFAULT_BUILD=1 test/mobile/custom_build/build.sh
|
||||
fi
|
||||
|
||||
print_sccache_stats
|
||||
@ -11,6 +11,10 @@ source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
|
||||
# shellcheck source=./common-build.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh"
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *-mobile-*build* ]]; then
|
||||
exec "$(dirname "${BASH_SOURCE[0]}")/build-mobile.sh" "$@"
|
||||
fi
|
||||
|
||||
echo "Python version:"
|
||||
python --version
|
||||
|
||||
@ -23,12 +27,6 @@ cmake --version
|
||||
echo "Environment variables:"
|
||||
env
|
||||
|
||||
# The sccache wrapped version of nvcc gets put in /opt/cache/lib in docker since
|
||||
# there are some issues if it is always wrapped, so we need to add it to PATH
|
||||
# during CI builds.
|
||||
# https://github.com/pytorch/pytorch/blob/0b6c0898e6c352c8ea93daec854e704b41485375/.ci/docker/common/install_cache.sh#L97
|
||||
export PATH="/opt/cache/lib:$PATH"
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
|
||||
# Use jemalloc during compilation to mitigate https://github.com/pytorch/pytorch/issues/116289
|
||||
export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2
|
||||
@ -54,6 +52,12 @@ fi
|
||||
export USE_LLVM=/opt/llvm
|
||||
export LLVM_DIR=/opt/llvm/lib/cmake/llvm
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *executorch* ]]; then
|
||||
# To build test_edge_op_registration
|
||||
export BUILD_EXECUTORCH=ON
|
||||
export USE_CUDA=0
|
||||
fi
|
||||
|
||||
if ! which conda; then
|
||||
# In ROCm CIs, we are doing cross compilation on build machines with
|
||||
# intel cpu and later run tests on machines with amd cpu.
|
||||
@ -120,8 +124,26 @@ if [[ "$BUILD_ENVIRONMENT" == *libtorch* ]]; then
|
||||
fi
|
||||
|
||||
# Use special scripts for Android builds
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *-android* ]]; then
|
||||
export ANDROID_NDK=/opt/ndk
|
||||
build_args=()
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *-arm-v7a* ]]; then
|
||||
build_args+=("-DANDROID_ABI=armeabi-v7a")
|
||||
elif [[ "${BUILD_ENVIRONMENT}" == *-arm-v8a* ]]; then
|
||||
build_args+=("-DANDROID_ABI=arm64-v8a")
|
||||
elif [[ "${BUILD_ENVIRONMENT}" == *-x86_32* ]]; then
|
||||
build_args+=("-DANDROID_ABI=x86")
|
||||
elif [[ "${BUILD_ENVIRONMENT}" == *-x86_64* ]]; then
|
||||
build_args+=("-DANDROID_ABI=x86_64")
|
||||
fi
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *vulkan* ]]; then
|
||||
build_args+=("-DUSE_VULKAN=ON")
|
||||
fi
|
||||
build_args+=("-DUSE_LITE_INTERPRETER_PROFILER=OFF")
|
||||
exec ./scripts/build_android.sh "${build_args[@]}" "$@"
|
||||
fi
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *vulkan* ]]; then
|
||||
if [[ "$BUILD_ENVIRONMENT" != *android* && "$BUILD_ENVIRONMENT" == *vulkan* ]]; then
|
||||
export USE_VULKAN=1
|
||||
# shellcheck disable=SC1091
|
||||
source /var/lib/jenkins/vulkansdk/setup-env.sh
|
||||
@ -176,8 +198,10 @@ fi
|
||||
|
||||
# We only build FlashAttention files for CUDA 8.0+, and they require large amounts of
|
||||
# memory to build and will OOM
|
||||
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]] && echo "${TORCH_CUDA_ARCH_LIST}" | tr ' ' '\n' | sed 's/$/>= 8.0/' | bc | grep -q 1; then
|
||||
export BUILD_CUSTOM_STEP="ninja -C build flash_attention -j 2"
|
||||
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]] && [[ 1 -eq $(echo "${TORCH_CUDA_ARCH_LIST} >= 8.0" | bc) ]] && [ -z "$MAX_JOBS_OVERRIDE" ]; then
|
||||
echo "WARNING: FlashAttention files require large amounts of memory to build and will OOM"
|
||||
echo "Setting MAX_JOBS=(nproc-2)/3 to reduce memory usage"
|
||||
export MAX_JOBS="$(( $(nproc --ignore=2) / 3 ))"
|
||||
fi
|
||||
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *clang* ]]; then
|
||||
@ -203,7 +227,7 @@ if [[ "${BUILD_ENVIRONMENT}" == *-pch* ]]; then
|
||||
export USE_PRECOMPILED_HEADERS=1
|
||||
fi
|
||||
|
||||
if [[ "${BUILD_ENVIRONMENT}" != *cuda* ]]; then
|
||||
if [[ "${BUILD_ENVIRONMENT}" != *android* && "${BUILD_ENVIRONMENT}" != *cuda* ]]; then
|
||||
export BUILD_STATIC_RUNTIME_BENCHMARK=ON
|
||||
fi
|
||||
|
||||
@ -233,7 +257,6 @@ if [[ "$BUILD_ENVIRONMENT" == *-bazel-* ]]; then
|
||||
set -e -o pipefail
|
||||
|
||||
get_bazel
|
||||
python3 tools/optional_submodules.py checkout_eigen
|
||||
|
||||
# Leave 1 CPU free and use only up to 80% of memory to reduce the change of crashing
|
||||
# the runner
|
||||
@ -265,32 +288,25 @@ else
|
||||
|
||||
WERROR=1 python setup.py clean
|
||||
|
||||
WERROR=1 python setup.py bdist_wheel
|
||||
if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
|
||||
python3 tools/packaging/split_wheel.py bdist_wheel
|
||||
else
|
||||
WERROR=1 python setup.py bdist_wheel
|
||||
fi
|
||||
else
|
||||
python setup.py clean
|
||||
if [[ "$BUILD_ENVIRONMENT" == *xla* ]]; then
|
||||
source .ci/pytorch/install_cache_xla.sh
|
||||
fi
|
||||
python setup.py bdist_wheel
|
||||
if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
|
||||
echo "USE_SPLIT_BUILD cannot be used with xla or rocm"
|
||||
exit 1
|
||||
else
|
||||
python setup.py bdist_wheel
|
||||
fi
|
||||
fi
|
||||
pip_install_whl "$(echo dist/*.whl)"
|
||||
|
||||
if [[ "${BUILD_ADDITIONAL_PACKAGES:-}" == *vision* ]]; then
|
||||
install_torchvision
|
||||
fi
|
||||
|
||||
if [[ "${BUILD_ADDITIONAL_PACKAGES:-}" == *audio* ]]; then
|
||||
install_torchaudio
|
||||
fi
|
||||
|
||||
if [[ "${BUILD_ADDITIONAL_PACKAGES:-}" == *torchrec* || "${BUILD_ADDITIONAL_PACKAGES:-}" == *fbgemm* ]]; then
|
||||
install_torchrec_and_fbgemm
|
||||
fi
|
||||
|
||||
if [[ "${BUILD_ADDITIONAL_PACKAGES:-}" == *torchao* ]]; then
|
||||
install_torchao
|
||||
fi
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
|
||||
echo "Checking that xpu is compiled"
|
||||
pushd dist/
|
||||
@ -378,8 +394,10 @@ else
|
||||
# This is an attempt to mitigate flaky libtorch build OOM error. By default, the build parallelization
|
||||
# is set to be the number of CPU minus 2. So, let's try a more conservative value here. A 4xlarge has
|
||||
# 16 CPUs
|
||||
MAX_JOBS=$(nproc --ignore=4)
|
||||
export MAX_JOBS
|
||||
if [ -z "$MAX_JOBS_OVERRIDE" ]; then
|
||||
MAX_JOBS=$(nproc --ignore=4)
|
||||
export MAX_JOBS
|
||||
fi
|
||||
|
||||
# NB: Install outside of source directory (at the same level as the root
|
||||
# pytorch folder) so that it doesn't get cleaned away prior to docker push.
|
||||
|
||||
@ -313,7 +313,7 @@ if [[ "$(uname)" == 'Linux' && "$PACKAGE_TYPE" == 'manywheel' ]]; then
|
||||
# Please see issue for reference: https://github.com/pytorch/pytorch/issues/152426
|
||||
if [[ "$(uname -m)" == "s390x" ]]; then
|
||||
cxx_abi="19"
|
||||
elif [[ "$DESIRED_CUDA" != 'xpu' && "$DESIRED_CUDA" != 'rocm'* ]]; then
|
||||
elif [[ "$DESIRED_CUDA" != 'cu118' && "$DESIRED_CUDA" != 'xpu' && "$DESIRED_CUDA" != 'rocm'* ]]; then
|
||||
cxx_abi="18"
|
||||
else
|
||||
cxx_abi="16"
|
||||
|
||||
@ -13,13 +13,6 @@ if [[ "$BUILD_ENVIRONMENT" != *win-* ]]; then
|
||||
fi
|
||||
|
||||
if which sccache > /dev/null; then
|
||||
# Clear SCCACHE_BUCKET and SCCACHE_REGION if they are empty, otherwise
|
||||
# sccache will complain about invalid bucket configuration
|
||||
if [[ -z "${SCCACHE_BUCKET:-}" ]]; then
|
||||
unset SCCACHE_BUCKET
|
||||
unset SCCACHE_REGION
|
||||
fi
|
||||
|
||||
# Save sccache logs to file
|
||||
sccache --stop-server > /dev/null 2>&1 || true
|
||||
rm -f ~/sccache_error.log || true
|
||||
|
||||
@ -15,6 +15,6 @@ if [[ "${BUILD_ENVIRONMENT}" == *rocm* ]]; then
|
||||
export PYTORCH_TEST_WITH_ROCM=1
|
||||
fi
|
||||
|
||||
# TODO: Reenable libtorch testing for MacOS, see https://github.com/pytorch/pytorch/issues/62598
|
||||
# TODO: Renable libtorch testing for MacOS, see https://github.com/pytorch/pytorch/issues/62598
|
||||
# shellcheck disable=SC2034
|
||||
BUILD_TEST_LIBTORCH=0
|
||||
|
||||
@ -78,34 +78,6 @@ function pip_install_whl() {
|
||||
fi
|
||||
}
|
||||
|
||||
function pip_build_and_install() {
|
||||
local build_target=$1
|
||||
local wheel_dir=$2
|
||||
|
||||
local found_whl=0
|
||||
for file in "${wheel_dir}"/*.whl
|
||||
do
|
||||
if [[ -f "${file}" ]]; then
|
||||
found_whl=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
# Build the wheel if it doesn't exist
|
||||
if [ "${found_whl}" == "0" ]; then
|
||||
python3 -m pip wheel \
|
||||
--no-build-isolation \
|
||||
--no-deps \
|
||||
--no-use-pep517 \
|
||||
-w "${wheel_dir}" \
|
||||
"${build_target}"
|
||||
fi
|
||||
|
||||
for file in "${wheel_dir}"/*.whl
|
||||
do
|
||||
pip_install_whl "${file}"
|
||||
done
|
||||
}
|
||||
|
||||
function pip_install() {
|
||||
# retry 3 times
|
||||
@ -152,7 +124,14 @@ function get_pinned_commit() {
|
||||
function install_torchaudio() {
|
||||
local commit
|
||||
commit=$(get_pinned_commit audio)
|
||||
pip_build_and_install "git+https://github.com/pytorch/audio.git@${commit}" dist/audio
|
||||
if [[ "$1" == "cuda" ]]; then
|
||||
# TODO: This is better to be passed as a parameter from _linux-test workflow
|
||||
# so that it can be consistent with what is set in build
|
||||
TORCH_CUDA_ARCH_LIST="8.0;8.6" pip_install --no-use-pep517 --user "git+https://github.com/pytorch/audio.git@${commit}"
|
||||
else
|
||||
pip_install --no-use-pep517 --user "git+https://github.com/pytorch/audio.git@${commit}"
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
function install_torchtext() {
|
||||
@ -160,8 +139,8 @@ function install_torchtext() {
|
||||
local text_commit
|
||||
data_commit=$(get_pinned_commit data)
|
||||
text_commit=$(get_pinned_commit text)
|
||||
pip_build_and_install "git+https://github.com/pytorch/data.git@${data_commit}" dist/data
|
||||
pip_build_and_install "git+https://github.com/pytorch/text.git@${text_commit}" dist/text
|
||||
pip_install --no-use-pep517 --user "git+https://github.com/pytorch/data.git@${data_commit}"
|
||||
pip_install --no-use-pep517 --user "git+https://github.com/pytorch/text.git@${text_commit}"
|
||||
}
|
||||
|
||||
function install_torchvision() {
|
||||
@ -174,19 +153,17 @@ function install_torchvision() {
|
||||
echo 'char* dlerror(void) { return "";}'|gcc -fpic -shared -o "${HOME}/dlerror.so" -x c -
|
||||
LD_PRELOAD=${orig_preload}:${HOME}/dlerror.so
|
||||
fi
|
||||
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *cuda* ]]; then
|
||||
# Not sure if both are needed, but why not
|
||||
export FORCE_CUDA=1
|
||||
export WITH_CUDA=1
|
||||
fi
|
||||
pip_build_and_install "git+https://github.com/pytorch/vision.git@${commit}" dist/vision
|
||||
|
||||
pip_install --no-use-pep517 --user "git+https://github.com/pytorch/vision.git@${commit}"
|
||||
if [ -n "${LD_PRELOAD}" ]; then
|
||||
LD_PRELOAD=${orig_preload}
|
||||
fi
|
||||
}
|
||||
|
||||
function install_tlparse() {
|
||||
pip_install --user "tlparse==0.3.30"
|
||||
PATH="$(python -m site --user-base)/bin:$PATH"
|
||||
}
|
||||
|
||||
function install_torchrec_and_fbgemm() {
|
||||
local torchrec_commit
|
||||
torchrec_commit=$(get_pinned_commit torchrec)
|
||||
@ -201,71 +178,25 @@ function install_torchrec_and_fbgemm() {
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *rocm* ]] ; then
|
||||
# install torchrec first because it installs fbgemm nightly on top of rocm fbgemm
|
||||
pip_build_and_install "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}" dist/torchrec
|
||||
pip_install --no-use-pep517 --user "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
|
||||
pip_uninstall fbgemm-gpu-nightly
|
||||
|
||||
# Set ROCM_HOME isn't available, use ROCM_PATH if set or /opt/rocm
|
||||
ROCM_HOME="${ROCM_HOME:-${ROCM_PATH:-/opt/rocm}}"
|
||||
|
||||
# Find rocm_version.h header file for ROCm version extract
|
||||
rocm_version_h="${ROCM_HOME}/include/rocm-core/rocm_version.h"
|
||||
if [ ! -f "$rocm_version_h" ]; then
|
||||
rocm_version_h="${ROCM_HOME}/include/rocm_version.h"
|
||||
fi
|
||||
|
||||
# Error out if rocm_version.h not found
|
||||
if [ ! -f "$rocm_version_h" ]; then
|
||||
echo "Error: rocm_version.h not found in expected locations." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Extract major, minor and patch ROCm version numbers
|
||||
MAJOR_VERSION=$(grep 'ROCM_VERSION_MAJOR' "$rocm_version_h" | awk '{print $3}')
|
||||
MINOR_VERSION=$(grep 'ROCM_VERSION_MINOR' "$rocm_version_h" | awk '{print $3}')
|
||||
PATCH_VERSION=$(grep 'ROCM_VERSION_PATCH' "$rocm_version_h" | awk '{print $3}')
|
||||
ROCM_INT=$((MAJOR_VERSION * 10000 + MINOR_VERSION * 100 + PATCH_VERSION))
|
||||
echo "ROCm version: $ROCM_INT"
|
||||
export BUILD_ROCM_VERSION="$MAJOR_VERSION.$MINOR_VERSION"
|
||||
|
||||
pip_install tabulate # needed for newer fbgemm
|
||||
pip_install patchelf # needed for rocm fbgemm
|
||||
|
||||
local wheel_dir=dist/fbgemm_gpu
|
||||
local found_whl=0
|
||||
for file in "${wheel_dir}"/*.whl
|
||||
do
|
||||
if [[ -f "${file}" ]]; then
|
||||
found_whl=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
# Build the wheel if it doesn't exist
|
||||
if [ "${found_whl}" == "0" ]; then
|
||||
git clone --recursive https://github.com/pytorch/fbgemm
|
||||
pushd fbgemm/fbgemm_gpu
|
||||
git checkout "${fbgemm_commit}" --recurse-submodules
|
||||
python setup.py bdist_wheel \
|
||||
--build-variant=rocm \
|
||||
-DHIP_ROOT_DIR="${ROCM_PATH}" \
|
||||
-DCMAKE_C_FLAGS="-DTORCH_USE_HIP_DSA" \
|
||||
-DCMAKE_CXX_FLAGS="-DTORCH_USE_HIP_DSA"
|
||||
popd
|
||||
|
||||
# Save the wheel before cleaning up
|
||||
mkdir -p dist/fbgemm_gpu
|
||||
cp fbgemm/fbgemm_gpu/dist/*.whl dist/fbgemm_gpu
|
||||
fi
|
||||
|
||||
for file in "${wheel_dir}"/*.whl
|
||||
do
|
||||
pip_install_whl "${file}"
|
||||
done
|
||||
|
||||
git clone --recursive https://github.com/pytorch/fbgemm
|
||||
pushd fbgemm/fbgemm_gpu
|
||||
git checkout "${fbgemm_commit}"
|
||||
python setup.py install \
|
||||
--package_variant=rocm \
|
||||
-DHIP_ROOT_DIR="${ROCM_PATH}" \
|
||||
-DCMAKE_C_FLAGS="-DTORCH_USE_HIP_DSA" \
|
||||
-DCMAKE_CXX_FLAGS="-DTORCH_USE_HIP_DSA"
|
||||
popd
|
||||
rm -rf fbgemm
|
||||
else
|
||||
pip_build_and_install "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}" dist/torchrec
|
||||
pip_build_and_install "git+https://github.com/pytorch/FBGEMM.git@${fbgemm_commit}#subdirectory=fbgemm_gpu" dist/fbgemm_gpu
|
||||
# See https://github.com/pytorch/pytorch/issues/106971
|
||||
CUDA_PATH=/usr/local/cuda-12.1 pip_install --no-use-pep517 --user "git+https://github.com/pytorch/FBGEMM.git@${fbgemm_commit}#egg=fbgemm-gpu&subdirectory=fbgemm_gpu"
|
||||
pip_install --no-use-pep517 --user "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
|
||||
fi
|
||||
}
|
||||
|
||||
@ -281,10 +212,34 @@ function clone_pytorch_xla() {
|
||||
fi
|
||||
}
|
||||
|
||||
function checkout_install_torchbench() {
|
||||
local commit
|
||||
commit=$(get_pinned_commit torchbench)
|
||||
git clone https://github.com/pytorch/benchmark torchbench
|
||||
pushd torchbench
|
||||
git checkout "$commit"
|
||||
|
||||
if [ "$1" ]; then
|
||||
python install.py --continue_on_fail models "$@"
|
||||
else
|
||||
# Occasionally the installation may fail on one model but it is ok to continue
|
||||
# to install and test other models
|
||||
python install.py --continue_on_fail
|
||||
fi
|
||||
|
||||
# TODO (huydhn): transformers-4.44.2 added by https://github.com/pytorch/benchmark/pull/2488
|
||||
# is regressing speedup metric. This needs to be investigated further
|
||||
pip install transformers==4.38.1
|
||||
|
||||
echo "Print all dependencies after TorchBench is installed"
|
||||
python -mpip freeze
|
||||
popd
|
||||
}
|
||||
|
||||
function install_torchao() {
|
||||
local commit
|
||||
commit=$(get_pinned_commit torchao)
|
||||
pip_build_and_install "git+https://github.com/pytorch/ao.git@${commit}" dist/ao
|
||||
pip_install --no-use-pep517 --user "git+https://github.com/pytorch/ao.git@${commit}"
|
||||
}
|
||||
|
||||
function print_sccache_stats() {
|
||||
|
||||
123
.ci/pytorch/create_test_cert.py
Normal file
@ -0,0 +1,123 @@
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from tempfile import mkdtemp
|
||||
|
||||
from cryptography import x509
|
||||
from cryptography.hazmat.primitives import hashes, serialization
|
||||
from cryptography.hazmat.primitives.asymmetric import rsa
|
||||
from cryptography.x509.oid import NameOID
|
||||
|
||||
|
||||
temp_dir = mkdtemp()
|
||||
print(temp_dir)
|
||||
|
||||
|
||||
def genrsa(path):
|
||||
key = rsa.generate_private_key(
|
||||
public_exponent=65537,
|
||||
key_size=2048,
|
||||
)
|
||||
with open(path, "wb") as f:
|
||||
f.write(
|
||||
key.private_bytes(
|
||||
encoding=serialization.Encoding.PEM,
|
||||
format=serialization.PrivateFormat.TraditionalOpenSSL,
|
||||
encryption_algorithm=serialization.NoEncryption(),
|
||||
)
|
||||
)
|
||||
return key
|
||||
|
||||
|
||||
def create_cert(path, C, ST, L, O, key):
|
||||
subject = issuer = x509.Name(
|
||||
[
|
||||
x509.NameAttribute(NameOID.COUNTRY_NAME, C),
|
||||
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ST),
|
||||
x509.NameAttribute(NameOID.LOCALITY_NAME, L),
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, O),
|
||||
]
|
||||
)
|
||||
cert = (
|
||||
x509.CertificateBuilder()
|
||||
.subject_name(subject)
|
||||
.issuer_name(issuer)
|
||||
.public_key(key.public_key())
|
||||
.serial_number(x509.random_serial_number())
|
||||
.not_valid_before(datetime.now(timezone.utc))
|
||||
.not_valid_after(
|
||||
# Our certificate will be valid for 10 days
|
||||
datetime.now(timezone.utc) + timedelta(days=10)
|
||||
)
|
||||
.add_extension(
|
||||
x509.BasicConstraints(ca=True, path_length=None),
|
||||
critical=True,
|
||||
)
|
||||
.sign(key, hashes.SHA256())
|
||||
)
|
||||
# Write our certificate out to disk.
|
||||
with open(path, "wb") as f:
|
||||
f.write(cert.public_bytes(serialization.Encoding.PEM))
|
||||
return cert
|
||||
|
||||
|
||||
def create_req(path, C, ST, L, O, key):
|
||||
csr = (
|
||||
x509.CertificateSigningRequestBuilder()
|
||||
.subject_name(
|
||||
x509.Name(
|
||||
[
|
||||
# Provide various details about who we are.
|
||||
x509.NameAttribute(NameOID.COUNTRY_NAME, C),
|
||||
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ST),
|
||||
x509.NameAttribute(NameOID.LOCALITY_NAME, L),
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, O),
|
||||
]
|
||||
)
|
||||
)
|
||||
.sign(key, hashes.SHA256())
|
||||
)
|
||||
with open(path, "wb") as f:
|
||||
f.write(csr.public_bytes(serialization.Encoding.PEM))
|
||||
return csr
|
||||
|
||||
|
||||
def sign_certificate_request(path, csr_cert, ca_cert, private_ca_key):
|
||||
cert = (
|
||||
x509.CertificateBuilder()
|
||||
.subject_name(csr_cert.subject)
|
||||
.issuer_name(ca_cert.subject)
|
||||
.public_key(csr_cert.public_key())
|
||||
.serial_number(x509.random_serial_number())
|
||||
.not_valid_before(datetime.now(timezone.utc))
|
||||
.not_valid_after(
|
||||
# Our certificate will be valid for 10 days
|
||||
datetime.now(timezone.utc) + timedelta(days=10)
|
||||
# Sign our certificate with our private key
|
||||
)
|
||||
.sign(private_ca_key, hashes.SHA256())
|
||||
)
|
||||
with open(path, "wb") as f:
|
||||
f.write(cert.public_bytes(serialization.Encoding.PEM))
|
||||
return cert
|
||||
|
||||
|
||||
ca_key = genrsa(temp_dir + "/ca.key")
|
||||
ca_cert = create_cert(
|
||||
temp_dir + "/ca.pem",
|
||||
"US",
|
||||
"New York",
|
||||
"New York",
|
||||
"Gloo Certificate Authority",
|
||||
ca_key,
|
||||
)
|
||||
|
||||
pkey = genrsa(temp_dir + "/pkey.key")
|
||||
csr = create_req(
|
||||
temp_dir + "/csr.csr",
|
||||
"US",
|
||||
"California",
|
||||
"San Francisco",
|
||||
"Gloo Testing Company",
|
||||
pkey,
|
||||
)
|
||||
|
||||
cert = sign_certificate_request(temp_dir + "/cert.pem", csr, ca_cert, ca_key)
|
||||
@ -5,6 +5,11 @@ set -x
# shellcheck source=./macos-common.sh
source "$(dirname "${BASH_SOURCE[0]}")/macos-common.sh"

if [[ -n "$CONDA_ENV" ]]; then
# Use binaries under conda environment
export PATH="$CONDA_ENV/bin":$PATH
fi

# Test that OpenMP is enabled
pushd test
if [[ ! $(python -c "import torch; print(int(torch.backends.openmp.is_available()))") == "1" ]]; then
@ -157,29 +162,6 @@ test_jit_hooks() {
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
# Shellcheck doesn't like it when you pass no arguments to a function
|
||||
# that can take args. See https://www.shellcheck.net/wiki/SC2120
|
||||
# shellcheck disable=SC2120
|
||||
checkout_install_torchbench() {
|
||||
local commit
|
||||
commit=$(cat .ci/docker/ci_commit_pins/torchbench.txt)
|
||||
git clone https://github.com/pytorch/benchmark torchbench
|
||||
pushd torchbench
|
||||
git checkout "$commit"
|
||||
|
||||
if [ "$1" ]; then
|
||||
python install.py --continue_on_fail models "$@"
|
||||
else
|
||||
# Occasionally the installation may fail on one model but it is ok to continue
|
||||
# to install and test other models
|
||||
python install.py --continue_on_fail
|
||||
fi
|
||||
|
||||
echo "Print all dependencies after TorchBench is installed"
|
||||
python -mpip freeze
|
||||
popd
|
||||
}
|
||||
|
||||
torchbench_setup_macos() {
|
||||
git clone --recursive https://github.com/pytorch/vision torchvision
|
||||
git clone --recursive https://github.com/pytorch/audio torchaudio
|
||||
@ -202,11 +184,13 @@ torchbench_setup_macos() {
|
||||
USE_OPENMP=0 python setup.py develop
|
||||
popd
|
||||
|
||||
# Shellcheck doesn't like it when you pass no arguments to a function that can take args. See https://www.shellcheck.net/wiki/SC2120
|
||||
# shellcheck disable=SC2119,SC2120
|
||||
checkout_install_torchbench
|
||||
}
|
||||
|
||||
pip_benchmark_deps() {
|
||||
python -mpip install --no-input requests cython scikit-learn six
|
||||
python -mpip install --no-input astunparse requests cython scikit-learn
|
||||
}
|
||||
|
||||
|
||||
@ -249,52 +233,53 @@ test_torchbench_smoketest() {
|
||||
mkdir -p "$TEST_REPORTS_DIR"
|
||||
|
||||
local device=mps
|
||||
local dtypes=(undefined float16 bfloat16 notset)
|
||||
local dtype=${dtypes[$1]}
|
||||
local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor timm_resnet timm_vovnet vgg16)
|
||||
local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor)
|
||||
local hf_models=(GoogleFnet YituTechConvBert Speech2Text2ForCausalLM)
|
||||
|
||||
for backend in eager inductor; do
|
||||
|
||||
echo "Launching torchbench inference performance run for backend ${backend} and dtype ${dtype}"
|
||||
local dtype_arg="--${dtype}"
|
||||
if [ "$dtype" == notset ]; then
|
||||
dtype_arg="--float32"
|
||||
fi
|
||||
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv"
|
||||
for model in "${models[@]}"; do
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
|
||||
--performance --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv" || true
|
||||
if [ "$backend" == "inductor" ]; then
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
|
||||
--accuracy --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_accuracy.csv" || true
|
||||
fi
|
||||
done
|
||||
if [ "$backend" == "inductor" ]; then
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
|
||||
--performance --backend "$backend" --inference --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_huggingface_${dtype}_inference_${device}_performance.csv" || true
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
|
||||
--accuracy --backend "$backend" --inference --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_huggingface_${dtype}_inference_${device}_accuracy.csv" || true
|
||||
fi
|
||||
|
||||
if [ "$dtype" == notset ]; then
|
||||
for dtype_ in notset amp; do
|
||||
echo "Launching torchbench training performance run for backend ${backend} and dtype ${dtype_}"
|
||||
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype_}_training_${device}_performance.csv"
|
||||
local dtype_arg="--${dtype_}"
|
||||
if [ "$dtype_" == notset ]; then
|
||||
for dtype in notset float16 bfloat16; do
|
||||
echo "Launching torchbench inference performance run for backend ${backend} and dtype ${dtype}"
|
||||
local dtype_arg="--${dtype}"
|
||||
if [ "$dtype" == notset ]; then
|
||||
dtype_arg="--float32"
|
||||
fi
|
||||
for model in "${models[@]}"; do
|
||||
fi
|
||||
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv"
|
||||
for model in "${models[@]}"; do
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
|
||||
--performance --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv" || true
|
||||
if [ "$backend" == "inductor" ]; then
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
|
||||
--performance --only "$model" --backend "$backend" --training --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype_}_training_${device}_performance.csv" || true
|
||||
done
|
||||
--accuracy --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_accuracy.csv" || true
|
||||
fi
|
||||
done
|
||||
fi
|
||||
for model in "${hf_models[@]}"; do
|
||||
if [ "$backend" == "inductor" ]; then
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
|
||||
--performance --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_huggingface_${dtype}_inference_${device}_performance.csv" || true
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
|
||||
--accuracy --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_huggingface_${dtype}_inference_${device}_accuracy.csv" || true
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
for dtype in notset amp; do
|
||||
echo "Launching torchbench training performance run for backend ${backend} and dtype ${dtype}"
|
||||
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv"
|
||||
local dtype_arg="--${dtype}"
|
||||
if [ "$dtype" == notset ]; then
|
||||
dtype_arg="--float32"
|
||||
fi
|
||||
for model in "${models[@]}"; do
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
|
||||
--performance --only "$model" --backend "$backend" --training --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv" || true
|
||||
done
|
||||
done
|
||||
|
||||
done
|
||||
|
||||
@ -333,6 +318,8 @@ test_timm_perf() {
|
||||
echo "timm benchmark on mps device completed"
|
||||
}
|
||||
|
||||
install_tlparse
|
||||
|
||||
if [[ $TEST_CONFIG == *"perf_all"* ]]; then
|
||||
test_torchbench_perf
|
||||
test_hf_perf
|
||||
@ -344,7 +331,7 @@ elif [[ $TEST_CONFIG == *"perf_hf"* ]]; then
|
||||
elif [[ $TEST_CONFIG == *"perf_timm"* ]]; then
|
||||
test_timm_perf
|
||||
elif [[ $TEST_CONFIG == *"perf_smoketest"* ]]; then
|
||||
test_torchbench_smoketest "${SHARD_NUMBER}"
|
||||
test_torchbench_smoketest
|
||||
elif [[ $TEST_CONFIG == *"mps"* ]]; then
|
||||
test_python_mps
|
||||
elif [[ $NUM_TEST_SHARDS -gt 1 ]]; then
|
||||
|
||||
18
.ci/pytorch/run_glootls_test.sh
Executable file
@ -0,0 +1,18 @@
#!/bin/bash

CREATE_TEST_CERT="$(dirname "${BASH_SOURCE[0]}")/create_test_cert.py"
TMP_CERT_DIR=$(python "$CREATE_TEST_CERT")

openssl verify -CAfile "${TMP_CERT_DIR}/ca.pem" "${TMP_CERT_DIR}/cert.pem"

export GLOO_DEVICE_TRANSPORT=TCP_TLS
export GLOO_DEVICE_TRANSPORT_TCP_TLS_PKEY=${TMP_CERT_DIR}/pkey.key
export GLOO_DEVICE_TRANSPORT_TCP_TLS_CERT=${TMP_CERT_DIR}/cert.pem
export GLOO_DEVICE_TRANSPORT_TCP_TLS_CA_FILE=${TMP_CERT_DIR}/ca.pem

time python test/run_test.py --include distributed/test_c10d_gloo --verbose -- ProcessGroupGlooTest

unset GLOO_DEVICE_TRANSPORT
unset GLOO_DEVICE_TRANSPORT_TCP_TLS_PKEY
unset GLOO_DEVICE_TRANSPORT_TCP_TLS_CERT
unset GLOO_DEVICE_TRANSPORT_TCP_TLS_CA_FILE
@ -74,13 +74,12 @@ else
fi

# Environment initialization
retry pip install -qUr requirements-build.txt
if [[ "$(uname)" == Darwin ]]; then
# Install the testing dependencies
retry pip install -q future hypothesis ${NUMPY_PACKAGE} ${PROTOBUF_PACKAGE} pytest
retry pip install -q future hypothesis ${NUMPY_PACKAGE} ${PROTOBUF_PACKAGE} pytest setuptools six typing_extensions pyyaml
else
retry pip install -qr requirements.txt || true
retry pip install -q hypothesis protobuf pytest || true
retry pip install -q hypothesis protobuf pytest setuptools || true
numpy_ver=1.15
case "$(python --version 2>&1)" in
*2* | *3.5* | *3.6*)
@ -93,7 +93,7 @@ def check_lib_symbols_for_abi_correctness(lib: str) -> None:
f"Found pre-cxx11 symbols, but there shouldn't be any, see: {pre_cxx11_symbols[:100]}"
)
if num_cxx11_symbols < 100:
raise RuntimeError("Didn't find enough cxx11 symbols")
raise RuntimeError("Didn't find enought cxx11 symbols")


def main() -> None:
@ -46,9 +46,6 @@ def get_gomp_thread():

# use the default gomp path of AlmaLinux OS
libgomp_path = "/usr/lib64/libgomp.so.1"
# if it does not exist, try Ubuntu path
if not os.path.exists(libgomp_path):
libgomp_path = f"/usr/lib/{os.uname().machine}-linux-gnu/libgomp.so.1"

os.environ["GOMP_CPU_AFFINITY"] = "0-3"

@ -276,7 +276,7 @@ def smoke_test_cuda(
torch_nccl_version = ".".join(str(v) for v in torch.cuda.nccl.version())
print(f"Torch nccl; version: {torch_nccl_version}")

# Pypi dependencies are installed on linux only and nccl is available only on Linux.
# Pypi dependencies are installed on linux ony and nccl is availbale only on Linux.
if pypi_pkg_check == "enabled" and sys.platform in ["linux", "linux2"]:
compare_pypi_to_torch_versions(
"cudnn", find_pypi_package_version("nvidia-cudnn"), torch_cudnn_version
@ -385,29 +385,6 @@ def smoke_test_compile(device: str = "cpu") -> None:
x_pt2 = torch.compile(model, mode="max-autotune")(x)


def smoke_test_nvshmem() -> None:
if not torch.cuda.is_available():
print("CUDA is not available, skipping NVSHMEM test")
return

# Check if NVSHMEM is compiled in current build
try:
from torch._C._distributed_c10d import _is_nvshmem_available
except ImportError:
# Not built with NVSHMEM support.
# torch is not compiled with NVSHMEM prior to 2.9
if torch.__version__ < "2.9":
return
else:
# After 2.9: NVSHMEM is expected to be compiled in current build
raise RuntimeError("torch not compiled with NVSHMEM") from None

print("torch compiled with NVSHMEM")

# Check if NVSHMEM is available on current system.
print(f"NVSHMEM available at run time: {_is_nvshmem_available()}")


def smoke_test_modules():
cwd = os.getcwd()
for module in MODULES:
@ -502,8 +479,6 @@ def main() -> None:
options.pypi_pkg_check,
)

smoke_test_nvshmem()


if __name__ == "__main__":
main()
@ -11,8 +11,6 @@ export TERM=vt100

# shellcheck source=./common.sh
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
# shellcheck source=./common-build.sh
source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh"

# Do not change workspace permissions for ROCm and s390x CI jobs
# as it can leave workspace with bad permissions for cancelled jobs
@ -165,6 +163,8 @@ elif [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
export PYTORCH_TESTING_DEVICE_ONLY_FOR="xpu"
# setting PYTHON_TEST_EXTRA_OPTION
export PYTHON_TEST_EXTRA_OPTION="--xpu"
# Disable sccache for xpu test due to flaky issue https://github.com/pytorch/pytorch/issues/143585
sudo rm -rf /opt/cache
fi

if [[ "$TEST_CONFIG" == *crossref* ]]; then
@ -196,12 +196,12 @@ if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
# shellcheck disable=SC1091
source /opt/intel/oneapi/mpi/latest/env/vars.sh
# Check XPU status before testing
timeout 30 xpu-smi discovery || true
xpu-smi discovery
fi

if [[ "$BUILD_ENVIRONMENT" != *-bazel-* ]] ; then
# JIT C++ extensions require ninja.
pip_install "ninja==1.10.2"
pip_install --user "ninja==1.10.2"
# ninja is installed in $HOME/.local/bin, e.g., /var/lib/jenkins/.local/bin for CI user jenkins
# but this script should be runnable by any user, including root
export PATH="$HOME/.local/bin:$PATH"
@ -212,6 +212,8 @@ if [[ "$BUILD_ENVIRONMENT" == *aarch64* ]]; then
|
||||
export VALGRIND=OFF
|
||||
fi
|
||||
|
||||
install_tlparse
|
||||
|
||||
# DANGER WILL ROBINSON. The LD_PRELOAD here could cause you problems
|
||||
# if you're not careful. Check this if you made some changes and the
|
||||
# ASAN test is not working
|
||||
@ -224,7 +226,7 @@ if [[ "$BUILD_ENVIRONMENT" == *asan* ]]; then
|
||||
export PYTORCH_TEST_WITH_ASAN=1
|
||||
export PYTORCH_TEST_WITH_UBSAN=1
|
||||
# TODO: Figure out how to avoid hard-coding these paths
|
||||
export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-18/bin/llvm-symbolizer
|
||||
export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-15/bin/llvm-symbolizer
|
||||
export TORCH_USE_RTLD_GLOBAL=1
|
||||
# NB: We load libtorch.so with RTLD_GLOBAL for UBSAN, unlike our
|
||||
# default behavior.
|
||||
@ -289,12 +291,6 @@ elif [[ $TEST_CONFIG == 'nogpu_AVX512' ]]; then
|
||||
export ATEN_CPU_CAPABILITY=avx2
|
||||
fi
|
||||
|
||||
if [[ "${TEST_CONFIG}" == "legacy_nvidia_driver" ]]; then
|
||||
# Make sure that CUDA can be initialized
|
||||
(cd test && python -c "import torch; torch.rand(2, 2, device='cuda')")
|
||||
export USE_LEGACY_DRIVER=1
|
||||
fi
|
||||
|
||||
test_python_legacy_jit() {
|
||||
time python test/run_test.py --include test_jit_legacy test_jit_fuser_legacy --verbose
|
||||
assert_git_not_dirty
|
||||
@ -328,29 +324,6 @@ test_python_smoke() {
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
test_h100_distributed() {
|
||||
# Distributed tests at H100
|
||||
time python test/run_test.py --include distributed/_composable/test_composability/test_pp_composability.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
|
||||
# This test requires multicast support
|
||||
time python test/run_test.py --include distributed/_composable/fsdp/test_fully_shard_comm.py -k TestFullyShardAllocFromPG $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
test_h100_symm_mem() {
|
||||
# symmetric memory test
|
||||
time python test/run_test.py --include distributed/test_symmetric_memory.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
|
||||
time python test/run_test.py --include distributed/test_nvshmem.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
|
||||
time python test/run_test.py --include distributed/test_nvshmem_triton.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
|
||||
time python test/run_test.py --include distributed/test_nccl.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
test_h100_cutlass_backend() {
|
||||
# cutlass backend tests for H100
|
||||
TORCHINDUCTOR_CUTLASS_DIR=$(realpath "./third_party/cutlass") python test/run_test.py --include inductor/test_cutlass_backend -k "not addmm" $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
|
||||
TORCHINDUCTOR_CUTLASS_DIR=$(realpath "./third_party/cutlass") python test/run_test.py --include inductor/test_cutlass_evt $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
|
||||
}
|
||||
|
||||
test_lazy_tensor_meta_reference_disabled() {
|
||||
export TORCH_DISABLE_FUNCTIONALIZATION_META_REFERENCE=1
|
||||
echo "Testing lazy tensor operations without meta reference"
|
||||
@ -379,24 +352,12 @@ test_dynamo_wrapped_shard() {
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
test_einops() {
|
||||
pip install einops==0.6.1
|
||||
time python test/run_test.py --einops --verbose --upload-artifacts-while-running
|
||||
pip install einops==0.7.0
|
||||
time python test/run_test.py --einops --verbose --upload-artifacts-while-running
|
||||
pip install einops==0.8.1
|
||||
time python test/run_test.py --einops --verbose --upload-artifacts-while-running
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
|
||||
test_inductor_distributed() {
|
||||
# Smuggle a few multi-gpu tests here so that we don't have to request another large node
|
||||
echo "Testing multi_gpu tests in test_torchinductor"
|
||||
python test/run_test.py -i inductor/test_torchinductor.py -k test_multi_gpu --verbose
|
||||
python test/run_test.py -i inductor/test_aot_inductor.py -k test_non_default_cuda_device --verbose
|
||||
python test/run_test.py -i inductor/test_aot_inductor.py -k test_replicate_on_devices --verbose
|
||||
python test/run_test.py -i inductor/test_aot_inductor.py -k test_on_gpu_device1 --verbose
|
||||
python test/run_test.py -i inductor/test_aot_inductor.py -k test_non_default_gpu_device --verbose
|
||||
python test/run_test.py -i inductor/test_aot_inductor.py -k test_load_package_multiple_gpus --verbose
|
||||
python test/run_test.py -i distributed/test_c10d_functional_native.py --verbose
|
||||
python test/run_test.py -i distributed/tensor/test_dtensor_compile.py --verbose
|
||||
python test/run_test.py -i distributed/tensor/parallel/test_micro_pipeline_tp.py --verbose
|
||||
@ -448,21 +409,14 @@ test_inductor_aoti() {
|
||||
python3 tools/amd_build/build_amd.py
|
||||
fi
|
||||
if [[ "$BUILD_ENVIRONMENT" == *sm86* ]]; then
|
||||
BUILD_COMMAND=(TORCH_CUDA_ARCH_LIST=8.6 USE_FLASH_ATTENTION=OFF python -m pip install --no-build-isolation -v -e .)
|
||||
BUILD_AOT_INDUCTOR_TEST=1 TORCH_CUDA_ARCH_LIST=8.6 USE_FLASH_ATTENTION=OFF python setup.py develop
|
||||
# TODO: Replace me completely, as one should not use conda libstdc++, nor need special path to TORCH_LIB
|
||||
TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="/opt/conda/envs/py_3.10/lib:${TORCH_LIB_DIR}:${LD_LIBRARY_PATH}")
|
||||
LD_LIBRARY_PATH=/opt/conda/envs/py_3.10/lib/:${TORCH_LIB_DIR}:$LD_LIBRARY_PATH
|
||||
CPP_TESTS_DIR="${BUILD_BIN_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference -dist=loadfile
|
||||
else
|
||||
BUILD_COMMAND=(python -m pip install --no-build-isolation -v -e .)
|
||||
TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}")
|
||||
BUILD_AOT_INDUCTOR_TEST=1 python setup.py develop
|
||||
CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference -dist=loadfile
|
||||
fi
|
||||
|
||||
# aoti cmake custom command requires `torch` to be installed
|
||||
# initialize the cmake build cache and install torch
|
||||
/usr/bin/env "${BUILD_COMMAND[@]}"
|
||||
# rebuild with the build cache with `BUILD_AOT_INDUCTOR_TEST` enabled
|
||||
/usr/bin/env CMAKE_FRESH=1 BUILD_AOT_INDUCTOR_TEST=1 "${BUILD_COMMAND[@]}"
|
||||
|
||||
/usr/bin/env "${TEST_ENVS[@]}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference cpp/test_vec_half_AVX2 -dist=loadfile
|
||||
}
|
||||
|
||||
test_inductor_cpp_wrapper_shard() {
|
||||
@ -475,26 +429,47 @@ test_inductor_cpp_wrapper_shard() {
|
||||
TEST_REPORTS_DIR=$(pwd)/test/test-reports
|
||||
mkdir -p "$TEST_REPORTS_DIR"
|
||||
|
||||
if [[ "$1" -eq "2" ]]; then
|
||||
# For now, manually put the opinfo tests in shard 2, and all other tests in
|
||||
# shard 1. Run all CPU tests, as well as specific GPU tests triggering past
|
||||
# bugs, for now.
|
||||
python test/run_test.py \
|
||||
--include inductor/test_torchinductor_opinfo \
|
||||
-k 'linalg or to_sparse or TestInductorOpInfoCPU' \
|
||||
--verbose
|
||||
exit
|
||||
fi
|
||||
|
||||
# Run certain inductor unit tests with cpp wrapper. In the end state, we
|
||||
# should be able to run all the inductor unit tests with cpp_wrapper.
|
||||
#
|
||||
# TODO: I'm pretty sure that "TestInductorOpInfoCPU" is not a valid filter,
|
||||
# but change that in another PR to more accurately monitor the increased CI
|
||||
# usage.
|
||||
python test/run_test.py \
|
||||
--include inductor/test_torchinductor_opinfo \
|
||||
-k 'linalg or to_sparse or TestInductorOpInfoCPU' \
|
||||
--shard "$1" "$NUM_TEST_SHARDS" \
|
||||
--verbose
|
||||
python test/run_test.py \
|
||||
--include inductor/test_torchinductor inductor/test_max_autotune inductor/test_cpu_repro \
|
||||
--shard "$1" "$NUM_TEST_SHARDS" \
|
||||
--verbose
|
||||
python test/run_test.py --inductor \
|
||||
--include test_torch \
|
||||
-k 'take' \
|
||||
--shard "$1" "$NUM_TEST_SHARDS" \
|
||||
--verbose
|
||||
python test/run_test.py --inductor --include test_torch -k 'take' --verbose
|
||||
|
||||
# Run inductor benchmark tests with cpp wrapper.
|
||||
# Skip benchmark tests if it's in rerun-disabled-mode.
|
||||
if [[ "${PYTORCH_TEST_RERUN_DISABLED_TESTS}" == "1" ]]; then
|
||||
echo "skip dynamo benchmark tests for rerun-disabled-test"
|
||||
else
|
||||
echo "run dynamo benchmark tests with cpp wrapper"
|
||||
python benchmarks/dynamo/timm_models.py --device cuda --accuracy --amp \
|
||||
--training --inductor --disable-cudagraphs --only vit_base_patch16_224 \
|
||||
--output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_training.csv"
|
||||
python benchmarks/dynamo/check_accuracy.py \
|
||||
--actual "$TEST_REPORTS_DIR/inductor_cpp_wrapper_training.csv" \
|
||||
--expected "benchmarks/dynamo/ci_expected_accuracy/${MAYBE_ROCM}inductor_timm_training.csv"
|
||||
|
||||
python benchmarks/dynamo/torchbench.py --device cuda --accuracy \
|
||||
--bfloat16 --inference --inductor --only hf_T5 --output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv"
|
||||
python benchmarks/dynamo/torchbench.py --device cuda --accuracy \
|
||||
--bfloat16 --inference --inductor --only llama --output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv"
|
||||
python benchmarks/dynamo/torchbench.py --device cuda --accuracy \
|
||||
--bfloat16 --inference --inductor --only moco --output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv"
|
||||
python benchmarks/dynamo/check_accuracy.py \
|
||||
--actual "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv" \
|
||||
--expected "benchmarks/dynamo/ci_expected_accuracy/${MAYBE_ROCM}inductor_torchbench_inference.csv"
|
||||
fi
|
||||
}
|
||||
|
||||
# "Global" flags for inductor benchmarking controlled by TEST_CONFIG
|
||||
@ -507,7 +482,7 @@ DYNAMO_BENCHMARK_FLAGS=()
|
||||
|
||||
pr_time_benchmarks() {
|
||||
|
||||
pip_install "fbscribelogger"
|
||||
pip_install --user "fbscribelogger"
|
||||
|
||||
TEST_REPORTS_DIR=$(pwd)/test/test-reports
|
||||
mkdir -p "$TEST_REPORTS_DIR"
|
||||
@ -615,9 +590,7 @@ test_perf_for_dashboard() {
|
||||
|
||||
local device=cuda
|
||||
if [[ "${TEST_CONFIG}" == *cpu* ]]; then
|
||||
if [[ "${TEST_CONFIG}" == *cpu_x86_zen* ]]; then
|
||||
device=cpu_x86_zen
|
||||
elif [[ "${TEST_CONFIG}" == *cpu_x86* ]]; then
|
||||
if [[ "${TEST_CONFIG}" == *cpu_x86* ]]; then
|
||||
device=cpu_x86
|
||||
elif [[ "${TEST_CONFIG}" == *cpu_aarch64* ]]; then
|
||||
device=cpu_aarch64
|
||||
@ -627,19 +600,13 @@ test_perf_for_dashboard() {
|
||||
device=cuda_a10g
|
||||
elif [[ "${TEST_CONFIG}" == *h100* ]]; then
|
||||
device=cuda_h100
|
||||
elif [[ "${TEST_CONFIG}" == *b200* ]]; then
|
||||
device=cuda_b200
|
||||
elif [[ "${TEST_CONFIG}" == *rocm* ]]; then
|
||||
device=rocm
|
||||
fi
|
||||
|
||||
for mode in "${modes[@]}"; do
|
||||
if [[ "$mode" == "inference" ]]; then
|
||||
if [[ "$device" == "cpu_x86" ]]; then
|
||||
dtype=amp
|
||||
else
|
||||
dtype=bfloat16
|
||||
fi
|
||||
dtype=bfloat16
|
||||
elif [[ "$mode" == "training" ]]; then
|
||||
dtype=amp
|
||||
fi
|
||||
@ -651,10 +618,6 @@ test_perf_for_dashboard() {
|
||||
target_flag+=( --no-translation-validation)
|
||||
fi
|
||||
|
||||
if [[ "$DASHBOARD_TAG" == *freezing-true* ]]; then
|
||||
target_flag+=( --freezing)
|
||||
fi
|
||||
|
||||
if [[ "$DASHBOARD_TAG" == *default-true* ]]; then
|
||||
$TASKSET python "benchmarks/dynamo/$suite.py" \
|
||||
"${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" --disable-cudagraphs "$@" \
|
||||
@ -803,16 +766,6 @@ test_dynamo_benchmark() {
|
||||
if [[ "${TEST_CONFIG}" == *perf_compare* ]]; then
|
||||
test_single_dynamo_benchmark "training" "$suite" "$shard_id" --training --amp "$@"
|
||||
elif [[ "${TEST_CONFIG}" == *perf* ]]; then
|
||||
# TODO (huydhn): Just smoke test some sample models
|
||||
if [[ "${TEST_CONFIG}" == *b200* ]]; then
|
||||
if [[ "${suite}" == "huggingface" ]]; then
|
||||
export TORCHBENCH_ONLY_MODELS="DistillGPT2"
|
||||
elif [[ "${suite}" == "timm_models" ]]; then
|
||||
export TORCHBENCH_ONLY_MODELS="inception_v3"
|
||||
elif [[ "${suite}" == "torchbench" ]]; then
|
||||
export TORCHBENCH_ONLY_MODELS="hf_Bert"
|
||||
fi
|
||||
fi
|
||||
test_single_dynamo_benchmark "dashboard" "$suite" "$shard_id" "$@"
|
||||
else
|
||||
if [[ "${TEST_CONFIG}" == *cpu* ]]; then
|
||||
@ -940,6 +893,12 @@ test_torchbench_gcp_smoketest(){
|
||||
popd
|
||||
}
|
||||
|
||||
test_python_gloo_with_tls() {
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/run_glootls_test.sh"
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
|
||||
test_aten() {
|
||||
# Test ATen
|
||||
# The following test(s) of ATen have already been skipped by caffe2 in rocm environment:
|
||||
@ -986,8 +945,6 @@ test_without_numpy() {
|
||||
if [[ "${TEST_CONFIG}" == *dynamo_wrapped* ]]; then
|
||||
python -c "import sys;sys.path.insert(0, 'fake_numpy');import torch;torch.compile(lambda x:print(x))('Hello World')"
|
||||
fi
|
||||
# Regression test for https://github.com/pytorch/pytorch/pull/157734 (torch.onnx should be importable without numpy)
|
||||
python -c "import sys;sys.path.insert(0, 'fake_numpy');import torch; import torch.onnx"
|
||||
popd
|
||||
}
|
||||
|
||||
@ -1174,12 +1131,6 @@ test_custom_backend() {
|
||||
|
||||
test_custom_script_ops() {
|
||||
echo "Testing custom script operators"
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *s390x* ]]; then
|
||||
echo "Skipping custom script operators until it's fixed"
|
||||
return 0
|
||||
fi
|
||||
|
||||
CUSTOM_OP_BUILD="${CUSTOM_TEST_ARTIFACT_BUILD_DIR}/custom-op-build"
|
||||
pushd test/custom_operator
|
||||
cp -a "$CUSTOM_OP_BUILD" build
|
||||
@ -1332,13 +1283,10 @@ EOF
|
||||
|
||||
# Step 2. Make sure that the public API test "test_correct_module_names" fails when an existing
|
||||
# file is modified to introduce an invalid public API function.
|
||||
# The filepath here must not have __all__ defined in it, otherwise the test will pass.
|
||||
# If your PR introduces __all__ to torch/cuda/streams.py please point this to another file
|
||||
# that does not have __all__ defined.
|
||||
EXISTING_FILEPATH="${TORCH_INSTALL_DIR}/cuda/streams.py"
|
||||
EXISTING_FILEPATH="${TORCH_INSTALL_DIR}/nn/parameter.py"
|
||||
cp -v "${EXISTING_FILEPATH}" "${EXISTING_FILEPATH}.orig"
|
||||
echo "${BAD_PUBLIC_FUNC}" >> "${EXISTING_FILEPATH}"
|
||||
invalid_api="torch.cuda.streams.new_public_func"
|
||||
invalid_api="torch.nn.parameter.new_public_func"
|
||||
echo "Appended an invalid public API function to existing file ${EXISTING_FILEPATH}..."
|
||||
|
||||
check_public_api_test_fails \
|
||||
@ -1493,8 +1441,8 @@ test_bazel() {
|
||||
|
||||
test_benchmarks() {
|
||||
if [[ "$BUILD_ENVIRONMENT" == *cuda* && $TEST_CONFIG != *nogpu* ]]; then
|
||||
pip_install "pytest-benchmark==3.2.3"
|
||||
pip_install "requests"
|
||||
pip_install --user "pytest-benchmark==3.2.3"
|
||||
pip_install --user "requests"
|
||||
BENCHMARK_DATA="benchmarks/.data"
|
||||
mkdir -p ${BENCHMARK_DATA}
|
||||
pytest benchmarks/fastrnns/test_bench.py --benchmark-sort=Name --benchmark-json=${BENCHMARK_DATA}/fastrnns_default.json --fuser=default --executor=default
|
||||
@ -1602,12 +1550,11 @@ test_operator_benchmark() {
|
||||
test_inductor_set_cpu_affinity
|
||||
|
||||
cd benchmarks/operator_benchmark/pt_extension
|
||||
python -m pip install .
|
||||
python setup.py install
|
||||
|
||||
cd "${TEST_DIR}"/benchmarks/operator_benchmark
|
||||
$TASKSET python -m benchmark_all_test --device "$1" --tag-filter "$2" \
|
||||
--output-csv "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.csv" \
|
||||
--output-json-for-dashboard "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.json" \
|
||||
--output-dir "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.csv"
|
||||
|
||||
pip_install pandas
|
||||
python check_perf_csv.py \
|
||||
@ -1622,13 +1569,7 @@ if ! [[ "${BUILD_ENVIRONMENT}" == *libtorch* || "${BUILD_ENVIRONMENT}" == *-baze
|
||||
fi
|
||||
if [[ "${TEST_CONFIG}" == *numpy_2* ]]; then
|
||||
# Install numpy-2.0.2 and compatible scipy & numba versions
|
||||
# Force re-install of pandas to avoid error where pandas checks numpy version from initial install and fails upon import
|
||||
TMP_PANDAS_VERSION=$(python -c "import pandas; print(pandas.__version__)" 2>/dev/null)
|
||||
if [ -n "$TMP_PANDAS_VERSION" ]; then
|
||||
python -m pip install --pre numpy==2.0.2 scipy==1.13.1 numba==0.60.0 pandas=="$TMP_PANDAS_VERSION" --force-reinstall
|
||||
else
|
||||
python -m pip install --pre numpy==2.0.2 scipy==1.13.1 numba==0.60.0
|
||||
fi
|
||||
python -mpip install --pre numpy==2.0.2 scipy==1.13.1 numba==0.60.0
|
||||
python test/run_test.py --include dynamo/test_functions.py dynamo/test_unspec.py test_binary_ufuncs.py test_fake_tensor.py test_linalg.py test_numpy_interop.py test_tensor_creation_ops.py test_torch.py torch_np/test_basic.py
|
||||
elif [[ "${BUILD_ENVIRONMENT}" == *aarch64* && "${TEST_CONFIG}" != *perf_cpu_aarch64* ]]; then
|
||||
test_linux_aarch64
|
||||
@ -1682,40 +1623,52 @@ elif [[ "${TEST_CONFIG}" == *timm* ]]; then
|
||||
id=$((SHARD_NUMBER-1))
|
||||
test_dynamo_benchmark timm_models "$id"
|
||||
elif [[ "${TEST_CONFIG}" == cachebench ]]; then
|
||||
install_torchaudio
|
||||
install_torchaudio cuda
|
||||
install_torchvision
|
||||
PYTHONPATH=/torchbench test_cachebench
|
||||
checkout_install_torchbench nanogpt BERT_pytorch resnet50 hf_T5 llama moco
|
||||
PYTHONPATH=$(pwd)/torchbench test_cachebench
|
||||
elif [[ "${TEST_CONFIG}" == verify_cachebench ]]; then
|
||||
install_torchaudio
|
||||
install_torchaudio cpu
|
||||
install_torchvision
|
||||
PYTHONPATH=/torchbench test_verify_cachebench
|
||||
checkout_install_torchbench nanogpt
|
||||
PYTHONPATH=$(pwd)/torchbench test_verify_cachebench
|
||||
elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then
|
||||
install_torchaudio
|
||||
if [[ "${TEST_CONFIG}" == *cpu* ]]; then
|
||||
install_torchaudio cpu
|
||||
else
|
||||
install_torchaudio cuda
|
||||
fi
|
||||
install_torchvision
|
||||
install_torchao
|
||||
TORCH_CUDA_ARCH_LIST="8.0;8.6" pip_install git+https://github.com/pytorch/ao.git
|
||||
id=$((SHARD_NUMBER-1))
|
||||
# https://github.com/opencv/opencv-python/issues/885
|
||||
pip_install opencv-python==4.8.0.74
|
||||
if [[ "${TEST_CONFIG}" == *inductor_torchbench_smoketest_perf* ]]; then
|
||||
PYTHONPATH=/torchbench test_inductor_torchbench_smoketest_perf
|
||||
checkout_install_torchbench hf_Bert hf_Albert timm_vision_transformer
|
||||
PYTHONPATH=$(pwd)/torchbench test_inductor_torchbench_smoketest_perf
|
||||
elif [[ "${TEST_CONFIG}" == *inductor_torchbench_cpu_smoketest_perf* ]]; then
|
||||
PYTHONPATH=/torchbench test_inductor_torchbench_cpu_smoketest_perf
|
||||
checkout_install_torchbench timm_vision_transformer phlippe_densenet basic_gnn_edgecnn \
|
||||
llama_v2_7b_16h resnet50 timm_efficientnet mobilenet_v3_large timm_resnest \
|
||||
functorch_maml_omniglot yolov3 mobilenet_v2 resnext50_32x4d densenet121 mnasnet1_0
|
||||
PYTHONPATH=$(pwd)/torchbench test_inductor_torchbench_cpu_smoketest_perf
|
||||
elif [[ "${TEST_CONFIG}" == *torchbench_gcp_smoketest* ]]; then
|
||||
TORCHBENCHPATH=/torchbench test_torchbench_gcp_smoketest
|
||||
checkout_install_torchbench
|
||||
TORCHBENCHPATH=$(pwd)/torchbench test_torchbench_gcp_smoketest
|
||||
else
|
||||
checkout_install_torchbench
|
||||
# Do this after checkout_install_torchbench to ensure we clobber any
|
||||
# nightlies that torchbench may pull in
|
||||
if [[ "${TEST_CONFIG}" != *cpu* ]]; then
|
||||
install_torchrec_and_fbgemm
|
||||
fi
|
||||
PYTHONPATH=/torchbench test_dynamo_benchmark torchbench "$id"
|
||||
PYTHONPATH=$(pwd)/torchbench test_dynamo_benchmark torchbench "$id"
|
||||
fi
|
||||
elif [[ "${TEST_CONFIG}" == *inductor_cpp_wrapper* ]]; then
|
||||
install_torchaudio cuda
|
||||
install_torchvision
|
||||
PYTHONPATH=/torchbench test_inductor_cpp_wrapper_shard "$SHARD_NUMBER"
|
||||
if [[ "$SHARD_NUMBER" -eq "1" ]]; then
|
||||
test_inductor_aoti
|
||||
fi
|
||||
checkout_install_torchbench hf_T5 llama moco
|
||||
PYTHONPATH=$(pwd)/torchbench test_inductor_cpp_wrapper_shard "$SHARD_NUMBER"
|
||||
test_inductor_aoti
|
||||
elif [[ "${TEST_CONFIG}" == *inductor* ]]; then
|
||||
install_torchvision
|
||||
test_inductor_shard "${SHARD_NUMBER}"
|
||||
@ -1724,8 +1677,6 @@ elif [[ "${TEST_CONFIG}" == *inductor* ]]; then
|
||||
test_inductor_distributed
|
||||
fi
|
||||
fi
|
||||
elif [[ "${TEST_CONFIG}" == *einops* ]]; then
|
||||
test_einops
|
||||
elif [[ "${TEST_CONFIG}" == *dynamo_wrapped* ]]; then
|
||||
install_torchvision
|
||||
test_dynamo_wrapped_shard "${SHARD_NUMBER}"
|
||||
@ -1773,12 +1724,6 @@ elif [[ "${BUILD_ENVIRONMENT}" == *xpu* ]]; then
|
||||
test_xpu_bin
|
||||
elif [[ "${TEST_CONFIG}" == smoke ]]; then
|
||||
test_python_smoke
|
||||
elif [[ "${TEST_CONFIG}" == h100_distributed ]]; then
|
||||
test_h100_distributed
|
||||
elif [[ "${TEST_CONFIG}" == "h100-symm-mem" ]]; then
|
||||
test_h100_symm_mem
|
||||
elif [[ "${TEST_CONFIG}" == h100_cutlass_backend ]]; then
|
||||
test_h100_cutlass_backend
|
||||
else
|
||||
install_torchvision
|
||||
install_monkeytype
|
||||
|
||||
@ -1,34 +0,0 @@
|
||||
# If you want to rebuild, run this with $env:REBUILD=1
|
||||
# If you want to build with CUDA, run this with $env:USE_CUDA=1
|
||||
# If you want to build without CUDA, run this with $env:USE_CUDA=0
|
||||
|
||||
# Check for setup.py in the current directory
|
||||
if (-not (Test-Path "setup.py")) {
|
||||
Write-Host "ERROR: Please run this build script from PyTorch root directory."
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Get the script's parent directory
|
||||
$ScriptParentDir = Split-Path -Parent $MyInvocation.MyCommand.Definition
|
||||
|
||||
# Set TMP_DIR and convert to Windows path
|
||||
$env:TMP_DIR = Join-Path (Get-Location) "build\win_tmp"
|
||||
$env:TMP_DIR_WIN = $env:TMP_DIR # Already in Windows format, no cygpath needed
|
||||
|
||||
# Set final package directory with default fallback
|
||||
if (-not $env:PYTORCH_FINAL_PACKAGE_DIR) {
|
||||
$env:PYTORCH_FINAL_PACKAGE_DIR = "C:\w\build-results"
|
||||
}
|
||||
|
||||
# Create the final package directory if it doesn't exist
|
||||
if (-not (Test-Path $env:PYTORCH_FINAL_PACKAGE_DIR)) {
|
||||
New-Item -Path $env:PYTORCH_FINAL_PACKAGE_DIR -ItemType Directory -Force | Out-Null
|
||||
}
|
||||
|
||||
# Set script helpers directory
|
||||
$env:SCRIPT_HELPERS_DIR = Join-Path $ScriptParentDir "win-test-helpers\arm64"
|
||||
|
||||
# Run the main build script
|
||||
& "$env:SCRIPT_HELPERS_DIR\build_pytorch.ps1"
|
||||
|
||||
Write-Host "BUILD PASSED"
|
||||
@ -1,24 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -ex -o pipefail
|
||||
|
||||
SCRIPT_PARENT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
|
||||
# shellcheck source=./common.sh
|
||||
source "$SCRIPT_PARENT_DIR/common.sh"
|
||||
|
||||
run_tests() {
|
||||
echo Running smoke_test.py...
|
||||
python ./.ci/pytorch/smoke_test/smoke_test.py --package torchonly
|
||||
|
||||
echo Running test_autograd.oy, test_nn.py, test_torch.py...
|
||||
cd test
|
||||
|
||||
CORE_TEST_LIST=("test_autograd.py" "test_nn.py" "test_modules.py")
|
||||
|
||||
for t in "${CORE_TEST_LIST[@]}"; do
|
||||
echo "Running test: $t"
|
||||
python "$t" --verbose --save-xml --use-pytest -vvvv -rfEsxXP -p no:xdist
|
||||
done
|
||||
}
|
||||
|
||||
run_tests
|
||||
echo "TEST PASSED"
|
||||
@ -31,7 +31,7 @@ PYLONG_API_CHECK=$?
|
||||
if [[ $PYLONG_API_CHECK == 0 ]]; then
|
||||
echo "Usage of PyLong_{From,As}{Unsigned}Long API may lead to overflow errors on Windows"
|
||||
echo "because \`sizeof(long) == 4\` and \`sizeof(unsigned long) == 4\`."
|
||||
echo "Please include \"torch/csrc/utils/python_numbers.h\" and use the corresponding APIs instead."
|
||||
echo "Please include \"torch/csrc/utils/python_numbers.h\" and use the correspoding APIs instead."
|
||||
echo "PyLong_FromLong -> THPUtils_packInt32 / THPUtils_packInt64"
|
||||
echo "PyLong_AsLong -> THPUtils_unpackInt (32-bit) / THPUtils_unpackLong (64-bit)"
|
||||
echo "PyLong_FromUnsignedLong -> THPUtils_packUInt32 / THPUtils_packUInt64"
|
||||
|
||||
@ -1,98 +0,0 @@
|
||||
# TODO: we may can use existing build_pytorch.bat for arm64
|
||||
|
||||
if ($env:DEBUG -eq "1") {
|
||||
$env:BUILD_TYPE = "debug"
|
||||
} else {
|
||||
$env:BUILD_TYPE = "release"
|
||||
}
|
||||
|
||||
# This inflates our log size slightly, but it is REALLY useful to be
|
||||
# able to see what our cl.exe commands are. (since you can actually
|
||||
# just copy-paste them into a local Windows setup to just rebuild a
|
||||
# single file.)
|
||||
# log sizes are too long, but leaving this here in case someone wants to use it locally
|
||||
# $env:CMAKE_VERBOSE_MAKEFILE = "1"
|
||||
|
||||
$env:INSTALLER_DIR = Join-Path $env:SCRIPT_HELPERS_DIR "installation-helpers"
|
||||
|
||||
cd ..
|
||||
|
||||
# Environment variables
|
||||
$env:SCCACHE_IDLE_TIMEOUT = "0"
|
||||
$env:SCCACHE_IGNORE_SERVER_IO_ERROR = "1"
|
||||
$env:CMAKE_BUILD_TYPE = $env:BUILD_TYPE
|
||||
$env:CMAKE_C_COMPILER_LAUNCHER = "sccache"
|
||||
$env:CMAKE_CXX_COMPILER_LAUNCHER = "sccache"
|
||||
$env:libuv_ROOT = Join-Path $env:DEPENDENCIES_DIR "libuv\install"
|
||||
$env:MSSdk = "1"
|
||||
|
||||
if ($env:PYTORCH_BUILD_VERSION) {
|
||||
$env:PYTORCH_BUILD_VERSION = $env:PYTORCH_BUILD_VERSION
|
||||
$env:PYTORCH_BUILD_NUMBER = "1"
|
||||
}
|
||||
|
||||
$env:CMAKE_POLICY_VERSION_MINIMUM = "3.5"
|
||||
|
||||
# Set BLAS type
|
||||
if ($env:ENABLE_APL -eq "1") {
|
||||
$env:BLAS = "APL"
|
||||
$env:USE_LAPACK = "1"
|
||||
} elseif ($env:ENABLE_OPENBLAS -eq "1") {
|
||||
$env:BLAS = "OpenBLAS"
|
||||
$env:OpenBLAS_HOME = Join-Path $env:DEPENDENCIES_DIR "OpenBLAS\install"
|
||||
}
|
||||
|
||||
# Change to source directory
|
||||
Set-Location $env:PYTORCH_ROOT
|
||||
|
||||
# Copy libuv.dll
|
||||
Copy-Item -Path (Join-Path $env:libuv_ROOT "lib\Release\uv.dll") -Destination "torch\lib\uv.dll" -Force
|
||||
|
||||
# Create virtual environment
|
||||
python -m venv .venv
|
||||
.\.venv\Scripts\Activate.ps1
|
||||
where.exe python
|
||||
|
||||
# Python install dependencies
|
||||
python -m pip install --upgrade pip
|
||||
pip install setuptools pyyaml
|
||||
pip install -r requirements.txt
|
||||
|
||||
# Set after installing psutil
|
||||
$env:DISTUTILS_USE_SDK = "1"
|
||||
|
||||
# Print all environment variables
|
||||
Get-ChildItem Env:
|
||||
|
||||
# Start and inspect sccache
|
||||
sccache --start-server
|
||||
sccache --zero-stats
|
||||
sccache --show-stats
|
||||
|
||||
# Build the wheel
|
||||
python setup.py bdist_wheel
|
||||
if ($LASTEXITCODE -ne 0) { exit 1 }
|
||||
|
||||
# Install the wheel locally
|
||||
$whl = Get-ChildItem -Path "dist\*.whl" | Select-Object -First 1
|
||||
if ($whl) {
|
||||
python -mpip install --no-index --no-deps $whl.FullName
|
||||
}
|
||||
|
||||
# Copy final wheel
|
||||
robocopy "dist" "$env:PYTORCH_FINAL_PACKAGE_DIR" *.whl
|
||||
|
||||
# Export test times
|
||||
python tools/stats/export_test_times.py
|
||||
|
||||
# Copy additional CI files
|
||||
robocopy ".additional_ci_files" "$env:PYTORCH_FINAL_PACKAGE_DIR\.additional_ci_files" /E
|
||||
|
||||
# Save ninja log
|
||||
Copy-Item -Path "build\.ninja_log" -Destination $env:PYTORCH_FINAL_PACKAGE_DIR -Force
|
||||
|
||||
# Final sccache stats and stop
|
||||
sccache --show-stats
|
||||
sccache --stop-server
|
||||
|
||||
exit 0
|
||||
@ -10,7 +10,7 @@ set PATH=C:\Program Files\CMake\bin;C:\Program Files\7-Zip;C:\ProgramData\chocol
|
||||
:: able to see what our cl.exe commands are (since you can actually
|
||||
:: just copy-paste them into a local Windows setup to just rebuild a
|
||||
:: single file.)
|
||||
:: log sizes are too long, but leaving this here in case someone wants to use it locally
|
||||
:: log sizes are too long, but leaving this here incase someone wants to use it locally
|
||||
:: set CMAKE_VERBOSE_MAKEFILE=1
|
||||
|
||||
|
||||
@ -42,7 +42,7 @@ call choco upgrade -y cmake --no-progress --installargs 'ADD_CMAKE_TO_PATH=Syste
|
||||
if errorlevel 1 goto fail
|
||||
if not errorlevel 0 goto fail
|
||||
|
||||
call pip install mkl==2024.2.0 mkl-static==2024.2.0 mkl-include==2024.2.0
|
||||
call pip install mkl-include==2021.4.0 mkl-devel==2021.4.0
|
||||
if errorlevel 1 goto fail
|
||||
if not errorlevel 0 goto fail
|
||||
|
||||
|
||||
@ -52,7 +52,7 @@ if __name__ == "__main__":
|
||||
if os.path.exists(debugger):
|
||||
command_args = [debugger, "-o", "-c", "~*g; q"] + command_args
|
||||
command_string = " ".join(command_args)
|
||||
print("Rerunning with traceback enabled")
|
||||
print("Reruning with traceback enabled")
|
||||
print("Command:", command_string)
|
||||
subprocess.run(command_args, check=False)
|
||||
sys.exit(e.returncode)
|
||||
|
||||
@ -41,7 +41,7 @@ fi
|
||||
python -m pip install pytest-rerunfailures==10.3 pytest-cpp==2.3.0 tensorboard==2.13.0 protobuf==5.29.4 pytest-subtests==0.13.1
|
||||
|
||||
# Install Z3 optional dependency for Windows builds.
|
||||
python -m pip install z3-solver==4.15.1.0
|
||||
python -m pip install z3-solver==4.12.2.0
|
||||
|
||||
# Install tlparse for test\dynamo\test_structured_trace.py UTs.
|
||||
python -m pip install tlparse==0.3.30
|
||||
@ -52,9 +52,6 @@ python -m pip install parameterized==0.8.1
|
||||
# Install pulp for testing ilps under torch\distributed\_tools
|
||||
python -m pip install pulp==2.9.0
|
||||
|
||||
# Install expecttest to merge https://github.com/pytorch/pytorch/pull/155308
|
||||
python -m pip install expecttest==0.3.0
|
||||
|
||||
run_tests() {
|
||||
# Run nvidia-smi if available
|
||||
for path in '/c/Program Files/NVIDIA Corporation/NVSMI/nvidia-smi.exe' /c/Windows/System32/nvidia-smi.exe; do
|
||||
|
||||
59
.ci/pytorch/windows/cuda118.bat
Normal file
@ -0,0 +1,59 @@
|
||||
@echo off
|
||||
|
||||
set MODULE_NAME=pytorch
|
||||
|
||||
IF NOT EXIST "setup.py" IF NOT EXIST "%MODULE_NAME%" (
|
||||
call internal\clone.bat
|
||||
cd %~dp0
|
||||
) ELSE (
|
||||
call internal\clean.bat
|
||||
)
|
||||
IF ERRORLEVEL 1 goto :eof
|
||||
|
||||
call internal\check_deps.bat
|
||||
IF ERRORLEVEL 1 goto :eof
|
||||
|
||||
REM Check for optional components
|
||||
|
||||
set USE_CUDA=
|
||||
set CMAKE_GENERATOR=Visual Studio 15 2017 Win64
|
||||
|
||||
IF "%NVTOOLSEXT_PATH%"=="" (
|
||||
IF EXIST "C:\Program Files\NVIDIA Corporation\NvToolsExt\lib\x64\nvToolsExt64_1.lib" (
|
||||
set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt
|
||||
) ELSE (
|
||||
echo NVTX ^(Visual Studio Extension ^for CUDA^) ^not installed, failing
|
||||
exit /b 1
|
||||
)
|
||||
)
|
||||
|
||||
IF "%CUDA_PATH_V118%"=="" (
|
||||
IF EXIST "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8\bin\nvcc.exe" (
|
||||
set "CUDA_PATH_V118=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8"
|
||||
) ELSE (
|
||||
echo CUDA 11.8 not found, failing
|
||||
exit /b 1
|
||||
)
|
||||
)
|
||||
|
||||
IF "%BUILD_VISION%" == "" (
|
||||
set TORCH_CUDA_ARCH_LIST=3.7+PTX;5.0;6.0;6.1;7.0;7.5;8.0;8.6;9.0
|
||||
set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
|
||||
) ELSE (
|
||||
set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90
|
||||
)
|
||||
|
||||
set "CUDA_PATH=%CUDA_PATH_V118%"
|
||||
set "PATH=%CUDA_PATH_V118%\bin;%PATH%"
|
||||
|
||||
:optcheck
|
||||
|
||||
call internal\check_opts.bat
|
||||
IF ERRORLEVEL 1 goto :eof
|
||||
|
||||
if exist "%NIGHTLIES_PYTORCH_ROOT%" cd %NIGHTLIES_PYTORCH_ROOT%\..
|
||||
call %~dp0\internal\copy.bat
|
||||
IF ERRORLEVEL 1 goto :eof
|
||||
|
||||
call %~dp0\internal\setup.bat
|
||||
IF ERRORLEVEL 1 goto :eof
|
||||
@ -27,24 +27,24 @@ IF "%NVTOOLSEXT_PATH%"=="" (
|
||||
)
|
||||
)
|
||||
|
||||
IF "%CUDA_PATH_V129%"=="" (
|
||||
IF EXIST "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.9\bin\nvcc.exe" (
|
||||
set "CUDA_PATH_V129=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.9"
|
||||
IF "%CUDA_PATH_V124%"=="" (
|
||||
IF EXIST "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\bin\nvcc.exe" (
|
||||
set "CUDA_PATH_V124=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4"
|
||||
) ELSE (
|
||||
echo CUDA 12.9 not found, failing
|
||||
echo CUDA 12.4 not found, failing
|
||||
exit /b 1
|
||||
)
|
||||
)
|
||||
|
||||
IF "%BUILD_VISION%" == "" (
|
||||
set TORCH_CUDA_ARCH_LIST=7.0;7.5;8.0;8.6;9.0;10.0;12.0
|
||||
set TORCH_CUDA_ARCH_LIST=6.1;7.0;7.5;8.0;8.6;9.0
|
||||
set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
|
||||
) ELSE (
|
||||
set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90 -gencode=arch=compute_100,code=compute_100 -gencode=arch=compute_120,code=compute_120
|
||||
set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90
|
||||
)
|
||||
|
||||
set "CUDA_PATH=%CUDA_PATH_V129%"
|
||||
set "PATH=%CUDA_PATH_V129%\bin;%PATH%"
|
||||
set "CUDA_PATH=%CUDA_PATH_V124%"
|
||||
set "PATH=%CUDA_PATH_V124%\bin;%PATH%"
|
||||
|
||||
:optcheck
|
||||
|
||||
@ -65,7 +65,7 @@ for /F "usebackq delims=" %%i in (`python -c "import sys; print('{0[0]}{0[1]}'.f
|
||||
if %PYVER% LSS 35 (
|
||||
echo Warning: PyTorch for Python 2 under Windows is experimental.
|
||||
echo Python x64 3.5 or up is recommended to compile PyTorch on Windows
|
||||
echo Maybe you can create a virtual environment if you have conda installed:
|
||||
echo Maybe you can create a virual environment if you have conda installed:
|
||||
echo ^> conda create -n test python=3.6 pyyaml numpy
|
||||
echo ^> activate test
|
||||
)
|
||||
|
||||
@ -8,7 +8,6 @@ copy "%CUDA_PATH%\bin\cusolver*64_*.dll*" pytorch\torch\lib
|
||||
copy "%CUDA_PATH%\bin\cudnn*64_*.dll*" pytorch\torch\lib
|
||||
copy "%CUDA_PATH%\bin\nvrtc*64_*.dll*" pytorch\torch\lib
|
||||
copy "%CUDA_PATH%\extras\CUPTI\lib64\cupti64_*.dll*" pytorch\torch\lib
|
||||
copy "%CUDA_PATH%\extras\CUPTI\lib64\nvperf_host*.dll*" pytorch\torch\lib
|
||||
|
||||
copy "C:\Program Files\NVIDIA Corporation\NvToolsExt\bin\x64\nvToolsExt64_1.dll*" pytorch\torch\lib
|
||||
copy "%PYTHON_LIB_PATH%\libiomp*5md.dll" pytorch\torch\lib
|
||||
|
||||
@ -23,13 +23,66 @@ set CUDNN_LIB_FOLDER="lib\x64"
|
||||
:: Skip all of this if we already have cuda installed
|
||||
if exist "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin\nvcc.exe" goto set_cuda_env_vars
|
||||
|
||||
if %CUDA_VER% EQU 118 goto cuda118
|
||||
if %CUDA_VER% EQU 124 goto cuda124
if %CUDA_VER% EQU 126 goto cuda126
if %CUDA_VER% EQU 128 goto cuda128
if %CUDA_VER% EQU 129 goto cuda129

echo CUDA %CUDA_VERSION_STR% is not supported
exit /b 1

:cuda118

set CUDA_INSTALL_EXE=cuda_11.8.0_522.06_windows.exe
if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" & REM @lint-ignore
if errorlevel 1 exit /b 1
set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
set "ARGS=cuda_profiler_api_11.8 thrust_11.8 nvcc_11.8 cuobjdump_11.8 nvprune_11.8 nvprof_11.8 cupti_11.8 cublas_11.8 cublas_dev_11.8 cudart_11.8 cufft_11.8 cufft_dev_11.8 curand_11.8 curand_dev_11.8 cusolver_11.8 cusolver_dev_11.8 cusparse_11.8 cusparse_dev_11.8 npp_11.8 npp_dev_11.8 nvrtc_11.8 nvrtc_dev_11.8 nvml_dev_11.8 nvtx_11.8"
)

set CUDNN_FOLDER=cudnn-windows-x86_64-9.5.0.50_cuda11-archive
set CUDNN_LIB_FOLDER="lib"
set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" & REM @lint-ignore
if errorlevel 1 exit /b 1
set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
)

@REM cuDNN 8.3+ required zlib to be installed on the path
echo Installing ZLIB dlls
curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda124

set CUDA_INSTALL_EXE=cuda_12.4.0_551.61_windows.exe
if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" & REM @lint-ignore
if errorlevel 1 exit /b 1
set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
set "ARGS=cuda_profiler_api_12.4 thrust_12.4 nvcc_12.4 cuobjdump_12.4 nvprune_12.4 nvprof_12.4 cupti_12.4 cublas_12.4 cublas_dev_12.4 cudart_12.4 cufft_12.4 cufft_dev_12.4 curand_12.4 curand_dev_12.4 cusolver_12.4 cusolver_dev_12.4 cusparse_12.4 cusparse_dev_12.4 npp_12.4 npp_dev_12.4 nvrtc_12.4 nvrtc_dev_12.4 nvml_dev_12.4 nvjitlink_12.4 nvtx_12.4"
)

set CUDNN_FOLDER=cudnn-windows-x86_64-9.5.0.50_cuda12-archive
set CUDNN_LIB_FOLDER="lib"
set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" & REM @lint-ignore
if errorlevel 1 exit /b 1
set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
)

@REM cuDNN 8.3+ required zlib to be installed on the path
echo Installing ZLIB dlls
curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda126
@ -86,33 +139,6 @@ xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda129

set CUDA_INSTALL_EXE=cuda_12.9.1_576.57_windows.exe
if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" & REM @lint-ignore
if errorlevel 1 exit /b 1
set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
set "ARGS=cuda_profiler_api_12.9 thrust_12.9 nvcc_12.9 cuobjdump_12.9 nvprune_12.9 nvprof_12.9 cupti_12.9 cublas_12.9 cublas_dev_12.9 cudart_12.9 cufft_12.9 cufft_dev_12.9 curand_12.9 curand_dev_12.9 cusolver_12.9 cusolver_dev_12.9 cusparse_12.9 cusparse_dev_12.9 npp_12.9 npp_dev_12.9 nvrtc_12.9 nvrtc_dev_12.9 nvml_dev_12.9 nvjitlink_12.9 nvtx_12.9"
)

set CUDNN_FOLDER=cudnn-windows-x86_64-9.10.2.21_cuda12-archive
set CUDNN_LIB_FOLDER="lib"
set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" & REM @lint-ignore
if errorlevel 1 exit /b 1
set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
)

@REM cuDNN 8.3+ required zlib to be installed on the path
echo Installing ZLIB dlls
curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda_common
:: NOTE: We only install CUDA if we don't have it installed already.
:: With GHA runners these should be pre-installed as part of our AMI process

@ -18,5 +18,3 @@ start /wait "" python-amd64.exe /quiet InstallAllUsers=1 PrependPath=0 Include_t
if errorlevel 1 exit /b 1

set "PATH=%CD%\Python\Scripts;%CD%\Python;%PATH%"
%PYTHON_EXEC% -m pip install --upgrade pip setuptools packaging wheel
if errorlevel 1 exit /b 1

@ -99,6 +99,7 @@ goto end
:libtorch
echo "install and test libtorch"

if "%VC_YEAR%" == "2019" powershell internal\vs2019_install.ps1
if "%VC_YEAR%" == "2022" powershell internal\vs2022_install.ps1

if ERRORLEVEL 1 exit /b 1
@ -110,6 +111,10 @@ pushd tmp\libtorch

set VC_VERSION_LOWER=17
set VC_VERSION_UPPER=18
IF "%VC_YEAR%" == "2019" (
set VC_VERSION_LOWER=16
set VC_VERSION_UPPER=17
)

for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [%VC_VERSION_LOWER%^,%VC_VERSION_UPPER%^) -property installationPath`) do (
if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (
@ -148,7 +153,14 @@ if "%NVIDIA_GPU_EXISTS%" == "0" (
goto end
)

cl %PYTORCH_ROOT%\.ci\pytorch\test_example_code\check-torch-cuda.cpp torch_cpu.lib c10.lib torch_cuda.lib /EHsc /std:c++17 /link /INCLUDE:?warp_size@cuda@at@@YAHXZ
set BUILD_SPLIT_CUDA=
if exist "%install_root%\lib\torch_cuda_cu.lib" if exist "%install_root%\lib\torch_cuda_cpp.lib" set BUILD_SPLIT_CUDA=ON

if "%BUILD_SPLIT_CUDA%" == "ON" (
cl %PYTORCH_ROOT%\.ci\pytorch\test_example_code\check-torch-cuda.cpp torch_cpu.lib c10.lib torch_cuda_cu.lib torch_cuda_cpp.lib /EHsc /std:c++17 /link /INCLUDE:?warp_size@cuda@at@@YAHXZ /INCLUDE:?_torch_cuda_cu_linker_symbol_op_cuda@native@at@@YA?AVTensor@2@AEBV32@@Z
) else (
cl %PYTORCH_ROOT%\.ci\pytorch\test_example_code\check-torch-cuda.cpp torch_cpu.lib c10.lib torch_cuda.lib /EHsc /std:c++17 /link /INCLUDE:?warp_size@cuda@at@@YAHXZ
)
.\check-torch-cuda.exe
if ERRORLEVEL 1 exit /b 1

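(Not part of the diff above.) A minimal Python sketch of the split-CUDA link selection that the libtorch smoke-test hunk implements: link against torch_cuda_cu.lib/torch_cuda_cpp.lib only when both split libraries exist, otherwise fall back to torch_cuda.lib. The install_root path and helper name below are made up for illustration.

# Illustrative only; install_root and library names are assumptions taken from the hunk above.
from pathlib import Path

def cuda_link_libs(install_root: str) -> list[str]:
    lib = Path(install_root) / "lib"
    split = (lib / "torch_cuda_cu.lib").exists() and (lib / "torch_cuda_cpp.lib").exists()
    common = ["torch_cpu.lib", "c10.lib"]
    return common + (["torch_cuda_cu.lib", "torch_cuda_cpp.lib"] if split else ["torch_cuda.lib"])

print(cuda_link_libs(r"C:\libtorch"))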
@ -1,7 +1,14 @@
if "%VC_YEAR%" == "2019" powershell windows/internal/vs2019_install.ps1
if "%VC_YEAR%" == "2022" powershell windows/internal/vs2022_install.ps1

set VC_VERSION_LOWER=17
set VC_VERSION_UPPER=18
:: Please don't delete VS2019 as an alternative, in case some Windows compiler issue.
:: Reference: https://github.com/pytorch/pytorch/issues/145702#issuecomment-2858693930
if "%VC_YEAR%" == "2019" (
set VC_VERSION_LOWER=16
set VC_VERSION_UPPER=17
)

for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -products Microsoft.VisualStudio.Product.BuildTools -version [%VC_VERSION_LOWER%^,%VC_VERSION_UPPER%^) -property installationPath`) do (
if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (

48
.ci/pytorch/windows/internal/vs2019_install.ps1
Normal file
@ -0,0 +1,48 @@
# https://developercommunity.visualstudio.com/t/install-specific-version-of-vs-component/1142479
# https://docs.microsoft.com/en-us/visualstudio/releases/2019/history#release-dates-and-build-numbers

# 16.8.6 BuildTools
$VS_DOWNLOAD_LINK = "https://ossci-windows.s3.us-east-1.amazonaws.com/vs16.8.6_BuildTools.exe"
$COLLECT_DOWNLOAD_LINK = "https://aka.ms/vscollect.exe"
$VS_INSTALL_ARGS = @("--nocache","--quiet","--wait", "--add Microsoft.VisualStudio.Workload.VCTools",
"--add Microsoft.Component.MSBuild",
"--add Microsoft.VisualStudio.Component.Roslyn.Compiler",
"--add Microsoft.VisualStudio.Component.TextTemplating",
"--add Microsoft.VisualStudio.Component.VC.CoreIde",
"--add Microsoft.VisualStudio.Component.VC.Redist.14.Latest",
"--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core",
"--add Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
"--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Win81")

curl.exe --retry 3 -kL $VS_DOWNLOAD_LINK --output vs_installer.exe
if ($LASTEXITCODE -ne 0) {
echo "Download of the VS 2019 Version 16.8.5 installer failed"
exit 1
}

if (Test-Path "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe") {
$existingPath = & "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -products "Microsoft.VisualStudio.Product.BuildTools" -version "[16, 17)" -property installationPath
if ($existingPath -ne $null) {
if (!${env:CIRCLECI}) {
echo "Found correctly versioned existing BuildTools installation in $existingPath"
exit 0
}
echo "Found existing BuildTools installation in $existingPath, keeping it"
}
}

$process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_INSTALL_ARGS -NoNewWindow -Wait -PassThru
Remove-Item -Path vs_installer.exe -Force
$exitCode = $process.ExitCode
if (($exitCode -ne 0) -and ($exitCode -ne 3010)) {
echo "VS 2019 installer exited with code $exitCode, which should be one of [0, 3010]."
curl.exe --retry 3 -kL $COLLECT_DOWNLOAD_LINK --output Collect.exe
if ($LASTEXITCODE -ne 0) {
echo "Download of the VS Collect tool failed."
exit 1
}
Start-Process "${PWD}\Collect.exe" -NoNewWindow -Wait -PassThru
New-Item -Path "C:\w\build-results" -ItemType "directory" -Force
Copy-Item -Path "C:\Users\${env:USERNAME}\AppData\Local\Temp\vslogs.zip" -Destination "C:\w\build-results\"
exit 1
}
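(Not part of the diff.) The new vs2019_install.ps1 treats installer exit codes 0 and 3010 as acceptable; a rough Python sketch of that convention, assuming 3010 means "success, reboot required" as the script does. The installer path and arguments are placeholders.

# Illustrative only; mirrors the exit-code handling in the PowerShell script above.
import subprocess

def run_installer(path: str, args: list[str]) -> bool:
    proc = subprocess.run([path, *args])
    return proc.returncode in (0, 3010)  # anything else is treated as a failure

if __name__ == "__main__":
    ok = run_installer(r".\vs_installer.exe", ["--quiet", "--wait"])
    print("installer ok:", ok)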
@ -25,8 +25,8 @@ set XPU_EXTRA_INSTALLED=0
set XPU_EXTRA_UNINSTALL=0

if not [%XPU_VERSION%]==[] if [%XPU_VERSION%]==[2025.1] (
set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/75d4eb97-914a-4a95-852c-7b9733d80f74/intel-deep-learning-essentials-2025.1.3.8_offline.exe
set XPU_BUNDLE_VERSION=2025.1.3+5
set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/1a9fff3d-04c2-4d77-8861-3d86c774b66f/intel-deep-learning-essentials-2025.1.1.26_offline.exe
set XPU_BUNDLE_VERSION=2025.1.1+23
)

:: Check if XPU bundle is target version or already installed

@ -127,7 +127,7 @@ export INSTALL_TEST=0 # dont install test binaries into site-packages
export MACOSX_DEPLOYMENT_TARGET=10.15
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}

SETUPTOOLS_PINNED_VERSION="==70.1.0"
SETUPTOOLS_PINNED_VERSION="=46.0.0"
PYYAML_PINNED_VERSION="=5.3"
EXTRA_CONDA_INSTALL_FLAGS=""
CONDA_ENV_CREATE_FLAGS=""
@ -135,7 +135,7 @@ RENAME_WHEEL=true
case $desired_python in
3.13t)
echo "Using 3.13 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
SETUPTOOLS_PINNED_VERSION=">=68.0.0"
PYYAML_PINNED_VERSION=">=6.0.1"
NUMPY_PINNED_VERSION="=2.1.0"
CONDA_ENV_CREATE_FLAGS="python-freethreading"
@ -145,31 +145,31 @@ case $desired_python in
;;
3.13)
echo "Using 3.13 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
SETUPTOOLS_PINNED_VERSION=">=68.0.0"
PYYAML_PINNED_VERSION=">=6.0.1"
NUMPY_PINNED_VERSION="=2.1.0"
;;
3.12)
echo "Using 3.12 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
SETUPTOOLS_PINNED_VERSION=">=68.0.0"
PYYAML_PINNED_VERSION=">=6.0.1"
NUMPY_PINNED_VERSION="=2.0.2"
;;
3.11)
echo "Using 3.11 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
SETUPTOOLS_PINNED_VERSION=">=46.0.0"
PYYAML_PINNED_VERSION=">=5.3"
NUMPY_PINNED_VERSION="=2.0.2"
;;
3.10)
echo "Using 3.10 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
SETUPTOOLS_PINNED_VERSION=">=46.0.0"
PYYAML_PINNED_VERSION=">=5.3"
NUMPY_PINNED_VERSION="=2.0.2"
;;
3.9)
echo "Using 3.9 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
SETUPTOOLS_PINNED_VERSION=">=46.0.0"
PYYAML_PINNED_VERSION=">=5.3"
NUMPY_PINNED_VERSION="=2.0.2"
;;
@ -184,14 +184,16 @@ tmp_env_name="wheel_py$python_nodot"
conda create ${EXTRA_CONDA_INSTALL_FLAGS} -yn "$tmp_env_name" python="$desired_python" ${CONDA_ENV_CREATE_FLAGS}
source activate "$tmp_env_name"

retry pip install -r "${pytorch_rootdir}/requirements-build.txt"
pip install "numpy=${NUMPY_PINNED_VERSION}" "pyyaml${PYYAML_PINNED_VERSION}" requests ninja "setuptools${SETUPTOOLS_PINNED_VERSION}" typing-extensions
pip install "numpy=${NUMPY_PINNED_VERSION}" "pyyaml${PYYAML_PINNED_VERSION}" requests ninja "setuptools${SETUPTOOLS_PINNED_VERSION}" typing_extensions
retry pip install -r "${pytorch_rootdir}/requirements.txt" || true
retry brew install libomp

# For USE_DISTRIBUTED=1 on macOS, need libuv, which is build as part of tensorpipe submodule
export USE_DISTRIBUTED=1

if [[ -n "$CROSS_COMPILE_ARM64" ]]; then
export CMAKE_OSX_ARCHITECTURES=arm64
fi
export USE_MKLDNN=OFF
export USE_QNNPACK=OFF
export BUILD_TEST=OFF
@ -199,7 +201,16 @@ export BUILD_TEST=OFF
pushd "$pytorch_rootdir"
echo "Calling setup.py bdist_wheel at $(date)"

python setup.py bdist_wheel -d "$whl_tmp_dir"
if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
echo "Calling setup.py bdist_wheel for split build (BUILD_LIBTORCH_WHL)"
BUILD_LIBTORCH_WHL=1 BUILD_PYTHON_ONLY=0 python setup.py bdist_wheel -d "$whl_tmp_dir"
echo "Finished setup.py bdist_wheel for split build (BUILD_LIBTORCH_WHL)"
echo "Calling setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)"
BUILD_PYTHON_ONLY=1 BUILD_LIBTORCH_WHL=0 python setup.py bdist_wheel -d "$whl_tmp_dir" --cmake
echo "Finished setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)"
else
python setup.py bdist_wheel -d "$whl_tmp_dir"
fi

echo "Finished setup.py bdist_wheel at $(date)"

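(Not part of the diff.) The case statement above picks per-Python pins; here is a small illustrative dict-based equivalent, using the ">=70.1.0" setuptools side of the hunk and the effective pip specifiers. The helper name is invented.

# Illustrative only: a dict-based version of the per-Python pin selection done by the
# case statement above (values taken from the ">=70.1.0" side of the hunk, written as
# the effective pip requirement specifiers).
PINS = {
    "3.13t": {"setuptools": ">=70.1.0", "pyyaml": ">=6.0.1", "numpy": "==2.1.0"},
    "3.13":  {"setuptools": ">=70.1.0", "pyyaml": ">=6.0.1", "numpy": "==2.1.0"},
    "3.12":  {"setuptools": ">=70.1.0", "pyyaml": ">=6.0.1", "numpy": "==2.0.2"},
    "3.11":  {"setuptools": ">=70.1.0", "pyyaml": ">=5.3",   "numpy": "==2.0.2"},
    "3.10":  {"setuptools": ">=70.1.0", "pyyaml": ">=5.3",   "numpy": "==2.0.2"},
    "3.9":   {"setuptools": ">=70.1.0", "pyyaml": ">=5.3",   "numpy": "==2.0.2"},
}

def pip_requirements(desired_python: str) -> list[str]:
    pins = PINS[desired_python]
    return [f"{name}{spec}" for name, spec in pins.items()]

print(pip_requirements("3.12"))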
@ -65,8 +65,16 @@ fi

if [[ "$PACKAGE_TYPE" != libtorch ]]; then
if [[ "\$BUILD_ENVIRONMENT" != *s390x* ]]; then
pip install "\$pkg" --index-url "https://download.pytorch.org/whl/\${CHANNEL}/${DESIRED_CUDA}"
retry pip install -q numpy protobuf typing-extensions
if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
pkg_no_python="$(ls -1 /final_pkgs/torch_no_python* | sort |tail -1)"
pkg_torch="$(ls -1 /final_pkgs/torch-* | sort |tail -1)"
# todo: after folder is populated use the pypi_pkg channel instead
pip install "\$pkg_no_python" "\$pkg_torch" --index-url "https://download.pytorch.org/whl/\${CHANNEL}/${DESIRED_CUDA}_pypi_pkg"
retry pip install -q numpy protobuf typing-extensions
else
pip install "\$pkg" --index-url "https://download.pytorch.org/whl/\${CHANNEL}/${DESIRED_CUDA}"
retry pip install -q numpy protobuf typing-extensions
fi
else
pip install "\$pkg"
retry pip install -q numpy protobuf typing-extensions

@ -75,8 +75,8 @@ TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_version.txt)
# Here PYTORCH_EXTRA_INSTALL_REQUIREMENTS is already set for the all the wheel builds hence append TRITON_CONSTRAINT
TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64'"

# CUDA 12.9 builds have triton for Linux and Linux aarch64 binaries.
if [[ "$DESIRED_CUDA" == "cu129" ]]; then
# CUDA 12.8 builds have triton for Linux and Linux aarch64 binaries.
if [[ "$DESIRED_CUDA" == cu128 ]]; then
TRITON_CONSTRAINT="platform_system == 'Linux'"
fi

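(Not part of the diff.) The TRITON_CONSTRAINT strings above are PEP 508 environment markers; a short sketch of evaluating one on the current machine with the packaging library, assuming it is installed:

# Illustrative only: check whether the triton environment-marker constraint applies here.
from packaging.markers import Marker

constraint = Marker("platform_system == 'Linux' and platform_machine == 'x86_64'")
print("triton requirement applies here:", constraint.evaluate())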
@ -105,7 +105,6 @@ fi

# Set triton via PYTORCH_EXTRA_INSTALL_REQUIREMENTS for triton xpu package
if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*xpu.* ]]; then
TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_xpu_version.txt)
TRITON_REQUIREMENT="pytorch-triton-xpu==${TRITON_VERSION}"
if [[ -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*dev.* ]]; then
TRITON_SHORTHASH=$(cut -c1-8 $PYTORCH_ROOT/.ci/docker/ci_commit_pins/triton-xpu.txt)
@ -134,6 +133,7 @@ export DESIRED_PYTHON="${DESIRED_PYTHON:-}"
export DESIRED_CUDA="$DESIRED_CUDA"
export LIBTORCH_VARIANT="${LIBTORCH_VARIANT:-}"
export BUILD_PYTHONLESS="${BUILD_PYTHONLESS:-}"
export USE_SPLIT_BUILD="${USE_SPLIT_BUILD:-}"
if [[ "${OSTYPE}" == "msys" ]]; then
export LIBTORCH_CONFIG="${LIBTORCH_CONFIG:-}"
if [[ "${LIBTORCH_CONFIG:-}" == 'debug' ]]; then

@ -23,6 +23,10 @@ if [[ "${DRY_RUN}" = "disabled" ]]; then
AWS_S3_CP="aws s3 cp"
fi

if [[ "${USE_SPLIT_BUILD:-false}" == "true" ]]; then
UPLOAD_SUBFOLDER="${UPLOAD_SUBFOLDER}_pypi_pkg"
fi

# this is special build with all dependencies packaged
if [[ ${BUILD_NAME} == *-full* ]]; then
UPLOAD_SUBFOLDER="${UPLOAD_SUBFOLDER}_full"

157
.circleci/scripts/trigger_azure_pipeline.py
Normal file
@ -0,0 +1,157 @@
# Documentation: https://docs.microsoft.com/en-us/rest/api/azure/devops/build/?view=azure-devops-rest-6.0

import json
import os
import re
import sys
import time

import requests


AZURE_PIPELINE_BASE_URL = "https://aiinfra.visualstudio.com/PyTorch/"
AZURE_DEVOPS_PAT_BASE64 = os.environ.get("AZURE_DEVOPS_PAT_BASE64_SECRET", "")
PIPELINE_ID = "911"
PROJECT_ID = "0628bce4-2d33-499e-bac5-530e12db160f"
TARGET_BRANCH = os.environ.get("CIRCLE_BRANCH", "main")
TARGET_COMMIT = os.environ.get("CIRCLE_SHA1", "")

build_base_url = AZURE_PIPELINE_BASE_URL + "_apis/build/builds?api-version=6.0"

s = requests.Session()
s.headers.update({"Authorization": "Basic " + AZURE_DEVOPS_PAT_BASE64})


def submit_build(pipeline_id, project_id, source_branch, source_version):
print("Submitting build for branch: " + source_branch)
print("Commit SHA1: ", source_version)

run_build_raw = s.post(
build_base_url,
json={
"definition": {"id": pipeline_id},
"project": {"id": project_id},
"sourceBranch": source_branch,
"sourceVersion": source_version,
},
)

try:
run_build_json = run_build_raw.json()
except json.decoder.JSONDecodeError as e:
print(e)
print(
"Failed to parse the response. Check if the Azure DevOps PAT is incorrect or expired."
)
sys.exit(-1)

build_id = run_build_json["id"]

print("Submitted bulid: " + str(build_id))
print("Bulid URL: " + run_build_json["url"])
return build_id


def get_build(_id):
get_build_url = (
AZURE_PIPELINE_BASE_URL + f"/_apis/build/builds/{_id}?api-version=6.0"
)
get_build_raw = s.get(get_build_url)
return get_build_raw.json()


def get_build_logs(_id):
get_build_logs_url = (
AZURE_PIPELINE_BASE_URL + f"/_apis/build/builds/{_id}/logs?api-version=6.0"
)
get_build_logs_raw = s.get(get_build_logs_url)
return get_build_logs_raw.json()


def get_log_content(url):
resp = s.get(url)
return resp.text


def wait_for_build(_id):
build_detail = get_build(_id)
build_status = build_detail["status"]

while build_status == "notStarted":
print("Waiting for run to start: " + str(_id))
sys.stdout.flush()
try:
build_detail = get_build(_id)
build_status = build_detail["status"]
except Exception as e:
print("Error getting build")
print(e)

time.sleep(30)

print("Bulid started: ", str(_id))

handled_logs = set()
while build_status == "inProgress":
try:
print("Waiting for log: " + str(_id))
logs = get_build_logs(_id)
except Exception as e:
print("Error fetching logs")
print(e)
time.sleep(30)
continue

for log in logs["value"]:
log_id = log["id"]
if log_id in handled_logs:
continue
handled_logs.add(log_id)
print("Fetching log: \n" + log["url"])
try:
log_content = get_log_content(log["url"])
print(log_content)
except Exception as e:
print("Error getting log content")
print(e)
sys.stdout.flush()
build_detail = get_build(_id)
build_status = build_detail["status"]
time.sleep(30)

build_result = build_detail["result"]

print("Bulid status: " + build_status)
print("Bulid result: " + build_result)

return build_status, build_result


if __name__ == "__main__":
# Convert the branch name for Azure DevOps
match = re.search(r"pull/(\d+)", TARGET_BRANCH)
if match is not None:
pr_num = match.group(1)
SOURCE_BRANCH = f"refs/pull/{pr_num}/head"
else:
SOURCE_BRANCH = f"refs/heads/{TARGET_BRANCH}"

MAX_RETRY = 2
retry = MAX_RETRY

while retry > 0:
build_id = submit_build(PIPELINE_ID, PROJECT_ID, SOURCE_BRANCH, TARGET_COMMIT)
build_status, build_result = wait_for_build(build_id)

if build_result != "succeeded":
retry = retry - 1
if retry > 0:
print("Retrying... remaining attempt: " + str(retry))
# Wait a bit before retrying
time.sleep((MAX_RETRY - retry) * 120)
continue
else:
print("No more chance to retry. Giving up.")
sys.exit(-1)
else:
break
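(Not part of the diff.) The new script reads AZURE_DEVOPS_PAT_BASE64_SECRET and sends it in a Basic authorization header; a hedged sketch of how such a value is commonly derived from a personal access token (Azure DevOps basic auth conventionally base64-encodes ":<PAT>" with an empty user name -- an assumption based on typical usage, not something the diff states):

# Illustrative only; the PAT string below is a placeholder.
import base64

def pat_to_basic_token(pat: str) -> str:
    # Empty user name, PAT as password, base64-encoded for a Basic auth header.
    return base64.b64encode(f":{pat}".encode()).decode()

print(pat_to_basic_token("example-pat"))  # value to export as AZURE_DEVOPS_PAT_BASE64_SECRET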
@ -120,7 +120,6 @@ UseTab: Never
Language: ObjC
ColumnLimit: 120
AlignAfterOpenBracket: Align
IndentWidth: 2
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: false

@ -61,8 +61,8 @@ You are now all set to start developing with PyTorch in a DevContainer environme
## Step 8: Build PyTorch

To build pytorch from source, simply run:
```bash
python -m pip install --no-build-isolation -v -e .
```
python setup.py develop
```

The process involves compiling thousands of files, and would take a long time. Fortunately, the compiled objects can be useful for your next build. When you modify some files, you only need to compile the changed files the next time.

@ -1,36 +1,14 @@
root = true

[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true

# Python
[*.{py,pyi,py.in,pyi.in}]
[*.py]
indent_style = space
indent_size = 4

# C/C++/CUDA
[*.{cpp,hpp,cxx,cc,c,h,cu,cuh}]
indent_style = space
indent_size = 2

# Objective-C
[*.{mm,m,M}]
indent_style = space
indent_size = 2

# Clang tools
[.clang-{format,tidy}]
indent_style = space
indent_size = 2

# Make
[Makefile]
indent_style = tab

# Batch file
[*.bat]
indent_style = space
indent_size = 2
end_of_line = crlf

4
.flake8
@ -7,12 +7,12 @@ max-line-length = 120
# C408 ignored because we like the dict keyword argument syntax
# E501 is not flexible enough, we're using B950 instead
ignore =
E203,E305,E402,E501,E704,E721,E741,F405,F841,F999,W503,W504,C408,E302,W291,E303,F824,
E203,E305,E402,E501,E704,E721,E741,F405,F841,F999,W503,W504,C408,E302,W291,E303,
# shebang has extra meaning in fbcode lints, so I think it's not worth trying
# to line this up with executable bit
EXE001,
# these ignores are from flake8-bugbear; please fix!
B007,B008,B017,B019,B023,B028,B903,B904,B905,B906,B907,B908,B910
B007,B008,B017,B019,B023,B028,B903,B904,B905,B906,B907
# these ignores are from flake8-comprehensions; please fix!
C407,
# these ignores are from flake8-logging-format; please fix!

5
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
@ -12,9 +12,7 @@ body:
description: |
Please provide a clear and concise description of what the bug is.

If relevant, add a minimal example so that we can reproduce the error by running the code. It is very important for the snippet to be as succinct (minimal) as possible, so please take time to trim down any irrelevant code to help us debug efficiently.
Your example should be fully self-contained and not rely on any artifact that should be downloaded.
For example:
If relevant, add a minimal example so that we can reproduce the error by running the code. It is very important for the snippet to be as succinct (minimal) as possible, so please take time to trim down any irrelevant code to help us debug efficiently. We are going to copy-paste your code and we expect to get the same result as you did: avoid any external data, and include the relevant imports, etc. For example:

```python
# All necessary imports at the beginning
@ -28,7 +26,6 @@ body:
If the code is too long (hopefully, it isn't), feel free to put it in a public gist and link it in the issue: https://gist.github.com.

Please also paste or describe the results you observe instead of the expected results. If you observe an error, please paste the error message including the **full** traceback of the exception. It may be relevant to wrap error messages in ```` ```triple quotes blocks``` ````.
If your issue is related to numerical accuracy or reproducibility, please read the [numerical accuracy](https://docs.pytorch.org/docs/stable/notes/numerical_accuracy.html) and [reproducibility](https://docs.pytorch.org/docs/stable/notes/randomness.html) notes. If the difference is not expected as described in these documents, please provide appropriate justification on why one result is wrong and the other is correct.
placeholder: |
A clear and concise description of what the bug is.


12
.github/actionlint.yaml
vendored
@ -14,7 +14,6 @@ self-hosted-runner:
- linux.12xlarge
- linux.24xlarge
- linux.24xlarge.ephemeral
- linux.24xlarge.amd
- linux.arm64.2xlarge
- linux.arm64.2xlarge.ephemeral
- linux.arm64.m7g.4xlarge
@ -50,15 +49,18 @@ self-hosted-runner:
# Organization-wide AMD-hosted runners
# MI2xx runners
- linux.rocm.gpu
- linux.rocm.gpu.mi250
- linux.rocm.gpu.2
- linux.rocm.gpu.4
# gfx942 runners
- linux.rocm.gpu.gfx942.2
- linux.rocm.gpu.gfx942.4
# MI300 runners
- linux.rocm.gpu.mi300.2
- linux.rocm.gpu.mi300.4
- rocm-docker
# Repo-specific Apple hosted runners
- macos-m1-ultra
- macos-m2-14
# Org wise AWS `mac2.metal` runners (2020 Mac mini hardware powered by Apple silicon M1 processors)
- macos-m1-stable
- macos-m1-13
- macos-m1-14
# GitHub-hosted MacOS runners
- macos-latest-xlarge

78
.github/actions/build-android/action.yml
vendored
Normal file
@ -0,0 +1,78 @@
name: build android

description: build android for a specific arch

inputs:
arch:
description: arch to build
required: true
arch-for-build-env:
description: |
arch to pass to build environment.
This is currently different than the arch name we use elswhere, which
should be fixed.
required: true
github-secret:
description: github token
required: true
build-environment:
required: true
description: Top-level label for what's being built/tested.
docker-image:
required: true
description: Name of the base docker image to build with.
branch:
required: true
description: What branch we are building on.
outputs:
container_id:
description: Docker container identifier used to build the artifacts
value: ${{ steps.build.outputs.container_id }}

runs:
using: composite
steps:
- name: Build-${{ inputs.arch }}
id: build
shell: bash
env:
BRANCH: ${{ inputs.branch }}
BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-${{ inputs.arch-for-build-env }}-build"
AWS_DEFAULT_REGION: us-east-1
PR_NUMBER: ${{ github.event.pull_request.number }}
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
SCCACHE_REGION: us-east-1
DOCKER_IMAGE: ${{ inputs.docker-image }}
MATRIX_ARCH: ${{ inputs.arch }}
run: |
# detached container should get cleaned up by teardown_ec2_linux
set -exo pipefail
export container_name
container_name=$(docker run \
-e BUILD_ENVIRONMENT \
-e MAX_JOBS="$(nproc --ignore=2)" \
-e AWS_DEFAULT_REGION \
-e PR_NUMBER \
-e SHA1 \
-e BRANCH \
-e SCCACHE_BUCKET \
-e SCCACHE_REGION \
-e SKIP_SCCACHE_INITIALIZATION=1 \
--env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
--security-opt seccomp=unconfined \
--cap-add=SYS_PTRACE \
--tty \
--detach \
--user jenkins \
-w /var/lib/jenkins/workspace \
"${DOCKER_IMAGE}"
)
git submodule sync && git submodule update -q --init --recursive --depth 1
docker cp "${GITHUB_WORKSPACE}/." "${container_name}:/var/lib/jenkins/workspace"
(echo "sudo chown -R jenkins . && .ci/pytorch/build.sh && find ${BUILD_ROOT} -type f -name "*.a" -or -name "*.o" -delete" | docker exec -u jenkins -i "${container_name}" bash) 2>&1

# Copy install binaries back
mkdir -p "${GITHUB_WORKSPACE}/build_android_install_${MATRIX_ARCH}"
docker cp "${container_name}:/var/lib/jenkins/workspace/build_android/install" "${GITHUB_WORKSPACE}/build_android_install_${MATRIX_ARCH}"
echo "container_id=${container_name}" >> "${GITHUB_OUTPUT}"
@ -70,7 +70,7 @@ runs:
set -eux
# PyYAML 6.0 doesn't work with MacOS x86 anymore
# This must run on Python-3.7 (AmazonLinux2) so can't use request=3.32.2
python3 -m pip install requests==2.27.1 pyyaml==6.0.2
python3 -m pip install requests==2.27.1 pyyaml==6.0.1

- name: Parse ref
id: parse-ref
@ -125,7 +125,7 @@ runs:
TAG: ${{ steps.parse-ref.outputs.tag }}
EVENT_NAME: ${{ github.event_name }}
SCHEDULE: ${{ github.event.schedule }}
HEAD_BRANCH: ${{ steps.parse-ref.outputs.branch }}
HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
id: filter
run: |
echo "Workflow: ${GITHUB_WORKFLOW}"
@ -157,4 +157,4 @@ runs:
echo "Is keep-going label set? ${{ steps.filter.outputs.keep-going }}"

echo
echo "Reenabled issues? ${{ steps.filter.outputs.reenabled-issues }}"
echo "Renabled issues? ${{ steps.filter.outputs.reenabled-issues }}"

4
.github/actions/linux-test/action.yml
vendored
@ -126,7 +126,7 @@ runs:
shell: bash
continue-on-error: true
run: |
python3 -m pip install psutil==5.9.8 nvidia-ml-py==11.525.84
python3 -m pip install psutil==5.9.1 nvidia-ml-py==11.525.84
python3 -m tools.stats.monitor > usage_log.txt 2>&1 &
echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"

@ -153,7 +153,7 @@ runs:
github-token: ${{ inputs.GITHUB_TOKEN }}

- name: Check for keep-going label and re-enabled test issues
# This uses the filter-test-configs action because it conveniently
# This uses the filter-test-configs action because it conviniently
# checks for labels and re-enabled test issues. It does not actually do
# any filtering. All filtering is done in the build step.
id: keep-going

9
.github/actions/reuse-old-whl/action.yml
vendored
@ -13,12 +13,6 @@ inputs:
github-token:
description: GitHub token
required: true
job-id:
description: Job ID
required: true
job-name:
description: Job name
required: true

outputs:
reuse:
@ -36,11 +30,8 @@ runs:
continue-on-error: true
env:
GITHUB_TOKEN: ${{ inputs.github-token }}
JOB_ID: ${{ inputs.job-id }}
JOB_NAME: ${{ inputs.job-name }}
run: |
set -x
python3 -m pip install boto3==1.35.42
python3 ${GITHUB_ACTION_PATH}/reuse_old_whl.py \
--build-environment "${{ inputs.build-environment }}" \
--run-id "${{ inputs.run-id }}" \

165
.github/actions/reuse-old-whl/reuse_old_whl.py
vendored
@ -1,22 +1,13 @@
import argparse
import os
import subprocess
import sys
from functools import lru_cache
from pathlib import Path
from typing import Any, cast, Optional, Union
from typing import Any, cast, Optional

import requests


REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.insert(0, str(REPO_ROOT))
from tools.stats.upload_metrics import emit_metric


sys.path.remove(str(REPO_ROOT)) # Clean up sys.path after import


FORCE_REBUILD_LABEL = "ci-force-rebuild"


@ -123,43 +114,15 @@ def ok_changed_file(file: str) -> bool:
return True
if file.startswith("test/") and file.endswith(".py"):
return True
if file.startswith("docs/") and file.endswith((".md", ".rst")):
return True
return False


def check_changed_files(sha: str) -> bool:
# Return true if all the changed files are in the list of allowed files to
# be changed to reuse the old whl

# Removing files in the torch folder is not allowed since rsync will not
# remove files
removed_files = (
subprocess.check_output(
[
"git",
"diff",
"--name-only",
sha,
"HEAD",
"--diff-filter=D",
"--no-renames",
],
text=True,
stderr=subprocess.DEVNULL,
)
.strip()
.split()
)
if any(file.startswith("torch/") for file in removed_files):
print(
f"Removed files between {sha} and HEAD: {removed_files}, cannot reuse old whl"
)
return False

changed_files = (
subprocess.check_output(
["git", "diff", "--name-only", sha, "HEAD", "--no-renames"],
["git", "diff", "--name-only", sha, "HEAD"],
text=True,
stderr=subprocess.DEVNULL,
)
@ -216,83 +179,38 @@ def unzip_artifact_and_replace_files() -> None:
)
os.remove("artifacts.zip")

head_sha = get_head_sha()

# Rename wheel into zip
wheel_path = Path("artifacts/dist").glob("*.whl")
for path in wheel_path:
# Should be of the form torch-2.0.0+git1234567-cp37-etc.whl
# Should usually be the merge base sha but for the ones that didn't do
# the replacement, it won't be. Can probably change it to just be merge
# base later
old_version = f"+git{path.stem.split('+')[1].split('-')[0][3:]}"
new_version = f"+git{head_sha[:7]}"

def rename_to_new_version(file: Union[str, Path]) -> None:
# Rename file with old_version to new_version
subprocess.check_output(
["mv", file, str(file).replace(old_version, new_version)]
)

def change_content_to_new_version(file: Union[str, Path]) -> None:
# Check if is a file
if os.path.isdir(file):
return
# Replace the old version in the file with the new version
with open(file) as f:
content = f.read()
content = content.replace(old_version, new_version)
with open(file, "w") as f:
f.write(content)

zip_path = path.with_suffix(".zip")
os.rename(path, zip_path)
old_stem = zip_path.stem
new_path = path.with_suffix(".zip")
os.rename(path, new_path)
print(f"Renamed {path} to {new_path}")
print(new_path.stem)
# Unzip the wheel
subprocess.check_output(
["unzip", "-o", zip_path, "-d", f"artifacts/dist/{old_stem}"],
["unzip", "-o", new_path, "-d", f"artifacts/dist/{new_path.stem}"],
)

# Remove the old wheel (which is now a zip file)
os.remove(zip_path)

# Copy python files into the artifact
subprocess.check_output(
["rsync", "-avz", "torch", f"artifacts/dist/{old_stem}"],
["rsync", "-avz", "torch", f"artifacts/dist/{new_path.stem}"],
)

change_content_to_new_version(f"artifacts/dist/{old_stem}/torch/version.py")

for file in Path(f"artifacts/dist/{old_stem}").glob(
"*.dist-info/**",
):
change_content_to_new_version(file)

rename_to_new_version(f"artifacts/dist/{old_stem}")
new_stem = old_stem.replace(old_version, new_version)

for file in Path(f"artifacts/dist/{new_stem}").glob(
"*.dist-info",
):
rename_to_new_version(file)

# Zip the wheel back
subprocess.check_output(
["zip", "-r", f"{new_stem}.zip", "."],
cwd=f"artifacts/dist/{new_stem}",
["zip", "-r", f"{new_path.stem}.zip", "."],
cwd=f"artifacts/dist/{new_path.stem}",
)

subprocess.check_output(
[
"mv",
f"artifacts/dist/{new_stem}/{new_stem}.zip",
f"artifacts/dist/{new_stem}.whl",
f"artifacts/dist/{new_path.stem}/{new_path.stem}.zip",
f"artifacts/dist/{new_path.stem}.whl",
],
)

# Remove the extracted folder
subprocess.check_output(
["rm", "-rf", f"artifacts/dist/{new_stem}"],
["rm", "-rf", f"artifacts/dist/{new_path.stem}"],
)

# Rezip the artifact
@ -304,7 +222,8 @@ def unzip_artifact_and_replace_files() -> None:


def set_output() -> None:
print("Setting output reuse=true")
# Disable for now so we can monitor first
# pass
if os.getenv("GITHUB_OUTPUT"):
with open(str(os.getenv("GITHUB_OUTPUT")), "a") as env:
print("reuse=true", file=env)
@ -325,60 +244,46 @@ def parse_args() -> argparse.Namespace:
return parser.parse_args()


def can_reuse_whl(args: argparse.Namespace) -> tuple[bool, str]:
if args.github_ref and any(
args.github_ref.startswith(x)
for x in [
"refs/heads/release",
"refs/tags/v",
"refs/heads/nightly",
]
):
print("Release branch, rebuild whl")
return (False, "Release branch")

if not check_changed_files(get_merge_base()):
print("Cannot use old whl due to the changed files, rebuild whl")
return (False, "Changed files not allowed")
def can_reuse_whl(args: argparse.Namespace) -> bool:
# if is_main_branch() or (
# args.github_ref
# and any(
# args.github_ref.startswith(x)
# for x in ["refs/heads/release", "refs/tags/v", "refs/heads/main"]
# )
# ):
# print("On main branch or release branch, rebuild whl")
# return False

if check_labels_for_pr():
print(f"Found {FORCE_REBUILD_LABEL} label on PR, rebuild whl")
return (False, "Found FORCE_REBUILD_LABEL on PR")
return False

if check_issue_open():
print("Issue #153759 is open, rebuild whl")
return (False, "Issue #153759 is open")
return False

if not check_changed_files(get_merge_base()):
print("Cannot use old whl due to the changed files, rebuild whl")
return False

workflow_id = get_workflow_id(args.run_id)
if workflow_id is None:
print("No workflow ID found, rebuild whl")
return (False, "No workflow ID found")
return False

if not find_old_whl(workflow_id, args.build_environment, get_merge_base()):
print("No old whl found, rebuild whl")
return (False, "No old whl found")
# TODO: go backwards from merge base to find more runs
return False

return (True, "Found old whl")
return True


if __name__ == "__main__":
args = parse_args()

reuse_whl, reason = can_reuse_whl(args)

if reuse_whl:
if can_reuse_whl(args):
print("Reusing old whl")
unzip_artifact_and_replace_files()
set_output()

emit_metric(
"reuse_old_whl",
{
"reuse_whl": reuse_whl,
"reason": reason,
"build_environment": args.build_environment,
"merge_base": get_merge_base(),
"head_sha": get_head_sha(),
},
)

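(Not part of the diff.) A tiny illustration of the renaming idea in unzip_artifact_and_replace_files() above: swap the "+git<old sha>" local-version tag embedded in a wheel filename for the current HEAD's short sha. The wheel name and sha below are invented.

# Illustrative only; mirrors the old_version/new_version substitution in the hunk above.
def retag_wheel_name(wheel_name: str, head_sha: str) -> str:
    old_version = "+git" + wheel_name.split("+git")[1].split("-")[0]
    return wheel_name.replace(old_version, f"+git{head_sha[:7]}")

print(retag_wheel_name("torch-2.9.0a0+git1234567-cp312-cp312-linux_x86_64.whl", "89abcdef0123"))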
4
.github/actions/setup-linux/action.yml
vendored
@ -33,14 +33,14 @@ runs:
id: check_container_runner
run: echo "IN_CONTAINER_RUNNER=$(if [ -f /.inarc ] || [ -f /.incontainer ]; then echo true ; else echo false; fi)" >> "$GITHUB_OUTPUT"

- name: Start docker if docker daemon is not running
- name: Start docker if docker deamon is not running
shell: bash
if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }}
run: |
if systemctl is-active --quiet docker; then
echo "Docker daemon is running...";
else
echo "Starting docker daemon..." && sudo systemctl start docker;
echo "Starting docker deamon..." && sudo systemctl start docker;
fi

- name: Log in to ECR

4
.github/actions/setup-xpu/action.yml
vendored
@ -29,13 +29,13 @@ runs:
if: always()
shell: bash
run: |
timeout 30 xpu-smi discovery || true
xpu-smi discovery

- name: Runner health check GPU count
if: always()
shell: bash
run: |
ngpu=$(timeout 30 xpu-smi discovery | grep -c -E 'Device Name' || true)
ngpu=$(xpu-smi discovery | grep -c -E 'Device Name')
msg="Please file an issue on pytorch/pytorch reporting the faulty runner. Include a link to the runner logs so the runner can be identified"
if [[ $ngpu -eq 0 ]]; then
echo "Error: Failed to detect any GPUs on the runner"

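(Not part of the diff.) The hardened health check above runs xpu-smi under a timeout and tolerates failure before counting "Device Name" lines; a rough Python equivalent of that pattern, with the command and timeout taken from the hunk:

# Illustrative only: run discovery with a timeout, tolerate a missing or hung tool, count devices.
import subprocess

def count_xpu_devices(timeout_s: int = 30) -> int:
    try:
        out = subprocess.run(["xpu-smi", "discovery"], capture_output=True,
                             text=True, timeout=timeout_s).stdout
    except (subprocess.TimeoutExpired, FileNotFoundError):
        return 0
    return sum("Device Name" in line for line in out.splitlines())

print("detected XPUs:", count_xpu_devices())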
@ -24,6 +24,7 @@ runs:
-e PYTORCH_FINAL_PACKAGE_DIR \
-e PYTORCH_ROOT \
-e SKIP_ALL_TESTS \
-e USE_SPLIT_BUILD \
--tty \
--detach \
-v "${GITHUB_WORKSPACE}/pytorch:/pytorch" \

2
.github/ci_commit_pins/audio.txt
vendored
@ -1 +1 @@
0c22347335f4c9a5b92a2f5bad65e05e2464c184
1a8f6213b0b61efc6a4862bc45b853551a93dbb6

Some files were not shown because too many files have changed in this diff