mirror of https://github.com/pytorch/pytorch.git
synced 2025-10-24 07:27:32 +08:00

Compare commits: validate_f...v2.1.2-rc1 (122 commits)
| SHA1 |
|---|
| b3b22d7390 |
| 7405d70c30 |
| d62c757533 |
| 7833889a44 |
| 4c55dc5035 |
| f58669bc5f |
| 33106b706e |
| 4b4c012a60 |
| 47ac50248a |
| dc96ecb8ac |
| 18a2ed1db1 |
| b2e1277247 |
| b249946c40 |
| ee79fc8a35 |
| 084343ee12 |
| 8a178f153e |
| 2353915d69 |
| c1bc460377 |
| 2dc37f4f70 |
| f82d6e41a4 |
| c79d2936d0 |
| ab5ea22c1d |
| 5274580eb0 |
| cd5859373c |
| 7cc6081f87 |
| af1590cdf4 |
| 3f59221062 |
| ab5b9192ce |
| 736ebd3313 |
| 6ba919da27 |
| cc54a5072e |
| 3788d86e3e |
| 1f0450eed2 |
| 9570baa150 |
| b3b274ddcb |
| 5bcfb1b9b4 |
| c496f9a40b |
| 39a66a66fe |
| ed87177528 |
| c07240e5e4 |
| bb96803a35 |
| 3002bf71e6 |
| e7892b2e02 |
| 909fcf9b21 |
| 0bc598a604 |
| dd7fb44d20 |
| 6026c29db0 |
| 0f9ac00ac6 |
| 209f2fa8ff |
| fa1db4310d |
| e6702486f6 |
| e68aa76642 |
| 88cde0c37c |
| e4c42a93bc |
| 7bcf7da3a2 |
| 1841d54370 |
| fca42334be |
| 539a971161 |
| 9287a0cf59 |
| c464075d5d |
| 1b4161c686 |
| 28220534de |
| da9639c752 |
| e534243ec2 |
| 01fa8c140a |
| 5aae979614 |
| ced78cc2a7 |
| d8db5808ce |
| 889811ab5b |
| 1191449343 |
| 6d9fad8474 |
| ed62318bea |
| ee67c4dd6a |
| 5529b81631 |
| 7e23b4907d |
| 71c9d5c3a6 |
| 91e414957b |
| ce3ed7f293 |
| bd372d460b |
| 12b8c26f35 |
| 7397cf324c |
| fa8259db8d |
| d83c8287ea |
| ba19c52e31 |
| c5c9536aa7 |
| 6b7a777661 |
| ebd3224303 |
| 6e4ae13657 |
| 265e46e193 |
| da7290dfbd |
| 828992cf13 |
| 48246f3dfb |
| 7d6971dcee |
| 5417e23ba8 |
| 7a9101951d |
| 03e7f0b99d |
| c0e7239f43 |
| 04c1e07fd7 |
| cb4362ba5f |
| bddd30ca7a |
| 9cc99906e9 |
| a49fca4dd4 |
| 83964c761e |
| 085bd1da62 |
| 90452f41e3 |
| 35c3d5a080 |
| d07ac50e26 |
| 8a3b017769 |
| a82894b0d3 |
| 050fc31538 |
| b3cb05b396 |
| fec68a2799 |
| f139dda1cc |
| 5252dfb762 |
| da1ccca830 |
| c9cbdaf24f |
| f187e42a54 |
| 9175987fcc |
| d8e6594fb8 |
| f82c027774 |
| 6d20b39d3f |
| 17f400404f |
@@ -1,4 +1,3 @@
# We do not use this library in our Bazel build. It contains an
# infinitely recursing symlink that makes Bazel very unhappy.
third_party/ittapi/
third_party/opentelemetry-cpp

@@ -19,7 +19,6 @@ See `build.sh` for valid build environments (it's the giant switch).
* `ubuntu` -- Dockerfile for Ubuntu image for CPU build and test jobs
* `ubuntu-cuda` -- Dockerfile for Ubuntu image with CUDA support for nvidia-docker
* `ubuntu-rocm` -- Dockerfile for Ubuntu image with ROCm support
* `ubuntu-xpu` -- Dockerfile for Ubuntu image with XPU support

## Usage

@@ -1,5 +0,0 @@
0.6b
manylinux_2_17
rocm6.1
7f07e8a1cb1f99627eb6d77f5c0e9295c775f3c7
77c29fa3f3b614e187d7213d745e989a92708cee2bc6020419ab49019af399d1
@@ -71,11 +71,6 @@ if [[ "$image" == *cuda* && "$UBUNTU_VERSION" != "22.04" ]]; then
DOCKERFILE="${OS}-cuda/Dockerfile"
elif [[ "$image" == *rocm* ]]; then
DOCKERFILE="${OS}-rocm/Dockerfile"
elif [[ "$image" == *xpu* ]]; then
DOCKERFILE="${OS}-xpu/Dockerfile"
elif [[ "$image" == *cuda*linter* ]]; then
# Use a separate Dockerfile for linter to keep a small image size
DOCKERFILE="linter-cuda/Dockerfile"
elif [[ "$image" == *linter* ]]; then
# Use a separate Dockerfile for linter to keep a small image size
DOCKERFILE="linter/Dockerfile"
@@ -84,30 +79,16 @@ fi
# CMake 3.18 is needed to support CUDA17 language variant
CMAKE_VERSION=3.18.5

_UCX_COMMIT=7bb2722ff2187a0cad557ae4a6afa090569f83fb
_UCC_COMMIT=20eae37090a4ce1b32bcce6144ccad0b49943e0b
_UCX_COMMIT=00bcc6bb18fc282eb160623b4c0d300147f579af
_UCC_COMMIT=7cb07a76ccedad7e56ceb136b865eb9319c258ea

# It's annoying to rename jobs every time you want to rewrite a
# configuration, so we hardcode everything here rather than do it
# from scratch
case "$image" in
pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9)
CUDA_VERSION=12.4.0
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9
PROTOBUF=yes
DB=yes
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9)
pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9)
CUDA_VERSION=12.1.1
CUDNN_VERSION=9
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9
PROTOBUF=yes
@@ -119,24 +100,9 @@ case "$image" in
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks)
CUDA_VERSION=12.4.0
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9
PROTOBUF=yes
DB=yes
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
CONDA_CMAKE=yes
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks)
pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks)
CUDA_VERSION=12.1.1
CUDNN_VERSION=9
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9
PROTOBUF=yes
@@ -149,39 +115,9 @@ case "$image" in
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-focal-cuda12.1-cudnn9-py3.12-gcc9-inductor-benchmarks)
CUDA_VERSION=12.1.1
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=9
PROTOBUF=yes
DB=yes
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
CONDA_CMAKE=yes
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks)
CUDA_VERSION=12.4.0
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=9
PROTOBUF=yes
DB=yes
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
CONDA_CMAKE=yes
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9)
pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9)
CUDA_VERSION=11.8.0
CUDNN_VERSION=9
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9
PROTOBUF=yes
@@ -193,11 +129,11 @@ case "$image" in
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9)
CUDA_VERSION=12.4.0
CUDNN_VERSION=9
pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc7)
CUDA_VERSION=11.8.0
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9
GCC_VERSION=7
PROTOBUF=yes
DB=yes
VISION=yes
@@ -207,23 +143,24 @@ case "$image" in
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9)
pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc7-inductor-benchmarks)
CUDA_VERSION=11.8.0
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=7
PROTOBUF=yes
DB=yes
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
CONDA_CMAKE=yes
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9)
CUDA_VERSION=12.1.1
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9
PROTOBUF=yes
DB=yes
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9)
CUDA_VERSION=12.4.0
CUDNN_VERSION=9
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9
PROTOBUF=yes
@@ -244,13 +181,13 @@ case "$image" in
CONDA_CMAKE=yes
ONNX=yes
;;
pytorch-linux-focal-py3-clang9-android-ndk-r21e)
pytorch-linux-focal-py3-clang7-android-ndk-r19c)
ANACONDA_PYTHON_VERSION=3.8
CLANG_VERSION=9
CLANG_VERSION=7
LLVMDEV=yes
PROTOBUF=yes
ANDROID=yes
ANDROID_NDK_VERSION=r21e
ANDROID_NDK_VERSION=r19c
GRADLE_VERSION=6.8.3
NINJA_VERSION=1.9.0
;;
@@ -291,7 +228,7 @@ case "$image" in
PROTOBUF=yes
DB=yes
VISION=yes
ROCM_VERSION=6.0
ROCM_VERSION=5.4.2
NINJA_VERSION=1.9.0
CONDA_CMAKE=yes
TRITON=yes
@@ -302,21 +239,21 @@ case "$image" in
PROTOBUF=yes
DB=yes
VISION=yes
ROCM_VERSION=6.1
ROCM_VERSION=5.6
NINJA_VERSION=1.9.0
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-jammy-xpu-2024.0-py3)
pytorch-linux-focal-py3.8-gcc7)
ANACONDA_PYTHON_VERSION=3.8
GCC_VERSION=11
GCC_VERSION=7
PROTOBUF=yes
DB=yes
VISION=yes
XPU_VERSION=0.5
NINJA_VERSION=1.9.0
KATEX=yes
CONDA_CMAKE=yes
TRITON=yes
DOCS=yes
;;
pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks)
ANACONDA_PYTHON_VERSION=3.8
@@ -330,10 +267,10 @@ case "$image" in
DOCS=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-jammy-cuda11.8-cudnn9-py3.8-clang12)
pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12)
ANACONDA_PYTHON_VERSION=3.8
CUDA_VERSION=11.8
CUDNN_VERSION=9
CUDNN_VERSION=8
CLANG_VERSION=12
PROTOBUF=yes
DB=yes
@@ -349,12 +286,6 @@ case "$image" in
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-jammy-py3-clang15-asan)
ANACONDA_PYTHON_VERSION=3.10
CLANG_VERSION=15
CONDA_CMAKE=yes
VISION=yes
;;
pytorch-linux-jammy-py3.8-gcc11)
ANACONDA_PYTHON_VERSION=3.8
GCC_VERSION=11
@@ -365,20 +296,6 @@ case "$image" in
CONDA_CMAKE=yes
TRITON=yes
DOCS=yes
UNINSTALL_DILL=yes
;;
pytorch-linux-jammy-py3-clang12-executorch)
ANACONDA_PYTHON_VERSION=3.10
CLANG_VERSION=12
CONDA_CMAKE=yes
EXECUTORCH=yes
;;
pytorch-linux-jammy-py3.12-halide)
CUDA_VERSION=12.4
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11
CONDA_CMAKE=yes
HALIDE=yes
;;
pytorch-linux-focal-linter)
# TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627.
@@ -387,26 +304,6 @@ case "$image" in
ANACONDA_PYTHON_VERSION=3.9
CONDA_CMAKE=yes
;;
pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter)
ANACONDA_PYTHON_VERSION=3.9
CUDA_VERSION=11.8
CONDA_CMAKE=yes
;;
pytorch-linux-jammy-aarch64-py3.10-gcc11)
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11
ACL=yes
PROTOBUF=yes
DB=yes
VISION=yes
CONDA_CMAKE=yes
# snadampal: skipping sccache due to the following issue
# https://github.com/pytorch/pytorch/issues/121559
SKIP_SCCACHE_INSTALL=yes
# snadampal: skipping llvm src build install because the current version
# from pytorch/llvm:9.0.1 is x86 specific
SKIP_LLVM_SRC_BUILD_INSTALL=yes
;;
*)
# Catch-all for builds that are not hardcoded.
PROTOBUF=yes
@@ -424,9 +321,6 @@ case "$image" in
extract_version_from_image_name rocm ROCM_VERSION
NINJA_VERSION=1.9.0
TRITON=yes
# To ensure that any ROCm config will build using conda cmake
# and thus have LAPACK/MKL enabled
CONDA_CMAKE=yes
fi
if [[ "$image" == *centos7* ]]; then
NINJA_VERSION=1.10.2
@@ -454,17 +348,20 @@ tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
#when using cudnn version 8 install it separately from cuda
if [[ "$image" == *cuda* && ${OS} == "ubuntu" ]]; then
IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
if [[ ${CUDNN_VERSION} == 9 ]]; then
if [[ ${CUDNN_VERSION} == 8 ]]; then
IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
fi
fi

# Build image
# TODO: build-arg THRIFT is not turned on for any image, remove it once we confirm
# it's no longer needed.
docker build \
--no-cache \
--progress=plain \
--build-arg "BUILD_ENVIRONMENT=${image}" \
--build-arg "PROTOBUF=${PROTOBUF:-}" \
--build-arg "THRIFT=${THRIFT:-}" \
--build-arg "LLVMDEV=${LLVMDEV:-}" \
--build-arg "DB=${DB:-}" \
--build-arg "VISION=${VISION:-}" \
@@ -496,18 +393,12 @@ docker build \
--build-arg "ONNX=${ONNX}" \
--build-arg "DOCS=${DOCS}" \
--build-arg "INDUCTOR_BENCHMARKS=${INDUCTOR_BENCHMARKS}" \
--build-arg "EXECUTORCH=${EXECUTORCH}" \
--build-arg "HALIDE=${HALIDE}" \
--build-arg "XPU_VERSION=${XPU_VERSION}" \
--build-arg "ACL=${ACL:-}" \
--build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \
--build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \
-f $(dirname ${DOCKERFILE})/Dockerfile \
-t "$tmp_tag" \
"$@" \
.
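
A quick hedged note on the `${VAR:-}` expansions used in the `--build-arg` lines above — a minimal sketch assuming only standard bash parameter expansion, nothing specific to this script:

```bash
# ${VAR:-} expands to an empty string when VAR is unset or empty, so an
# optional build arg passes through cleanly even under `set -u`.
unset PROTOBUF
echo "PROTOBUF=${PROTOBUF:-}"   # prints "PROTOBUF="
PROTOBUF=yes
echo "PROTOBUF=${PROTOBUF:-}"   # prints "PROTOBUF=yes"
```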

# NVIDIA dockers for RC releases use tag names like `11.0-cudnn9-devel-ubuntu18.04-rc`,
# NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`,
# for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could
# find the correct image. As a result, here we have to replace the
# "$UBUNTU_VERSION" == "18.04-rc"
@@ -62,7 +62,7 @@ RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}

# (optional) Install vision packages like OpenCV
# (optional) Install vision packages like OpenCV and ffmpeg
ARG VISION
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
@@ -77,9 +77,6 @@ RUN rm install_rocm.sh
COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
RUN bash ./install_rocm_magma.sh
RUN rm install_rocm_magma.sh
COPY ./common/install_amdsmi.sh install_amdsmi.sh
RUN bash ./install_amdsmi.sh
RUN rm install_amdsmi.sh
ENV PATH /opt/rocm/bin:$PATH
ENV PATH /opt/rocm/hcc/bin:$PATH
ENV PATH /opt/rocm/hip/bin:$PATH
@@ -101,25 +98,6 @@ COPY ./common/install_ninja.sh install_ninja.sh
RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
RUN rm install_ninja.sh

ARG TRITON
# Install triton, this needs to be done before sccache because the latter will
# try to reach out to S3, which docker build runners don't have access
ENV CMAKE_C_COMPILER cc
ENV CMAKE_CXX_COMPILER c++
COPY ./common/install_triton.sh install_triton.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/triton-rocm.txt triton-rocm.txt
COPY triton_version.txt triton_version.txt
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
RUN rm install_triton.sh common_utils.sh triton-rocm.txt triton_version.txt

# Install AOTriton (Early fail)
COPY ./aotriton_version.txt aotriton_version.txt
COPY ./common/common_utils.sh common_utils.sh
COPY ./common/install_aotriton.sh install_aotriton.sh
RUN ["/bin/bash", "-c", "./install_aotriton.sh /opt/rocm && rm -rf install_aotriton.sh aotriton_version.txt common_utils.sh"]
ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton

# Install ccache/sccache (do this last, so we get priority in PATH)
COPY ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
@@ -1 +0,0 @@
c572f9e509b5ec5d56f4d218271e36269bba244f
@@ -1 +0,0 @@
340136fec6d3ebc73e7a19eba1663e9b0ba8ab2d
@@ -1 +1 @@
243e186efbf7fb93328dd6b34927a4e8c8f24395
4.27.4

@@ -1 +1 @@
730b907b4d45a4713cbc425cbf224c46089fd514
b9d43c7dcac1fe05e851dd7be7187b108af593d2

@@ -1 +1 @@
21eae954efa5bf584da70324b640288c3ee7aede
34f8189eae57a23cc15b4b4f032fe25757e0db8e

@@ -1 +0,0 @@
1b2f15840e0d70eec50d84c7a0575cb835524def
@@ -1 +1 @@
dedb7bdf339a3546896d4820366ca562c586bfa0
e6216047b8b0aef1fe8da6ca8667a3ad0a016411

@@ -1,16 +0,0 @@
set -euo pipefail

readonly version=v24.04
readonly src_host=https://review.mlplatform.org/ml
readonly src_repo=ComputeLibrary

# Clone ACL
[[ ! -d ${src_repo} ]] && git clone ${src_host}/${src_repo}.git
cd ${src_repo}

git checkout $version

# Build with scons
scons -j8 Werror=0 debug=0 neon=1 opencl=0 embed_kernels=0 \
os=linux arch=armv8a build=native multi_isa=1 \
fixed_format_kernels=1 openmp=1 cppthreads=0
@@ -1,5 +0,0 @@
#!/bin/bash

set -ex

cd /opt/rocm/share/amd_smi && pip install .
@@ -1,23 +0,0 @@
#!/bin/bash

set -ex

source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

TARBALL='aotriton.tar.bz2'
# This read command always returns with exit code 1
read -d "\n" VER MANYLINUX ROCMBASE PINNED_COMMIT SHA256 < aotriton_version.txt || true
ARCH=$(uname -m)
AOTRITON_INSTALL_PREFIX="$1"
AOTRITON_URL="https://github.com/ROCm/aotriton/releases/download/${VER}/aotriton-${VER}-${MANYLINUX}_${ARCH}-${ROCMBASE}-shared.tar.bz2"

cd "${AOTRITON_INSTALL_PREFIX}"
# Must use -L to follow redirects
curl -L --retry 3 -o "${TARBALL}" "${AOTRITON_URL}"
ACTUAL_SHA256=$(sha256sum "${TARBALL}" | cut -d " " -f 1)
if [ "${SHA256}" != "${ACTUAL_SHA256}" ]; then
echo -n "Error: The SHA256 of downloaded tarball is ${ACTUAL_SHA256},"
echo " which does not match the expected value ${SHA256}."
exit
fi
tar xf "${TARBALL}" && rm -rf "${TARBALL}"

@@ -3,13 +3,16 @@
set -ex

install_ubuntu() {
# NVIDIA dockers for RC releases use tag names like `11.0-cudnn9-devel-ubuntu18.04-rc`,
# NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`,
# for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could
# find the correct image. As a result, here we have to check for
# "$UBUNTU_VERSION" == "18.04"*
# instead of
# "$UBUNTU_VERSION" == "18.04"
if [[ "$UBUNTU_VERSION" == "20.04"* ]]; then
if [[ "$UBUNTU_VERSION" == "18.04"* ]]; then
cmake3="cmake=3.10*"
maybe_libiomp_dev="libiomp-dev"
elif [[ "$UBUNTU_VERSION" == "20.04"* ]]; then
cmake3="cmake=3.16*"
maybe_libiomp_dev=""
elif [[ "$UBUNTU_VERSION" == "22.04"* ]]; then
@@ -20,9 +23,7 @@ install_ubuntu() {
maybe_libiomp_dev="libiomp-dev"
fi

if [[ "$CLANG_VERSION" == 15 ]]; then
maybe_libomp_dev="libomp-15-dev"
elif [[ "$CLANG_VERSION" == 12 ]]; then
if [[ "$CLANG_VERSION" == 12 ]]; then
maybe_libomp_dev="libomp-12-dev"
elif [[ "$CLANG_VERSION" == 10 ]]; then
maybe_libomp_dev="libomp-10-dev"
@@ -61,7 +62,6 @@ install_ubuntu() {
${maybe_libiomp_dev} \
libyaml-dev \
libz-dev \
libjemalloc2 \
libjpeg-dev \
libasound2-dev \
libsndfile-dev \
@@ -75,7 +75,6 @@ install_ubuntu() {
libtool \
vim \
unzip \
gpg-agent \
gdb

# Should resolve issues related to various apt package repository cert issues
@@ -113,6 +112,7 @@ install_centos() {
glibc-devel \
glibc-headers \
glog-devel \
hiredis-devel \
libstdc++-devel \
libsndfile-devel \
make \
@@ -152,7 +152,7 @@ wget https://ossci-linux.s3.amazonaws.com/valgrind-${VALGRIND_VERSION}.tar.bz2
tar -xjf valgrind-${VALGRIND_VERSION}.tar.bz2
cd valgrind-${VALGRIND_VERSION}
./configure --prefix=/usr/local
make -j$[$(nproc) - 2]
make -j6
sudo make install
cd ../../
rm -rf valgrind_build
@@ -9,19 +9,10 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
MAJOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 1)
MINOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 2)

if [[ $(uname -m) == "aarch64" ]]; then
BASE_URL="https://github.com/conda-forge/miniforge/releases/latest/download"
case "$MAJOR_PYTHON_VERSION" in
3)
CONDA_FILE="Miniforge3-Linux-aarch64.sh"
2)
CONDA_FILE="Miniconda2-latest-Linux-x86_64.sh"
;;
*)
echo "Unsupported ANACONDA_PYTHON_VERSION: $ANACONDA_PYTHON_VERSION"
exit 1
;;
esac
else
case "$MAJOR_PYTHON_VERSION" in
3)
CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
;;
@@ -30,7 +21,6 @@ else
exit 1
;;
esac
fi

mkdir -p /opt/conda
chown jenkins:jenkins /opt/conda
@@ -57,44 +47,30 @@ fi
# Uncomment the below when resolved to track the latest conda update
# as_jenkins conda update -y -n base conda

if [[ $(uname -m) == "aarch64" ]]; then
export SYSROOT_DEP="sysroot_linux-aarch64=2.17"
else
export SYSROOT_DEP="sysroot_linux-64=2.17"
fi

# Install correct Python version
# Also ensure sysroot is using a modern GLIBC to match system compilers
as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y\
python="$ANACONDA_PYTHON_VERSION" \
${SYSROOT_DEP}

# libstdcxx from conda default channels are too old, we need GLIBCXX_3.4.30
# which is provided in libstdcxx 12 and up.
conda_install libstdcxx-ng=12.3.0 -c conda-forge
as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y python="$ANACONDA_PYTHON_VERSION"

# Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
if [[ $(uname -m) == "aarch64" ]]; then
CONDA_COMMON_DEPS="astunparse pyyaml setuptools openblas==0.3.25=*openmp* ninja==1.11.1 scons==4.5.2"

if [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then
conda_install numpy=1.24.4 ${CONDA_COMMON_DEPS}
else
conda_install numpy=1.26.2 ${CONDA_COMMON_DEPS}
fi
CONDA_COMMON_DEPS="astunparse pyyaml mkl=2021.4.0 mkl-include=2021.4.0 setuptools"
if [ "$ANACONDA_PYTHON_VERSION" = "3.11" ]; then
conda_install numpy=1.23.5 ${CONDA_COMMON_DEPS}
elif [ "$ANACONDA_PYTHON_VERSION" = "3.10" ]; then
conda_install numpy=1.21.2 ${CONDA_COMMON_DEPS}
elif [ "$ANACONDA_PYTHON_VERSION" = "3.9" ]; then
conda_install numpy=1.21.2 ${CONDA_COMMON_DEPS}
elif [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then
conda_install numpy=1.21.2 ${CONDA_COMMON_DEPS}
else
CONDA_COMMON_DEPS="astunparse pyyaml mkl=2021.4.0 mkl-include=2021.4.0 setuptools"

if [ "$ANACONDA_PYTHON_VERSION" = "3.11" ] || [ "$ANACONDA_PYTHON_VERSION" = "3.12" ]; then
conda_install numpy=1.26.0 ${CONDA_COMMON_DEPS}
else
conda_install numpy=1.21.2 ${CONDA_COMMON_DEPS}
fi
# Install `typing-extensions` for 3.7
conda_install numpy=1.21.2 ${CONDA_COMMON_DEPS} typing-extensions
fi

# Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
# and libpython-static for torch deploy
conda_install llvmdev=8.0.0 "libpython-static=${ANACONDA_PYTHON_VERSION}"
# This is only supported in 3.8 upward
if [ "$MINOR_PYTHON_VERSION" -gt "7" ]; then
# Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
# and libpython-static for torch deploy
conda_install llvmdev=8.0.0 "libpython-static=${ANACONDA_PYTHON_VERSION}"
fi

# Use conda cmake in some cases. Conda cmake will be newer than our supported
# min version (3.5 for xenial and 3.10 for bionic), so we only do it in those
@@ -113,7 +89,13 @@ fi
# Install some other packages, including those needed for Python test reporting
pip_install -r /opt/conda/requirements-ci.txt

pip_install -U scikit-learn
# Update scikit-learn to a python-3.8 compatible version
if [[ $(python -c "import sys; print(int(sys.version_info >= (3, 8)))") == "1" ]]; then
pip_install -U scikit-learn
else
# Pinned scikit-learn due to https://github.com/scikit-learn/scikit-learn/issues/14485 (affects gcc 5.5 only)
pip_install scikit-learn==0.20.3
fi

if [ -n "$DOCS" ]; then
apt-get update
@@ -123,5 +105,14 @@ fi
pip_install -r /opt/conda/requirements-docs.txt
fi

# HACK HACK HACK
# gcc-9 for ubuntu-18.04 from http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu
# Pulls libstdc++6 13.1.0-8ubuntu1~18.04 which is too new for conda
# So remove libstdc++6.so.3.29 installed by https://anaconda.org/anaconda/libstdcxx-ng/files?version=11.2.0
# Same is true for gcc-12 from Ubuntu-22.04
if grep -e [12][82].04.[623] /etc/issue >/dev/null; then
rm /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/lib/libstdc++.so.6
fi
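
The `grep -e [12][82].04.[623]` guard above is terse; a hedged illustration of what it matches (the unescaped dots are single-character wildcards, though in practice they hit the literal dots in the release string):

```bash
# Matches Ubuntu release strings such as 18.04.6, 22.04.2 and 22.04.3.
for issue in "Ubuntu 18.04.6 LTS" "Ubuntu 22.04.3 LTS" "Ubuntu 20.04.5 LTS"; do
  if echo "$issue" | grep -qe "[12][82].04.[623]"; then
    echo "match:    $issue"
  else
    echo "no match: $issue"
  fi
done
```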

popd
fi

@@ -1,22 +1,27 @@
#!/bin/bash

if [[ -n "${CUDNN_VERSION}" ]]; then
if [[ ${CUDNN_VERSION} == 8 ]]; then
# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
mkdir tmp_cudnn
pushd tmp_cudnn
if [[ ${CUDA_VERSION:0:2} == "12" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda12-archive"
elif [[ ${CUDA_VERSION:0:2} == "11" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda11-archive"
mkdir tmp_cudnn && cd tmp_cudnn
CUDNN_NAME="cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive"
if [[ ${CUDA_VERSION:0:4} == "12.1" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-8.9.2.26_cuda12-archive"
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz
elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-8.7.0.84_cuda11-archive"
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/redist/cudnn/v8.7.0/local_installers/11.8/${CUDNN_NAME}.tar.xz
else
print "Unsupported CUDA version ${CUDA_VERSION}"
exit 1
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.2/local_installers/11.5/${CUDNN_NAME}.tar.xz
fi
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz

tar xf ${CUDNN_NAME}.tar.xz
cp -a ${CUDNN_NAME}/include/* /usr/include/
cp -a ${CUDNN_NAME}/include/* /usr/local/cuda/include/
cp -a ${CUDNN_NAME}/include/* /usr/include/x86_64-linux-gnu/

cp -a ${CUDNN_NAME}/lib/* /usr/local/cuda/lib64/
popd
cp -a ${CUDNN_NAME}/lib/* /usr/lib/x86_64-linux-gnu/
cd ..
rm -rf tmp_cudnn
ldconfig
fi
@@ -1,26 +0,0 @@
#!/bin/bash

set -ex

# cuSPARSELt license: https://docs.nvidia.com/cuda/cusparselt/license.html
mkdir tmp_cusparselt && cd tmp_cusparselt

if [[ ${CUDA_VERSION:0:4} =~ ^12\.[1-4]$ ]]; then
arch_path='sbsa'
export TARGETARCH=${TARGETARCH:-$(uname -m)}
if [ ${TARGETARCH} = 'amd64' ] || [ "${TARGETARCH}" = 'x86_64' ]; then
arch_path='x86_64'
fi
CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.5.2.1-archive"
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz
elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then
CUSPARSELT_NAME="libcusparse_lt-linux-x86_64-0.4.0.7-archive"
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/${CUSPARSELT_NAME}.tar.xz
fi

tar xf ${CUSPARSELT_NAME}.tar.xz
cp -a ${CUSPARSELT_NAME}/include/* /usr/local/cuda/include/
cp -a ${CUSPARSELT_NAME}/lib/* /usr/local/cuda/lib64/
cd ..
rm -rf tmp_cusparselt
ldconfig
@@ -4,6 +4,11 @@ set -ex

install_ubuntu() {
apt-get update
apt-get install -y --no-install-recommends \
libhiredis-dev \
libleveldb-dev \
liblmdb-dev \
libsnappy-dev

# Cleanup
apt-get autoclean && apt-get clean
@@ -15,6 +20,12 @@ install_centos() {
# See http://fedoraproject.org/wiki/EPEL
yum --enablerepo=extras install -y epel-release

yum install -y \
hiredis-devel \
leveldb-devel \
lmdb-devel \
snappy-devel

# Cleanup
yum clean all
rm -rf /var/cache/yum
@@ -1,65 +0,0 @@
#!/bin/bash

set -ex

source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

clone_executorch() {
EXECUTORCH_PINNED_COMMIT=$(get_pinned_commit executorch)

# Clone the Executorch
git clone https://github.com/pytorch/executorch.git

# and fetch the target commit
pushd executorch
git checkout "${EXECUTORCH_PINNED_COMMIT}"
git submodule update --init
popd

chown -R jenkins executorch
}

install_buck2() {
pushd executorch/.ci/docker

BUCK2_VERSION=$(cat ci_commit_pins/buck2.txt)
source common/install_buck.sh

popd
}

install_conda_dependencies() {
pushd executorch/.ci/docker
# Install conda dependencies like flatbuffer
conda_install --file conda-env-ci.txt
popd
}

install_pip_dependencies() {
pushd executorch/.ci/docker
# Install PyTorch CPU build beforehand to avoid installing the much bigger CUDA
# binaries later, ExecuTorch only needs CPU
pip_install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
# Install all Python dependencies
pip_install -r requirements-ci.txt
popd
}

setup_executorch() {
pushd executorch
# Setup swiftshader and Vulkan SDK which are required to build the Vulkan delegate
as_jenkins bash .ci/scripts/setup-vulkan-linux-deps.sh

export PYTHON_EXECUTABLE=python
export EXECUTORCH_BUILD_PYBIND=ON
export CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"

as_jenkins .ci/scripts/setup-linux.sh cmake
popd
}

clone_executorch
install_buck2
install_conda_dependencies
install_pip_dependencies
setup_executorch
@@ -1,46 +0,0 @@
#!/bin/bash
set -ex

source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

COMMIT=$(get_pinned_commit halide)
test -n "$COMMIT"

# activate conda to populate CONDA_PREFIX
test -n "$ANACONDA_PYTHON_VERSION"
eval "$(conda shell.bash hook)"
conda activate py_$ANACONDA_PYTHON_VERSION

if [ -n "${UBUNTU_VERSION}" ];then
apt update
apt-get install -y lld liblld-15-dev libpng-dev libjpeg-dev libgl-dev \
libopenblas-dev libeigen3-dev libatlas-base-dev libzstd-dev
fi

conda_install numpy scipy imageio cmake ninja

git clone --depth 1 --branch release/16.x --recursive https://github.com/llvm/llvm-project.git
cmake -DCMAKE_BUILD_TYPE=Release \
-DLLVM_ENABLE_PROJECTS="clang" \
-DLLVM_TARGETS_TO_BUILD="X86;NVPTX" \
-DLLVM_ENABLE_TERMINFO=OFF -DLLVM_ENABLE_ASSERTIONS=ON \
-DLLVM_ENABLE_EH=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_BUILD_32_BITS=OFF \
-S llvm-project/llvm -B llvm-build -G Ninja
cmake --build llvm-build
cmake --install llvm-build --prefix llvm-install
export LLVM_ROOT=`pwd`/llvm-install
export LLVM_CONFIG=$LLVM_ROOT/bin/llvm-config

git clone https://github.com/halide/Halide.git
pushd Halide
git checkout ${COMMIT} && git submodule update --init --recursive
pip_install -r requirements.txt
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -S . -B build
cmake --build build
test -e ${CONDA_PREFIX}/lib/python3 || ln -s python${ANACONDA_PYTHON_VERSION} ${CONDA_PREFIX}/lib/python3
cmake --install build --prefix ${CONDA_PREFIX}
chown -R jenkins ${CONDA_PREFIX}
popd
rm -rf Halide llvm-build llvm-project llvm-install

python -c "import halide" # check for errors
@@ -6,21 +6,19 @@ source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

function install_huggingface() {
local version
commit=$(get_pinned_commit huggingface)
version=$(get_pinned_commit huggingface)
pip_install pandas==2.0.3
pip_install "git+https://github.com/huggingface/transformers@${commit}"
pip_install "transformers==${version}"
}

function install_timm() {
local commit
commit=$(get_pinned_commit timm)
pip_install pandas==2.0.3
pip_install "git+https://github.com/huggingface/pytorch-image-models@${commit}"
# Clean up
conda_run pip uninstall -y cmake torch torchvision triton
pip_install "git+https://github.com/rwightman/pytorch-image-models@${commit}"
}

# Pango is needed for weasyprint which is needed for doctr
conda_install pango
install_huggingface
install_timm
# install_timm
@@ -10,13 +10,13 @@ retry () {

# A bunch of custom pip dependencies for ONNX
pip_install \
beartype==0.15.0 \
beartype==0.10.4 \
filelock==3.9.0 \
flatbuffers==2.0 \
mock==5.0.1 \
ninja==1.10.2 \
networkx==2.0 \
numpy==1.24.2
numpy==1.22.4

# ONNXRuntime should be installed before installing
# onnx-weekly. Otherwise, onnx-weekly could be
@@ -26,21 +26,18 @@ pip_install \
pytest-cov==4.0.0 \
pytest-subtests==0.10.0 \
tabulate==0.9.0 \
transformers==4.36.2
transformers==4.31.0

pip_install coloredlogs packaging
retry pip_install -i https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ --no-cache-dir --no-input ort-nightly==1.16.0.dev20230908001

pip_install onnxruntime==1.18
pip_install onnx==1.16.0
# pip_install "onnxscript@git+https://github.com/microsoft/onnxscript@3e869ef8ccf19b5ebd21c10d3e9c267c9a9fa729" --no-deps
pip_install onnxscript==0.1.0.dev20240613 --no-deps
# required by onnxscript
pip_install ml_dtypes
pip_install onnx==1.14.1
pip_install onnxscript-preview==0.1.0.dev20230828 --no-deps

# Cache the transformers model to be used later by ONNX tests. We need to run the transformers
# package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/
IMPORT_SCRIPT_FILENAME="/tmp/onnx_import_script.py"
as_jenkins echo 'import transformers; transformers.AutoModel.from_pretrained("sshleifer/tiny-gpt2"); transformers.AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2"); transformers.AutoModelForSpeechSeq2Seq.from_pretrained("openai/whisper-large-v3");' > "${IMPORT_SCRIPT_FILENAME}"
as_jenkins echo 'import transformers; transformers.AutoModel.from_pretrained("sshleifer/tiny-gpt2"); transformers.AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2");' > "${IMPORT_SCRIPT_FILENAME}"

# Need a PyTorch version for transformers to work
pip_install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu
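
The exact follow-up invocation is not shown in this hunk; a hypothetical sketch of how the generated import script would then be exercised, assuming the same `as_jenkins` helper used above:

```bash
# Hypothetical step: running the import script once populates
# ~/.cache/huggingface/hub so later ONNX tests need no network access.
as_jenkins python "${IMPORT_SCRIPT_FILENAME}"
```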

@@ -9,8 +9,7 @@ tar xf "${OPENSSL}.tar.gz"
cd "${OPENSSL}"
./config --prefix=/opt/openssl -d '-Wl,--enable-new-dtags,-rpath,$(LIBRPATH)'
# NOTE: openssl install errors out when built with the -j option
NPROC=$[$(nproc) - 2]
make -j${NPROC}; make install_sw
make -j6; make install_sw
# Link the ssl libraries to the /usr/lib folder.
sudo ln -s /opt/openssl/lib/lib* /usr/lib
cd ..
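
The `$[ ... ]` arithmetic used for `NPROC` above is the deprecated bash form; a minimal equivalent in the modern `$(( ))` syntax:

```bash
# Leave two cores free for the rest of the system while building.
NPROC=$(( $(nproc) - 2 ))
echo "building with make -j${NPROC}"
```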

@@ -2,18 +2,55 @@

set -ex

pb_dir="/usr/temp_pb_install_dir"
mkdir -p $pb_dir
# This function installs protobuf 3.17
install_protobuf_317() {
pb_dir="/usr/temp_pb_install_dir"
mkdir -p $pb_dir

# On the nvidia/cuda:9-cudnn7-devel-centos7 image we need this symlink or
# else it will fail with
# g++: error: ./../lib64/crti.o: No such file or directory
ln -s /usr/lib64 "$pb_dir/lib64"
# On the nvidia/cuda:9-cudnn7-devel-centos7 image we need this symlink or
# else it will fail with
# g++: error: ./../lib64/crti.o: No such file or directory
ln -s /usr/lib64 "$pb_dir/lib64"

curl -LO "https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz" --retry 3
curl -LO "https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz" --retry 3
tar -xvz -C "$pb_dir" --strip-components 1 -f protobuf-all-3.17.3.tar.gz
# -j6 to balance memory usage and speed.
# naked `-j` seems to use too much memory.
pushd "$pb_dir" && ./configure && make -j6 && make -j6 check && sudo make -j6 install && sudo ldconfig
popd
rm -rf $pb_dir
}

tar -xvz --no-same-owner -C "$pb_dir" --strip-components 1 -f protobuf-all-3.17.3.tar.gz
NPROC=$[$(nproc) - 2]
pushd "$pb_dir" && ./configure && make -j${NPROC} && make -j${NPROC} check && sudo make -j${NRPOC} install && sudo ldconfig
popd
rm -rf $pb_dir
install_ubuntu() {
# Ubuntu 14.04 has cmake 2.8.12 as the default option, so we will
# install cmake3 here and use cmake3.
apt-get update
if [[ "$UBUNTU_VERSION" == 14.04 ]]; then
apt-get install -y --no-install-recommends cmake3
fi

# Cleanup
apt-get autoclean && apt-get clean
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

install_protobuf_317
}

install_centos() {
install_protobuf_317
}

# Install base packages depending on the base OS
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
case "$ID" in
ubuntu)
install_ubuntu
;;
centos)
install_centos
;;
*)
echo "Unable to determine OS..."
exit 1
;;
esac

@@ -6,6 +6,9 @@ ver() {
printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' ');
}

# Map ROCm version to AMDGPU version
declare -A AMDGPU_VERSIONS=( ["5.0"]="21.50" ["5.1.1"]="22.10.1" ["5.2"]="22.20" )
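
A hedged usage sketch of the two helpers at the top of this hunk: `ver` zero-pads each dotted component into a fixed-width integer so versions compare numerically rather than lexically, and the associative array maps a ROCm release to its AMDGPU repo version:

```bash
ver() { printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' '); }
declare -A AMDGPU_VERSIONS=( ["5.0"]="21.50" ["5.1.1"]="22.10.1" ["5.2"]="22.20" )

ver 5.1.1                                      # prints "  5001001000"
[[ $(ver 5.2) -ge $(ver 4.5) ]] && echo "5.2 >= 4.5"
echo "amdgpu repo for ROCm 5.1.1: ${AMDGPU_VERSIONS[5.1.1]}"   # 22.10.1
```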

install_ubuntu() {
apt-get update
if [[ $UBUNTU_VERSION == 18.04 ]]; then
@@ -23,14 +26,31 @@ install_ubuntu() {
apt-get install -y libc++1
apt-get install -y libc++abi1

# Add amdgpu repository
UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'`
echo "deb [arch=amd64] https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list
if [[ $(ver $ROCM_VERSION) -ge $(ver 4.5) ]]; then
# Add amdgpu repository
UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'`
local amdgpu_baseurl
if [[ $(ver $ROCM_VERSION) -ge $(ver 5.3) ]]; then
amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu"
else
amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/ubuntu"
fi
echo "deb [arch=amd64] ${amdgpu_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list
fi

ROCM_REPO="ubuntu"
if [[ $(ver $ROCM_VERSION) -lt $(ver 4.2) ]]; then
ROCM_REPO="xenial"
fi

if [[ $(ver $ROCM_VERSION) -ge $(ver 5.3) ]]; then
ROCM_REPO="${UBUNTU_VERSION_NAME}"
fi

# Add rocm repository
wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add -
local rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}"
echo "deb [arch=amd64] ${rocm_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/rocm.list
echo "deb [arch=amd64] ${rocm_baseurl} ${ROCM_REPO} main" > /etc/apt/sources.list.d/rocm.list
apt-get update --allow-insecure-repositories

DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \
@@ -39,29 +59,27 @@ install_ubuntu() {
rocm-libs \
rccl \
rocprofiler-dev \
roctracer-dev \
amd-smi-lib

if [[ $(ver $ROCM_VERSION) -ge $(ver 6.1) ]]; then
DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated rocm-llvm-dev
fi
roctracer-dev

# precompiled miopen kernels added in ROCm 3.5, renamed in ROCm 5.5
# search for all unversioned packages
# if search fails it will abort this script; use true to avoid case where search fails
MIOPENHIPGFX=$(apt-cache search --names-only miopen-hip-gfx | awk '{print $1}' | grep -F -v . || true)
if [[ "x${MIOPENHIPGFX}" = x ]]; then
echo "miopen-hip-gfx package not available" && exit 1
if [[ $(ver $ROCM_VERSION) -ge $(ver 5.5) ]]; then
MIOPENHIPGFX=$(apt-cache search --names-only miopen-hip-gfx | awk '{print $1}' | grep -F -v . || true)
if [[ "x${MIOPENHIPGFX}" = x ]]; then
echo "miopen-hip-gfx package not available" && exit 1
else
DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ${MIOPENHIPGFX}
fi
else
DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ${MIOPENHIPGFX}
MIOPENKERNELS=$(apt-cache search --names-only miopenkernels | awk '{print $1}' | grep -F -v . || true)
if [[ "x${MIOPENKERNELS}" = x ]]; then
echo "miopenkernels package not available" && exit 1
else
DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ${MIOPENKERNELS}
fi
fi
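
The `grep -F -v .` filter in both branches above keeps only unversioned package names; an illustration with made-up package names:

```bash
# Fixed-string, inverted match: any name containing a literal dot (i.e. a
# versioned package) is dropped, leaving the unversioned meta-packages.
printf '%s\n' miopen-hip-gfx900kdb miopen-hip-gfx900kdb5.7.1 \
  | grep -F -v .
# prints only: miopen-hip-gfx900kdb
```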

# ROCm 6.0 had a regression where journal_mode was enabled on the kdb files resulting in permission errors at runtime
for kdb in /opt/rocm/share/miopen/db/*.kdb
do
sqlite3 $kdb "PRAGMA journal_mode=off; PRAGMA VACUUM;"
done

# Cleanup
apt-get autoclean && apt-get clean
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
@@ -77,19 +95,25 @@ install_centos() {
yum install -y epel-release
yum install -y dkms kernel-headers-`uname -r` kernel-devel-`uname -r`

# Add amdgpu repository
local amdgpu_baseurl
if [[ $OS_VERSION == 9 ]]; then
amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/rhel/9.0/main/x86_64"
else
amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/rhel/7.9/main/x86_64"
if [[ $(ver $ROCM_VERSION) -ge $(ver 4.5) ]]; then
# Add amdgpu repository
local amdgpu_baseurl
if [[ $OS_VERSION == 9 ]]; then
amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/rhel/9.0/main/x86_64"
else
if [[ $(ver $ROCM_VERSION) -ge $(ver 5.3) ]]; then
amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/rhel/7.9/main/x86_64"
else
amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/rhel/7.9/main/x86_64"
fi
fi
echo "[AMDGPU]" > /etc/yum.repos.d/amdgpu.repo
echo "name=AMDGPU" >> /etc/yum.repos.d/amdgpu.repo
echo "baseurl=${amdgpu_baseurl}" >> /etc/yum.repos.d/amdgpu.repo
echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo
echo "gpgcheck=1" >> /etc/yum.repos.d/amdgpu.repo
echo "gpgkey=http://repo.radeon.com/rocm/rocm.gpg.key" >> /etc/yum.repos.d/amdgpu.repo
fi
echo "[AMDGPU]" > /etc/yum.repos.d/amdgpu.repo
echo "name=AMDGPU" >> /etc/yum.repos.d/amdgpu.repo
echo "baseurl=${amdgpu_baseurl}" >> /etc/yum.repos.d/amdgpu.repo
echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo
echo "gpgcheck=1" >> /etc/yum.repos.d/amdgpu.repo
echo "gpgkey=http://repo.radeon.com/rocm/rocm.gpg.key" >> /etc/yum.repos.d/amdgpu.repo

local rocm_baseurl="http://repo.radeon.com/rocm/yum/${ROCM_VERSION}"
echo "[ROCm]" > /etc/yum.repos.d/rocm.repo
@@ -107,24 +131,26 @@ install_centos() {
rocm-libs \
rccl \
rocprofiler-dev \
roctracer-dev \
amd-smi-lib
roctracer-dev

# precompiled miopen kernels; search for all unversioned packages
# if search fails it will abort this script; use true to avoid case where search fails
MIOPENHIPGFX=$(yum -q search miopen-hip-gfx | grep miopen-hip-gfx | awk '{print $1}'| grep -F kdb. || true)
if [[ "x${MIOPENHIPGFX}" = x ]]; then
echo "miopen-hip-gfx package not available" && exit 1
if [[ $(ver $ROCM_VERSION) -ge $(ver 5.5) ]]; then
MIOPENHIPGFX=$(yum -q search miopen-hip-gfx | grep miopen-hip-gfx | awk '{print $1}'| grep -F kdb. || true)
if [[ "x${MIOPENHIPGFX}" = x ]]; then
echo "miopen-hip-gfx package not available" && exit 1
else
yum install -y ${MIOPENHIPGFX}
fi
else
yum install -y ${MIOPENHIPGFX}
MIOPENKERNELS=$(yum -q search miopenkernels | grep miopenkernels- | awk '{print $1}'| grep -F kdb. || true)
if [[ "x${MIOPENKERNELS}" = x ]]; then
echo "miopenkernels package not available" && exit 1
else
yum install -y ${MIOPENKERNELS}
fi
fi

# ROCm 6.0 had a regression where journal_mode was enabled on the kdb files resulting in permission errors at runtime
for kdb in /opt/rocm/share/miopen/db/*.kdb
do
sqlite3 $kdb "PRAGMA journal_mode=off; PRAGMA VACUUM;"
done

# Cleanup
yum clean all
rm -rf /var/cache/yum

@@ -5,10 +5,8 @@ set -ex
# "install" hipMAGMA into /opt/rocm/magma by copying after build
git clone https://bitbucket.org/icl/magma.git
pushd magma

# Version 2.7.2 + ROCm related updates
git checkout a1625ff4d9bc362906bd01f805dbbe12612953f6

# Fixes memory leaks of magma found while executing linalg UTs
git checkout 28592a7170e4b3707ed92644bf4a689ed600c27f
cp make.inc-examples/make.inc.hip-gcc-mkl make.inc
echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc
echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib' >> make.inc
.ci/docker/common/install_thrift.sh (new executable file, 14 lines)
@@ -0,0 +1,14 @@
apt-get update
apt-get install -y sudo wget libboost-dev libboost-test-dev libboost-program-options-dev libboost-filesystem-dev libboost-thread-dev libevent-dev automake libtool flex bison pkg-config g++ libssl-dev
wget https://www-us.apache.org/dist/thrift/0.12.0/thrift-0.12.0.tar.gz
tar -xvf thrift-0.12.0.tar.gz
cd thrift-0.12.0
for file in ./compiler/cpp/Makefile*; do
sed -i 's/\-Werror//' $file
done
./bootstrap.sh
./configure --without-php --without-java --without-python --without-nodejs --without-go --without-ruby
sudo make
sudo make install
cd ..
rm thrift-0.12.0.tar.gz
@@ -13,11 +13,8 @@ conda_reinstall() {
}

if [ -n "${ROCM_VERSION}" ]; then
TRITON_REPO="https://github.com/openai/triton"
TRITON_REPO="https://github.com/ROCmSoftwarePlatform/triton"
TRITON_TEXT_FILE="triton-rocm"
elif [ -n "${XPU_VERSION}" ]; then
TRITON_REPO="https://github.com/intel/intel-xpu-backend-for-triton"
TRITON_TEXT_FILE="triton-xpu"
else
TRITON_REPO="https://github.com/openai/triton"
TRITON_TEXT_FILE="triton"
@@ -26,10 +23,8 @@ fi
# The logic here is copied from .ci/pytorch/common_utils.sh
TRITON_PINNED_COMMIT=$(get_pinned_commit ${TRITON_TEXT_FILE})

if [ -n "${UBUNTU_VERSION}" ];then
apt update
apt-get install -y gpg-agent
fi
apt update
apt-get install -y gpg-agent

if [ -n "${CONDA_CMAKE}" ]; then
# Keep the current cmake and numpy version here, so we can reinstall them later
@@ -41,12 +36,12 @@ if [ -z "${MAX_JOBS}" ]; then
export MAX_JOBS=$(nproc)
fi

if [ -n "${UBUNTU_VERSION}" ] && [ -n "${GCC_VERSION}" ] && [[ "${GCC_VERSION}" == "7" ]]; then
if [ -n "${GCC_VERSION}" ] && [[ "${GCC_VERSION}" == "7" ]]; then
# Triton needs at least gcc-9 to build
apt-get install -y g++-9

CXX=g++-9 pip_install "git+${TRITON_REPO}@${TRITON_PINNED_COMMIT}#subdirectory=python"
elif [ -n "${UBUNTU_VERSION}" ] && [ -n "${CLANG_VERSION}" ]; then
elif [ -n "${CLANG_VERSION}" ]; then
# Triton needs <filesystem> which surprisingly is not available with clang-9 toolchain
add-apt-repository -y ppa:ubuntu-toolchain-r/test
apt-get install -y g++-9
@@ -67,6 +62,5 @@ if [ -n "${CONDA_CMAKE}" ]; then
# latest numpy version, which fails ASAN tests with the following import error: Numba
# needs NumPy 1.20 or less.
conda_reinstall cmake="${CMAKE_VERSION}"
# Note that we install numpy with pip as conda might not have the version we want
pip_install --force-reinstall numpy=="${NUMPY_VERSION}"
conda_reinstall numpy="${NUMPY_VERSION}"
fi

@@ -36,12 +36,7 @@ function install_ucc() {
git submodule update --init --recursive

./autogen.sh
# We only run distributed tests on Tesla M60 and A10G
NVCC_GENCODE="-gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_86,code=compute_86"
./configure --prefix=$UCC_HOME \
--with-ucx=$UCX_HOME \
--with-cuda=$with_cuda \
--with-nvcc-gencode="${NVCC_GENCODE}"
./configure --prefix=$UCC_HOME --with-ucx=$UCX_HOME --with-cuda=$with_cuda
time make -j
sudo make install

@@ -5,7 +5,8 @@ set -ex
install_ubuntu() {
apt-get update
apt-get install -y --no-install-recommends \
libopencv-dev
libopencv-dev \
libavcodec-dev

# Cleanup
apt-get autoclean && apt-get clean
@@ -18,7 +19,8 @@ install_centos() {
yum --enablerepo=extras install -y epel-release

yum install -y \
opencv-devel
opencv-devel \
ffmpeg-devel

# Cleanup
yum clean all

@ -1,114 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -xe
|
||||
|
||||
|
||||
# Intel® software for general purpose GPU capabilities.
|
||||
# Refer to https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpus.html
|
||||
|
||||
# Users should update to the latest version as it becomes available
|
||||
|
||||
function install_ubuntu() {
|
||||
apt-get update -y
|
||||
apt-get install -y gpg-agent wget
|
||||
|
||||
# Set up the repository. To do this, download the key to the system keyring
|
||||
wget -qO - https://repositories.intel.com/gpu/intel-graphics.key \
|
||||
        | gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg
    wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \
        | gpg --dearmor --output /usr/share/keyrings/intel-for-pytorch-gpu-dev-keyring.gpg

    # Add the signed entry to APT sources and configure the APT client to use the Intel repository
    echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] \
        https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" \
        | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
    echo "deb [signed-by=/usr/share/keyrings/intel-for-pytorch-gpu-dev-keyring.gpg] \
        https://apt.repos.intel.com/intel-for-pytorch-gpu-dev all main" \
        | tee /etc/apt/sources.list.d/intel-for-pytorch-gpu-dev.list

    # Update the packages list and repository index
    apt-get update

    # The xpu-smi packages
    apt-get install -y flex bison xpu-smi
    # Compute and Media Runtimes
    apt-get install -y \
        intel-opencl-icd intel-level-zero-gpu level-zero \
        intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 \
        libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
        libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
        mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo
    # Development Packages
    apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev level-zero-dev
    # Install Intel Support Packages
    if [ -n "$XPU_VERSION" ]; then
        apt-get install -y intel-for-pytorch-gpu-dev-${XPU_VERSION}
    else
        apt-get install -y intel-for-pytorch-gpu-dev
    fi

    # Cleanup
    apt-get autoclean && apt-get clean
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
}

function install_centos() {
    dnf install -y 'dnf-command(config-manager)'
    dnf config-manager --add-repo \
        https://repositories.intel.com/gpu/rhel/8.6/production/2328/unified/intel-gpu-8.6.repo
    # To add the EPEL repository needed for DKMS
    dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
    # https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm

    # Create the YUM repository file in the /tmp directory as a normal user
    tee > /tmp/oneAPI.repo << EOF
[oneAPI]
name=Intel® oneAPI repository
baseurl=https://yum.repos.intel.com/oneapi
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
EOF

    # Move the newly created oneAPI.repo file to the YUM configuration directory /etc/yum.repos.d
    mv /tmp/oneAPI.repo /etc/yum.repos.d

    # The xpu-smi packages
    dnf install -y flex bison xpu-smi
    # Compute and Media Runtimes
    dnf install -y \
        intel-opencl intel-media intel-mediasdk libmfxgen1 libvpl2 \
        level-zero intel-level-zero-gpu mesa-dri-drivers mesa-vulkan-drivers \
        mesa-vdpau-drivers libdrm mesa-libEGL mesa-libgbm mesa-libGL \
        mesa-libxatracker libvpl-tools intel-metrics-discovery \
        intel-metrics-library intel-igc-core intel-igc-cm \
        libva libva-utils intel-gmmlib libmetee intel-gsc intel-ocloc hwinfo clinfo
    # Development packages
    dnf install -y --refresh \
        intel-igc-opencl-devel level-zero-devel intel-gsc-devel libmetee-devel \
        level-zero-devel
    # Install Intel® oneAPI Base Toolkit
    dnf install intel-basekit -y

    # Cleanup
    dnf clean all
    rm -rf /var/cache/yum
    rm -rf /var/lib/yum/yumdb
    rm -rf /var/lib/yum/history
}


# The installation depends on the base OS
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
case "$ID" in
    ubuntu)
        install_ubuntu
        ;;
    centos)
        install_centos
        ;;
    *)
        echo "Unable to determine OS..."
        exit 1
        ;;
esac
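The Ubuntu branch above follows the standard keyring-plus-signed-by APT pattern: dearmor the vendor key into /usr/share/keyrings, then reference that exact keyring path in the sources entry. A minimal standalone sketch of the pattern, using a hypothetical repository URL:

# Illustrative sketch only; example.com stands in for a real vendor repo.
wget -qO - https://example.com/repo/KEY.PUB \
    | gpg --dearmor --output /usr/share/keyrings/example.gpg
echo "deb [signed-by=/usr/share/keyrings/example.gpg] https://example.com/repo stable main" \
    | tee /etc/apt/sources.list.d/example.list
apt-get update

The signed-by path has to match the file gpg wrote, otherwise apt-get update rejects the repository as unsigned.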
@ -1,44 +0,0 @@
ARG UBUNTU_VERSION

FROM ubuntu:${UBUNTU_VERSION}

ARG UBUNTU_VERSION

ENV DEBIAN_FRONTEND noninteractive

# Install common dependencies (so that this step can be cached separately)
COPY ./common/install_base.sh install_base.sh
RUN bash ./install_base.sh && rm install_base.sh

# Install missing libomp-dev
RUN apt-get update && apt-get install -y --no-install-recommends libomp-dev && apt-get autoclean && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# Install user
COPY ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh

# Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION
ARG CONDA_CMAKE
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
COPY ./common/install_conda.sh install_conda.sh
COPY ./common/common_utils.sh common_utils.sh
RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt

# Install cuda and cudnn
ARG CUDA_VERSION
RUN wget -q https://raw.githubusercontent.com/pytorch/builder/main/common/install_cuda.sh -O install_cuda.sh
RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh
ENV DESIRED_CUDA ${CUDA_VERSION}
ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH

# Note that Docker build forbids copying files from outside the build context
COPY ./common/install_linter.sh install_linter.sh
COPY ./common/common_utils.sh common_utils.sh
RUN bash ./install_linter.sh
RUN rm install_linter.sh common_utils.sh

USER jenkins
CMD ["bash"]
@ -15,7 +15,7 @@ click
#Pinned versions:
#test that import:

coremltools==5.0b5 ; python_version < "3.12"
coremltools==5.0b5
#Description: Apple framework for ML integration
#Pinned versions: 5.0b5
#test that import:
@ -25,11 +25,6 @@ coremltools==5.0b5 ; python_version < "3.12"
#Pinned versions:
#test that import:

dill==0.3.7
#Description: dill extends pickle with serializing and de-serializing for most built-ins
#Pinned versions: 0.3.7
#test that import: dynamo/test_replay_record.py test_dataloader.py test_datapipe.py test_serialization.py

expecttest==0.1.6
#Description: method for writing tests where test framework auto populates
# the expected output based on previous runs
@ -52,11 +47,6 @@ junitparser==2.1.1
#Pinned versions: 2.1.1
#test that import:

lark==0.12.0
#Description: parser
#Pinned versions: 0.12.0
#test that import:

librosa>=0.6.2 ; python_version < "3.11"
#Description: A python package for music and audio analysis
#Pinned versions: >=0.6.2
@ -76,7 +66,7 @@ librosa>=0.6.2 ; python_version < "3.11"
#Description: A testing library that allows you to replace parts of your
#system under test with mock objects
#Pinned versions:
#test that import: test_modules.py, test_nn.py,
#test that import: test_module_init.py, test_modules.py, test_nn.py,
#test_testing.py

#MonkeyType # breaks pytorch-xla-linux-bionic-py3.7-clang8
@ -85,10 +75,10 @@ librosa>=0.6.2 ; python_version < "3.11"
#Pinned versions:
#test that import:

mypy==1.10.0
mypy==1.4.1
# Pin MyPy version because new errors are likely to appear with each release
#Description: linter
#Pinned versions: 1.10.0
#Pinned versions: 1.4.1
#test that import: test_typing.py, test_type_hints.py

networkx==2.8.8
@ -134,22 +124,10 @@ opt-einsum==3.3
#Pinned versions: 3.3
#test that import: test_linalg.py

optree==0.11.0
#Description: A library for tree manipulation
#Pinned versions: 0.11.0
#test that import: test_vmap.py, test_aotdispatch.py, test_dynamic_shapes.py,
#test_pytree.py, test_ops.py, test_control_flow.py, test_modules.py,
#common_utils.py, test_eager_transforms.py, test_python_dispatch.py,
#test_expanded_weights.py, test_decomp.py, test_overrides.py, test_masked.py,
#test_ops.py, test_prims.py, test_subclass.py, test_functionalization.py,
#test_schema_check.py, test_profiler_tree.py, test_meta.py, test_torchxla_num_output.py,
#test_utils.py, test_proxy_tensor.py, test_memory_profiler.py, test_view_ops.py,
#test_pointwise_ops.py, test_dtensor_ops.py, test_torchinductor.py, test_fx.py,
#test_fake_tensor.py, test_mps.py

pillow==10.3.0
pillow==9.3.0 ; python_version <= "3.8"
pillow==9.5.0 ; python_version > "3.8"
#Description: Python Imaging Library fork
#Pinned versions: 10.3.0
#Pinned versions:
#test that import:

protobuf==3.20.2
@ -172,6 +150,11 @@ pytest-xdist==3.3.1
#Pinned versions:
#test that import:

pytest-shard==0.1.2
#Description: plugin for splitting up tests in pytest
#Pinned versions:
#test that import:

pytest-flakefinder==1.1.0
#Description: plugin for rerunning tests a fixed number of times in pytest
#Pinned versions: 1.1.0
@ -228,11 +211,12 @@ scikit-image==0.20.0 ; python_version >= "3.10"
#Pinned versions: 0.20.3
#test that import:

scipy==1.10.1 ; python_version <= "3.11"
scipy==1.12.0 ; python_version == "3.12"
scipy==1.6.3 ; python_version < "3.10"
scipy==1.8.1 ; python_version == "3.10"
scipy==1.10.1 ; python_version == "3.11"
# Pin SciPy because of failing distribution tests (see #60347)
#Description: scientific python
#Pinned versions: 1.10.1
#Pinned versions: 1.6.3
#test that import: test_unary_ufuncs.py, test_torch.py,test_tensor_creation_ops.py
#test_spectral_ops.py, test_sparse_csr.py, test_reductions.py,test_nn.py
#test_linalg.py, test_binary_ufuncs.py
@ -247,8 +231,7 @@ tb-nightly==2.13.0a20230426
#Pinned versions:
#test that import:

# needed by torchgen utils
typing-extensions
#typing-extensions
#Description: type hints for python
#Pinned versions:
#test that import:
@ -263,10 +246,9 @@ unittest-xml-reporting<=3.2.0,>=2.0.0
#Pinned versions:
#test that import:

#lintrunner is supported on aarch64-linux only from 0.12.4 version
lintrunner==0.12.5
lintrunner==0.10.7
#Description: all about linters!
#Pinned versions: 0.12.5
#Pinned versions: 0.10.7
#test that import:

rockset==1.0.3
@ -274,14 +256,14 @@ rockset==1.0.3
#Pinned versions: 1.0.3
#test that import:

ghstack==0.8.0
ghstack==0.7.1
#Description: ghstack tool
#Pinned versions: 0.8.0
#Pinned versions: 0.7.1
#test that import:

jinja2==3.1.4
jinja2==3.1.2
#Description: jinja2 template engine
#Pinned versions: 3.1.4
#Pinned versions: 3.1.2
#test that import:

pytest-cpp==2.3.0
@ -298,17 +280,3 @@ tensorboard==2.13.0
#Description: Also included in .ci/docker/requirements-docs.txt
#Pinned versions:
#test that import: test_tensorboard

pywavelets==1.4.1 ; python_version < "3.12"
pywavelets==1.5.0 ; python_version >= "3.12"
#Description: This is a requirement of scikit-image, we need to pin
# it here because 1.5.0 conflicts with numpy 1.21.2 used in CI
#Pinned versions: 1.4.1
#test that import:

lxml==5.0.0
#Description: This is a requirement of unittest-xml-reporting

# Python-3.9 binaries

PyGithub==2.3.0
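Several pins above use PEP 508 environment markers (the `; python_version < "3.12"` suffixes), which pip evaluates against the running interpreter, so one requirements file can pin different versions per Python release. A quick way to observe a marker in action, reusing one of the specs above on the command line:

# On Python 3.12 or newer pip ignores this requirement entirely
# ("markers ... don't match your environment"); on older interpreters
# it installs the pinned version.
python -m pip install 'coremltools==5.0b5 ; python_version < "3.12"'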
@ -1 +1 @@
3.0.0
2.1.0
@ -56,7 +56,7 @@ RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}

# (optional) Install vision packages like OpenCV
# (optional) Install vision packages like OpenCV and ffmpeg
ARG VISION
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
@ -79,6 +79,12 @@ ENV OPENSSL_ROOT_DIR /opt/openssl
RUN bash ./install_openssl.sh
ENV OPENSSL_DIR /opt/openssl

# (optional) Install non-default CMake version
ARG CMAKE_VERSION
COPY ./common/install_cmake.sh install_cmake.sh
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
RUN rm install_cmake.sh

ARG INDUCTOR_BENCHMARKS
COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
COPY ./common/common_utils.sh common_utils.sh
@ -87,12 +93,6 @@ COPY ci_commit_pins/timm.txt timm.txt
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt

# (optional) Install non-default CMake version
ARG CMAKE_VERSION
COPY ./common/install_cmake.sh install_cmake.sh
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
RUN rm install_cmake.sh

ARG TRITON
# Install triton, this needs to be done before sccache because the latter will
# try to reach out to S3, which docker build runners don't have access to
@ -103,14 +103,6 @@ COPY triton_version.txt triton_version.txt
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
RUN rm install_triton.sh common_utils.sh triton.txt triton_version.txt

ARG HALIDE
# Build and install halide
COPY ./common/install_halide.sh install_halide.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/halide.txt halide.txt
RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi
RUN rm install_halide.sh common_utils.sh halide.txt

# Install ccache/sccache (do this last, so we get priority in PATH)
COPY ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
@ -147,20 +139,13 @@ COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
ARG CUDNN_VERSION
ARG CUDA_VERSION
COPY ./common/install_cudnn.sh install_cudnn.sh
RUN if [ -n "${CUDNN_VERSION}" ]; then bash install_cudnn.sh; fi
RUN if [ "${CUDNN_VERSION}" -eq 8 ]; then bash install_cudnn.sh; fi
RUN rm install_cudnn.sh

# Install CUSPARSELT
ARG CUDA_VERSION
COPY ./common/install_cusparselt.sh install_cusparselt.sh
RUN bash install_cusparselt.sh
RUN rm install_cusparselt.sh

# Delete /usr/local/cuda-11.X/cuda-11.X symlinks
RUN if [ -h /usr/local/cuda-11.6/cuda-11.6 ]; then rm /usr/local/cuda-11.6/cuda-11.6; fi
RUN if [ -h /usr/local/cuda-11.7/cuda-11.7 ]; then rm /usr/local/cuda-11.7/cuda-11.7; fi
RUN if [ -h /usr/local/cuda-12.1/cuda-12.1 ]; then rm /usr/local/cuda-12.1/cuda-12.1; fi
RUN if [ -h /usr/local/cuda-12.4/cuda-12.4 ]; then rm /usr/local/cuda-12.4/cuda-12.4; fi

USER jenkins
CMD ["bash"]
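The ARG/COPY/RUN-if triads in these Dockerfiles make every optional component opt-in at image build time: the install script is always copied into the image, but only runs when the corresponding build arg is non-empty. An illustrative invocation (tag and paths are hypothetical; in CI the args are assembled by build.sh):

# TRITON is set, so install_triton.sh runs; CMAKE_VERSION is left empty,
# so the CMake step is skipped by its `if [ -n ... ]` guard.
docker build \
    --build-arg TRITON=yes \
    --build-arg CMAKE_VERSION= \
    -t pytorch-ci-ubuntu-cuda:dev \
    -f ubuntu-cuda/Dockerfile .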
@ -53,7 +53,7 @@ RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}

# (optional) Install vision packages like OpenCV
# (optional) Install vision packages like OpenCV and ffmpeg
ARG VISION
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
@ -78,11 +78,6 @@ ENV MAGMA_HOME /opt/rocm/magma
ENV LANG C.UTF-8
ENV LC_ALL C.UTF-8

# Install amdsmi
COPY ./common/install_amdsmi.sh install_amdsmi.sh
RUN bash ./install_amdsmi.sh
RUN rm install_amdsmi.sh

# (optional) Install non-default CMake version
ARG CMAKE_VERSION
COPY ./common/install_cmake.sh install_cmake.sh
@ -105,13 +100,6 @@ COPY triton_version.txt triton_version.txt
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
RUN rm install_triton.sh common_utils.sh triton-rocm.txt triton_version.txt

# Install AOTriton
COPY ./aotriton_version.txt aotriton_version.txt
COPY ./common/common_utils.sh common_utils.sh
COPY ./common/install_aotriton.sh install_aotriton.sh
RUN ["/bin/bash", "-c", "./install_aotriton.sh /opt/rocm && rm -rf install_aotriton.sh aotriton_version.txt common_utils.sh"]
ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton

# Install ccache/sccache (do this last, so we get priority in PATH)
COPY ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
@ -1,118 +0,0 @@
ARG UBUNTU_VERSION

FROM ubuntu:${UBUNTU_VERSION}

ARG UBUNTU_VERSION

ENV DEBIAN_FRONTEND noninteractive

ARG CLANG_VERSION

# Install common dependencies (so that this step can be cached separately)
COPY ./common/install_base.sh install_base.sh
RUN bash ./install_base.sh && rm install_base.sh

# Install clang
ARG LLVMDEV
COPY ./common/install_clang.sh install_clang.sh
RUN bash ./install_clang.sh && rm install_clang.sh

# Install user
COPY ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh

# Install katex
ARG KATEX
COPY ./common/install_docs_reqs.sh install_docs_reqs.sh
RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh

# Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION
ARG CONDA_CMAKE
ARG DOCS
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
ENV DOCS=$DOCS
COPY requirements-ci.txt requirements-docs.txt /opt/conda/
COPY ./common/install_conda.sh install_conda.sh
COPY ./common/common_utils.sh common_utils.sh
RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt /opt/conda/requirements-docs.txt

# Install gcc
ARG GCC_VERSION
COPY ./common/install_gcc.sh install_gcc.sh
RUN bash ./install_gcc.sh && rm install_gcc.sh

# Install lcov for C++ code coverage
COPY ./common/install_lcov.sh install_lcov.sh
RUN bash ./install_lcov.sh && rm install_lcov.sh

COPY ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh
ENV OPENSSL_ROOT_DIR /opt/openssl
ENV OPENSSL_DIR /opt/openssl
RUN rm install_openssl.sh

ARG INDUCTOR_BENCHMARKS
COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/huggingface.txt huggingface.txt
COPY ci_commit_pins/timm.txt timm.txt
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt

# Install XPU Dependencies
ARG XPU_VERSION
COPY ./common/install_xpu.sh install_xpu.sh
RUN bash ./install_xpu.sh && rm install_xpu.sh

ARG TRITON
# Install triton, this needs to be done before sccache because the latter will
# try to reach out to S3, which docker build runners don't have access to
COPY ./common/install_triton.sh install_triton.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/triton-xpu.txt triton-xpu.txt
COPY triton_version.txt triton_version.txt
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
RUN rm install_triton.sh common_utils.sh triton-xpu.txt triton_version.txt

# (optional) Install database packages like LMDB and LevelDB
ARG DB
COPY ./common/install_db.sh install_db.sh
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}

# (optional) Install vision packages like OpenCV
ARG VISION
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
RUN rm install_vision.sh cache_vision_models.sh common_utils.sh
ENV INSTALLED_VISION ${VISION}

# (optional) Install non-default CMake version
ARG CMAKE_VERSION
COPY ./common/install_cmake.sh install_cmake.sh
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
RUN rm install_cmake.sh

# (optional) Install non-default Ninja version
ARG NINJA_VERSION
COPY ./common/install_ninja.sh install_ninja.sh
RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
RUN rm install_ninja.sh

# Install ccache/sccache (do this last, so we get priority in PATH)
COPY ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
RUN bash ./install_cache.sh && rm install_cache.sh

# Include BUILD_ENVIRONMENT environment variable in image
ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}

# Install LLVM dev version (Defined in the pytorch/builder github repository)
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm

USER jenkins
CMD ["bash"]
@ -17,6 +17,13 @@ ARG LLVMDEV
COPY ./common/install_clang.sh install_clang.sh
RUN bash ./install_clang.sh && rm install_clang.sh

# (optional) Install thrift.
ARG THRIFT
COPY ./common/install_thrift.sh install_thrift.sh
RUN if [ -n "${THRIFT}" ]; then bash ./install_thrift.sh; fi
RUN rm install_thrift.sh
ENV INSTALLED_THRIFT ${THRIFT}

# Install user
COPY ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh
@ -37,7 +44,6 @@ COPY requirements-ci.txt requirements-docs.txt /opt/conda/
COPY ./common/install_conda.sh install_conda.sh
COPY ./common/common_utils.sh common_utils.sh
RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt /opt/conda/requirements-docs.txt
RUN if [ -n "${UNINSTALL_DILL}" ]; then pip uninstall -y dill; fi

# Install gcc
ARG GCC_VERSION
@ -80,7 +86,7 @@ RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}

# (optional) Install vision packages like OpenCV
# (optional) Install vision packages like OpenCV and ffmpeg
ARG VISION
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
@ -147,41 +153,16 @@ COPY ci_commit_pins/triton.txt triton.txt
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
RUN rm install_triton.sh common_utils.sh triton.txt

ARG EXECUTORCH
# Build and install executorch
COPY ./common/install_executorch.sh install_executorch.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/executorch.txt executorch.txt
RUN if [ -n "${EXECUTORCH}" ]; then bash ./install_executorch.sh; fi
RUN rm install_executorch.sh common_utils.sh executorch.txt

ARG HALIDE
# Build and install halide
COPY ./common/install_halide.sh install_halide.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/halide.txt halide.txt
RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi
RUN rm install_halide.sh common_utils.sh halide.txt

ARG ONNX
# Install ONNX dependencies
COPY ./common/install_onnx.sh ./common/common_utils.sh ./
RUN if [ -n "${ONNX}" ]; then bash ./install_onnx.sh; fi
RUN rm install_onnx.sh common_utils.sh

# (optional) Build ACL
ARG ACL
COPY ./common/install_acl.sh install_acl.sh
RUN if [ -n "${ACL}" ]; then bash ./install_acl.sh; fi
RUN rm install_acl.sh
ENV INSTALLED_ACL ${ACL}

# Install ccache/sccache (do this last, so we get priority in PATH)
ARG SKIP_SCCACHE_INSTALL
COPY ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
RUN if [ -z "${SKIP_SCCACHE_INSTALL}" ]; then bash ./install_cache.sh; fi
RUN rm install_cache.sh
RUN bash ./install_cache.sh && rm install_cache.sh

# Add jni.h for java host build
COPY ./common/install_jni.sh install_jni.sh
@ -198,9 +179,7 @@ ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}

# Install LLVM dev version (Defined in the pytorch/builder github repository)
ARG SKIP_LLVM_SRC_BUILD_INSTALL
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
RUN if [ -n "${SKIP_LLVM_SRC_BUILD_INSTALL}" ]; then set -eu; rm -rf /opt/llvm; fi

# AWS specific CUDA build guidance
ENV TORCH_CUDA_ARCH_LIST Maxwell
@ -1,9 +1,5 @@
#!/bin/bash

set -ex

source "$(dirname "${BASH_SOURCE[0]}")/../pytorch/common_utils.sh"

LOCAL_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
ROOT_DIR=$(cd "$LOCAL_DIR"/../.. && pwd)
TEST_DIR="$ROOT_DIR/test"
@ -3,19 +3,10 @@
# shellcheck source=./common.sh
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

# Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96)
WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace")
cleanup_workspace() {
    echo "sudo may print the following warning message that can be ignored. The chown command will still run."
    echo " sudo: setrlimit(RLIMIT_STACK): Operation not permitted"
    echo "For more details refer to https://github.com/sudo-project/sudo/issues/42"
    sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace
# Used to retry the ONNX test; it makes at most two attempts
retry () {
    "$@" || (sleep 60 && "$@")
}
# Disable shellcheck SC2064 as we want to parse the original owner immediately.
# shellcheck disable=SC2064
trap_add cleanup_workspace EXIT
sudo chown -R jenkins /var/lib/jenkins/workspace
git config --global --add safe.directory /var/lib/jenkins/workspace

if [[ "$BUILD_ENVIRONMENT" == *onnx* ]]; then
    # TODO: This can be removed later once vision is also part of the Docker image
@ -25,5 +16,5 @@ if [[ "$BUILD_ENVIRONMENT" == *onnx* ]]; then
    # NB: The ONNX test is fast (~15m), so it's OK to retry it a few more times to avoid flaky failures. We
    # need to bring this to the standard PyTorch run_test eventually. The issue will be tracked in
    # https://github.com/pytorch/pytorch/issues/98626
    "$ROOT_DIR/scripts/onnx/test.sh"
    retry "$ROOT_DIR/scripts/onnx/test.sh"
fi
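For clarity on the retry helper introduced above: `"$@" || (sleep 60 && "$@")` runs the command once and, only on failure, waits 60 seconds and runs it exactly once more, so a flaky ONNX run gets two attempts in total. A self-contained sketch under that assumption (the flag file is hypothetical):

retry () {
    "$@" || (sleep 60 && "$@")
}
# First attempt creates the flag and fails; the single retry then
# finds the flag and succeeds, so `retry` exits 0 overall.
retry bash -c 'test -f /tmp/flaky.flag || { touch /tmp/flaky.flag; exit 1; }'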
@ -28,8 +28,6 @@ echo "Environment variables:"
env

if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
    # Use jemalloc during compilation to mitigate https://github.com/pytorch/pytorch/issues/116289
    export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2
    echo "NVCC version:"
    nvcc --version
fi
@ -44,7 +42,15 @@ if [[ "$BUILD_ENVIRONMENT" == *cuda11* ]]; then
    fi
fi

if [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
if [[ ${BUILD_ENVIRONMENT} == *"caffe2"* ]]; then
    echo "Caffe2 build is ON"
    export BUILD_CAFFE2=ON
fi

if [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
    export ATEN_THREADING=TBB
    export USE_TBB=1
elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
    export ATEN_THREADING=NATIVE
fi

@ -57,12 +63,6 @@ else
    export LLVM_DIR=/opt/llvm/lib/cmake/llvm
fi

if [[ "$BUILD_ENVIRONMENT" == *executorch* ]]; then
    # To build test_edge_op_registration
    export BUILD_EXECUTORCH=ON
    export USE_CUDA=0
fi

if ! which conda; then
    # In ROCm CIs, we are doing cross compilation on build machines with
    # intel cpu and later run tests on machines with amd cpu.
@ -73,35 +73,7 @@ if ! which conda; then
        export USE_MKLDNN=0
    fi
else
    # CMAKE_PREFIX_PATH precedences
    # 1. $CONDA_PREFIX, if defined. This follows the pytorch official build instructions.
    # 2. /opt/conda/envs/py_${ANACONDA_PYTHON_VERSION}, if ANACONDA_PYTHON_VERSION defined.
    #    This is for CI, which defines ANACONDA_PYTHON_VERSION but not CONDA_PREFIX.
    # 3. $(conda info --base). The fallback value of pytorch official build
    #    instructions actually refers to this.
    #    Commonly this is /opt/conda/
    if [[ -v CONDA_PREFIX ]]; then
        export CMAKE_PREFIX_PATH=${CONDA_PREFIX}
    elif [[ -v ANACONDA_PYTHON_VERSION ]]; then
        export CMAKE_PREFIX_PATH="/opt/conda/envs/py_${ANACONDA_PYTHON_VERSION}"
    else
        # already checked by `! which conda`
        CMAKE_PREFIX_PATH="$(conda info --base)"
        export CMAKE_PREFIX_PATH
    fi

    # Workaround required for MKL library linkage
    # https://github.com/pytorch/pytorch/issues/119557
    if [ "$ANACONDA_PYTHON_VERSION" = "3.12" ]; then
        export CMAKE_LIBRARY_PATH="/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/lib/"
        export CMAKE_INCLUDE_PATH="/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/include/"
    fi
fi

if [[ "$BUILD_ENVIRONMENT" == *aarch64* ]]; then
    export USE_MKLDNN=1
    export USE_MKLDNN_ACL=1
    export ACL_ROOT_DIR=/ComputeLibrary
    export CMAKE_PREFIX_PATH=/opt/conda
fi

if [[ "$BUILD_ENVIRONMENT" == *libtorch* ]]; then
@ -173,12 +145,6 @@ if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
    python tools/amd_build/build_amd.py
fi

if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
    # shellcheck disable=SC1091
    source /opt/intel/oneapi/compiler/latest/env/vars.sh
    export USE_XPU=1
fi

# sccache will fail for CUDA builds if all cores are used for compiling
# gcc 7 with sccache seems to have an intermittent OOM issue if all cores are used
if [ -z "$MAX_JOBS" ]; then
@ -193,14 +159,6 @@ if [[ "$BUILD_ENVIRONMENT" == *cuda* && -z "$TORCH_CUDA_ARCH_LIST" ]]; then
    exit 1
fi

# We only build FlashAttention files for CUDA 8.0+, and they require large amounts of
# memory to build and will OOM
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]] && [[ "$TORCH_CUDA_ARCH_LIST" == *"8.6"* || "$TORCH_CUDA_ARCH_LIST" == *"8.0"* ]]; then
    echo "WARNING: FlashAttention files require large amounts of memory to build and will OOM"
    echo "Setting MAX_JOBS=(nproc-2)/3 to reduce memory usage"
    export MAX_JOBS="$(( $(nproc --ignore=2) / 3 ))"
fi
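To make the MAX_JOBS arithmetic concrete, on a hypothetical 32-core runner `nproc --ignore=2` reports 30, so the FlashAttention-heavy build proceeds with 30 / 3 = 10 parallel jobs:

# (32 cores - 2 ignored) / 3 = 10 compile jobs on a 32-core machine.
MAX_JOBS="$(( $(nproc --ignore=2) / 3 ))"
echo "building with ${MAX_JOBS} jobs"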
if [[ "${BUILD_ENVIRONMENT}" == *clang* ]]; then
|
||||
export CC=clang
|
||||
export CXX=clang++
|
||||
@ -210,6 +168,7 @@ if [[ "$BUILD_ENVIRONMENT" == *-clang*-asan* ]]; then
|
||||
export LDSHARED="clang --shared"
|
||||
export USE_CUDA=0
|
||||
export USE_ASAN=1
|
||||
export USE_MKLDNN=0
|
||||
export UBSAN_FLAGS="-fno-sanitize-recover=all;-fno-sanitize=float-divide-by-zero;-fno-sanitize=float-cast-overflow"
|
||||
unset USE_LLVM
|
||||
fi
|
||||
@ -230,24 +189,6 @@ if [[ "${BUILD_ENVIRONMENT}" != *android* && "${BUILD_ENVIRONMENT}" != *cuda* ]]
|
||||
export BUILD_STATIC_RUNTIME_BENCHMARK=ON
|
||||
fi
|
||||
|
||||
# Do not change workspace permissions for ROCm CI jobs
|
||||
# as it can leave workspace with bad permissions for cancelled jobs
|
||||
if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then
|
||||
# Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96)
|
||||
WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace")
|
||||
cleanup_workspace() {
|
||||
echo "sudo may print the following warning message that can be ignored. The chown command will still run."
|
||||
echo " sudo: setrlimit(RLIMIT_STACK): Operation not permitted"
|
||||
echo "For more details refer to https://github.com/sudo-project/sudo/issues/42"
|
||||
sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace
|
||||
}
|
||||
# Disable shellcheck SC2064 as we want to parse the original owner immediately.
|
||||
# shellcheck disable=SC2064
|
||||
trap_add cleanup_workspace EXIT
|
||||
sudo chown -R jenkins /var/lib/jenkins/workspace
|
||||
git config --global --add safe.directory /var/lib/jenkins/workspace
|
||||
fi
|
||||
|
||||
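The EXIT trap above is what makes the ownership restore reliable: it fires on every exit path, including failures and cancellations (`trap_add` is a helper defined elsewhere in these scripts that appends rather than replaces handlers). A minimal sketch of the underlying mechanism with plain `trap`:

set -e
restore() { echo "restoring workspace ownership"; }
trap restore EXIT
false   # a failing step aborts the script, but restore() still runs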
if [[ "$BUILD_ENVIRONMENT" == *-bazel-* ]]; then
|
||||
set -e
|
||||
|
||||
@ -273,37 +214,16 @@ else
|
||||
( ! get_exit_code python setup.py clean bad_argument )
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" != *libtorch* ]]; then
|
||||
|
||||
# rocm builds fail when WERROR=1
|
||||
# XLA test build fails when WERROR=1
|
||||
# set only when building other architectures
|
||||
# or building non-XLA tests.
|
||||
if [[ "$BUILD_ENVIRONMENT" != *rocm* &&
|
||||
"$BUILD_ENVIRONMENT" != *xla* ]]; then
|
||||
if [[ "$BUILD_ENVIRONMENT" != *py3.8* ]]; then
|
||||
# Install numpy-2.0 release candidate for builds
|
||||
# Which should be backward compatible with Numpy-1.X
|
||||
python -mpip install --pre numpy==2.0.0rc1
|
||||
fi
|
||||
|
||||
WERROR=1 python setup.py clean
|
||||
|
||||
if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
|
||||
BUILD_LIBTORCH_WHL=1 BUILD_PYTHON_ONLY=0 python setup.py bdist_wheel
|
||||
BUILD_LIBTORCH_WHL=0 BUILD_PYTHON_ONLY=1 python setup.py bdist_wheel --cmake
|
||||
else
|
||||
WERROR=1 python setup.py bdist_wheel
|
||||
fi
|
||||
WERROR=1 python setup.py bdist_wheel
|
||||
else
|
||||
python setup.py clean
|
||||
if [[ "$BUILD_ENVIRONMENT" == *xla* ]]; then
|
||||
source .ci/pytorch/install_cache_xla.sh
|
||||
fi
|
||||
if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
|
||||
echo "USE_SPLIT_BUILD cannot be used with xla or rocm"
|
||||
exit 1
|
||||
else
|
||||
python setup.py bdist_wheel
|
||||
fi
|
||||
python setup.py bdist_wheel
|
||||
fi
|
||||
pip_install_whl "$(echo dist/*.whl)"
|
||||
|
||||
@ -342,10 +262,9 @@ else
|
||||
CUSTOM_OP_TEST="$PWD/test/custom_operator"
|
||||
python --version
|
||||
SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
|
||||
|
||||
mkdir -p "$CUSTOM_OP_BUILD"
|
||||
pushd "$CUSTOM_OP_BUILD"
|
||||
cmake "$CUSTOM_OP_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch;$SITE_PACKAGES" -DPython_EXECUTABLE="$(which python)" \
|
||||
cmake "$CUSTOM_OP_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPYTHON_EXECUTABLE="$(which python)" \
|
||||
-DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM"
|
||||
make VERBOSE=1
|
||||
popd
|
||||
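For orientation on the cmake invocations above: `get_python_lib()` resolves to the active environment's site-packages directory, and the installed torch wheel ships its CMake package config under `torch/share/cmake`, which is why `$SITE_PACKAGES/torch` works as a CMAKE_PREFIX_PATH entry. A quick check (the conda path in the comment is hypothetical):

SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
echo "$SITE_PACKAGES"   # e.g. /opt/conda/envs/py_3.10/lib/python3.10/site-packages
ls "$SITE_PACKAGES/torch/share/cmake/Torch/TorchConfig.cmake"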
@ -358,7 +277,7 @@ else
    SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
    mkdir -p "$JIT_HOOK_BUILD"
    pushd "$JIT_HOOK_BUILD"
    cmake "$JIT_HOOK_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch;$SITE_PACKAGES" -DPython_EXECUTABLE="$(which python)" \
    cmake "$JIT_HOOK_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPYTHON_EXECUTABLE="$(which python)" \
          -DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM"
    make VERBOSE=1
    popd
@ -370,7 +289,7 @@ else
    python --version
    mkdir -p "$CUSTOM_BACKEND_BUILD"
    pushd "$CUSTOM_BACKEND_BUILD"
    cmake "$CUSTOM_BACKEND_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch;$SITE_PACKAGES" -DPython_EXECUTABLE="$(which python)" \
    cmake "$CUSTOM_BACKEND_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPYTHON_EXECUTABLE="$(which python)" \
          -DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM"
    make VERBOSE=1
    popd
@ -401,8 +320,4 @@ if [[ "$BUILD_ENVIRONMENT" != *libtorch* && "$BUILD_ENVIRONMENT" != *bazel* ]];
    python tools/stats/export_test_times.py
fi

# snadampal: skipping this until sccache support is added for aarch64
# https://github.com/pytorch/pytorch/issues/121559
if [[ "$BUILD_ENVIRONMENT" != *aarch64* ]]; then
    print_sccache_stats
fi
print_sccache_stats
@ -43,7 +43,7 @@ function assert_git_not_dirty() {
    # TODO: we should add an option to `build_amd.py` that reverts the repo to
    # an unmodified state.
    if [[ "$BUILD_ENVIRONMENT" != *rocm* ]] && [[ "$BUILD_ENVIRONMENT" != *xla* ]] ; then
        git_status=$(git status --porcelain | grep -v '?? third_party' || true)
        git_status=$(git status --porcelain)
        if [[ $git_status ]]; then
            echo "Build left local git repository checkout dirty"
            echo "git status --porcelain:"
@ -56,29 +56,9 @@ function assert_git_not_dirty() {
function pip_install_whl() {
    # This is used to install PyTorch and other build-artifact wheels locally
    # without using any network connection

    # Convert the input arguments into an array
    local args=("$@")

    # Check if the first argument contains multiple paths separated by spaces
    if [[ "${args[0]}" == *" "* ]]; then
        # Split the string by spaces into an array
        IFS=' ' read -r -a paths <<< "${args[0]}"
        # Loop through each path and install individually
        for path in "${paths[@]}"; do
            echo "Installing $path"
            python3 -mpip install --no-index --no-deps "$path"
        done
    else
        # Loop through each argument and install individually
        for path in "${args[@]}"; do
            echo "Installing $path"
            python3 -mpip install --no-index --no-deps "$path"
        done
    fi
    python3 -mpip install --no-index --no-deps "$@"
}
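The one-line form of pip_install_whl relies on the shell having already split the wheel paths into separate arguments; the multi-path branch exists only to re-split a single space-joined string. Illustrations of the two call shapes, assuming some wheels exist under dist/:

# The unquoted glob expands to one argument per wheel,
# and "$@" forwards each one to pip individually.
pip_install_whl dist/*.whl
# The quoted command-substitution form collapses all paths into one
# word, which is what the re-splitting branch compensates for.
pip_install_whl "$(echo dist/*.whl)"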
function pip_install() {
    # retry 3 times
    # old versions of pip don't have the "--progress-bar" flag
@ -178,11 +158,6 @@ function install_torchvision() {
    fi
}

function install_tlparse() {
    pip_install --user "tlparse==0.3.7"
    PATH="$(python -m site --user-base)/bin:$PATH"
}

function install_torchrec_and_fbgemm() {
    local torchrec_commit
    torchrec_commit=$(get_pinned_commit torchrec)
@ -196,9 +171,16 @@ function install_torchrec_and_fbgemm() {
    pip_install --no-use-pep517 --user "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
}

function install_numpy_pytorch_interop() {
    local commit
    commit=$(get_pinned_commit numpy_pytorch_interop)
    # TODO: --no-use-pep517 will result in failure.
    pip_install --user "git+https://github.com/Quansight-Labs/numpy_pytorch_interop.git@${commit}"
}

function clone_pytorch_xla() {
    if [[ ! -d ./xla ]]; then
        git clone --recursive --quiet https://github.com/pytorch/xla.git
        git clone --recursive -b r2.1 https://github.com/pytorch/xla.git
        pushd xla
        # pin the xla hash so that we don't get broken by changes to xla
        git checkout "$(cat ../.github/ci_commit_pins/xla.txt)"
@ -208,6 +190,37 @@ function clone_pytorch_xla() {
    fi
}

function checkout_install_torchdeploy() {
    local commit
    commit=$(get_pinned_commit multipy)
    pushd ..
    git clone --recurse-submodules https://github.com/pytorch/multipy.git
    pushd multipy
    git checkout "${commit}"
    python multipy/runtime/example/generate_examples.py
    BUILD_CUDA_TESTS=1 pip install -e .
    popd
    popd
}

function test_torch_deploy(){
    pushd ..
    pushd multipy
    ./multipy/runtime/build/test_deploy
    ./multipy/runtime/build/test_deploy_gpu
    popd
    popd
}

function install_timm() {
    local commit
    commit=$(get_pinned_commit timm)
    pip_install pandas
    pip_install scipy
    pip_install z3-solver
    pip_install "git+https://github.com/rwightman/pytorch-image-models@${commit}"
}

function checkout_install_torchbench() {
    local commit
    commit=$(get_pinned_commit torchbench)
@ -222,8 +235,6 @@ function checkout_install_torchbench() {
        # to install and test other models
        python install.py --continue_on_fail
    fi
    echo "Print all dependencies after TorchBench is installed"
    python -mpip freeze
    popd
}

@ -6,4 +6,4 @@ source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
echo "Testing pytorch docs"

cd docs
TERM=vt100 make doctest
make doctest

@ -1,37 +0,0 @@
#!/bin/bash

# Script for installing sccache on the xla build job, which uses xla's docker
# image and doesn't have sccache installed on it. This is mostly copied from
# .ci/docker/install_cache.sh. Changes are: removing checks that will always
# return the same thing (e.g. checks for rocm and CUDA), changing the path
# where sccache is installed, and not changing /etc/environment.

set -ex

install_binary() {
    echo "Downloading sccache binary from S3 repo"
    curl --retry 3 https://s3.amazonaws.com/ossci-linux/sccache -o /tmp/cache/bin/sccache
}

mkdir -p /tmp/cache/bin
mkdir -p /tmp/cache/lib
export PATH="/tmp/cache/bin:$PATH"

install_binary
chmod a+x /tmp/cache/bin/sccache

function write_sccache_stub() {
    # Unset LD_PRELOAD for ps because of asan + ps issues
    # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90589
    # shellcheck disable=SC2086
    # shellcheck disable=SC2059
    printf "#!/bin/sh\nif [ \$(env -u LD_PRELOAD ps -p \$PPID -o comm=) != sccache ]; then\n exec sccache $(which $1) \"\$@\"\nelse\n exec $(which $1) \"\$@\"\nfi" > "/tmp/cache/bin/$1"
    chmod a+x "/tmp/cache/bin/$1"
}

write_sccache_stub cc
write_sccache_stub c++
write_sccache_stub gcc
write_sccache_stub g++
write_sccache_stub clang
write_sccache_stub clang++
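Expanded, the printf above generates a tiny dispatcher per compiler name: if the parent process is not already sccache, re-exec through sccache; otherwise call the real compiler to avoid infinite recursion. For `write_sccache_stub gcc` on a machine where `which gcc` resolves to /usr/bin/gcc (a hypothetical path), /tmp/cache/bin/gcc would contain roughly the following, reformatted here for readability:

#!/bin/sh
# Generated stub: route through sccache unless sccache itself invoked us.
if [ $(env -u LD_PRELOAD ps -p $PPID -o comm=) != sccache ]; then
    exec sccache /usr/bin/gcc "$@"
else
    exec /usr/bin/gcc "$@"
fi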
@ -43,7 +43,7 @@ cross_compile_arm64() {
compile_arm64() {
    # Compilation for arm64
    # TODO: Compile with OpenMP support (but this causes CI regressions as cross-compilation was done with OpenMP disabled)
    USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel
    USE_DISTRIBUTED=0 USE_OPENMP=0 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel
}

compile_x86_64() {

@ -9,7 +9,7 @@ sysctl -a | grep machdep.cpu

# These are required for both the build job and the test job.
# In the latter to test cpp extensions.
export MACOSX_DEPLOYMENT_TARGET=11.1
export MACOSX_DEPLOYMENT_TARGET=11.0
export CXX=clang++
export CC=clang

@ -149,8 +149,6 @@ test_jit_hooks() {
    assert_git_not_dirty
}

install_tlparse

if [[ $NUM_TEST_SHARDS -gt 1 ]]; then
    test_python_shard "${SHARD_NUMBER}"
    if [[ "${SHARD_NUMBER}" == 1 ]]; then

@ -19,7 +19,6 @@ time python test/run_test.py --verbose -i distributed/test_c10d_nccl
time python test/run_test.py --verbose -i distributed/test_c10d_spawn_gloo
time python test/run_test.py --verbose -i distributed/test_c10d_spawn_nccl
time python test/run_test.py --verbose -i distributed/test_store
time python test/run_test.py --verbose -i distributed/test_symmetric_memory
time python test/run_test.py --verbose -i distributed/test_pg_wrapper
time python test/run_test.py --verbose -i distributed/rpc/cuda/test_tensorpipe_agent
# FSDP tests
@ -35,28 +34,19 @@ time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/test
# functional collective tests
time python test/run_test.py --verbose -i distributed/test_functional_api

# DTensor tests
time python test/run_test.py --verbose -i distributed/_tensor/test_device_mesh
time python test/run_test.py --verbose -i distributed/_tensor/test_random_ops
time python test/run_test.py --verbose -i distributed/_tensor/test_dtensor_compile

# DeviceMesh test
time python test/run_test.py --verbose -i distributed/test_device_mesh

# DTensor/TP tests
time python test/run_test.py --verbose -i distributed/tensor/parallel/test_ddp_2d_parallel
time python test/run_test.py --verbose -i distributed/tensor/parallel/test_fsdp_2d_parallel
time python test/run_test.py --verbose -i distributed/tensor/parallel/test_tp_examples
time python test/run_test.py --verbose -i distributed/tensor/parallel/test_tp_random_state

# FSDP2 tests
time python test/run_test.py --verbose -i distributed/_composable/fsdp/test_fully_shard_training -- -k test_2d_mlp_with_nd_mesh

# Pipelining composability tests
time python test/run_test.py --verbose -i distributed/pipelining/test_composability.py

# Other tests
time python test/run_test.py --verbose -i test_cuda_primary_ctx
time python test/run_test.py --verbose -i test_optim -- -k test_forloop_goes_right_direction_multigpu
time python test/run_test.py --verbose -i test_optim -- -k test_mixed_device_dtype
time python test/run_test.py --verbose -i test_optim -- -k optimizers_with_varying_tensors
time python test/run_test.py --verbose -i test_foreach -- -k test_tensors_grouping
assert_git_not_dirty

@ -59,16 +59,16 @@ print("sample mean: ", sample_mean)
print("sample sigma: ", sample_sigma)

if math.isnan(sample_mean):
    raise Exception("""Error: sample mean is NaN""")  # noqa: TRY002
    raise Exception("""Error: sample mean is NaN""")
elif math.isnan(sample_sigma):
    raise Exception("""Error: sample sigma is NaN""")  # noqa: TRY002
    raise Exception("""Error: sample sigma is NaN""")

z_value = (sample_mean - mean) / sigma

print("z-value: ", z_value)

if z_value >= 3:
    raise Exception(  # noqa: TRY002
    raise Exception(
        f"""\n
z-value >= 3, there is high chance of perf regression.\n
To reproduce this regression, run
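A worked example of the gate above, with made-up numbers: if the historical mean is 100 samples/s with sigma 2.0, a sample mean of 107 gives z = (107 - 100) / 2.0 = 3.5, which exceeds the threshold of 3 and fails the job:

# z-value arithmetic from the script above, on hypothetical numbers.
python -c 'mean, sigma, sample_mean = 100.0, 2.0, 107.0; print((sample_mean - mean) / sigma)'   # 3.5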
@ -26,8 +26,8 @@ echo "error: python_doc_push_script.sh: version (arg2) not specified"
fi

# Argument 1: Where to copy the built documentation to
# (pytorch_docs/$install_path)
install_path="${1:-${DOCS_INSTALL_PATH:-${DOCS_VERSION}}}"
# (pytorch.github.io/$install_path)
install_path="${1:-${DOCS_INSTALL_PATH:-docs/${DOCS_VERSION}}}"
if [ -z "$install_path" ]; then
    echo "error: python_doc_push_script.sh: install_path (arg1) not specified"
    exit 1
@ -68,8 +68,8 @@ build_docs () {
}


git clone https://github.com/pytorch/docs pytorch_docs -b "$branch" --depth 1
pushd pytorch_docs
git clone https://github.com/pytorch/pytorch.github.io -b "$branch" --depth 1
pushd pytorch.github.io

export LC_ALL=C
export PATH=/opt/conda/bin:$PATH
@ -105,7 +105,6 @@ if [ "$is_main_doc" = true ]; then
    echo undocumented objects found:
    cat build/coverage/python.txt
    echo "Make sure you've updated relevant .rsts in docs/source!"
    echo "You can reproduce locally by running 'cd docs && make coverage && cat build/coverage/python.txt'"
    exit 1
fi
else
@ -6,27 +6,6 @@

set -ex

# shellcheck source=./common.sh
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

# Do not change workspace permissions for ROCm CI jobs
# as it can leave workspace with bad permissions for cancelled jobs
if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then
    # Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96)
    WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace")
    cleanup_workspace() {
        echo "sudo may print the following warning message that can be ignored. The chown command will still run."
        echo " sudo: setrlimit(RLIMIT_STACK): Operation not permitted"
        echo "For more details refer to https://github.com/sudo-project/sudo/issues/42"
        sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace
    }
    # Disable shellcheck SC2064 as we want to parse the original owner immediately.
    # shellcheck disable=SC2064
    trap_add cleanup_workspace EXIT
    sudo chown -R jenkins /var/lib/jenkins/workspace
    git config --global --add safe.directory /var/lib/jenkins/workspace
fi

echo "Environment variables:"
env

@ -39,10 +18,6 @@ BUILD_DIR="build"
BUILD_RENAMED_DIR="build_renamed"
BUILD_BIN_DIR="$BUILD_DIR"/bin

# Set default values for these variables in case they are not set
SHARD_NUMBER="${SHARD_NUMBER:=1}"
NUM_TEST_SHARDS="${NUM_TEST_SHARDS:=1}"

export VALGRIND=ON
# export TORCH_INDUCTOR_INSTALL_GXX=ON
if [[ "$BUILD_ENVIRONMENT" == *clang9* ]]; then
@ -105,11 +80,9 @@ if [[ "$BUILD_ENVIRONMENT" != *bazel* ]]; then
    CUSTOM_TEST_ARTIFACT_BUILD_DIR=$(realpath "${CUSTOM_TEST_ARTIFACT_BUILD_DIR:-"build/custom_test_artifacts"}")
fi

# Reduce set of tests to include when running run_test.py
if [[ -n $TESTS_TO_INCLUDE ]]; then
    echo "Setting INCLUDE_CLAUSE"
    INCLUDE_CLAUSE="--include $TESTS_TO_INCLUDE"
fi

# shellcheck source=./common.sh
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

echo "Environment variables"
env
@ -146,10 +119,6 @@ if [[ "$BUILD_ENVIRONMENT" == *cuda* || "$BUILD_ENVIRONMENT" == *rocm* ]]; then
    # mainly used so that we're not spending extra cycles testing cpu
    # devices on expensive gpu machines
    export PYTORCH_TESTING_DEVICE_ONLY_FOR="cuda"
elif [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
    export PYTORCH_TESTING_DEVICE_ONLY_FOR="xpu"
    # setting PYTHON_TEST_EXTRA_OPTION
    export PYTHON_TEST_EXTRA_OPTION="--xpu"
fi

if [[ "$TEST_CONFIG" == *crossref* ]]; then
@ -157,22 +126,11 @@ if [[ "$TEST_CONFIG" == *crossref* ]]; then
fi

if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
    # regression in ROCm 6.0 on MI50 CI runners due to hipblaslt; remove in 6.1
    export VALGRIND=OFF
    # Print GPU info
    rocminfo
    rocminfo | grep -E 'Name:.*\sgfx|Marketing'
fi

if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
    # Source the Intel oneAPI environment script to enable xpu runtime related libraries
    # refer to https://www.intel.com/content/www/us/en/docs/oneapi/programming-guide/2024-0/use-the-setvars-and-oneapi-vars-scripts-with-linux.html
    # shellcheck disable=SC1091
    source /opt/intel/oneapi/compiler/latest/env/vars.sh
    # Check XPU status before testing
    xpu-smi discovery
fi

if [[ "$BUILD_ENVIRONMENT" != *-bazel-* ]] ; then
    # JIT C++ extensions require ninja.
    pip_install --user "ninja==1.10.2"
@ -181,13 +139,6 @@ if [[ "$BUILD_ENVIRONMENT" != *-bazel-* ]] ; then
    export PATH="$HOME/.local/bin:$PATH"
fi

if [[ "$BUILD_ENVIRONMENT" == *aarch64* ]]; then
    # TODO: revisit this once the CI is stabilized on aarch64 linux
    export VALGRIND=OFF
fi

install_tlparse

# DANGER WILL ROBINSON. The LD_PRELOAD here could cause you problems
# if you're not careful. Check this if you made some changes and the
# ASAN test is not working
@ -197,7 +148,7 @@ if [[ "$BUILD_ENVIRONMENT" == *asan* ]]; then
    export PYTORCH_TEST_WITH_ASAN=1
    export PYTORCH_TEST_WITH_UBSAN=1
    # TODO: Figure out how to avoid hard-coding these paths
    export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-15/bin/llvm-symbolizer
    export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-12/bin/llvm-symbolizer
    export TORCH_USE_RTLD_GLOBAL=1
    # NB: We load libtorch.so with RTLD_GLOBAL for UBSAN, unlike our
    # default behavior.
@ -231,9 +182,11 @@ if [[ "$BUILD_ENVIRONMENT" == *asan* ]]; then
    # have, and it applies to child processes.

    # TODO: get rid of the hardcoded path
    export LD_PRELOAD=/usr/lib/llvm-15/lib/clang/15.0.7/lib/linux/libclang_rt.asan-x86_64.so
    export LD_PRELOAD=/usr/lib/llvm-12/lib/clang/12.0.1/lib/linux/libclang_rt.asan-x86_64.so
    # Disable valgrind for asan
    export VALGRIND=OFF
    # Increase stack size, because ASAN red zones use more stack
    ulimit -s 81920

    (cd test && python -c "import torch; print(torch.__version__, torch.version.git_version)")
    echo "The next four invocations are expected to crash; if they don't that means ASAN/UBSAN is misconfigured"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Bare --include flag is not supported and quoting for lint ends up with flag not being interpreted correctly
|
||||
# shellcheck disable=SC2086
|
||||
|
||||
# modify LD_LIBRARY_PATH to ensure it has the conda env.
|
||||
# This set of tests has been shown to be buggy without it for the split-build
|
||||
time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests $INCLUDE_CLAUSE --shard "$1" "$NUM_TEST_SHARDS" --verbose $PYTHON_TEST_EXTRA_OPTION
|
||||
time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests --shard "$1" "$NUM_TEST_SHARDS" --verbose
|
||||
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
test_python() {
|
||||
# shellcheck disable=SC2086
|
||||
time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests $INCLUDE_CLAUSE --verbose $PYTHON_TEST_EXTRA_OPTION
|
||||
time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests --verbose
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
@ -298,13 +245,33 @@ test_dynamo_shard() {
|
||||
exit 1
|
||||
fi
|
||||
python tools/dynamo/verify_dynamo.py
|
||||
# PLEASE DO NOT ADD ADDITIONAL EXCLUDES HERE.
|
||||
# Instead, use @skipIfTorchDynamo on your tests.
|
||||
# Temporarily disable test_fx for dynamo pending the investigation on TTS
|
||||
# regression in https://github.com/pytorch/torchdynamo/issues/784
|
||||
time python test/run_test.py --dynamo \
|
||||
--exclude-inductor-tests \
|
||||
--exclude-jit-executor \
|
||||
--exclude-distributed-tests \
|
||||
--exclude-torch-export-tests \
|
||||
--exclude \
|
||||
test_autograd \
|
||||
test_jit \
|
||||
test_proxy_tensor \
|
||||
test_quantization \
|
||||
test_public_bindings \
|
||||
test_dataloader \
|
||||
test_reductions \
|
||||
test_namedtensor \
|
||||
test_namedtuple_return_api \
|
||||
profiler/test_profiler \
|
||||
profiler/test_profiler_tree \
|
||||
test_overrides \
|
||||
test_python_dispatch \
|
||||
test_fx \
|
||||
test_package \
|
||||
test_legacy_vmap \
|
||||
test_custom_ops \
|
||||
test_content_store \
|
||||
export/test_db \
|
||||
functorch/test_dims \
|
||||
functorch/test_aotdispatch \
|
||||
--shard "$1" "$NUM_TEST_SHARDS" \
|
||||
--verbose
|
||||
assert_git_not_dirty
|
||||
@ -313,24 +280,7 @@ test_dynamo_shard() {
|
||||
test_inductor_distributed() {
|
||||
# Smuggle a few multi-gpu tests here so that we don't have to request another large node
|
||||
echo "Testing multi_gpu tests in test_torchinductor"
|
||||
python test/run_test.py -i inductor/test_torchinductor.py -k test_multi_gpu --verbose
|
||||
python test/run_test.py -i inductor/test_aot_inductor.py -k test_non_default_cuda_device --verbose
|
||||
python test/run_test.py -i inductor/test_aot_inductor.py -k test_replicate_on_devices --verbose
|
||||
python test/run_test.py -i distributed/test_c10d_functional_native.py --verbose
|
||||
python test/run_test.py -i distributed/_tensor/test_dtensor_compile.py --verbose
|
||||
python test/run_test.py -i distributed/tensor/parallel/test_fsdp_2d_parallel.py --verbose
|
||||
python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_comm.py --verbose
|
||||
python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_training.py -k test_train_parity_multi_group --verbose
|
||||
python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_training.py -k test_train_parity_with_activation_checkpointing --verbose
|
||||
python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_training.py -k test_train_parity_2d_mlp --verbose
|
||||
python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_training.py -k test_train_parity_hsdp --verbose
|
||||
python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_training.py -k test_train_parity_2d_transformer_checkpoint_resume --verbose
|
||||
python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_training.py -k test_gradient_accumulation --verbose
|
||||
python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_frozen.py --verbose
|
||||
python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_mixed_precision.py -k test_compute_dtype --verbose
|
||||
python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_mixed_precision.py -k test_reduce_dtype --verbose
|
||||
python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_clip_grad_norm_.py -k test_clip_grad_norm_2d --verbose
|
||||
python test/run_test.py -i distributed/fsdp/test_fsdp_tp_integration.py -k test_fsdp_tp_integration --verbose
|
||||
pytest test/inductor/test_torchinductor.py -k test_multi_gpu
|
||||
|
||||
# this runs on both single-gpu and multi-gpu instance. It should be smart about skipping tests that aren't supported
|
||||
# with if required # gpus aren't available
|
||||
@@ -338,66 +288,29 @@ test_inductor_distributed() {
assert_git_not_dirty
}

test_inductor_shard() {
if [[ -z "$NUM_TEST_SHARDS" ]]; then
echo "NUM_TEST_SHARDS must be defined to run a Python test shard"
exit 1
fi

test_inductor() {
python tools/dynamo/verify_dynamo.py
python test/run_test.py --inductor \
--include test_modules test_ops test_ops_gradients test_torch \
--shard "$1" "$NUM_TEST_SHARDS" \
--verbose

python test/run_test.py --inductor --include test_modules test_ops test_ops_gradients test_torch --verbose
# Do not add --inductor for the following inductor unit tests, otherwise we will fail because of nested dynamo state
python test/run_test.py \
--include inductor/test_torchinductor inductor/test_torchinductor_opinfo inductor/test_aot_inductor \
--shard "$1" "$NUM_TEST_SHARDS" \
--verbose
}
python test/run_test.py --include inductor/test_torchinductor inductor/test_torchinductor_opinfo --verbose

test_inductor_aoti() {
# docker build uses bdist_wheel which does not work with test_aot_inductor
# TODO: need a faster way to build
if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then
BUILD_AOT_INDUCTOR_TEST=1 python setup.py develop
CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference
fi
}

test_inductor_cpp_wrapper_abi_compatible() {
export TORCHINDUCTOR_ABI_COMPATIBLE=1
TEST_REPORTS_DIR=$(pwd)/test/test-reports
mkdir -p "$TEST_REPORTS_DIR"

echo "Testing Inductor cpp wrapper mode with TORCHINDUCTOR_ABI_COMPATIBLE=1"
# cpu stack allocation causes segfault and needs more investigation
PYTORCH_TESTING_DEVICE_ONLY_FOR="" python test/run_test.py --include inductor/test_cpu_cpp_wrapper
python test/run_test.py --include inductor/test_cuda_cpp_wrapper

TORCHINDUCTOR_CPP_WRAPPER=1 python benchmarks/dynamo/timm_models.py --device cuda --accuracy --amp \
--training --inductor --disable-cudagraphs --only vit_base_patch16_224 \
--output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_training.csv"
python benchmarks/dynamo/check_accuracy.py \
--actual "$TEST_REPORTS_DIR/inductor_cpp_wrapper_training.csv" \
--expected "benchmarks/dynamo/ci_expected_accuracy/inductor_timm_training.csv"
BUILD_AOT_INDUCTOR_TEST=1 python setup.py develop
CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_aot_inductor
}

# "Global" flags for inductor benchmarking controlled by TEST_CONFIG
# For example 'dynamic_aot_eager_torchbench' TEST_CONFIG means we run
# the benchmark script with '--dynamic-shapes --backend aot_eager --device cuda'
# The matrix of test options is specified in .github/workflows/inductor.yml,
# .github/workflows/inductor-periodic.yml, and
# .github/workflows/inductor-perf-test-nightly.yml
# The matrix of test options is specified in .github/workflows/periodic.yml
# and .github/workflows/inductor.yml
DYNAMO_BENCHMARK_FLAGS=()

if [[ "${TEST_CONFIG}" == *dynamo_eager* ]]; then
DYNAMO_BENCHMARK_FLAGS+=(--backend eager)
elif [[ "${TEST_CONFIG}" == *aot_eager* ]]; then
DYNAMO_BENCHMARK_FLAGS+=(--backend aot_eager)
elif [[ "${TEST_CONFIG}" == *aot_inductor* ]]; then
DYNAMO_BENCHMARK_FLAGS+=(--export-aot-inductor)
elif [[ "${TEST_CONFIG}" == *inductor* && "${TEST_CONFIG}" != *perf* ]]; then
DYNAMO_BENCHMARK_FLAGS+=(--inductor)
fi
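# Worked example of the mapping described above (hypothetical TEST_CONFIG value):
# with TEST_CONFIG=dynamic_aot_eager_torchbench, this chain plus the *dynamic*
# check below yields
#   DYNAMO_BENCHMARK_FLAGS=(--backend aot_eager --dynamic-shapes --dynamic-batch-only)
# and the benchmark entrypoint ends up invoked roughly as
#   python benchmarks/dynamo/torchbench.py --backend aot_eager --dynamic-shapes --dynamic-batch-only --device cuda ...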
@@ -406,7 +319,7 @@ if [[ "${TEST_CONFIG}" == *dynamic* ]]; then
DYNAMO_BENCHMARK_FLAGS+=(--dynamic-shapes --dynamic-batch-only)
fi

if [[ "${TEST_CONFIG}" == *cpu_inductor* || "${TEST_CONFIG}" == *cpu_aot_inductor* ]]; then
if [[ "${TEST_CONFIG}" == *cpu_accuracy* ]]; then
DYNAMO_BENCHMARK_FLAGS+=(--device cpu)
else
DYNAMO_BENCHMARK_FLAGS+=(--device cuda)
@@ -461,8 +374,8 @@ test_perf_for_dashboard() {
--output "$TEST_REPORTS_DIR/${backend}_dynamic_${suite}_${dtype}_${mode}_cuda_${target}.csv"
fi
if [[ "$DASHBOARD_TAG" == *cppwrapper-true* ]] && [[ "$mode" == "inference" ]]; then
TORCHINDUCTOR_CPP_WRAPPER=1 python "benchmarks/dynamo/$suite.py" \
"${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" --disable-cudagraphs "$@" \
python "benchmarks/dynamo/$suite.py" \
"${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" --disable-cudagraphs --cpp-wrapper "$@" \
--output "$TEST_REPORTS_DIR/${backend}_cpp_wrapper_${suite}_${dtype}_${mode}_cuda_${target}.csv"
fi
if [[ "$DASHBOARD_TAG" == *freezing_cudagraphs-true* ]] && [[ "$mode" == "inference" ]]; then
@@ -470,13 +383,8 @@ test_perf_for_dashboard() {
"${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" "$@" --freezing \
--output "$TEST_REPORTS_DIR/${backend}_with_cudagraphs_freezing_${suite}_${dtype}_${mode}_cuda_${target}.csv"
fi
if [[ "$DASHBOARD_TAG" == *freeze_autotune_cudagraphs-true* ]] && [[ "$mode" == "inference" ]]; then
TORCHINDUCTOR_MAX_AUTOTUNE=1 python "benchmarks/dynamo/$suite.py" \
"${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" "$@" --freezing \
--output "$TEST_REPORTS_DIR/${backend}_with_cudagraphs_freezing_autotune_${suite}_${dtype}_${mode}_cuda_${target}.csv"
fi
if [[ "$DASHBOARD_TAG" == *aotinductor-true* ]] && [[ "$mode" == "inference" ]]; then
TORCHINDUCTOR_ABI_COMPATIBLE=1 python "benchmarks/dynamo/$suite.py" \
python "benchmarks/dynamo/$suite.py" \
"${target_flag[@]}" --"$mode" --"$dtype" --export-aot-inductor --disable-cudagraphs "$@" \
--output "$TEST_REPORTS_DIR/${backend}_aot_inductor_${suite}_${dtype}_${mode}_cuda_${target}.csv"
fi
@@ -485,17 +393,6 @@ test_perf_for_dashboard() {
"${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" "$@" \
--output "$TEST_REPORTS_DIR/${backend}_max_autotune_${suite}_${dtype}_${mode}_cuda_${target}.csv"
fi
if [[ "$DASHBOARD_TAG" == *cudagraphs_low_precision-true* ]] && [[ "$mode" == "inference" ]]; then
# TODO: This has a new dtype called quant and the benchmarks script needs to be updated to support this.
# The tentative command is as follows. It doesn't work now, but it's ok because we only need mock data
# to fill the dashboard.
python "benchmarks/dynamo/$suite.py" \
"${target_flag[@]}" --"$mode" --quant --backend "$backend" "$@" \
--output "$TEST_REPORTS_DIR/${backend}_cudagraphs_low_precision_${suite}_quant_${mode}_cuda_${target}.csv" || true
# Copy cudagraph results as mock data, easiest choice?
cp "$TEST_REPORTS_DIR/${backend}_with_cudagraphs_${suite}_${dtype}_${mode}_cuda_${target}.csv" \
"$TEST_REPORTS_DIR/${backend}_cudagraphs_low_precision_${suite}_quant_${mode}_cuda_${target}.csv"
fi
done
done
}
@@ -531,36 +428,27 @@ test_single_dynamo_benchmark() {
test_perf_for_dashboard "$suite" \
"${DYNAMO_BENCHMARK_FLAGS[@]}" "$@" "${partition_flags[@]}"
else
if [[ "${TEST_CONFIG}" == *aot_inductor* && "${TEST_CONFIG}" != *cpu_aot_inductor* ]]; then
# Test AOTInductor with the ABI-compatible mode on CI
# This can be removed once the ABI-compatible mode becomes default.
# For the CPU device, we prefer the non-ABI-compatible mode on CI when testing AOTInductor.
export TORCHINDUCTOR_ABI_COMPATIBLE=1
fi
python "benchmarks/dynamo/$suite.py" \
--ci --accuracy --timing --explain \
"${DYNAMO_BENCHMARK_FLAGS[@]}" \
"$@" "${partition_flags[@]}" \
--output "$TEST_REPORTS_DIR/${name}_${suite}.csv"
python benchmarks/dynamo/check_accuracy.py \
--actual "$TEST_REPORTS_DIR/${name}_$suite.csv" \
--expected "benchmarks/dynamo/ci_expected_accuracy/${TEST_CONFIG}_${name}.csv"
python benchmarks/dynamo/check_graph_breaks.py \
--actual "$TEST_REPORTS_DIR/${name}_$suite.csv" \
--expected "benchmarks/dynamo/ci_expected_accuracy/${TEST_CONFIG}_${name}.csv"

if [[ "${TEST_CONFIG}" == *inductor* ]] && [[ "${TEST_CONFIG}" != *cpu_accuracy* ]]; then
# other jobs (e.g. periodic, cpu-accuracy) may have different set of expected models.
python benchmarks/dynamo/check_accuracy.py \
--actual "$TEST_REPORTS_DIR/${name}_$suite.csv" \
--expected "benchmarks/dynamo/ci_expected_accuracy/${TEST_CONFIG}_${name}.csv"
python benchmarks/dynamo/check_graph_breaks.py \
--actual "$TEST_REPORTS_DIR/${name}_$suite.csv" \
--expected "benchmarks/dynamo/ci_expected_accuracy/${TEST_CONFIG}_${name}.csv"
else
python benchmarks/dynamo/check_csv.py \
-f "$TEST_REPORTS_DIR/${name}_${suite}.csv"
fi
fi
}

test_inductor_micro_benchmark() {
TEST_REPORTS_DIR=$(pwd)/test/test-reports
python benchmarks/gpt_fast/benchmark.py --output "${TEST_REPORTS_DIR}/gpt_fast_benchmark.csv"
}

test_inductor_halide() {
python test/run_test.py --include inductor/test_halide.py --verbose
assert_git_not_dirty
}

test_dynamo_benchmark() {
# Usage: test_dynamo_benchmark huggingface 0
TEST_REPORTS_DIR=$(pwd)/test/test-reports
@@ -575,18 +463,8 @@ test_dynamo_benchmark() {
elif [[ "${TEST_CONFIG}" == *perf* ]]; then
test_single_dynamo_benchmark "dashboard" "$suite" "$shard_id" "$@"
else
if [[ "${TEST_CONFIG}" == *cpu_inductor* || "${TEST_CONFIG}" == *cpu_aot_inductor* ]]; then
local dt="float32"
if [[ "${TEST_CONFIG}" == *amp* ]]; then
dt="amp"
fi
if [[ "${TEST_CONFIG}" == *freezing* ]]; then
test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --"$dt" --freezing "$@"
else
test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --"$dt" "$@"
fi
elif [[ "${TEST_CONFIG}" == *aot_inductor* ]]; then
test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --bfloat16 "$@"
if [[ "${TEST_CONFIG}" == *cpu_accuracy* ]]; then
test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --float32 "$@"
else
test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --bfloat16 "$@"
test_single_dynamo_benchmark "training" "$suite" "$shard_id" --training --amp "$@"
@@ -598,32 +476,12 @@ test_inductor_torchbench_smoketest_perf() {
TEST_REPORTS_DIR=$(pwd)/test/test-reports
mkdir -p "$TEST_REPORTS_DIR"

# Test some models in the cpp wrapper mode
TORCHINDUCTOR_ABI_COMPATIBLE=1 TORCHINDUCTOR_CPP_WRAPPER=1 python benchmarks/dynamo/torchbench.py --device cuda --accuracy \
--bfloat16 --inference --inductor --only hf_T5 --output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv"
TORCHINDUCTOR_ABI_COMPATIBLE=1 TORCHINDUCTOR_CPP_WRAPPER=1 python benchmarks/dynamo/torchbench.py --device cuda --accuracy \
--bfloat16 --inference --inductor --only llama --output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv"
TORCHINDUCTOR_ABI_COMPATIBLE=1 TORCHINDUCTOR_CPP_WRAPPER=1 python benchmarks/dynamo/torchbench.py --device cuda --accuracy \
--bfloat16 --inference --inductor --only moco --output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv"
python benchmarks/dynamo/check_accuracy.py \
--actual "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv" \
--expected "benchmarks/dynamo/ci_expected_accuracy/inductor_torchbench_inference.csv"

python benchmarks/dynamo/torchbench.py --device cuda --performance --backend inductor --float16 --training \
--batch-size-file "$(realpath benchmarks/dynamo/torchbench_models_list.txt)" --only hf_Bert \
--output "$TEST_REPORTS_DIR/inductor_training_smoketest.csv"
# The threshold value needs to be actively maintained to make this check useful
python benchmarks/dynamo/check_perf_csv.py -f "$TEST_REPORTS_DIR/inductor_training_smoketest.csv" -t 1.4

TORCHINDUCTOR_ABI_COMPATIBLE=1 python benchmarks/dynamo/torchbench.py --device cuda --performance --bfloat16 --inference \
--export-aot-inductor --only nanogpt --output "$TEST_REPORTS_DIR/inductor_inference_smoketest.csv"
# The threshold value needs to be actively maintained to make this check useful
# The perf number of nanogpt seems not very stable, e.g.
# https://github.com/pytorch/pytorch/actions/runs/7158691360/job/19491437314,
# and thus we lower its threshold to reduce flakiness. If this continues to be a problem,
# we switch to use some other model.
# lowering threshold from 4.9 to 4.7 for cu124. Will bump it up after cuda 12.4.0->12.4.1 update
python benchmarks/dynamo/check_perf_csv.py -f "$TEST_REPORTS_DIR/inductor_inference_smoketest.csv" -t 4.7
# the reference speedup value is hardcoded in check_hf_bert_perf_csv.py
# this value needs to be actively maintained to make this check useful
python benchmarks/dynamo/check_hf_bert_perf_csv.py -f "$TEST_REPORTS_DIR/inductor_training_smoketest.csv"

# Check memory compression ratio for a few models
for test in hf_Albert timm_vision_transformer; do
@@ -635,65 +493,6 @@ test_inductor_torchbench_smoketest_perf() {
"$TEST_REPORTS_DIR/inductor_training_smoketest_$test.csv" \
--expected benchmarks/dynamo/expected_ci_perf_inductor_torchbench.csv
done

# Perform some "warm-start" runs for a few huggingface models.
for test in AlbertForQuestionAnswering AllenaiLongformerBase DistilBertForMaskedLM DistillGPT2 GoogleFnet YituTechConvBert; do
python benchmarks/dynamo/huggingface.py --accuracy --training --amp --inductor --device cuda --warm-start-latency \
--only $test --output "$TEST_REPORTS_DIR/inductor_warm_start_smoketest_$test.csv"
python benchmarks/dynamo/check_accuracy.py \
--actual "$TEST_REPORTS_DIR/inductor_warm_start_smoketest_$test.csv" \
--expected "benchmarks/dynamo/ci_expected_accuracy/inductor_huggingface_training.csv"
done
}

test_inductor_torchbench_cpu_smoketest_perf(){
TEST_REPORTS_DIR=$(pwd)/test/test-reports
mkdir -p "$TEST_REPORTS_DIR"

#set jemalloc
JEMALLOC_LIB="/usr/lib/x86_64-linux-gnu/libjemalloc.so.2"
IOMP_LIB="$(dirname "$(which python)")/../lib/libiomp5.so"
export LD_PRELOAD="$JEMALLOC_LIB":"$IOMP_LIB":"$LD_PRELOAD"
export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:-1,muzzy_decay_ms:-1"
export KMP_AFFINITY=granularity=fine,compact,1,0
export KMP_BLOCKTIME=1
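# Note on the line below (added for clarity): "lscpu | grep Core" matches the
# "Core(s) per socket:" row, so awk field 4 is the per-socket core count; the
# taskset pinning further down assumes a single socket.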
CORES=$(lscpu | grep Core | awk '{print $4}')
export OMP_NUM_THREADS=$CORES
end_core=$(( CORES-1 ))

MODELS_SPEEDUP_TARGET=benchmarks/dynamo/expected_ci_speedup_inductor_torchbench_cpu.csv
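# The loop below splits each non-comment row of that CSV on commas into model_cfg:
# [0]=model name, [1]=dtype, [2]=static|dynamic shapes, [3]=wrapper ("cpp" enables
# TORCHINDUCTOR_CPP_WRAPPER), [4]=target speedup. An illustrative row (hypothetical
# numbers): resnet50,float32,static,cpp,1.2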

grep -v '^ *#' < "$MODELS_SPEEDUP_TARGET" | while IFS=',' read -r -a model_cfg
do
local model_name=${model_cfg[0]}
local data_type=${model_cfg[1]}
local speedup_target=${model_cfg[4]}
if [[ ${model_cfg[3]} == "cpp" ]]; then
export TORCHINDUCTOR_CPP_WRAPPER=1
else
unset TORCHINDUCTOR_CPP_WRAPPER
fi
local output_name="$TEST_REPORTS_DIR/inductor_inference_${model_cfg[0]}_${model_cfg[1]}_${model_cfg[2]}_${model_cfg[3]}_cpu_smoketest.csv"

if [[ ${model_cfg[2]} == "dynamic" ]]; then
taskset -c 0-"$end_core" python benchmarks/dynamo/torchbench.py \
--inference --performance --"$data_type" -dcpu -n50 --only "$model_name" --dynamic-shapes \
--dynamic-batch-only --freezing --timeout 9000 --backend=inductor --output "$output_name"
else
taskset -c 0-"$end_core" python benchmarks/dynamo/torchbench.py \
--inference --performance --"$data_type" -dcpu -n50 --only "$model_name" \
--freezing --timeout 9000 --backend=inductor --output "$output_name"
fi
cat "$output_name"
# The threshold value needs to be actively maintained to make this check useful.
python benchmarks/dynamo/check_perf_csv.py -f "$output_name" -t "$speedup_target"
done
}

test_torchbench_gcp_smoketest(){
pushd "${TORCHBENCHPATH}"
python test.py -v
popd
}

test_python_gloo_with_tls() {
@@ -727,6 +526,7 @@ test_aten() {
${SUDO} ln -sf "$TORCH_LIB_DIR"/libmkldnn* "$TEST_BASE_DIR"
${SUDO} ln -sf "$TORCH_LIB_DIR"/libnccl* "$TEST_BASE_DIR"
${SUDO} ln -sf "$TORCH_LIB_DIR"/libtorch* "$TEST_BASE_DIR"
${SUDO} ln -sf "$TORCH_LIB_DIR"/libtbb* "$TEST_BASE_DIR"

ls "$TEST_BASE_DIR"
aten/tools/run_tests.sh "$TEST_BASE_DIR"
@@ -751,6 +551,21 @@ test_without_numpy() {
popd
}

# pytorch extensions require including torch/extension.h, which includes all.h,
# which includes utils.h, which includes Parallel.h.
# So you can call, for instance, parallel_for() from your extension,
# but the compilation will fail because Parallel.h has only declarations
# and the definitions are only conditionally included (see the last lines of Parallel.h).
# I tried to solve it in #39612 and #39881 by including Config.h into Parallel.h.
# But if PyTorch is built with TBB it provides a Config.h
# that has AT_PARALLEL_NATIVE_TBB=1 (see #39612 or #39881), which means that if you include
# torch/extension.h it transitively includes Parallel.h,
# which transitively includes tbb.h, which is not available!
if [[ "${BUILD_ENVIRONMENT}" == *tbb* ]]; then
|
||||
sudo mkdir -p /usr/include/tbb
|
||||
sudo cp -r "$PWD"/third_party/tbb/include/tbb/* /usr/include/tbb
|
||||
fi
|
||||
|
||||
test_libtorch() {
|
||||
local SHARD="$1"
|
||||
|
||||
@ -764,6 +579,7 @@ test_libtorch() {
|
||||
ln -sf "$TORCH_LIB_DIR"/libc10* "$TORCH_BIN_DIR"
|
||||
ln -sf "$TORCH_LIB_DIR"/libshm* "$TORCH_BIN_DIR"
|
||||
ln -sf "$TORCH_LIB_DIR"/libtorch* "$TORCH_BIN_DIR"
|
||||
ln -sf "$TORCH_LIB_DIR"/libtbb* "$TORCH_BIN_DIR"
|
||||
ln -sf "$TORCH_LIB_DIR"/libnvfuser* "$TORCH_BIN_DIR"
|
||||
|
||||
export CPP_TESTS_DIR="${TORCH_BIN_DIR}"
|
||||
@ -789,7 +605,7 @@ test_libtorch_jit() {
|
||||
|
||||
# Run jit and lazy tensor cpp tests together to finish them faster
|
||||
if [[ "$BUILD_ENVIRONMENT" == *cuda* && "$TEST_CONFIG" != *nogpu* ]]; then
|
||||
LTC_TS_CUDA=1 python test/run_test.py --cpp --verbose -i cpp/test_jit cpp/test_lazy
|
||||
LTC_TS_CUDA=1 python test/run_test.py --cpp --verbose -i cpp/test_jit cpp/nvfuser_tests cpp/test_lazy
|
||||
else
|
||||
# CUDA tests have already been skipped when CUDA is not available
|
||||
python test/run_test.py --cpp --verbose -i cpp/test_jit cpp/test_lazy -k "not CUDA"
|
||||
@ -825,19 +641,6 @@ test_libtorch_api() {
|
||||
fi
|
||||
}
|
||||
|
||||
test_xpu_bin(){
|
||||
TEST_REPORTS_DIR=$(pwd)/test/test-reports
|
||||
mkdir -p "$TEST_REPORTS_DIR"
|
||||
|
||||
for xpu_case in "${BUILD_BIN_DIR}"/*{xpu,sycl}*; do
|
||||
if [[ "$xpu_case" != *"*"* && "$xpu_case" != *.so && "$xpu_case" != *.a ]]; then
|
||||
case_name=$(basename "$xpu_case")
|
||||
echo "Testing ${case_name} ..."
|
||||
"$xpu_case" --gtest_output=xml:"$TEST_REPORTS_DIR"/"$case_name".xml
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
test_aot_compilation() {
|
||||
echo "Testing Ahead of Time compilation"
|
||||
ln -sf "$TORCH_LIB_DIR"/libc10* "$TORCH_BIN_DIR"
|
||||
@ -863,8 +666,7 @@ test_vulkan() {
|
||||
|
||||
test_distributed() {
|
||||
echo "Testing distributed python tests"
|
||||
# shellcheck disable=SC2086
|
||||
time python test/run_test.py --distributed-tests --shard "$SHARD_NUMBER" "$NUM_TEST_SHARDS" $INCLUDE_CLAUSE --verbose
|
||||
time python test/run_test.py --distributed-tests --shard "$SHARD_NUMBER" "$NUM_TEST_SHARDS" --verbose
|
||||
assert_git_not_dirty
|
||||
|
||||
if [[ ("$BUILD_ENVIRONMENT" == *cuda* || "$BUILD_ENVIRONMENT" == *rocm*) && "$SHARD_NUMBER" == 1 ]]; then
|
||||
@ -900,6 +702,7 @@ test_rpc() {
|
||||
# test reporting process to function as expected.
|
||||
ln -sf "$TORCH_LIB_DIR"/libtorch* "$TORCH_BIN_DIR"
|
||||
ln -sf "$TORCH_LIB_DIR"/libc10* "$TORCH_BIN_DIR"
|
||||
ln -sf "$TORCH_LIB_DIR"/libtbb* "$TORCH_BIN_DIR"
|
||||
|
||||
CPP_TESTS_DIR="${TORCH_BIN_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_cpp_rpc
|
||||
}
|
||||
@ -1077,8 +880,7 @@ test_bazel() {
|
||||
|
||||
tools/bazel test --config=cpu-only --test_timeout=480 --test_output=all --test_tag_filters=-gpu-required --test_filter=-*CUDA :all_tests
|
||||
else
|
||||
# Increase the test timeout to 480 like CPU tests because modules_test frequently timeout
|
||||
tools/bazel test --test_timeout=480 --test_output=errors \
|
||||
tools/bazel test --test_output=errors \
|
||||
//:any_test \
|
||||
//:autograd_test \
|
||||
//:dataloader_test \
|
||||
@ -1173,75 +975,23 @@ test_docs_test() {
|
||||
}
|
||||
|
||||
test_executorch() {
|
||||
echo "Install torchvision and torchaudio"
|
||||
install_torchvision
|
||||
install_torchaudio
|
||||
|
||||
pushd /executorch
|
||||
|
||||
export PYTHON_EXECUTABLE=python
|
||||
export EXECUTORCH_BUILD_PYBIND=ON
|
||||
export CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"
|
||||
|
||||
# NB: We need to rebuild ExecuTorch runner here because it depends on PyTorch
|
||||
# from the PR
|
||||
# shellcheck disable=SC1091
|
||||
source .ci/scripts/setup-linux.sh cmake
|
||||
|
||||
echo "Run ExecuTorch unit tests"
|
||||
pytest -v -n auto
|
||||
# shellcheck disable=SC1091
|
||||
LLVM_PROFDATA=llvm-profdata-12 LLVM_COV=llvm-cov-12 bash test/run_oss_cpp_tests.sh
|
||||
|
||||
echo "Run ExecuTorch regression tests for some models"
|
||||
# TODO(huydhn): Add more coverage here using ExecuTorch's gather models script
|
||||
# shellcheck disable=SC1091
|
||||
source .ci/scripts/test.sh mv3 cmake xnnpack-quantization-delegation ''
|
||||
|
||||
popd
|
||||
|
||||
# Test torchgen generated code for Executorch.
|
||||
echo "Testing ExecuTorch op registration"
|
||||
echo "Testing Executorch op registration"
|
||||
"$BUILD_BIN_DIR"/test_edge_op_registration
|
||||
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
test_linux_aarch64(){
|
||||
python test/run_test.py --include test_modules test_mkldnn test_mkldnn_fusion test_openmp test_torch test_dynamic_shapes \
|
||||
test_transformers test_multiprocessing test_numpy_interop --verbose
|
||||
|
||||
# Dynamo tests
|
||||
python test/run_test.py --include dynamo/test_compile dynamo/test_backends dynamo/test_comptime dynamo/test_config \
|
||||
dynamo/test_functions dynamo/test_fx_passes_pre_grad dynamo/test_interop dynamo/test_model_output dynamo/test_modules \
|
||||
dynamo/test_optimizers dynamo/test_recompile_ux dynamo/test_recompiles --verbose
|
||||
|
||||
# Inductor tests
|
||||
python test/run_test.py --include inductor/test_torchinductor inductor/test_benchmark_fusion inductor/test_codecache \
|
||||
inductor/test_config inductor/test_control_flow inductor/test_coordinate_descent_tuner inductor/test_fx_fusion \
|
||||
inductor/test_group_batch_fusion inductor/test_inductor_freezing inductor/test_inductor_utils \
|
||||
inductor/test_inplacing_pass inductor/test_kernel_benchmark inductor/test_layout_optim \
|
||||
inductor/test_max_autotune inductor/test_memory_planning inductor/test_metrics inductor/test_multi_kernel inductor/test_pad_mm \
|
||||
inductor/test_pattern_matcher inductor/test_perf inductor/test_profiler inductor/test_select_algorithm inductor/test_smoke \
|
||||
inductor/test_split_cat_fx_passes inductor/test_standalone_compile inductor/test_torchinductor \
|
||||
inductor/test_torchinductor_codegen_dynamic_shapes inductor/test_torchinductor_dynamic_shapes --verbose
|
||||
}
|
||||
|
||||
if ! [[ "${BUILD_ENVIRONMENT}" == *libtorch* || "${BUILD_ENVIRONMENT}" == *-bazel-* ]]; then
|
||||
(cd test && python -c "import torch; print(torch.__config__.show())")
|
||||
(cd test && python -c "import torch; print(torch.__config__.parallel_info())")
|
||||
fi
|
||||
if [[ "$BUILD_ENVIRONMENT" == *aarch64* ]]; then
|
||||
test_linux_aarch64
|
||||
elif [[ "${TEST_CONFIG}" == *backward* ]]; then
|
||||
if [[ "${TEST_CONFIG}" == *backward* ]]; then
|
||||
test_forward_backward_compatibility
|
||||
# Do NOT add tests after bc check tests, see its comment.
|
||||
elif [[ "${TEST_CONFIG}" == *xla* ]]; then
|
||||
install_torchvision
|
||||
build_xla
|
||||
test_xla
|
||||
elif [[ "${TEST_CONFIG}" == *executorch* ]]; then
|
||||
test_executorch
|
||||
elif [[ "$TEST_CONFIG" == 'jit_legacy' ]]; then
|
||||
test_python_legacy_jit
|
||||
elif [[ "${BUILD_ENVIRONMENT}" == *libtorch* ]]; then
|
||||
@ -1253,81 +1003,63 @@ elif [[ "$TEST_CONFIG" == distributed ]]; then
|
||||
if [[ "${SHARD_NUMBER}" == 1 ]]; then
|
||||
test_rpc
|
||||
fi
|
||||
elif [[ "$TEST_CONFIG" == deploy ]]; then
|
||||
checkout_install_torchdeploy
|
||||
test_torch_deploy
|
||||
elif [[ "${TEST_CONFIG}" == *inductor_distributed* ]]; then
|
||||
test_inductor_distributed
|
||||
elif [[ "${TEST_CONFIG}" == *inductor-halide* ]]; then
|
||||
test_inductor_halide
|
||||
elif [[ "${TEST_CONFIG}" == *inductor-micro-benchmark* ]]; then
|
||||
test_inductor_micro_benchmark
|
||||
elif [[ "${TEST_CONFIG}" == *huggingface* ]]; then
|
||||
install_torchvision
|
||||
id=$((SHARD_NUMBER-1))
|
||||
test_dynamo_benchmark huggingface "$id"
|
||||
elif [[ "${TEST_CONFIG}" == *timm* ]]; then
|
||||
install_torchvision
|
||||
install_timm
|
||||
id=$((SHARD_NUMBER-1))
|
||||
test_dynamo_benchmark timm_models "$id"
|
||||
elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then
|
||||
if [[ "${TEST_CONFIG}" == *cpu_inductor* || "${TEST_CONFIG}" == *cpu_aot_inductor* ]]; then
|
||||
if [[ "${TEST_CONFIG}" == *cpu_accuracy* ]]; then
|
||||
install_torchaudio cpu
|
||||
else
|
||||
install_torchaudio cuda
|
||||
fi
|
||||
install_torchtext
|
||||
install_torchvision
|
||||
TORCH_CUDA_ARCH_LIST="8.0;8.6" pip_install git+https://github.com/pytorch/ao.git
|
||||
id=$((SHARD_NUMBER-1))
|
||||
# https://github.com/opencv/opencv-python/issues/885
|
||||
pip_install opencv-python==4.8.0.74
|
||||
if [[ "${TEST_CONFIG}" == *inductor_torchbench_smoketest_perf* ]]; then
|
||||
checkout_install_torchbench hf_Bert hf_Albert nanogpt timm_vision_transformer
|
||||
checkout_install_torchbench hf_Bert hf_Albert timm_vision_transformer
|
||||
PYTHONPATH=$(pwd)/torchbench test_inductor_torchbench_smoketest_perf
|
||||
elif [[ "${TEST_CONFIG}" == *inductor_torchbench_cpu_smoketest_perf* ]]; then
|
||||
checkout_install_torchbench timm_vision_transformer phlippe_densenet basic_gnn_gcn \
|
||||
llama_v2_7b_16h resnet50 timm_efficientnet mobilenet_v3_large timm_resnest \
|
||||
shufflenet_v2_x1_0 hf_GPT2
|
||||
PYTHONPATH=$(pwd)/torchbench test_inductor_torchbench_cpu_smoketest_perf
|
||||
elif [[ "${TEST_CONFIG}" == *torchbench_gcp_smoketest* ]]; then
|
||||
checkout_install_torchbench
|
||||
TORCHBENCHPATH=$(pwd)/torchbench test_torchbench_gcp_smoketest
|
||||
else
|
||||
checkout_install_torchbench
|
||||
# Do this after checkout_install_torchbench to ensure we clobber any
|
||||
# nightlies that torchbench may pull in
|
||||
if [[ "${TEST_CONFIG}" != *cpu_inductor* && "${TEST_CONFIG}" != *cpu_aot_inductor* ]]; then
|
||||
if [[ "${TEST_CONFIG}" != *cpu_accuracy* ]]; then
|
||||
install_torchrec_and_fbgemm
|
||||
fi
|
||||
PYTHONPATH=$(pwd)/torchbench test_dynamo_benchmark torchbench "$id"
|
||||
fi
|
||||
elif [[ "${TEST_CONFIG}" == *inductor_cpp_wrapper_abi_compatible* ]]; then
|
||||
elif [[ "${TEST_CONFIG}" == *inductor* && "${SHARD_NUMBER}" == 1 ]]; then
|
||||
install_torchvision
|
||||
test_inductor_cpp_wrapper_abi_compatible
|
||||
elif [[ "${TEST_CONFIG}" == *inductor* ]]; then
|
||||
test_inductor
|
||||
test_inductor_distributed
|
||||
elif [[ "${TEST_CONFIG}" == *dynamo* && "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
|
||||
test_without_numpy
|
||||
install_torchvision
|
||||
test_inductor_shard "${SHARD_NUMBER}"
|
||||
if [[ "${SHARD_NUMBER}" == 1 ]]; then
|
||||
test_inductor_aoti
|
||||
test_inductor_distributed
|
||||
fi
|
||||
elif [[ "${TEST_CONFIG}" == *dynamo* ]]; then
|
||||
install_torchvision
|
||||
test_dynamo_shard "${SHARD_NUMBER}"
|
||||
if [[ "${SHARD_NUMBER}" == 1 ]]; then
|
||||
test_aten
|
||||
fi
|
||||
elif [[ "${BUILD_ENVIRONMENT}" == *rocm* && -n "$TESTS_TO_INCLUDE" ]]; then
|
||||
install_torchvision
|
||||
test_python_shard "$SHARD_NUMBER"
|
||||
install_numpy_pytorch_interop
|
||||
test_dynamo_shard 1
|
||||
test_aten
|
||||
elif [[ "${TEST_CONFIG}" == *dynamo* && "${SHARD_NUMBER}" == 2 && $NUM_TEST_SHARDS -gt 1 ]]; then
|
||||
install_torchvision
|
||||
install_numpy_pytorch_interop
|
||||
test_dynamo_shard 2
|
||||
elif [[ "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
|
||||
test_without_numpy
|
||||
install_torchvision
|
||||
test_python_shard 1
|
||||
test_aten
|
||||
test_libtorch 1
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *xpu* ]]; then
|
||||
test_xpu_bin
|
||||
fi
|
||||
elif [[ "${SHARD_NUMBER}" == 2 && $NUM_TEST_SHARDS -gt 1 ]]; then
|
||||
install_torchvision
|
||||
test_python_shard 2
|
||||
@ -1348,11 +1080,6 @@ elif [[ "${BUILD_ENVIRONMENT}" == *-mobile-lightweight-dispatch* ]]; then
|
||||
test_libtorch
|
||||
elif [[ "${TEST_CONFIG}" = docs_test ]]; then
|
||||
test_docs_test
|
||||
elif [[ "${BUILD_ENVIRONMENT}" == *xpu* ]]; then
|
||||
install_torchvision
|
||||
test_python
|
||||
test_aten
|
||||
test_xpu_bin
|
||||
else
|
||||
install_torchvision
|
||||
install_monkeytype
|
||||
@ -1365,4 +1092,5 @@ else
|
||||
test_custom_backend
|
||||
test_torch_function_benchmark
|
||||
test_benchmarks
|
||||
test_executorch
|
||||
fi
|
||||
|
||||
@ -16,23 +16,24 @@ set PATH=C:\Program Files\CMake\bin;C:\Program Files\7-Zip;C:\ProgramData\chocol
|
||||
|
||||
set INSTALLER_DIR=%SCRIPT_HELPERS_DIR%\installation-helpers
|
||||
|
||||
|
||||
call %INSTALLER_DIR%\install_mkl.bat
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
|
||||
call %INSTALLER_DIR%\install_magma.bat
|
||||
if errorlevel 1 goto fail
|
||||
if not errorlevel 0 goto fail
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
|
||||
call %INSTALLER_DIR%\install_sccache.bat
|
||||
if errorlevel 1 goto fail
|
||||
if not errorlevel 0 goto fail
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
|
||||
:: Miniconda has been installed as part of the Windows AMI with all the dependencies.
|
||||
:: We just need to activate it here
|
||||
call %INSTALLER_DIR%\activate_miniconda3.bat
|
||||
if errorlevel 1 goto fail
|
||||
if not errorlevel 0 goto fail
|
||||
|
||||
call pip install mkl-include==2021.4.0 mkl-devel==2021.4.0
|
||||
if errorlevel 1 goto fail
|
||||
if not errorlevel 0 goto fail
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
|
||||
:: Override VS env here
|
||||
pushd .
|
||||
@ -41,8 +42,8 @@ if "%VC_VERSION%" == "" (
|
||||
) else (
|
||||
call "C:\Program Files (x86)\Microsoft Visual Studio\%VC_YEAR%\%VC_PRODUCT%\VC\Auxiliary\Build\vcvarsall.bat" x64 -vcvars_ver=%VC_VERSION%
|
||||
)
|
||||
if errorlevel 1 goto fail
|
||||
if not errorlevel 0 goto fail
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
@echo on
|
||||
popd
|
||||
|
||||
@ -52,12 +53,12 @@ set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION%
|
||||
|
||||
if x%CUDA_VERSION:.=%==x%CUDA_VERSION% (
|
||||
echo CUDA version %CUDA_VERSION% format isn't correct, which doesn't contain '.'
|
||||
goto fail
|
||||
exit /b 1
|
||||
)
|
||||
rem version transformer, for example 10.1 to 10_1.
|
||||
if x%CUDA_VERSION:.=%==x%CUDA_VERSION% (
|
||||
echo CUDA version %CUDA_VERSION% format isn't correct, which doesn't contain '.'
|
||||
goto fail
|
||||
exit /b 1
|
||||
)
|
||||
set VERSION_SUFFIX=%CUDA_VERSION:.=_%
|
||||
set CUDA_PATH_V%VERSION_SUFFIX%=%CUDA_PATH%
|
||||
@ -88,8 +89,8 @@ set SCCACHE_IGNORE_SERVER_IO_ERROR=1
|
||||
sccache --stop-server
|
||||
sccache --start-server
|
||||
sccache --zero-stats
|
||||
set CMAKE_C_COMPILER_LAUNCHER=sccache
|
||||
set CMAKE_CXX_COMPILER_LAUNCHER=sccache
|
||||
set CC=sccache-cl
|
||||
set CXX=sccache-cl
|
||||
|
||||
set CMAKE_GENERATOR=Ninja
|
||||
|
||||
@ -101,8 +102,8 @@ if "%USE_CUDA%"=="1" (
|
||||
:: CMake requires a single command as CUDA_NVCC_EXECUTABLE, so we push the wrappers
|
||||
:: randomtemp.exe and sccache.exe into a batch file which CMake invokes.
|
||||
curl -kL https://github.com/peterjc123/randomtemp-rust/releases/download/v0.4/randomtemp.exe --output %TMP_DIR_WIN%\bin\randomtemp.exe
|
||||
if errorlevel 1 goto fail
|
||||
if not errorlevel 0 goto fail
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
echo @"%TMP_DIR_WIN%\bin\randomtemp.exe" "%TMP_DIR_WIN%\bin\sccache.exe" "%CUDA_PATH%\bin\nvcc.exe" %%* > "%TMP_DIR%/bin/nvcc.bat"
|
||||
cat %TMP_DIR%/bin/nvcc.bat
|
||||
set CUDA_NVCC_EXECUTABLE=%TMP_DIR%/bin/nvcc.bat
|
||||
@ -114,8 +115,8 @@ if "%USE_CUDA%"=="1" (
|
||||
set
|
||||
|
||||
python setup.py bdist_wheel
|
||||
if errorlevel 1 goto fail
|
||||
if not errorlevel 0 goto fail
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
sccache --show-stats
|
||||
python -c "import os, glob; os.system('python -mpip install --no-index --no-deps ' + glob.glob('dist/*.whl')[0])"
|
||||
(
|
||||
@ -126,7 +127,8 @@ python -c "import os, glob; os.system('python -mpip install --no-index --no-deps
|
||||
|
||||
:: export test times so that potential sharded tests that'll branch off this build will use consistent data
|
||||
python tools/stats/export_test_times.py
|
||||
robocopy /E ".additional_ci_files" "%PYTORCH_FINAL_PACKAGE_DIR%\.additional_ci_files"
|
||||
copy /Y ".pytorch-test-times.json" "%PYTORCH_FINAL_PACKAGE_DIR%"
|
||||
copy /Y ".pytorch-test-file-ratings.json" "%PYTORCH_FINAL_PACKAGE_DIR%"
|
||||
|
||||
:: Also save build/.ninja_log as an artifact
|
||||
copy /Y "build\.ninja_log" "%PYTORCH_FINAL_PACKAGE_DIR%\"
|
||||
@ -135,8 +137,3 @@ python -c "import os, glob; os.system('python -mpip install --no-index --no-deps
|
||||
|
||||
sccache --show-stats --stats-format json | jq .stats > sccache-stats-%BUILD_ENVIRONMENT%-%OUR_GITHUB_JOB_ID%.json
|
||||
sccache --stop-server
|
||||
|
||||
exit /b 0
|
||||
|
||||
:fail
|
||||
exit /b 1
|
||||
|
||||
@ -0,0 +1,14 @@
|
||||
if "%REBUILD%"=="" (
|
||||
if "%BUILD_ENVIRONMENT%"=="" (
|
||||
curl --retry 3 --retry-all-errors -k https://s3.amazonaws.com/ossci-windows/mkl_2020.2.254.7z --output %TMP_DIR_WIN%\mkl.7z
|
||||
) else (
|
||||
aws s3 cp s3://ossci-windows/mkl_2020.2.254.7z %TMP_DIR_WIN%\mkl.7z --quiet
|
||||
)
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
7z x -aoa %TMP_DIR_WIN%\mkl.7z -o%TMP_DIR_WIN%\mkl
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
)
|
||||
set CMAKE_INCLUDE_PATH=%TMP_DIR_WIN%\mkl\include
|
||||
set LIB=%TMP_DIR_WIN%\mkl\lib;%LIB%
|
||||
@ -1,13 +1,18 @@
|
||||
mkdir %TMP_DIR_WIN%\bin
|
||||
|
||||
if "%REBUILD%"=="" (
|
||||
IF EXIST %TMP_DIR_WIN%\bin\sccache.exe (
|
||||
:check_sccache
|
||||
%TMP_DIR_WIN%\bin\sccache.exe --show-stats || (
|
||||
taskkill /im sccache.exe /f /t || ver > nul
|
||||
del %TMP_DIR_WIN%\bin\sccache.exe || ver > nul
|
||||
del %TMP_DIR_WIN%\bin\sccache-cl.exe || ver > nul
|
||||
if "%BUILD_ENVIRONMENT%"=="" (
|
||||
curl --retry 3 --retry-all-errors -k https://s3.amazonaws.com/ossci-windows/sccache.exe --output %TMP_DIR_WIN%\bin\sccache.exe
|
||||
curl --retry 3 --retry-all-errors -k https://s3.amazonaws.com/ossci-windows/sccache-cl.exe --output %TMP_DIR_WIN%\bin\sccache-cl.exe
|
||||
) else (
|
||||
aws s3 cp s3://ossci-windows/sccache.exe %TMP_DIR_WIN%\bin\sccache.exe
|
||||
aws s3 cp s3://ossci-windows/sccache-cl.exe %TMP_DIR_WIN%\bin\sccache-cl.exe
|
||||
)
|
||||
goto :check_sccache
|
||||
)
|
||||
if "%BUILD_ENVIRONMENT%"=="" (
|
||||
curl --retry 3 --retry-all-errors -k https://s3.amazonaws.com/ossci-windows/sccache-v0.7.4.exe --output %TMP_DIR_WIN%\bin\sccache.exe
|
||||
) else (
|
||||
aws s3 cp s3://ossci-windows/sccache-v0.7.4.exe %TMP_DIR_WIN%\bin\sccache.exe
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
@ -2,7 +2,6 @@
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
COMMON_TESTS = [
|
||||
(
|
||||
@ -54,4 +53,4 @@ if __name__ == "__main__":
|
||||
print("Reruning with traceback enabled")
|
||||
print("Command:", command_string)
|
||||
subprocess.run(command_args, check=False)
|
||||
sys.exit(e.returncode)
|
||||
exit(e.returncode)
|
||||
|
||||
@ -26,6 +26,11 @@ popd
|
||||
python test_custom_ops.py -v
|
||||
if ERRORLEVEL 1 exit /b 1
|
||||
|
||||
:: TODO: fix and re-enable this test
|
||||
:: See https://github.com/pytorch/pytorch/issues/25155
|
||||
:: python test_custom_classes.py -v
|
||||
:: if ERRORLEVEL 1 exit /b 1
|
||||
|
||||
python model.py --export-script-module="build/model.pt"
|
||||
if ERRORLEVEL 1 exit /b 1
|
||||
|
||||
|
||||
@ -1,3 +1,7 @@
|
||||
:: Skip LibTorch tests when building a GPU binary and testing on a CPU machine
|
||||
:: because LibTorch tests are not well designed for this use case.
|
||||
if "%USE_CUDA%" == "0" IF NOT "%CUDA_VERSION%" == "cpu" exit /b 0
|
||||
|
||||
call %SCRIPT_HELPERS_DIR%\setup_pytorch_env.bat
|
||||
if errorlevel 1 exit /b 1
|
||||
|
||||
@ -17,7 +21,7 @@ if not errorlevel 0 exit /b 1
|
||||
cd %TMP_DIR_WIN%\build\torch\test
|
||||
for /r "." %%a in (*.exe) do (
|
||||
call :libtorch_check "%%~na" "%%~fa"
|
||||
if errorlevel 1 goto fail
|
||||
if errorlevel 1 exit /b 1
|
||||
)
|
||||
|
||||
goto :eof
|
||||
@ -30,6 +34,18 @@ set CPP_TESTS_DIR=%TMP_DIR_WIN%\build\torch\test
|
||||
:: Skip verify_api_visibility as it is a compile-level test
if "%~1" == "verify_api_visibility" goto :eof
|
||||
|
||||
:: See https://github.com/pytorch/pytorch/issues/25161
|
||||
if "%~1" == "c10_metaprogramming_test" goto :eof
|
||||
if "%~1" == "module_test" goto :eof
|
||||
:: See https://github.com/pytorch/pytorch/issues/25312
|
||||
if "%~1" == "converter_nomigraph_test" goto :eof
|
||||
:: See https://github.com/pytorch/pytorch/issues/35636
|
||||
if "%~1" == "generate_proposals_op_gpu_test" goto :eof
|
||||
:: See https://github.com/pytorch/pytorch/issues/35648
|
||||
if "%~1" == "reshape_op_gpu_test" goto :eof
|
||||
:: See https://github.com/pytorch/pytorch/issues/35651
|
||||
if "%~1" == "utility_ops_gpu_test" goto :eof
|
||||
|
||||
echo Running "%~2"
|
||||
if "%~1" == "c10_intrusive_ptr_benchmark" (
|
||||
:: NB: This is not a gtest executable file, thus couldn't be handled by pytest-cpp
|
||||
@ -40,15 +56,11 @@ if "%~1" == "c10_intrusive_ptr_benchmark" (
|
||||
python test\run_test.py --cpp --verbose -i "cpp/%~1"
|
||||
if errorlevel 1 (
|
||||
echo %1 failed with exit code %errorlevel%
|
||||
goto fail
|
||||
exit /b 1
|
||||
)
|
||||
if not errorlevel 0 (
|
||||
echo %1 failed with exit code %errorlevel%
|
||||
goto fail
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
:eof
|
||||
exit /b 0
|
||||
|
||||
:fail
|
||||
exit /b 1
|
||||
goto :eof
|
||||
|
||||
@ -1,7 +1,8 @@
|
||||
call %SCRIPT_HELPERS_DIR%\setup_pytorch_env.bat
|
||||
|
||||
echo Copying over test times file
|
||||
robocopy /E "%PYTORCH_FINAL_PACKAGE_DIR_WIN%\.additional_ci_files" "%PROJECT_DIR_WIN%\.additional_ci_files"
|
||||
copy /Y "%PYTORCH_FINAL_PACKAGE_DIR_WIN%\.pytorch-test-times.json" "%PROJECT_DIR_WIN%"
|
||||
copy /Y "%PYTORCH_FINAL_PACKAGE_DIR_WIN%\.pytorch-test-file-ratings.json" "%PROJECT_DIR_WIN%"
|
||||
|
||||
pushd test
|
||||
|
||||
|
||||
@ -22,7 +22,8 @@ if "%SHARD_NUMBER%" == "1" (
|
||||
)
|
||||
|
||||
echo Copying over test times file
|
||||
robocopy /E "%PYTORCH_FINAL_PACKAGE_DIR_WIN%\.additional_ci_files" "%PROJECT_DIR_WIN%\.additional_ci_files"
|
||||
copy /Y "%PYTORCH_FINAL_PACKAGE_DIR_WIN%\.pytorch-test-times.json" "%PROJECT_DIR_WIN%"
|
||||
copy /Y "%PYTORCH_FINAL_PACKAGE_DIR_WIN%\.pytorch-test-file-ratings.json" "%PROJECT_DIR_WIN%"
|
||||
|
||||
echo Run nn tests
|
||||
python run_test.py --exclude-jit-executor --exclude-distributed-tests --shard "%SHARD_NUMBER%" "%NUM_TEST_SHARDS%" --verbose
|
||||
|
||||
@ -38,7 +38,7 @@ fi
|
||||
python -m pip install pytest-rerunfailures==10.3 pytest-cpp==2.3.0 tensorboard==2.13.0
|
||||
|
||||
# Install Z3 optional dependency for Windows builds.
|
||||
python -m pip install z3-solver==4.12.2.0
|
||||
python -m pip install z3-solver
|
||||
|
||||
run_tests() {
|
||||
# Run nvidia-smi if available
|
||||
|
||||
@ -1,4 +1,468 @@
|
||||
Warning
|
||||
=======
|
||||
|
||||
PyTorch migration from CircleCI to github actions has been completed. All continuous integration & deployment workflows are defined in `.github/workflows` folder
|
||||
Contents may be out of date. Our CircleCI workflows are gradually being migrated to Github actions.
|
||||
|
||||
Structure of CI
|
||||
===============
|
||||
|
||||
setup job:
|
||||
1. Does a git checkout
|
||||
2. Persists CircleCI scripts (everything in `.circleci`) into a workspace. Why?
|
||||
We don't always do a Git checkout on all subjobs, but we usually
|
||||
still want to be able to call scripts one way or another in a subjob.
|
||||
Persisting files this way lets us have access to them without doing a
|
||||
checkout. This workspace is conventionally mounted on `~/workspace`
|
||||
(this is distinguished from `~/project`, which is the conventional
|
||||
working directory that CircleCI will default to starting your jobs
|
||||
in.)
|
||||
3. Write out the commit message to `.circleci/COMMIT_MSG`. This is so
|
||||
we can determine in subjobs if we should actually run the jobs or
|
||||
not, even if there isn't a Git checkout.
|
||||
|
||||
|
||||
CircleCI configuration generator
|
||||
================================
|
||||
|
||||
One may no longer make changes to the `.circleci/config.yml` file directly.
|
||||
Instead, one must edit these Python scripts or files in the `verbatim-sources/` directory.
|
||||
|
||||
|
||||
Usage
|
||||
----------
|
||||
|
||||
1. Make changes to these scripts.
|
||||
2. Run the `regenerate.sh` script in this directory and commit the script changes and the resulting change to `config.yml`.
|
||||
|
||||
You'll see a build failure on GitHub if the scripts don't agree with the checked-in version.
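A typical regeneration flow might look like this (illustrative commands; the script is the `regenerate.sh` mentioned in step 2):

```
cd .circleci
./regenerate.sh                       # rewrites config.yml from the Python sources
git add config.yml verbatim-sources/
git commit -m "Regenerate CircleCI config"
```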


Motivation
----------

These scripts establish a single, authoritative source of documentation for the CircleCI configuration matrix.
The documentation, in the form of diagrams, is automatically generated and cannot drift out of sync with the YAML content.

Furthermore, consistency is enforced within the YAML config itself, by using a single source of data to generate
multiple parts of the file.

* Facilitates one-off culling/enabling of CI configs for testing PRs on special targets

Also see https://github.com/pytorch/pytorch/issues/17038


Future direction
----------------

### Declaring sparse config subsets
See comment [here](https://github.com/pytorch/pytorch/pull/17323#pullrequestreview-206945747):

In contrast with a full recursive tree traversal of configuration dimensions,
> in the future I think we actually want to decrease our matrix somewhat and have only a few mostly-orthogonal builds that taste as many different features as possible on PRs, plus a more complete suite on every PR and maybe an almost full suite nightly/weekly (we don't have this yet). Specifying PR jobs in the future might be easier to read with an explicit list when we come to this.
----------------
----------------

# How do the binaries / nightlies / releases work?

### What is a binary?

A binary or package (used interchangeably) is a pre-built collection of c++ libraries, header files, python bits, and other files. We build these and distribute them so that users do not need to install from source.

A **binary configuration** is a collection of

* release or nightly
    * releases are stable, nightlies are beta and built every night
* python version
    * linux: 3.7m (mu is wide unicode or something like that. It usually doesn't matter but you should know that it exists)
    * macos: 3.7, 3.8
    * windows: 3.7, 3.8
* cpu version
    * cpu, cuda 9.0, cuda 10.0
    * The supported cuda versions occasionally change
* operating system
    * Linux - these are all built on CentOS. There haven't been any problems in the past building on CentOS and using on Ubuntu
    * MacOS
    * Windows - these are built on Azure pipelines
* devtoolset version (gcc compiler version)
    * This only matters on Linux because only Linux uses gcc. The tl;dr is that gcc made a backwards-incompatible change from gcc 4.8 to gcc 5, because it had to change how it implemented std::vector and std::string

### Where are the binaries?

The binaries are built in CircleCI. There are nightly binaries built every night at 9pm PST (midnight EST) and release binaries corresponding to Pytorch releases, usually every few months.

We have 3 types of binary packages (example install commands follow this list):

* pip packages - nightlies are stored on s3 (pip install -f \<a s3 url\>). releases are stored in a pip repo (pip install torch) (ask Soumith about this)
* conda packages - nightlies and releases are both stored in a conda repo. Nightly packages have a '_nightly' suffix
* libtorch packages - these are zips of all the c++ libraries, header files, and sometimes dependencies. These are c++ only
    * shared with dependencies (the only supported option for Windows)
    * static with dependencies
    * shared without dependencies
    * static without dependencies
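For the pip and conda flavors, the user-facing install commands look roughly like the following (a sketch; the s3-backed index URL below is only illustrative, the real one is whatever the upload jobs publish):

```
# release pip package, from the pip repo
pip install torch
# nightly pip package, from the s3-backed index (illustrative URL)
pip install --pre torch -f https://download.pytorch.org/whl/nightly/torch_nightly.html
# nightly conda package, named with the '_nightly' suffix convention above
conda install -c pytorch pytorch-nightly
```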

All binaries are built in CircleCI workflows except Windows. There are checked-in workflows (committed into the .circleci/config.yml) to build the nightlies every night. Releases are built by manually pushing a PR that builds the suite of release binaries (overwrite the config.yml to build the release)

# CircleCI structure of the binaries

Some quick vocab:

* A **workflow** is a CircleCI concept; it is a DAG of '**jobs**'. ctrl-f 'workflows' on https://github.com/pytorch/pytorch/blob/main/.circleci/config.yml to see the workflows.
* **jobs** are a sequence of '**steps**'
* **steps** are usually just a bash script or a builtin CircleCI command. *All steps run in new environments, environment variables declared in one script DO NOT persist to following steps*
* CircleCI has a **workspace**, which is essentially a cache between steps of the *same job* in which you can store artifacts between steps.

## How are the workflows structured?

The nightly binaries have 3 workflows. We have one job (actually 3 jobs: build, test, and upload) per binary configuration

1. binary_builds
    1. every day midnight EST
    2. linux: https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/linux-binary-build-defaults.yml
    3. macos: https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/macos-binary-build-defaults.yml
    4. For each binary configuration, e.g. linux_conda_3.7_cpu there is a
        1. binary_linux_conda_3.7_cpu_build
            1. Builds the build. On linux jobs this uses the 'docker executor'.
            2. Persists the package to the workspace
        2. binary_linux_conda_3.7_cpu_test
            1. Loads the package to the workspace
            2. Spins up a docker image (on Linux), mapping the package and code repos into the docker
            3. Runs some smoke tests in the docker
            4. (Actually, for macos this is a step rather than a separate job)
        3. binary_linux_conda_3.7_cpu_upload
            1. Logs in to aws/conda
            2. Uploads the package
2. update_s3_htmls
    1. every day 5am EST
    2. https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/binary_update_htmls.yml
    3. See below for what these are for and why they're needed
    4. Three jobs that each examine the current contents of aws and the conda repo and update some html files in s3
3. binarysmoketests
    1. every day
    2. https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/nightly-build-smoke-tests-defaults.yml
    3. For each binary configuration, e.g. linux_conda_3.7_cpu there is a
        1. smoke_linux_conda_3.7_cpu
            1. Downloads the package from the cloud, e.g. using the official pip or conda instructions
            2. Runs the smoke tests

## How are the jobs structured?

The jobs are in https://github.com/pytorch/pytorch/tree/main/.circleci/verbatim-sources. Jobs are made of multiple steps. There are some shared steps used by all the binaries/smokes. Steps of these jobs are all delegated to scripts in https://github.com/pytorch/pytorch/tree/main/.circleci/scripts .

* Linux jobs: https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/linux-binary-build-defaults.yml
    * binary_linux_build.sh
    * binary_linux_test.sh
    * binary_linux_upload.sh
* MacOS jobs: https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/macos-binary-build-defaults.yml
    * binary_macos_build.sh
    * binary_macos_test.sh
    * binary_macos_upload.sh
* Update html jobs: https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/binary_update_htmls.yml
    * These delegate from the pytorch/builder repo
    * https://github.com/pytorch/builder/blob/main/cron/update_s3_htmls.sh
    * https://github.com/pytorch/builder/blob/main/cron/upload_binary_sizes.sh
* Smoke jobs (both linux and macos): https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/nightly-build-smoke-tests-defaults.yml
    * These delegate from the pytorch/builder repo
    * https://github.com/pytorch/builder/blob/main/run_tests.sh
    * https://github.com/pytorch/builder/blob/main/smoke_test.sh
    * https://github.com/pytorch/builder/blob/main/check_binary.sh
* Common shared code (shared across linux and macos): https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/nightly-binary-build-defaults.yml
    * binary_checkout.sh - checks out pytorch/builder repo. Right now this also checks out pytorch/pytorch, but it shouldn't. pytorch/pytorch should just be shared through the workspace. This can handle being run before binary_populate_env.sh
    * binary_populate_env.sh - parses BUILD_ENVIRONMENT into the separate env variables that make up a binary configuration (a rough sketch follows this list). Also sets lots of default values, the date, the version strings, the location of folders in s3, all sorts of things. This generally has to be run before other steps.
    * binary_install_miniconda.sh - Installs miniconda, cross platform. Also hacks this for the update_binary_sizes job that doesn't have the right env variables
    * binary_run_in_docker.sh - Takes a bash script file (the actual test code) from a hardcoded location, spins up a docker image, and runs the script inside the docker image
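A rough sketch of the kind of work binary_populate_env.sh does (the variable names and the BUILD_ENVIRONMENT format here are illustrative, not the script's real logic):

```
# hypothetical format: BUILD_ENVIRONMENT="conda 3.7 cpu"
read -r PACKAGE_TYPE DESIRED_PYTHON DESIRED_CUDA <<< "$BUILD_ENVIRONMENT"
export PACKAGE_TYPE DESIRED_PYTHON DESIRED_CUDA
# plus lots of defaults, e.g. the date used in nightly version strings
export PYTORCH_BUILD_DATE="$(date -u +%Y%m%d)"
```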
|
||||
|
||||
### **Why do the steps all refer to scripts?**
|
||||
|
||||
CircleCI creates a final yaml file by inlining every <<* segment, so if we were to keep all the code in the config.yml itself then the config size would go over 4 MB and cause infra problems.
|
||||
|
||||
### **What is binary_run_in_docker for?**
|
||||
|
||||
So, CircleCI has several executor types: macos, machine, and docker are the ones we use. The 'machine' executor gives you two cores on some linux vm. The 'docker' executor gives you considerably more cores (nproc was 32 instead of 2 back when I tried in February). Since the dockers are faster, we try to run everything that we can in dockers. Thus
|
||||
|
||||
* linux build jobs use the docker executor. Running them on the docker executor was at least 2x faster than running them on the machine executor
|
||||
* linux test jobs use the machine executor in order for them to properly interface with GPUs since docker executors cannot execute with attached GPUs
|
||||
* linux upload jobs use the machine executor. The upload jobs are so short that it doesn't really matter what they use
|
||||
* linux smoke test jobs use the machine executor for the same reason as the linux test jobs
|
||||
|
||||
binary_run_in_docker.sh is a way to share the docker start-up code between the binary test jobs and the binary smoke test jobs
|
||||
|
||||
### **Why does binary_checkout also checkout pytorch? Why shouldn't it?**
|
||||
|
||||
We want all the nightly binary jobs to run on the exact same git commit, so we wrote our own checkout logic to ensure that the same commit was always picked. Later circleci changed that to use a single pytorch checkout and persist it through the workspace (they did this because our config file was too big, so they wanted to take a lot of the setup code into scripts, but the scripts needed the code repo to exist to be called, so they added a prereq step called 'setup' to checkout the code and persist the needed scripts to the workspace). The changes to the binary jobs were not properly tested, so they all broke from missing pytorch code no longer existing. We hotfixed the problem by adding the pytorch checkout back to binary_checkout, so now there's two checkouts of pytorch on the binary jobs. This problem still needs to be fixed, but it takes careful tracing of which code is being called where.
|
||||
|
||||
# Code structure of the binaries (circleci agnostic)
|
||||
|
||||
## Overview
|
||||
|
||||
The code that runs the binaries lives in two places, in the normal [github.com/pytorch/pytorch](http://github.com/pytorch/pytorch), but also in [github.com/pytorch/builder](http://github.com/pytorch/builder), which is a repo that defines how all the binaries are built. The relevant code is
|
||||
|
||||
|
||||
```
|
||||
# All code needed to set-up environments for build code to run in,
|
||||
# but only code that is specific to the current CI system
|
||||
pytorch/pytorch
|
||||
- .circleci/ # Folder that holds all circleci related stuff
|
||||
- config.yml # GENERATED file that actually controls all circleci behavior
|
||||
- verbatim-sources # Used to generate job/workflow sections in ^
|
||||
- scripts/ # Code needed to prepare circleci environments for binary build scripts
|
||||
- setup.py # Builds pytorch. This is wrapped in pytorch/builder
|
||||
- cmake files # used in normal building of pytorch
|
||||
# All code needed to prepare a binary build, given an environment
|
||||
# with all the right variables/packages/paths.
|
||||
pytorch/builder
|
||||
# Given an installed binary and a proper python env, runs some checks
|
||||
# to make sure the binary was built the proper way. Checks things like
|
||||
# the library dependencies, symbols present, etc.
|
||||
- check_binary.sh
|
||||
# Given an installed binary, runs python tests to make sure everything
|
||||
# is in order. These should be de-duped. Right now they both run smoke
|
||||
# tests, but are called from different places. Usually just call some
|
||||
# import statements, but also has overlap with check_binary.sh above
|
||||
- run_tests.sh
|
||||
- smoke_test.sh
|
||||
# Folders that govern how packages are built. See paragraphs below
|
||||
- conda/
|
||||
- build_pytorch.sh # Entrypoint. Delegates to proper conda build folder
|
||||
- switch_cuda_version.sh # Switches activate CUDA installation in Docker
|
||||
- pytorch-nightly/ # Build-folder
|
||||
- manywheel/
|
||||
- build_cpu.sh # Entrypoint for cpu builds
|
||||
- build.sh # Entrypoint for CUDA builds
|
||||
- build_common.sh # Actual build script that ^^ call into
|
||||
- wheel/
|
||||
- build_wheel.sh # Entrypoint for wheel builds
|
||||
- windows/
|
||||
- build_pytorch.bat # Entrypoint for wheel builds on Windows
|
||||
```

Every type of package has an entrypoint build script that handles all the important logic.

## Conda

Linux, MacOS and Windows use the same code flow for the conda builds.

Conda packages are built with conda-build; see https://conda.io/projects/conda-build/en/latest/resources/commands/conda-build.html

Basically, you pass `conda build` a build folder (pytorch-nightly/ above) that contains a build script and a meta.yaml. The meta.yaml specifies what python environment to build the package in and what dependencies the resulting package should have, and the build script gets called in that env to build the thing.
tl;dr on conda-build is (a rough invocation sketch follows the list)

1. Creates a brand new conda environment, based off of deps in the meta.yaml
    1. Note that environment variables do not get passed into this build env unless they are specified in the meta.yaml
    2. If the build fails this environment will stick around. You can activate it for much easier debugging. The "General Python" section below explains what exactly a python "environment" is.
2. Calls build.sh in the environment
3. Copies the finished package to a new conda env, also specified by the meta.yaml
4. Runs some simple import tests (if specified in the meta.yaml)
5. Saves the finished package as a tarball
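
For concreteness, here is a rough, illustrative sketch of the kind of invocation this boils down to; the exact flags, paths, and python version here are assumptions, not the precise CI values:

```python
# Illustrative only: point conda-build at the build folder (pytorch-nightly/)
# whose meta.yaml defines the build env and whose build.sh does the work.
import subprocess

subprocess.check_call([
    "conda", "build",
    "--python", "3.7",                  # python version for the build env
    "builder/conda/pytorch-nightly",    # folder holding meta.yaml + build.sh
])
```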

The build.sh we use is essentially a wrapper around `python setup.py build`, but it also manually copies some of our dependent libraries into the resulting tarball and messes with some rpaths.

The entrypoint file `builder/conda/build_pytorch.sh` is complicated because

* It works for Linux, MacOS and Windows
* The mac builds used to create their own environments, since they all used to be on the same machine. There's now a lot of extra logic to handle conda envs. This extra machinery could be removed.
* It used to handle testing too, which adds yet more logic that messes with python environments. This extra machinery could be removed.

## Manywheels (linux pip and libtorch packages)

Manywheels are pip packages for linux distros. Note that these manywheels are not actually manylinux compliant.

`builder/manywheel/build_cpu.sh` and `builder/manywheel/build.sh` (for CUDA builds) just set different env vars and then call into `builder/manywheel/build_common.sh`

The entrypoint file `builder/manywheel/build_common.sh` is really, really complicated because

* This used to handle building for several different python versions at the same time. The loops have been removed, but there are still unnecessary folders and file movements here and there.
  * The script is never used this way anymore. This extra machinery could be removed.
* This used to handle testing the pip packages too. This is why there's testing code at the end that messes with python installations and stuff
  * The script is never used this way anymore. This extra machinery could be removed.
* This also builds libtorch packages
  * This should really be separate. libtorch packages are c++ only and have no python. They should not share infra with all the python specific stuff in this file.
* There is a lot of messing with rpaths. This is necessary, but could be made much, much simpler if the above issues were fixed.

## Wheels (MacOS pip and libtorch packages)

The entrypoint file `builder/wheel/build_wheel.sh` is complicated because

* The mac builds used to all run on one machine (we didn't have autoscaling mac machines till circleci), so this script siloed itself by setting up and tearing down its own build env and building in its own build directory.
  * The script is never used this way anymore. This extra machinery could be removed.
* This also builds libtorch packages
  * Ditto the comment above. This should definitely be separated out.

Note that the MacOS Python wheels are still built in conda environments. Some of the dependencies present during build also come from conda.

## Windows Wheels (Windows pip and libtorch packages)

The entrypoint file `builder/windows/build_pytorch.bat` is complicated because

* This used to handle building for several different python versions at the same time. This is why there are loops everywhere.
  * The script is never used this way anymore. This extra machinery could be removed.
* This used to handle testing the pip packages too. This is why there's testing code at the end that messes with python installations and stuff
  * The script is never used this way anymore. This extra machinery could be removed.
* This also builds libtorch packages
  * This should really be separate. libtorch packages are c++ only and have no python. They should not share infra with all the python specific stuff in this file.

Note that the Windows Python wheels are still built in conda environments. Some of the dependencies present during build also come from conda.

## General notes

### Note on run_tests.sh, smoke_test.sh, and check_binary.sh

* These should all be consolidated
* These must run on all OS types: MacOS, Linux, and Windows
* These all run smoke tests at the moment. They inspect the packages some, maybe run a few import statements. They DO NOT run the python tests nor the cpp tests. The idea is that python tests on main and PR merges will catch all breakages. All these tests have to do is make sure the special binary machinery didn't mess anything up.
* There are separate run_tests.sh and smoke_test.sh because one used to be called by the smoke jobs and one used to be called by the binary test jobs (see circleci structure section above). This is still true actually, but these could be united into a single script that runs these checks, given an installed pytorch package.

### Note on libtorch

Libtorch packages are built in the wheel build scripts: manywheel/build_*.sh for linux and build_wheel.sh for mac. There are several things wrong with this

* It's confusing. Most of those scripts deal with python specifics.
* The extra conditionals everywhere severely complicate the wheel build scripts
* The process for building libtorch is different from the official instructions (a plain call to cmake, or a call to a script)

### Note on docker images / Dockerfiles

All linux builds occur in docker images. The docker images are

* pytorch/conda-cuda
  * Has ALL CUDA versions installed. The script pytorch/builder/conda/switch_cuda_version.sh sets /usr/local/cuda to a symlink to e.g. /usr/local/cuda-10.0 to enable different CUDA builds
  * Also used for cpu builds
* pytorch/manylinux-cuda90
* pytorch/manylinux-cuda100
  * Also used for cpu builds

The Dockerfiles are available in pytorch/builder, but there is no circleci job or script to build these docker images, and they cannot be run locally (unless you have the correct local packages/paths). Only Soumith can build them right now.

### General Python

* This is still a good explanation of python installations: https://caffe2.ai/docs/faq.html#why-do-i-get-import-errors-in-python-when-i-try-to-use-caffe2

# How to manually rebuild the binaries

tl;dr make a PR that looks like https://github.com/pytorch/pytorch/pull/21159

Sometimes we want to push a change to main and then rebuild all of today's binaries after that change. As of May 30, 2019 there isn't a way to manually run a workflow in the UI. You can manually re-run a workflow, but it will use the exact same git commits as the first run and will not include any changes. So we have to make a PR and then force circleci to run the binary workflow instead of the normal tests. The above PR is an example of how to do this; essentially you copy-paste the binarybuilds workflow steps into the default workflow steps. If you need to point the builder repo to a different commit then you'd need to change https://github.com/pytorch/pytorch/blob/main/.circleci/scripts/binary_checkout.sh#L42-L45 to checkout what you want.

## How to test changes to the binaries via .circleci

Writing PRs that test the binaries is annoying, since the default circleci jobs that run on PRs are not the jobs that you want to run. Likely, changes to the binaries will touch something under .circleci/ and require that .circleci/config.yml be regenerated (.circleci/config.yml controls all .circleci behavior, and is generated using `.circleci/regenerate.sh` in python 3.7). But you also need to manually hardcode the binary jobs that you want to test into the .circleci/config.yml workflow, so you should actually make at least two commits: one for your changes and one to temporarily hardcode jobs. See https://github.com/pytorch/pytorch/pull/22928 as an example of how to do this.

```sh
# Make your changes
touch .circleci/verbatim-sources/nightly-binary-build-defaults.yml

# Regenerate the yaml, has to be in python 3.7
.circleci/regenerate.sh

# Make a commit
git add .circleci *
git commit -m "My real changes"
git push origin my_branch

# Now hardcode the jobs that you want in the .circleci/config.yml workflows section
# Also eliminate ensure-consistency and should_run_job checks
# e.g. https://github.com/pytorch/pytorch/commit/2b3344bfed8772fe86e5210cc4ee915dee42b32d

# Make a commit you won't keep
git add .circleci
git commit -m "[DO NOT LAND] testing binaries for above changes"
git push origin my_branch

# Now you need to make some changes to the first commit.
git rebase -i HEAD~2 # mark the first commit as 'edit'

# Make the changes
touch .circleci/verbatim-sources/nightly-binary-build-defaults.yml
.circleci/regenerate.sh

# Amend the commit and continue the rebase
git add .circleci
git commit --amend
git rebase --continue

# Update the PR, need to force since the commits are different now
git push origin my_branch --force
```

The advantage of this flow is that you can make new changes to the base commit and regenerate the .circleci without having to re-write which binary jobs you want to test on. The downside is that all updates will be force pushes.

## How to build a binary locally

### Linux

You can easily build Linux binaries locally using docker.

```sh
# Run the docker
# Use the correct docker image, pytorch/conda-cuda used here as an example
#
# -v path/to/foo:path/to/bar makes path/to/foo on your local machine (the
# machine that you're running the command on) accessible to the docker
# container at path/to/bar. So if you then run `touch path/to/bar/baz`
# in the docker container then you will see path/to/foo/baz on your local
# machine. You could also clone the pytorch and builder repos in the docker.
#
# If you know how, add ccache as a volume too and speed up everything
docker run \
    -v your/pytorch/repo:/pytorch \
    -v your/builder/repo:/builder \
    -v where/you/want/packages/to/appear:/final_pkgs \
    -it pytorch/conda-cuda /bin/bash

# Export whatever variables are important to you. All variables that you'd
# possibly need are in .circleci/scripts/binary_populate_env.sh
# You should probably always export at least these 3 variables
export PACKAGE_TYPE=conda
export DESIRED_PYTHON=3.7
export DESIRED_CUDA=cpu

# Call the entrypoint
# `|& tee foo.log` just copies all stdout and stderr output to foo.log
# The builds generate lots of output so you probably need this when
# building locally.
/builder/conda/build_pytorch.sh |& tee build_output.log
```

**Building CUDA binaries on docker**

You can build CUDA binaries on CPU-only machines, but you can only run CUDA binaries on CUDA machines. This means that you can build a CUDA binary in a docker container on your laptop if you so choose (though it's gonna take a long time).

For Facebook employees, ask about beefy machines that have docker support and use those instead of your laptop; it will be 5x as fast.

### MacOS

There's no easy way to generate reproducible hermetic MacOS environments. If you have a Mac laptop then you can try emulating the .circleci environments as much as possible, but you probably have packages in /usr/local/, possibly installed by brew, that will probably interfere with the build. If you're trying to repro an error on a Mac build in .circleci and you can't seem to repro locally, then my best advice is actually to iterate on .circleci :/

But if you want to try, then I'd recommend

```sh
# Create a new terminal
# Clear your LD_LIBRARY_PATH and trim as much out of your PATH as you
# know how to do

# Install a new miniconda
# First remove any other python or conda installation from your PATH
# Always install miniconda 3, even if building for Python <3
new_conda="$HOME/my_new_conda"
conda_sh="$HOME/install_miniconda.sh"
curl -o "$conda_sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
chmod +x "$conda_sh"
"$conda_sh" -b -p "$new_conda"
rm -f "$conda_sh"
export PATH="$new_conda/bin:$PATH"

# Create a clean python env
# All MacOS builds use conda to manage the python env and dependencies
# that are built with, even the pip packages
conda create -yn binary python=3.7
conda activate binary

# Export whatever variables are important to you. All variables that you'd
# possibly need are in .circleci/scripts/binary_populate_env.sh
# You should probably always export at least these 3 variables
export PACKAGE_TYPE=conda
export DESIRED_PYTHON=3.7
export DESIRED_CUDA=cpu

# Call the entrypoint you want
path/to/builder/wheel/build_wheel.sh
```

N.B. installing a brand new miniconda is important. This has to do with how conda installations work. See the "General Python" section above, but the tl;dr is that

1. You make the 'conda' command accessible by prepending `path/to/conda_root/bin` to your PATH.
2. You make a new env and activate it, which then also gets prepended to your PATH. Now you have `path/to/conda_root/envs/new_env/bin:path/to/conda_root/bin:$PATH`
3. Now say you (or some code that you ran) call a python executable `foo`
    1. If you installed `foo` in `new_env`, then `path/to/conda_root/envs/new_env/bin/foo` will get called, as expected.
    2. But if you forgot to install `foo` in `new_env` and happened to previously install it in your root conda env (called 'base'), then unix/linux will still find `path/to/conda_root/bin/foo`. This is dangerous, since `foo` can be a different version than you want; `foo` can even be for an incompatible python version! (A quick way to check which `foo` wins is sketched after this list.)
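
A minimal sketch of that check (assuming a conda env named `binary` as in the script above):

```python
# Illustrative: confirm which executable your PATH actually resolves to.
import shutil

print(shutil.which("python"))  # expect .../envs/binary/bin/python, not the base env
```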

Newer conda versions and proper python hygiene can prevent this, but just install a new miniconda to be safe.

### Windows

TODO: fill in

.circleci/cimodel/data/binary_build_data.py (new file)
@ -0,0 +1,198 @@
"""
This module models the tree of configuration variants
for "smoketest" builds.

Each subclass of ConfigNode represents a layer of the configuration hierarchy.
These tree nodes encapsulate the logic for whether a branch of the hierarchy
should be "pruned".
"""

from collections import OrderedDict

import cimodel.data.dimensions as dimensions

from cimodel.lib.conf_tree import ConfigNode


LINKING_DIMENSIONS = [
    "shared",
    "static",
]


DEPS_INCLUSION_DIMENSIONS = [
    "with-deps",
    "without-deps",
]


def get_processor_arch_name(gpu_version):
    return (
        "cpu"
        if not gpu_version
        else (
            "cu" + gpu_version.strip("cuda")
            if gpu_version.startswith("cuda")
            else gpu_version
        )
    )
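
# e.g. get_processor_arch_name(None) -> "cpu",
#      get_processor_arch_name("cuda113") -> "cu113",
#      get_processor_arch_name("rocm4.5.2") -> "rocm4.5.2"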


CONFIG_TREE_DATA = OrderedDict()

# GCC config variants:
#
# All the nightlies (except libtorch with new gcc ABI) are built with devtoolset7,
# which can only build with old gcc ABI. It is better than devtoolset3
# because it understands avx512, which is needed for good fbgemm performance.
#
# Libtorch with new gcc ABI is built with gcc 5.4 on Ubuntu 16.04.
LINUX_GCC_CONFIG_VARIANTS = OrderedDict(
    manywheel=["devtoolset7"],
    conda=["devtoolset7"],
    libtorch=[
        "devtoolset7",
        "gcc5.4_cxx11-abi",
    ],
)

WINDOWS_LIBTORCH_CONFIG_VARIANTS = [
    "debug",
    "release",
]


class TopLevelNode(ConfigNode):
    def __init__(self, node_name, config_tree_data, smoke):
        super().__init__(None, node_name)

        self.config_tree_data = config_tree_data
        self.props["smoke"] = smoke

    def get_children(self):
        return [
            OSConfigNode(self, x, c, p) for (x, (c, p)) in self.config_tree_data.items()
        ]


class OSConfigNode(ConfigNode):
    def __init__(self, parent, os_name, gpu_versions, py_tree):
        super().__init__(parent, os_name)

        self.py_tree = py_tree
        self.props["os_name"] = os_name
        self.props["gpu_versions"] = gpu_versions

    def get_children(self):
        return [PackageFormatConfigNode(self, k, v) for k, v in self.py_tree.items()]


class PackageFormatConfigNode(ConfigNode):
    def __init__(self, parent, package_format, python_versions):
        super().__init__(parent, package_format)

        self.props["python_versions"] = python_versions
        self.props["package_format"] = package_format

    def get_children(self):
        if self.find_prop("os_name") == "linux":
            return [
                LinuxGccConfigNode(self, v)
                for v in LINUX_GCC_CONFIG_VARIANTS[self.find_prop("package_format")]
            ]
        elif (
            self.find_prop("os_name") == "windows"
            and self.find_prop("package_format") == "libtorch"
        ):
            return [
                WindowsLibtorchConfigNode(self, v)
                for v in WINDOWS_LIBTORCH_CONFIG_VARIANTS
            ]
        else:
            return [ArchConfigNode(self, v) for v in self.find_prop("gpu_versions")]


class LinuxGccConfigNode(ConfigNode):
    def __init__(self, parent, gcc_config_variant):
        super().__init__(parent, "GCC_CONFIG_VARIANT=" + str(gcc_config_variant))

        self.props["gcc_config_variant"] = gcc_config_variant

    def get_children(self):
        gpu_versions = self.find_prop("gpu_versions")

        # XXX devtoolset7 on CUDA 9.0 is temporarily disabled
        # see https://github.com/pytorch/pytorch/issues/20066
        if self.find_prop("gcc_config_variant") == "devtoolset7":
            gpu_versions = filter(lambda x: x != "cuda_90", gpu_versions)

        # XXX disabling conda rocm build since docker images are not there
        if self.find_prop("package_format") == "conda":
            gpu_versions = filter(
                lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions
            )

        # XXX libtorch rocm build is temporarily disabled
        if self.find_prop("package_format") == "libtorch":
            gpu_versions = filter(
                lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions
            )

        return [ArchConfigNode(self, v) for v in gpu_versions]


class WindowsLibtorchConfigNode(ConfigNode):
    def __init__(self, parent, libtorch_config_variant):
        super().__init__(
            parent, "LIBTORCH_CONFIG_VARIANT=" + str(libtorch_config_variant)
        )

        self.props["libtorch_config_variant"] = libtorch_config_variant

    def get_children(self):
        return [ArchConfigNode(self, v) for v in self.find_prop("gpu_versions")]


class ArchConfigNode(ConfigNode):
    def __init__(self, parent, gpu):
        super().__init__(parent, get_processor_arch_name(gpu))

        self.props["gpu"] = gpu

    def get_children(self):
        return [PyVersionConfigNode(self, v) for v in self.find_prop("python_versions")]


class PyVersionConfigNode(ConfigNode):
    def __init__(self, parent, pyver):
        super().__init__(parent, pyver)

        self.props["pyver"] = pyver

    def get_children(self):
        package_format = self.find_prop("package_format")
        os_name = self.find_prop("os_name")

        has_libtorch_variants = package_format == "libtorch" and os_name == "linux"
        linking_variants = LINKING_DIMENSIONS if has_libtorch_variants else []

        return [LinkingVariantConfigNode(self, v) for v in linking_variants]


class LinkingVariantConfigNode(ConfigNode):
    def __init__(self, parent, linking_variant):
        super().__init__(parent, linking_variant)

    def get_children(self):
        return [
            DependencyInclusionConfigNode(self, v) for v in DEPS_INCLUSION_DIMENSIONS
        ]


class DependencyInclusionConfigNode(ConfigNode):
    def __init__(self, parent, deps_variant):
        super().__init__(parent, deps_variant)

        self.props["libtorch_variant"] = "-".join(
            [self.parent.get_label(), self.get_label()]
        )

.circleci/cimodel/data/binary_build_definitions.py (new file)
@ -0,0 +1,275 @@
from collections import OrderedDict

import cimodel.data.binary_build_data as binary_build_data

import cimodel.data.simple.util.branch_filters as branch_filters
import cimodel.lib.conf_tree as conf_tree
import cimodel.lib.miniutils as miniutils


class Conf:
    def __init__(
        self,
        os,
        gpu_version,
        pydistro,
        parms,
        smoke,
        libtorch_variant,
        gcc_config_variant,
        libtorch_config_variant,
    ):
        self.os = os
        self.gpu_version = gpu_version
        self.pydistro = pydistro
        self.parms = parms
        self.smoke = smoke
        self.libtorch_variant = libtorch_variant
        self.gcc_config_variant = gcc_config_variant
        self.libtorch_config_variant = libtorch_config_variant

    def gen_build_env_parms(self):
        elems = (
            [self.pydistro]
            + self.parms
            + [binary_build_data.get_processor_arch_name(self.gpu_version)]
        )
        if self.gcc_config_variant is not None:
            elems.append(str(self.gcc_config_variant))
        if self.libtorch_config_variant is not None:
            elems.append(str(self.libtorch_config_variant))
        return elems

    def gen_docker_image(self):
        if self.gcc_config_variant == "gcc5.4_cxx11-abi":
            if self.gpu_version is None:
                return miniutils.quote("pytorch/libtorch-cxx11-builder:cpu")
            else:
                return miniutils.quote(
                    f"pytorch/libtorch-cxx11-builder:{self.gpu_version}"
                )
        if self.pydistro == "conda":
            if self.gpu_version is None:
                return miniutils.quote("pytorch/conda-builder:cpu")
            else:
                return miniutils.quote(f"pytorch/conda-builder:{self.gpu_version}")

        docker_word_substitution = {
            "manywheel": "manylinux",
            "libtorch": "manylinux",
        }

        docker_distro_prefix = miniutils.override(
            self.pydistro, docker_word_substitution
        )

        # The cpu nightlies are built on the pytorch/manylinux-cuda102 docker image
        # TODO cuda images should consolidate into tag-base images similar to rocm
        alt_docker_suffix = (
            "cuda102"
            if not self.gpu_version
            else (
                "rocm:" + self.gpu_version.strip("rocm")
                if self.gpu_version.startswith("rocm")
                else self.gpu_version
            )
        )
        docker_distro_suffix = (
            alt_docker_suffix
            if self.pydistro != "conda"
            else ("cuda" if alt_docker_suffix.startswith("cuda") else "rocm")
        )
        return miniutils.quote(
            "pytorch/" + docker_distro_prefix + "-" + docker_distro_suffix
        )
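
    # e.g. a manywheel conf with gpu_version "cuda113" yields
    # "pytorch/manylinux-cuda113"; with gpu_version None (cpu) it yields
    # "pytorch/manylinux-cuda102", per the comment above.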

    def get_name_prefix(self):
        return "smoke" if self.smoke else "binary"

    def gen_build_name(self, build_or_test, nightly):
        parts = [self.get_name_prefix(), self.os] + self.gen_build_env_parms()

        if nightly:
            parts.append("nightly")

        if self.libtorch_variant:
            parts.append(self.libtorch_variant)

        if not self.smoke:
            parts.append(build_or_test)

        joined = "_".join(parts)
        return joined.replace(".", "_")
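
    # e.g. a non-smoke linux manywheel conf for python "3.7m" / cuda113 / devtoolset7
    # gives gen_build_name("build", nightly=True) ==
    # "binary_linux_manywheel_3_7m_cu113_devtoolset7_nightly_build"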

    def gen_workflow_job(self, phase, upload_phase_dependency=None, nightly=False):
        job_def = OrderedDict()
        job_def["name"] = self.gen_build_name(phase, nightly)
        job_def["build_environment"] = miniutils.quote(
            " ".join(self.gen_build_env_parms())
        )
        if self.smoke:
            job_def["requires"] = [
                "update_s3_htmls",
            ]
            job_def["filters"] = branch_filters.gen_filter_dict(
                branches_list=["postnightly"],
            )
        else:
            filter_branch = r"/.*/"
            job_def["filters"] = branch_filters.gen_filter_dict(
                branches_list=[filter_branch],
                tags_list=[branch_filters.RC_PATTERN],
            )
        if self.libtorch_variant:
            job_def["libtorch_variant"] = miniutils.quote(self.libtorch_variant)
        if phase == "test":
            if not self.smoke:
                job_def["requires"] = [self.gen_build_name("build", nightly)]
            if not (self.smoke and self.os == "macos") and self.os != "windows":
                job_def["docker_image"] = self.gen_docker_image()

            # fix this. only works on cuda not rocm
            if self.os != "windows" and self.gpu_version:
                job_def["use_cuda_docker_runtime"] = miniutils.quote("1")
        else:
            if self.os == "linux" and phase != "upload":
                job_def["docker_image"] = self.gen_docker_image()

        if phase == "test":
            if self.gpu_version:
                if self.os == "windows":
                    job_def["executor"] = "windows-with-nvidia-gpu"
                else:
                    job_def["resource_class"] = "gpu.medium"

        os_name = miniutils.override(self.os, {"macos": "mac"})
        job_name = "_".join([self.get_name_prefix(), os_name, phase])
        return {job_name: job_def}

    def gen_upload_job(self, phase, requires_dependency):
        """Generate binary_upload job for configuration

        Output looks similar to:

      - binary_upload:
          name: binary_linux_manywheel_3_7m_cu113_devtoolset7_nightly_upload
          context: org-member
          requires: binary_linux_manywheel_3_7m_cu113_devtoolset7_nightly_test
          filters:
            branches:
              only:
                - nightly
            tags:
              only: /v[0-9]+(\\.[0-9]+)*-rc[0-9]+/
          package_type: manywheel
          upload_subfolder: cu113
        """
        return {
            "binary_upload": OrderedDict(
                {
                    "name": self.gen_build_name(phase, nightly=True),
                    "context": "org-member",
                    "requires": [
                        self.gen_build_name(requires_dependency, nightly=True)
                    ],
                    "filters": branch_filters.gen_filter_dict(
                        branches_list=["nightly"],
                        tags_list=[branch_filters.RC_PATTERN],
                    ),
                    "package_type": self.pydistro,
                    "upload_subfolder": binary_build_data.get_processor_arch_name(
                        self.gpu_version,
                    ),
                }
            )
        }


def get_root(smoke, name):
    return binary_build_data.TopLevelNode(
        name,
        binary_build_data.CONFIG_TREE_DATA,
        smoke,
    )


def gen_build_env_list(smoke):
    root = get_root(smoke, "N/A")
    config_list = conf_tree.dfs(root)

    newlist = []
    for c in config_list:
        conf = Conf(
            c.find_prop("os_name"),
            c.find_prop("gpu"),
            c.find_prop("package_format"),
            [c.find_prop("pyver")],
            c.find_prop("smoke")
            and not (c.find_prop("os_name") == "macos_arm64"),  # don't test arm64
            c.find_prop("libtorch_variant"),
            c.find_prop("gcc_config_variant"),
            c.find_prop("libtorch_config_variant"),
        )
        newlist.append(conf)

    return newlist


def predicate_exclude_macos(config):
    return config.os == "linux" or config.os == "windows"


def get_nightly_uploads():
    configs = gen_build_env_list(False)
    mylist = []
    for conf in configs:
        phase_dependency = "test" if predicate_exclude_macos(conf) else "build"
        mylist.append(conf.gen_upload_job("upload", phase_dependency))

    return mylist


def get_post_upload_jobs():
    return [
        {
            "update_s3_htmls": {
                "name": "update_s3_htmls",
                "context": "org-member",
                "filters": branch_filters.gen_filter_dict(
                    branches_list=["postnightly"],
                ),
            },
        },
    ]


def get_nightly_tests():
    configs = gen_build_env_list(False)
    filtered_configs = filter(predicate_exclude_macos, configs)

    tests = []
    for conf_options in filtered_configs:
        yaml_item = conf_options.gen_workflow_job("test", nightly=True)
        tests.append(yaml_item)

    return tests


def get_jobs(toplevel_key, smoke):
    jobs_list = []
    configs = gen_build_env_list(smoke)
    phase = "build" if toplevel_key == "binarybuilds" else "test"
    for build_config in configs:
        # don't test for macos_arm64 as it's cross compiled
        if phase != "test" or build_config.os != "macos_arm64":
            jobs_list.append(build_config.gen_workflow_job(phase, nightly=True))

    return jobs_list


def get_binary_build_jobs():
    return get_jobs("binarybuilds", False)


def get_binary_smoke_test_jobs():
    return get_jobs("binarysmoketests", True)

.circleci/cimodel/data/dimensions.py (new file)
@ -0,0 +1,19 @@
PHASES = ["build", "test"]

CUDA_VERSIONS = [
    "102",
    "113",
    "116",
    "117",
]

ROCM_VERSIONS = [
    "4.3.1",
    "4.5.2",
]

ROCM_VERSION_LABELS = ["rocm" + v for v in ROCM_VERSIONS]

GPU_VERSIONS = [None] + ["cuda" + v for v in CUDA_VERSIONS] + ROCM_VERSION_LABELS
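# i.e. [None, "cuda102", "cuda113", "cuda116", "cuda117", "rocm4.3.1", "rocm4.5.2"]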

STANDARD_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10"]

.circleci/cimodel/data/pytorch_build_data.py (new file)
@ -0,0 +1,296 @@
from cimodel.lib.conf_tree import ConfigNode


CONFIG_TREE_DATA = []


def get_major_pyver(dotted_version):
    parts = dotted_version.split(".")
    return "py" + parts[0]


class TreeConfigNode(ConfigNode):
    def __init__(self, parent, node_name, subtree):
        super().__init__(parent, self.modify_label(node_name))
        self.subtree = subtree
        self.init2(node_name)

    def modify_label(self, label):
        return label

    def init2(self, node_name):
        pass

    def get_children(self):
        return [self.child_constructor()(self, k, v) for (k, v) in self.subtree]


class TopLevelNode(TreeConfigNode):
    def __init__(self, node_name, subtree):
        super().__init__(None, node_name, subtree)

    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return DistroConfigNode


class DistroConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["distro_name"] = node_name

    def child_constructor(self):
        distro = self.find_prop("distro_name")

        next_nodes = {
            "xenial": XenialCompilerConfigNode,
            "bionic": BionicCompilerConfigNode,
        }
        return next_nodes[distro]


class PyVerConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["pyver"] = node_name
        self.props["abbreviated_pyver"] = get_major_pyver(node_name)
        if node_name == "3.9":
            self.props["abbreviated_pyver"] = "py3.9"

    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return ExperimentalFeatureConfigNode


class ExperimentalFeatureConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["experimental_feature"] = node_name

    def child_constructor(self):
        experimental_feature = self.find_prop("experimental_feature")

        next_nodes = {
            "asan": AsanConfigNode,
            "xla": XlaConfigNode,
            "mps": MPSConfigNode,
            "vulkan": VulkanConfigNode,
            "parallel_tbb": ParallelTBBConfigNode,
            "crossref": CrossRefConfigNode,
            "dynamo": DynamoConfigNode,
            "parallel_native": ParallelNativeConfigNode,
            "onnx": ONNXConfigNode,
            "libtorch": LibTorchConfigNode,
            "important": ImportantConfigNode,
            "build_only": BuildOnlyConfigNode,
            "shard_test": ShardTestConfigNode,
            "cuda_gcc_override": CudaGccOverrideConfigNode,
            "pure_torch": PureTorchConfigNode,
            "slow_gradcheck": SlowGradcheckConfigNode,
        }
        return next_nodes[experimental_feature]


class SlowGradcheckConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["is_slow_gradcheck"] = True

    def child_constructor(self):
        return ExperimentalFeatureConfigNode


class PureTorchConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "PURE_TORCH=" + str(label)

    def init2(self, node_name):
        self.props["is_pure_torch"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class XlaConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "XLA=" + str(label)

    def init2(self, node_name):
        self.props["is_xla"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class MPSConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "MPS=" + str(label)

    def init2(self, node_name):
        self.props["is_mps"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class AsanConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "Asan=" + str(label)

    def init2(self, node_name):
        self.props["is_asan"] = node_name

    def child_constructor(self):
        return ExperimentalFeatureConfigNode


class ONNXConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "Onnx=" + str(label)

    def init2(self, node_name):
        self.props["is_onnx"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class VulkanConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "Vulkan=" + str(label)

    def init2(self, node_name):
        self.props["is_vulkan"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class ParallelTBBConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "PARALLELTBB=" + str(label)

    def init2(self, node_name):
        self.props["parallel_backend"] = "paralleltbb"

    def child_constructor(self):
        return ImportantConfigNode


class CrossRefConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["is_crossref"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class DynamoConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["is_dynamo"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class ParallelNativeConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "PARALLELNATIVE=" + str(label)

    def init2(self, node_name):
        self.props["parallel_backend"] = "parallelnative"

    def child_constructor(self):
        return ImportantConfigNode


class LibTorchConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "BUILD_TEST_LIBTORCH=" + str(label)

    def init2(self, node_name):
        self.props["is_libtorch"] = node_name

    def child_constructor(self):
        return ExperimentalFeatureConfigNode


class CudaGccOverrideConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["cuda_gcc_override"] = node_name

    def child_constructor(self):
        return ExperimentalFeatureConfigNode


class BuildOnlyConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["build_only"] = node_name

    def child_constructor(self):
        return ExperimentalFeatureConfigNode


class ShardTestConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["shard_test"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class ImportantConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "IMPORTANT=" + str(label)

    def init2(self, node_name):
        self.props["is_important"] = node_name

    def get_children(self):
        return []


class XenialCompilerConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return label or "<unspecified>"

    def init2(self, node_name):
        self.props["compiler_name"] = node_name

    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return (
            XenialCompilerVersionConfigNode
            if self.props["compiler_name"]
            else PyVerConfigNode
        )


class BionicCompilerConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return label or "<unspecified>"

    def init2(self, node_name):
        self.props["compiler_name"] = node_name

    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return (
            BionicCompilerVersionConfigNode
            if self.props["compiler_name"]
            else PyVerConfigNode
        )


class XenialCompilerVersionConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["compiler_version"] = node_name

    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return PyVerConfigNode


class BionicCompilerVersionConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["compiler_version"] = node_name

    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return PyVerConfigNode

.circleci/cimodel/data/pytorch_build_definitions.py (new file)
@ -0,0 +1,382 @@
from collections import OrderedDict
from dataclasses import dataclass, field
from typing import List, Optional

import cimodel.data.dimensions as dimensions
import cimodel.lib.conf_tree as conf_tree
import cimodel.lib.miniutils as miniutils
from cimodel.data.pytorch_build_data import CONFIG_TREE_DATA, TopLevelNode
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN
from cimodel.data.simple.util.docker_constants import gen_docker_image


@dataclass
class Conf:
    distro: str
    parms: List[str]
    parms_list_ignored_for_docker_image: Optional[List[str]] = None
    pyver: Optional[str] = None
    cuda_version: Optional[str] = None
    rocm_version: Optional[str] = None
    # TODO expand this to cover all the USE_* that we want to test for
    #  tensorrt, leveldb, lmdb, redis, opencv, mkldnn, ideep, etc.
    # (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453608)
    is_xla: bool = False
    is_vulkan: bool = False
    is_pure_torch: bool = False
    restrict_phases: Optional[List[str]] = None
    gpu_resource: Optional[str] = None
    dependent_tests: List = field(default_factory=list)
    parent_build: Optional["Conf"] = None
    is_libtorch: bool = False
    is_important: bool = False
    parallel_backend: Optional[str] = None
    build_only: bool = False

    @staticmethod
    def is_test_phase(phase):
        return "test" in phase

    # TODO: Eliminate the special casing for docker paths
    # In the short term, we *will* need to support special casing as docker images are merged for caffe2 and pytorch
    def get_parms(self, for_docker):
        leading = []
        # We just don't run non-important jobs on pull requests;
        # previously we also named them in a way to make it obvious
        # if self.is_important and not for_docker:
        #    leading.append("AAA")
        leading.append("pytorch")
        if self.is_xla and not for_docker:
            leading.append("xla")
        if self.is_vulkan and not for_docker:
            leading.append("vulkan")
        if self.is_libtorch and not for_docker:
            leading.append("libtorch")
        if self.is_pure_torch and not for_docker:
            leading.append("pure_torch")
        if self.parallel_backend is not None and not for_docker:
            leading.append(self.parallel_backend)

        cuda_parms = []
        if self.cuda_version:
            cudnn = "cudnn8" if self.cuda_version.startswith("11.") else "cudnn7"
            cuda_parms.extend(["cuda" + self.cuda_version, cudnn])
        if self.rocm_version:
            cuda_parms.extend([f"rocm{self.rocm_version}"])
        result = leading + ["linux", self.distro] + cuda_parms + self.parms
        if not for_docker and self.parms_list_ignored_for_docker_image is not None:
            result = result + self.parms_list_ignored_for_docker_image
        return result

    def gen_docker_image_path(self):
        parms_source = self.parent_build or self
        base_build_env_name = "-".join(parms_source.get_parms(True))
        image_name, _ = gen_docker_image(base_build_env_name)
        return miniutils.quote(image_name)

    def gen_docker_image_requires(self):
        parms_source = self.parent_build or self
        base_build_env_name = "-".join(parms_source.get_parms(True))
        _, requires = gen_docker_image(base_build_env_name)
        return miniutils.quote(requires)

    def get_build_job_name_pieces(self, build_or_test):
        return self.get_parms(False) + [build_or_test]

    def gen_build_name(self, build_or_test):
        return (
            ("_".join(map(str, self.get_build_job_name_pieces(build_or_test))))
            .replace(".", "_")
            .replace("-", "_")
        )
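
    # e.g. Conf("xenial", ["py3.7", "gcc5.4"]).gen_build_name("build") ==
    # "pytorch_linux_xenial_py3_7_gcc5_4_build"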

    def get_dependents(self):
        return self.dependent_tests or []

    def gen_workflow_params(self, phase):
        parameters = OrderedDict()
        build_job_name_pieces = self.get_build_job_name_pieces(phase)

        build_env_name = "-".join(map(str, build_job_name_pieces))
        parameters["build_environment"] = miniutils.quote(build_env_name)
        parameters["docker_image"] = self.gen_docker_image_path()
        if Conf.is_test_phase(phase) and self.gpu_resource:
            parameters["use_cuda_docker_runtime"] = miniutils.quote("1")
        if Conf.is_test_phase(phase):
            resource_class = "large"
            if self.gpu_resource:
                resource_class = "gpu." + self.gpu_resource
            if self.rocm_version is not None:
                resource_class = "pytorch/amd-gpu"
            parameters["resource_class"] = resource_class
        if phase == "build" and self.rocm_version is not None:
            parameters["resource_class"] = "xlarge"
        if hasattr(self, "filters"):
            parameters["filters"] = self.filters
        if self.build_only:
            parameters["build_only"] = miniutils.quote(str(int(True)))
        return parameters

    def gen_workflow_job(self, phase):
        job_def = OrderedDict()
        job_def["name"] = self.gen_build_name(phase)

        if Conf.is_test_phase(phase):
            # TODO When merging the caffe2 and pytorch jobs, it might be convenient for a while to make a
            # caffe2 test job dependent on a pytorch build job. This way we could quickly dedup the repeated
            # build of pytorch in the caffe2 build job, and just run the caffe2 tests off of a completed
            # pytorch build job (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259452641)

            dependency_build = self.parent_build or self
            job_def["requires"] = [dependency_build.gen_build_name("build")]
            job_name = "pytorch_linux_test"
        else:
            job_name = "pytorch_linux_build"
            job_def["requires"] = [self.gen_docker_image_requires()]

        if not self.is_important:
            job_def["filters"] = gen_filter_dict()
        job_def.update(self.gen_workflow_params(phase))

        return {job_name: job_def}


# TODO This is a hack to special case some configs just for the workflow list
class HiddenConf:
    def __init__(self, name, parent_build=None, filters=None):
        self.name = name
        self.parent_build = parent_build
        self.filters = filters

    def gen_workflow_job(self, phase):
        return {
            self.gen_build_name(phase): {
                "requires": [self.parent_build.gen_build_name("build")],
                "filters": self.filters,
            }
        }

    def gen_build_name(self, _):
        return self.name


class DocPushConf:
    def __init__(self, name, parent_build=None, branch="master"):
        self.name = name
        self.parent_build = parent_build
        self.branch = branch

    def gen_workflow_job(self, phase):
        return {
            "pytorch_doc_push": {
                "name": self.name,
                "branch": self.branch,
                "requires": [self.parent_build],
                "context": "org-member",
                "filters": gen_filter_dict(
                    branches_list=["nightly"], tags_list=RC_PATTERN
                ),
            }
        }


def gen_docs_configs(xenial_parent_config):
    configs = []

    configs.append(
        HiddenConf(
            "pytorch_python_doc_build",
            parent_build=xenial_parent_config,
            filters=gen_filter_dict(
                branches_list=["master", "main", "nightly"], tags_list=RC_PATTERN
            ),
        )
    )
    configs.append(
        DocPushConf(
            "pytorch_python_doc_push",
            parent_build="pytorch_python_doc_build",
            branch="site",
        )
    )

    configs.append(
        HiddenConf(
            "pytorch_cpp_doc_build",
            parent_build=xenial_parent_config,
            filters=gen_filter_dict(
                branches_list=["master", "main", "nightly"], tags_list=RC_PATTERN
            ),
        )
    )
    configs.append(
        DocPushConf(
            "pytorch_cpp_doc_push",
            parent_build="pytorch_cpp_doc_build",
            branch="master",
        )
    )
    return configs


def get_root():
    return TopLevelNode("PyTorch Builds", CONFIG_TREE_DATA)


def gen_tree():
    root = get_root()
    configs_list = conf_tree.dfs(root)
    return configs_list


def instantiate_configs(only_slow_gradcheck):
    config_list = []

    root = get_root()
    found_configs = conf_tree.dfs(root)
    for fc in found_configs:
        restrict_phases = None
        distro_name = fc.find_prop("distro_name")
        compiler_name = fc.find_prop("compiler_name")
        compiler_version = fc.find_prop("compiler_version")
        is_xla = fc.find_prop("is_xla") or False
        is_asan = fc.find_prop("is_asan") or False
        is_crossref = fc.find_prop("is_crossref") or False
        is_dynamo = fc.find_prop("is_dynamo") or False
        is_onnx = fc.find_prop("is_onnx") or False
        is_pure_torch = fc.find_prop("is_pure_torch") or False
        is_vulkan = fc.find_prop("is_vulkan") or False
        is_slow_gradcheck = fc.find_prop("is_slow_gradcheck") or False
        parms_list_ignored_for_docker_image = []

        if only_slow_gradcheck ^ is_slow_gradcheck:
            continue

        python_version = None
        if compiler_name == "cuda" or compiler_name == "android":
            python_version = fc.find_prop("pyver")
            parms_list = [fc.find_prop("abbreviated_pyver")]
        else:
            parms_list = ["py" + fc.find_prop("pyver")]

        cuda_version = None
        rocm_version = None
        if compiler_name == "cuda":
            cuda_version = fc.find_prop("compiler_version")

        elif compiler_name == "rocm":
            rocm_version = fc.find_prop("compiler_version")
            restrict_phases = ["build", "test1", "test2", "caffe2_test"]

        elif compiler_name == "android":
            android_ndk_version = fc.find_prop("compiler_version")
            # TODO: do we need clang to compile host binaries like protoc?
            parms_list.append("clang5")
            parms_list.append("android-ndk-" + android_ndk_version)
            android_abi = fc.find_prop("android_abi")
            parms_list_ignored_for_docker_image.append(android_abi)
            restrict_phases = ["build"]

        elif compiler_name:
            gcc_version = compiler_name + (fc.find_prop("compiler_version") or "")
            parms_list.append(gcc_version)

        if is_asan:
            parms_list.append("asan")
            python_version = fc.find_prop("pyver")
            parms_list[0] = fc.find_prop("abbreviated_pyver")

        if is_crossref:
            parms_list_ignored_for_docker_image.append("crossref")

        if is_dynamo:
            parms_list_ignored_for_docker_image.append("dynamo")

        if is_onnx:
            parms_list.append("onnx")
            python_version = fc.find_prop("pyver")
            parms_list[0] = fc.find_prop("abbreviated_pyver")
            restrict_phases = ["build", "ort_test1", "ort_test2"]

        if cuda_version:
            cuda_gcc_version = fc.find_prop("cuda_gcc_override") or "gcc7"
            parms_list.append(cuda_gcc_version)

        is_libtorch = fc.find_prop("is_libtorch") or False
        is_important = fc.find_prop("is_important") or False
        parallel_backend = fc.find_prop("parallel_backend") or None
        build_only = fc.find_prop("build_only") or False
        shard_test = fc.find_prop("shard_test") or False
        # TODO: fix pure_torch python test packaging issue.
        if shard_test:
            restrict_phases = ["build"] if restrict_phases is None else restrict_phases
            restrict_phases.extend(["test1", "test2"])
        if build_only or is_pure_torch:
            restrict_phases = ["build"]

        if is_slow_gradcheck:
            parms_list_ignored_for_docker_image.append("old")
            parms_list_ignored_for_docker_image.append("gradcheck")

        gpu_resource = None
        if cuda_version and cuda_version != "10":
            gpu_resource = "medium"

        c = Conf(
            distro_name,
            parms_list,
            parms_list_ignored_for_docker_image,
            python_version,
            cuda_version,
            rocm_version,
            is_xla,
            is_vulkan,
            is_pure_torch,
            restrict_phases,
            gpu_resource,
            is_libtorch=is_libtorch,
            is_important=is_important,
            parallel_backend=parallel_backend,
            build_only=build_only,
        )

        # run docs builds on "pytorch-linux-xenial-py3.7-gcc5.4". Docs builds
        # should run on a CPU-only build that runs on all PRs.
        # XXX should this be updated to a more modern build?
        if (
            distro_name == "xenial"
            and fc.find_prop("pyver") == "3.7"
            and cuda_version is None
            and parallel_backend is None
            and not is_vulkan
            and not is_pure_torch
            and compiler_name == "gcc"
            and fc.find_prop("compiler_version") == "5.4"
        ):
            c.filters = gen_filter_dict(branches_list=r"/.*/", tags_list=RC_PATTERN)
            c.dependent_tests = gen_docs_configs(c)

        config_list.append(c)

    return config_list


def get_workflow_jobs(only_slow_gradcheck=False):
    config_list = instantiate_configs(only_slow_gradcheck)

    x = []
    for conf_options in config_list:
        phases = conf_options.restrict_phases or dimensions.PHASES

        for phase in phases:
            # TODO why does this not have a test?
            if Conf.is_test_phase(phase) and conf_options.cuda_version == "10":
                continue

            x.append(conf_options.gen_workflow_job(phase))

        # TODO convert to recursion
        for conf in conf_options.get_dependents():
            x.append(conf.gen_workflow_job("test"))

    return x

.circleci/cimodel/data/simple/anaconda_prune_defintions.py (new file)
@ -0,0 +1,28 @@
from collections import OrderedDict

from cimodel.data.simple.util.branch_filters import gen_filter_dict
from cimodel.lib.miniutils import quote


CHANNELS_TO_PRUNE = ["pytorch-nightly", "pytorch-test"]
PACKAGES_TO_PRUNE = "pytorch torchvision torchaudio torchtext ignite torchcsprng"


def gen_workflow_job(channel: str):
    return OrderedDict(
        {
            "anaconda_prune": OrderedDict(
                {
                    "name": f"anaconda-prune-{channel}",
                    "context": quote("org-member"),
                    "packages": quote(PACKAGES_TO_PRUNE),
                    "channel": channel,
                    "filters": gen_filter_dict(branches_list=["postnightly"]),
                }
            )
        }
    )


def get_workflow_jobs():
    return [gen_workflow_job(channel) for channel in CHANNELS_TO_PRUNE]
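
# i.e. this yields two jobs: "anaconda-prune-pytorch-nightly" and
# "anaconda-prune-pytorch-test"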

.circleci/cimodel/data/simple/docker_definitions.py (new file)
@ -0,0 +1,39 @@
from collections import OrderedDict

from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN

from cimodel.lib.miniutils import quote


# NOTE: All hardcoded docker image builds have been migrated to GHA
IMAGE_NAMES = []

# This entry should be an element from the list above
# This should contain the image matching the "slow_gradcheck" entry in
# pytorch_build_data.py
SLOW_GRADCHECK_IMAGE_NAME = "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"


def get_workflow_jobs(images=IMAGE_NAMES, only_slow_gradcheck=False):
    """Generates a list of docker image build definitions"""
    ret = []
    for image_name in images:
        if image_name.startswith("docker-"):
            # str.lstrip strips a *character set*, not a prefix, and would
            # mangle names like "docker-rocm..."; slice the prefix off instead.
            image_name = image_name[len("docker-"):]
        # compare by value, not identity, so equal strings always match
        if only_slow_gradcheck and image_name != SLOW_GRADCHECK_IMAGE_NAME:
            continue

        parameters = OrderedDict(
            {
                "name": quote(f"docker-{image_name}"),
                "image_name": quote(image_name),
            }
        )
        if image_name == "pytorch-linux-xenial-py3.7-gcc5.4":
            # pushing documentation on tags requires CircleCI to also
            # build all the dependencies on tags, including this docker image
            parameters["filters"] = gen_filter_dict(
                branches_list=r"/.*/", tags_list=RC_PATTERN
            )
        ret.append(OrderedDict({"docker_build_job": parameters}))
    return ret

.circleci/cimodel/data/simple/ios_definitions.py (new file)
@ -0,0 +1,100 @@
import cimodel.lib.miniutils as miniutils
from cimodel.data.simple.util.branch_filters import gen_filter_dict_exclude
from cimodel.data.simple.util.versions import MultiPartVersion

XCODE_VERSION = MultiPartVersion([12, 5, 1])


class ArchVariant:
    def __init__(self, name, custom_build_name=""):
        self.name = name
        self.custom_build_name = custom_build_name

    def render(self):
        extra_parts = (
            [self.custom_build_name] if len(self.custom_build_name) > 0 else []
        )
        return "-".join([self.name] + extra_parts).replace("_", "-")


def get_platform(arch_variant_name):
    return "SIMULATOR" if arch_variant_name == "x86_64" else "OS"
|
||||
|
||||
|
||||
class IOSJob:
|
||||
def __init__(
|
||||
self, xcode_version, arch_variant, is_org_member_context=True, extra_props=None
|
||||
):
|
||||
self.xcode_version = xcode_version
|
||||
self.arch_variant = arch_variant
|
||||
self.is_org_member_context = is_org_member_context
|
||||
self.extra_props = extra_props
|
||||
|
||||
def gen_name_parts(self):
|
||||
version_parts = self.xcode_version.render_dots_or_parts("-")
|
||||
build_variant_suffix = self.arch_variant.render()
|
||||
return (
|
||||
[
|
||||
"ios",
|
||||
]
|
||||
+ version_parts
|
||||
+ [
|
||||
build_variant_suffix,
|
||||
]
|
||||
)
|
||||
|
||||
def gen_job_name(self):
|
||||
return "-".join(self.gen_name_parts())
|
||||
|
||||
def gen_tree(self):
|
||||
platform_name = get_platform(self.arch_variant.name)
|
||||
props_dict = {
|
||||
"name": self.gen_job_name(),
|
||||
"build_environment": self.gen_job_name(),
|
||||
"ios_arch": self.arch_variant.name,
|
||||
"ios_platform": platform_name,
|
||||
}
|
||||
|
||||
if self.is_org_member_context:
|
||||
props_dict["context"] = "org-member"
|
||||
|
||||
if self.extra_props:
|
||||
props_dict.update(self.extra_props)
|
||||
|
||||
props_dict["filters"] = gen_filter_dict_exclude()
|
||||
|
||||
return [{"pytorch_ios_build": props_dict}]
|
||||
|
||||
|
||||
WORKFLOW_DATA = [
|
||||
IOSJob(
|
||||
XCODE_VERSION,
|
||||
ArchVariant("x86_64"),
|
||||
is_org_member_context=False,
|
||||
extra_props={"lite_interpreter": miniutils.quote(str(int(True)))},
|
||||
),
|
||||
# IOSJob(XCODE_VERSION, ArchVariant("arm64"), extra_props={
|
||||
# "lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
# IOSJob(XCODE_VERSION, ArchVariant("arm64", "metal"), extra_props={
|
||||
# "use_metal": miniutils.quote(str(int(True))),
|
||||
# "lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
# IOSJob(XCODE_VERSION, ArchVariant("arm64", "custom-ops"), extra_props={
|
||||
# "op_list": "mobilenetv2.yaml",
|
||||
# "lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
IOSJob(
|
||||
XCODE_VERSION,
|
||||
ArchVariant("x86_64", "coreml"),
|
||||
is_org_member_context=False,
|
||||
extra_props={
|
||||
"use_coreml": miniutils.quote(str(int(True))),
|
||||
"lite_interpreter": miniutils.quote(str(int(True))),
|
||||
},
|
||||
),
|
||||
# IOSJob(XCODE_VERSION, ArchVariant("arm64", "coreml"), extra_props={
|
||||
# "use_coreml": miniutils.quote(str(int(True))),
|
||||
# "lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
]
|
||||
|
||||
|
||||
def get_workflow_jobs():
|
||||
return [item.gen_tree() for item in WORKFLOW_DATA]
|
||||
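A quick sketch (editor's illustration, not repository code) of how the pieces above compose a job name, assuming the module is importable:

# Illustrative only.
from cimodel.data.simple.ios_definitions import IOSJob, ArchVariant, XCODE_VERSION

job = IOSJob(XCODE_VERSION, ArchVariant("x86_64"), is_org_member_context=False)
print(job.gen_job_name())  # ios-12-5-1-x86-64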
54 .circleci/cimodel/data/simple/macos_definitions.py Normal file
@@ -0,0 +1,54 @@
class MacOsJob:
    def __init__(self, os_version, is_build=False, is_test=False, extra_props=tuple()):
        # extra_props is tuple type, because mutable data structures for argument defaults
        # is not recommended.
        self.os_version = os_version
        self.is_build = is_build
        self.is_test = is_test
        self.extra_props = dict(extra_props)

    def gen_tree(self):
        non_phase_parts = ["pytorch", "macos", self.os_version, "py3"]

        extra_name_list = [name for name, exist in self.extra_props.items() if exist]
        full_job_name_list = (
            non_phase_parts
            + extra_name_list
            + [
                "build" if self.is_build else None,
                "test" if self.is_test else None,
            ]
        )

        full_job_name = "_".join(list(filter(None, full_job_name_list)))

        test_build_dependency = "_".join(non_phase_parts + ["build"])
        extra_dependencies = [test_build_dependency] if self.is_test else []
        job_dependencies = extra_dependencies

        # Yes we name the job after itself, it needs a non-empty value in here
        # for the YAML output to work.
        props_dict = {"requires": job_dependencies, "name": full_job_name}

        return [{full_job_name: props_dict}]


WORKFLOW_DATA = [
    MacOsJob("10_15", is_build=True),
    MacOsJob("10_13", is_build=True),
    MacOsJob(
        "10_13",
        is_build=False,
        is_test=True,
    ),
    MacOsJob(
        "10_13",
        is_build=True,
        is_test=True,
        extra_props=tuple({"lite_interpreter": True}.items()),
    ),
]


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
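For orientation (editor's illustration, not repository code), the test variant wires itself to the matching build job by name:

# Illustrative only.
from cimodel.data.simple.macos_definitions import MacOsJob

test_job = MacOsJob("10_13", is_test=True).gen_tree()[0]
print(test_job["pytorch_macos_10_13_py3_test"]["requires"])
# ['pytorch_macos_10_13_py3_build']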
51 .circleci/cimodel/data/simple/mobile_definitions.py Normal file
@@ -0,0 +1,51 @@
"""
PyTorch Mobile PR builds (use linux host toolchain + mobile build options)
"""

import cimodel.data.simple.util.branch_filters
import cimodel.lib.miniutils as miniutils


class MobileJob:
    def __init__(
        self, docker_image, docker_requires, variant_parts, is_master_only=False
    ):
        self.docker_image = docker_image
        self.docker_requires = docker_requires
        self.variant_parts = variant_parts
        self.is_master_only = is_master_only

    def gen_tree(self):
        non_phase_parts = [
            "pytorch",
            "linux",
            "xenial",
            "py3",
            "clang5",
            "mobile",
        ] + self.variant_parts

        full_job_name = "_".join(non_phase_parts)
        build_env_name = "-".join(non_phase_parts)

        props_dict = {
            "build_environment": build_env_name,
            "build_only": miniutils.quote(str(int(True))),
            "docker_image": self.docker_image,
            "requires": self.docker_requires,
            "name": full_job_name,
        }

        if self.is_master_only:
            props_dict[
                "filters"
            ] = cimodel.data.simple.util.branch_filters.gen_filter_dict()

        return [{"pytorch_linux_build": props_dict}]


WORKFLOW_DATA = []


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
96 .circleci/cimodel/data/simple/nightly_ios.py Normal file
@@ -0,0 +1,96 @@
import cimodel.data.simple.ios_definitions as ios_definitions
import cimodel.lib.miniutils as miniutils


class IOSNightlyJob:
    def __init__(self, variant, is_full_jit=False, is_upload=False):
        self.variant = variant
        self.is_full_jit = is_full_jit
        self.is_upload = is_upload

    def get_phase_name(self):
        return "upload" if self.is_upload else "build"

    def get_common_name_pieces(self, sep):
        extra_name_suffix = [self.get_phase_name()] if self.is_upload else []

        extra_name = ["full_jit"] if self.is_full_jit else []

        common_name_pieces = (
            [
                "ios",
            ]
            + extra_name
            + []
            + ios_definitions.XCODE_VERSION.render_dots_or_parts(sep)
            + [
                "nightly",
                self.variant,
                "build",
            ]
            + extra_name_suffix
        )

        return common_name_pieces

    def gen_job_name(self):
        return "_".join(["pytorch"] + self.get_common_name_pieces(None))

    def gen_tree(self):
        build_configs = BUILD_CONFIGS_FULL_JIT if self.is_full_jit else BUILD_CONFIGS
        extra_requires = (
            [x.gen_job_name() for x in build_configs] if self.is_upload else []
        )

        props_dict = {
            "build_environment": "-".join(
                ["libtorch"] + self.get_common_name_pieces(".")
            ),
            "requires": extra_requires,
            "context": "org-member",
            "filters": {"branches": {"only": "nightly"}},
        }

        if not self.is_upload:
            props_dict["ios_arch"] = self.variant
            props_dict["ios_platform"] = ios_definitions.get_platform(self.variant)
            props_dict["name"] = self.gen_job_name()
            props_dict["use_metal"] = miniutils.quote(str(int(True)))
            props_dict["use_coreml"] = miniutils.quote(str(int(True)))

        if self.is_full_jit:
            props_dict["lite_interpreter"] = miniutils.quote(str(int(False)))

        template_name = "_".join(
            [
                "binary",
                "ios",
                self.get_phase_name(),
            ]
        )

        return [{template_name: props_dict}]


BUILD_CONFIGS = [
    IOSNightlyJob("x86_64"),
    IOSNightlyJob("arm64"),
]

BUILD_CONFIGS_FULL_JIT = [
    IOSNightlyJob("x86_64", is_full_jit=True),
    IOSNightlyJob("arm64", is_full_jit=True),
]

WORKFLOW_DATA = (
    BUILD_CONFIGS
    + BUILD_CONFIGS_FULL_JIT
    + [
        IOSNightlyJob("binary", is_full_jit=False, is_upload=True),
        IOSNightlyJob("binary", is_full_jit=True, is_upload=True),
    ]
)


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
36 .circleci/cimodel/data/simple/util/branch_filters.py Normal file
@@ -0,0 +1,36 @@
NON_PR_BRANCH_LIST = [
    "main",
    "master",
    r"/ci-all\/.*/",
    r"/release\/.*/",
]

PR_BRANCH_LIST = [
    r"/gh\/.*\/head/",
    r"/pull\/.*/",
]

RC_PATTERN = r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"

MAC_IOS_EXCLUSION_LIST = ["nightly", "postnightly"]


def gen_filter_dict(branches_list=NON_PR_BRANCH_LIST, tags_list=None):
    """Generates a filter dictionary for use with CircleCI's job filter"""
    filter_dict = {
        "branches": {
            "only": branches_list,
        },
    }

    if tags_list is not None:
        filter_dict["tags"] = {"only": tags_list}
    return filter_dict


def gen_filter_dict_exclude(branches_list=MAC_IOS_EXCLUSION_LIST):
    return {
        "branches": {
            "ignore": branches_list,
        },
    }
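For orientation (editor's illustration, not repository code), the default filter dictionary this helper yields, with the RC tag pattern opted in:

# Illustrative only.
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN

print(gen_filter_dict(tags_list=RC_PATTERN))
# {'branches': {'only': ['main', 'master', '/ci-all\\/.*/', '/release\\/.*/']},
#  'tags': {'only': '/v[0-9]+(\\.[0-9]+)*-rc[0-9]+/'}}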
35 .circleci/cimodel/data/simple/util/docker_constants.py Normal file
@@ -0,0 +1,35 @@
AWS_DOCKER_HOST = "308535385114.dkr.ecr.us-east-1.amazonaws.com"


def gen_docker_image(container_type):
    return (
        "/".join([AWS_DOCKER_HOST, "pytorch", container_type]),
        f"docker-{container_type}",
    )


def gen_docker_image_requires(image_name):
    return [f"docker-{image_name}"]


DOCKER_IMAGE_BASIC, DOCKER_REQUIREMENT_BASE = gen_docker_image(
    "pytorch-linux-xenial-py3.7-gcc5.4"
)

DOCKER_IMAGE_CUDA_10_2, DOCKER_REQUIREMENT_CUDA_10_2 = gen_docker_image(
    "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
)

DOCKER_IMAGE_GCC7, DOCKER_REQUIREMENT_GCC7 = gen_docker_image(
    "pytorch-linux-xenial-py3.7-gcc7"
)


def gen_mobile_docker(specifier):
    container_type = "pytorch-linux-xenial-py3-clang5-" + specifier
    return gen_docker_image(container_type)


DOCKER_IMAGE_ASAN, DOCKER_REQUIREMENT_ASAN = gen_mobile_docker("asan")

DOCKER_IMAGE_NDK, DOCKER_REQUIREMENT_NDK = gen_mobile_docker("android-ndk-r19c")
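A short sketch (editor's illustration, not repository code): each helper returns a pair of (full ECR image path, CircleCI job-dependency name).

# Illustrative only.
from cimodel.data.simple.util.docker_constants import gen_docker_image

image, requirement = gen_docker_image("pytorch-linux-xenial-py3.7-gcc5.4")
print(image)        # 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.7-gcc5.4
print(requirement)  # docker-pytorch-linux-xenial-py3.7-gcc5.4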
36 .circleci/cimodel/data/simple/util/versions.py Normal file
@@ -0,0 +1,36 @@
from typing import Optional


class MultiPartVersion:
    def __init__(self, parts, prefix=""):
        self.parts = parts
        self.prefix = prefix

    def prefixed_parts(self):
        """
        Prepends the first element of the version list
        with the prefix string.
        """
        if self.parts:
            return [self.prefix + str(self.parts[0])] + [
                str(part) for part in self.parts[1:]
            ]
        else:
            return [self.prefix]

    def render_dots_or_parts(self, sep: Optional[str] = None):
        if sep is None:
            return self.prefixed_parts()
        else:
            return [sep.join(self.prefixed_parts())]


class CudaVersion(MultiPartVersion):
    def __init__(self, major, minor):
        self.major = major
        self.minor = minor

        super().__init__([self.major, self.minor], "cuda")

    def __str__(self):
        return f"{self.major}.{self.minor}"
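For orientation (editor's illustration, not repository code), how render_dots_or_parts switches between the joined and split forms:

# Illustrative only.
from cimodel.data.simple.util.versions import MultiPartVersion, CudaVersion

v = MultiPartVersion([12, 5, 1])
print(v.render_dots_or_parts("-"))   # ['12-5-1']
print(v.render_dots_or_parts(None))  # ['12', '5', '1']
print(str(CudaVersion(10, 2)))       # 10.2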
111 .circleci/cimodel/lib/conf_tree.py Normal file
@@ -0,0 +1,111 @@
from dataclasses import dataclass, field
from typing import Dict, Optional


def X(val):
    """
    Compact way to write a leaf node
    """
    return val, []


def XImportant(name):
    """Compact way to write an important (run on PRs) leaf node"""
    return (name, [("important", [X(True)])])


@dataclass
class Ver:
    """
    Represents a product with a version number
    """

    name: str
    version: str = ""

    def __str__(self):
        return self.name + self.version


@dataclass
class ConfigNode:
    parent: Optional["ConfigNode"]
    node_name: str
    props: Dict[str, str] = field(default_factory=dict)

    def get_label(self):
        return self.node_name

    # noinspection PyMethodMayBeStatic
    def get_children(self):
        return []

    def get_parents(self):
        return (
            (self.parent.get_parents() + [self.parent.get_label()])
            if self.parent
            else []
        )

    def get_depth(self):
        return len(self.get_parents())

    def get_node_key(self):
        return "%".join(self.get_parents() + [self.get_label()])

    def find_prop(self, propname, searched=None):
        """
        Checks if its own dictionary has
        the property, otherwise asks parent node.
        """

        if searched is None:
            searched = []

        searched.append(self.node_name)

        if propname in self.props:
            return self.props[propname]
        elif self.parent:
            return self.parent.find_prop(propname, searched)
        else:
            # raise Exception('Property "%s" does not exist anywhere in the tree! Searched: %s' % (propname, searched))
            return None


def dfs_recurse(
    node,
    leaf_callback=lambda x: None,
    discovery_callback=lambda x, y, z: None,
    child_callback=lambda x, y: None,
    sibling_index=0,
    sibling_count=1,
):
    discovery_callback(node, sibling_index, sibling_count)

    node_children = node.get_children()
    if node_children:
        for i, child in enumerate(node_children):
            child_callback(node, child)

            dfs_recurse(
                child,
                leaf_callback,
                discovery_callback,
                child_callback,
                i,
                len(node_children),
            )
    else:
        leaf_callback(node)


def dfs(toplevel_config_node):
    config_list = []

    def leaf_callback(node):
        config_list.append(node)

    dfs_recurse(toplevel_config_node, leaf_callback)

    return config_list
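A small sketch (editor's illustration, not repository code) of walking a two-level tree with dfs(); TreeNode is a hypothetical subclass added only for this example, since ConfigNode itself has no children:

# Illustrative only.
from cimodel.lib.conf_tree import ConfigNode, dfs

class TreeNode(ConfigNode):
    def __init__(self, parent, name, children=()):
        super().__init__(parent, name)
        self._children = [TreeNode(self, c) for c in children]

    def get_children(self):
        return self._children

root = TreeNode(None, "root", children=["gcc7", "clang5"])
print([leaf.get_node_key() for leaf in dfs(root)])
# ['root%gcc7', 'root%clang5']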
10 .circleci/cimodel/lib/miniutils.py Normal file
@@ -0,0 +1,10 @@
def quote(s):
    return sandwich('"', s)


def sandwich(bread, jam):
    return bread + jam + bread


def override(word, substitutions):
    return substitutions.get(word, word)
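The three helpers above in action (editor's illustration, not repository code):

# Illustrative only.
from cimodel.lib.miniutils import quote, sandwich, override

print(quote("org-member"))                  # "org-member"
print(sandwich("*", "emphasis"))            # *emphasis*
print(override("cuda", {"cuda": "cu102"}))  # cu102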
51 .circleci/cimodel/lib/miniyaml.py Normal file
@@ -0,0 +1,51 @@
from collections import OrderedDict

import cimodel.lib.miniutils as miniutils


LIST_MARKER = "- "
INDENTATION_WIDTH = 2


def is_dict(data):
    return type(data) in [dict, OrderedDict]


def is_collection(data):
    return is_dict(data) or type(data) is list


def render(fh, data, depth, is_list_member=False):
    """
    PyYaml does not allow precise control over the quoting
    behavior, especially for merge references.
    Therefore, we use this custom YAML renderer.
    """

    indentation = " " * INDENTATION_WIDTH * depth

    if is_dict(data):
        tuples = list(data.items())
        if type(data) is not OrderedDict:
            tuples.sort()

        for i, (k, v) in enumerate(tuples):
            if not v:
                continue
            # If this dict is itself a list member, the first key gets prefixed with a list marker
            list_marker_prefix = LIST_MARKER if is_list_member and not i else ""

            trailing_whitespace = "\n" if is_collection(v) else " "
            fh.write(indentation + list_marker_prefix + k + ":" + trailing_whitespace)

            render(fh, v, depth + 1 + int(is_list_member))

    elif type(data) is list:
        for v in data:
            render(fh, v, depth, True)

    else:
        # use empty quotes to denote an empty string value instead of blank space
        modified_data = miniutils.quote(data) if data == "" else data
        list_member_prefix = indentation + LIST_MARKER if is_list_member else ""
        fh.write(list_member_prefix + str(modified_data) + "\n")
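A minimal rendering sketch (editor's illustration, not repository code), shaped like the single-key job dicts the workflow generators emit:

# Illustrative only.
import sys
from collections import OrderedDict

import cimodel.lib.miniyaml as miniyaml

data = OrderedDict(
    [("jobs", [OrderedDict([("docker_build_job", OrderedDict([("name", "docker-foo")]))])])]
)
miniyaml.render(sys.stdout, data, depth=0)
# jobs:
#   - docker_build_job:
#       name: docker-foo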
1435 .circleci/config.yml generated Normal file
(File diff suppressed because it is too large.)
41 .circleci/ensure-consistency.py Executable file
@@ -0,0 +1,41 @@
#!/usr/bin/env python3

import os
import subprocess
import sys
import tempfile

import generate_config_yml


CHECKED_IN_FILE = "config.yml"
REGENERATION_SCRIPT = "regenerate.sh"

PARENT_DIR = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
README_PATH = os.path.join(PARENT_DIR, "README.md")

ERROR_MESSAGE_TEMPLATE = """
The checked-in CircleCI "%s" file does not match what was generated by the scripts.
Please re-run the "%s" script in the "%s" directory and commit the result. See "%s" for more information.
"""


def check_consistency():
    _, temp_filename = tempfile.mkstemp("-generated-config.yml")

    with open(temp_filename, "w") as fh:
        generate_config_yml.stitch_sources(fh)

    try:
        subprocess.check_call(["cmp", temp_filename, CHECKED_IN_FILE])
    except subprocess.CalledProcessError:
        sys.exit(
            ERROR_MESSAGE_TEMPLATE
            % (CHECKED_IN_FILE, REGENERATION_SCRIPT, PARENT_DIR, README_PATH)
        )
    finally:
        os.remove(temp_filename)


if __name__ == "__main__":
    check_consistency()
199 .circleci/generate_config_yml.py Executable file
@@ -0,0 +1,199 @@
#!/usr/bin/env python3

"""
This script is the source of truth for config.yml.
Please see README.md in this directory for details.
"""

import os
import shutil
import sys
from collections import namedtuple

import cimodel.data.simple.anaconda_prune_defintions

import cimodel.data.simple.docker_definitions
import cimodel.data.simple.mobile_definitions
import cimodel.data.simple.nightly_ios
import cimodel.lib.miniutils as miniutils
import cimodel.lib.miniyaml as miniyaml


class File:
    """
    Verbatim copy the contents of a file into config.yml
    """

    def __init__(self, filename):
        self.filename = filename

    def write(self, output_filehandle):
        with open(os.path.join("verbatim-sources", self.filename)) as fh:
            shutil.copyfileobj(fh, output_filehandle)


class FunctionGen(namedtuple("FunctionGen", "function depth")):
    __slots__ = ()


class Treegen(FunctionGen):
    """
    Insert the content of a YAML tree into config.yml
    """

    def write(self, output_filehandle):
        miniyaml.render(output_filehandle, self.function(), self.depth)


class Listgen(FunctionGen):
    """
    Insert the content of a YAML list into config.yml
    """

    def write(self, output_filehandle):
        miniyaml.render(output_filehandle, self.function(), self.depth)


def horizontal_rule():
    return "".join("#" * 78)


class Header:
    def __init__(self, title, summary=None):
        self.title = title
        self.summary_lines = summary or []

    def write(self, output_filehandle):
        text_lines = [self.title] + self.summary_lines
        comment_lines = ["# " + x for x in text_lines]
        lines = miniutils.sandwich([horizontal_rule()], comment_lines)

        for line in filter(None, lines):
            output_filehandle.write(line + "\n")


def _for_all_items(items, functor) -> None:
    if isinstance(items, list):
        for item in items:
            _for_all_items(item, functor)
    if isinstance(items, dict) and len(items) == 1:
        item_type, item = next(iter(items.items()))
        functor(item_type, item)


def filter_master_only_jobs(items):
    def _is_main_or_master_item(item):
        filters = item.get("filters", None)
        branches = filters.get("branches", None) if filters is not None else None
        branches_only = branches.get("only", None) if branches is not None else None
        return (
            ("main" in branches_only or "master" in branches_only)
            if branches_only is not None
            else False
        )

    master_deps = set()

    def _save_requires_if_master(item_type, item):
        requires = item.get("requires", None)
        item_name = item.get("name", None)
        if not isinstance(requires, list):
            return
        if _is_main_or_master_item(item) or item_name in master_deps:
            master_deps.update([n.strip('"') for n in requires])

    def _do_filtering(items):
        if isinstance(items, list):
            rc = [_do_filtering(item) for item in items]
            return [item for item in rc if len(item if item is not None else []) > 0]
        assert isinstance(items, dict) and len(items) == 1
        item_type, item = next(iter(items.items()))
        item_name = item.get("name", None)
        item_name = item_name.strip('"') if item_name is not None else None
        if not _is_main_or_master_item(item) and item_name not in master_deps:
            return None
        if "filters" in item:
            item = item.copy()
            item.pop("filters")
        return {item_type: item}

    # Scan of dependencies twice to pick up nested required jobs
    # I.e. jobs depending on jobs that main-only job depend on
    _for_all_items(items, _save_requires_if_master)
    _for_all_items(items, _save_requires_if_master)
    return _do_filtering(items)


def generate_required_docker_images(items):
    required_docker_images = set()

    def _requires_docker_image(item_type, item):
        requires = item.get("requires", None)
        if not isinstance(requires, list):
            return
        for requirement in requires:
            requirement = requirement.replace('"', "")
            if requirement.startswith("docker-"):
                required_docker_images.add(requirement)

    _for_all_items(items, _requires_docker_image)
    return required_docker_images


def gen_build_workflows_tree():
    build_workflows_functions = [
        cimodel.data.simple.mobile_definitions.get_workflow_jobs,
        cimodel.data.simple.nightly_ios.get_workflow_jobs,
        cimodel.data.simple.anaconda_prune_defintions.get_workflow_jobs,
    ]
    build_jobs = [f() for f in build_workflows_functions]
    build_jobs.extend(
        cimodel.data.simple.docker_definitions.get_workflow_jobs(
            # sort for consistency
            sorted(generate_required_docker_images(build_jobs))
        )
    )
    master_build_jobs = filter_master_only_jobs(build_jobs)

    rc = {
        "workflows": {
            "build": {
                "when": r"<< pipeline.parameters.run_build >>",
                "jobs": build_jobs,
            },
        }
    }
    if len(master_build_jobs) > 0:
        rc["workflows"]["master_build"] = {
            "when": r"<< pipeline.parameters.run_master_build >>",
            "jobs": master_build_jobs,
        }
    return rc


# Order of this list matters to the generated config.yml.
YAML_SOURCES = [
    File("header-section.yml"),
    File("commands.yml"),
    File("nightly-binary-build-defaults.yml"),
    Header("Build parameters"),
    File("build-parameters/pytorch-build-params.yml"),
    File("build-parameters/binary-build-params.yml"),
    Header("Job specs"),
    File("job-specs/binary-job-specs.yml"),
    File("job-specs/job-specs-custom.yml"),
    File("job-specs/binary_update_htmls.yml"),
    File("job-specs/binary-build-tests.yml"),
    File("job-specs/docker_jobs.yml"),
    Header("Workflows"),
    Treegen(gen_build_workflows_tree, 0),
]


def stitch_sources(output_filehandle):
    for f in YAML_SOURCES:
        f.write(output_filehandle)


if __name__ == "__main__":
    stitch_sources(sys.stdout)
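A usage sketch (editor's illustration, not repository code) mirroring what regenerate.sh does via stdout redirection:

# Illustrative only; run from the .circleci directory so the relative
# verbatim-sources/ paths used by File.write resolve.
import generate_config_yml

with open("config.new.yml", "w") as fh:
    generate_config_yml.stitch_sources(fh)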
5 .circleci/regenerate.ps1 Normal file
@@ -0,0 +1,5 @@
cd $PSScriptRoot;
$NewFile = New-TemporaryFile;
python generate_config_yml.py > $NewFile.name
(Get-Content $NewFile.name -Raw).TrimEnd().Replace("`r`n","`n") | Set-Content config.yml -Force
Remove-Item $NewFile.name
17 .circleci/regenerate.sh Executable file
@@ -0,0 +1,17 @@
#!/bin/bash -e

# Allows this script to be invoked from any directory:
cd "$(dirname "$0")"

UNCOMMIT_CHANGE=$(git status -s | grep " config.yml" | wc -l | xargs)
if [[ $UNCOMMIT_CHANGE != 0 ]]; then
  OLD_FILE=$(mktemp)
  cp config.yml "$OLD_FILE"
  echo "Uncommitted change detected in .circleci/config.yml"
  echo "It has been backed up to $OLD_FILE"
fi

NEW_FILE=$(mktemp)
./generate_config_yml.py > "$NEW_FILE"
cp "$NEW_FILE" config.yml
echo "New config generated in .circleci/config.yml"
69 .circleci/scripts/binary_checkout.sh Executable file
@@ -0,0 +1,69 @@
#!/bin/bash
set -eux -o pipefail

retry () {
  $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}


# This step runs on multiple executors with different envfile locations
if [[ "$(uname)" == Darwin ]]; then
  # macos executor (builds and tests)
  workdir="/Users/distiller/project"
elif [[ "$OSTYPE" == "msys" ]]; then
  # windows executor (builds and tests)
  rm -rf /c/w
  ln -s "/c/Users/circleci/project" /c/w
  workdir="/c/w"
elif [[ -d "/home/circleci/project" ]]; then
  # machine executor (binary tests)
  workdir="/home/circleci/project"
else
  # docker executor (binary builds)
  workdir="/"
fi

# It is very important that this stays in sync with binary_populate_env.sh
if [[ "$OSTYPE" == "msys" ]]; then
  # We need to make the paths as short as possible on Windows
  export PYTORCH_ROOT="$workdir/p"
  export BUILDER_ROOT="$workdir/b"
else
  export PYTORCH_ROOT="$workdir/pytorch"
  export BUILDER_ROOT="$workdir/builder"
fi

# Try to extract PR number from branch if not already set
if [[ -z "${CIRCLE_PR_NUMBER:-}" ]]; then
  CIRCLE_PR_NUMBER="$(echo ${CIRCLE_BRANCH} | sed -E -n 's/pull\/([0-9]*).*/\1/p')"
fi

# Clone the Pytorch branch
retry git clone https://github.com/pytorch/pytorch.git "$PYTORCH_ROOT"
pushd "$PYTORCH_ROOT"
if [[ -n "${CIRCLE_PR_NUMBER:-}" ]]; then
  # "smoke" binary build on PRs
  git fetch --force origin "pull/${CIRCLE_PR_NUMBER}/head:remotes/origin/pull/${CIRCLE_PR_NUMBER}"
  git reset --hard "$CIRCLE_SHA1"
  git checkout -q -B "$CIRCLE_BRANCH"
  git reset --hard "$CIRCLE_SHA1"
elif [[ -n "${CIRCLE_SHA1:-}" ]]; then
  # Scheduled workflows & "smoke" binary build on trunk on PR merges
  DEFAULT_BRANCH="$(git remote show $CIRCLE_REPOSITORY_URL | awk '/HEAD branch/ {print $NF}')"
  git reset --hard "$CIRCLE_SHA1"
  git checkout -q -B $DEFAULT_BRANCH
else
  echo "Can't tell what to checkout"
  exit 1
fi
retry git submodule update --init --recursive
echo "Using Pytorch from "
git --no-pager log --max-count 1
popd

# Clone the Builder main repo
retry git clone -q https://github.com/pytorch/builder.git -b release/2.1 "$BUILDER_ROOT"
pushd "$BUILDER_ROOT"
echo "Using builder from "
git --no-pager log --max-count 1
popd
44 .circleci/scripts/binary_install_miniconda.sh Executable file
@@ -0,0 +1,44 @@
#!/bin/bash

set -eux -o pipefail

# This step runs on multiple executors with different envfile locations
if [[ "$(uname)" == Darwin ]]; then
  envfile="/Users/distiller/project/env"
elif [[ -d "/home/circleci/project" ]]; then
  # machine executor (binary tests)
  envfile="/home/circleci/project/env"
else
  # docker executor (binary builds)
  envfile="/env"
fi

# TODO this is super hacky and ugly. Basically, the binary_update_html job does
# not have an env file, since it does not call binary_populate_env.sh, since it
# does not have a BUILD_ENVIRONMENT. So for this one case, which we detect by a
# lack of an env file, we manually export the environment variables that we
# need to install miniconda
if [[ ! -f "$envfile" ]]; then
  MINICONDA_ROOT="/home/circleci/project/miniconda"
  workdir="/home/circleci/project"
  retry () {
    $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
  }
  export -f retry
else
  source "$envfile"
fi

conda_sh="$workdir/install_miniconda.sh"
if [[ "$(uname)" == Darwin ]]; then
  curl --retry 3 --retry-all-errors -o "$conda_sh" https://repo.anaconda.com/miniconda/Miniconda3-py39_4.12.0-MacOSX-x86_64.sh
else
  curl --retry 3 --retry-all-errors -o "$conda_sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
fi
chmod +x "$conda_sh"
"$conda_sh" -b -p "$MINICONDA_ROOT"
rm -f "$conda_sh"

# We can't actually add miniconda to the PATH in the envfile, because that
# breaks 'unbuffer' in Mac jobs. This is probably because conda comes with
# a tclsh, which then gets inserted before the tclsh needed in /usr/bin
@@ -33,7 +33,7 @@ fi
cp ${PROJ_ROOT}/LICENSE ${ZIP_DIR}/
# zip the library
export DATE="$(date -u +%Y%m%d)"
export IOS_NIGHTLY_BUILD_VERSION="2.2.0.${DATE}"
export IOS_NIGHTLY_BUILD_VERSION="2.1.0.${DATE}"
if [ "${BUILD_LITE_INTERPRETER}" == "1" ]; then
  # libtorch_lite_ios_nightly_1.11.0.20210810.zip
  ZIPFILE="libtorch_lite_ios_nightly_${IOS_NIGHTLY_BUILD_VERSION}.zip"

@@ -54,7 +54,7 @@ fi


# Move debug wheels out of the package dir so they don't get installed
# Move debug wheels out of the the package dir so they don't get installed
mkdir -p /tmp/debug_final_pkgs
mv /final_pkgs/debug-*.zip /tmp/debug_final_pkgs || echo "no debug packages to move"

@@ -66,12 +66,6 @@ mv /final_pkgs/debug-*.zip /tmp/debug_final_pkgs || echo "no debug packages to m
# conda build scripts themselves. These should really be consolidated
# Pick only one package of multiple available (which happens as result of workflow re-runs)
pkg="/final_pkgs/\$(ls -1 /final_pkgs|sort|tail -1)"
if [[ "\$PYTORCH_BUILD_VERSION" == *dev* ]]; then
  CHANNEL="nightly"
else
  CHANNEL="test"
fi

if [[ "$PACKAGE_TYPE" == conda ]]; then
  (
    # For some reason conda likes to re-activate the conda environment when attempting this install
@@ -89,28 +83,26 @@ if [[ "$PACKAGE_TYPE" == conda ]]; then
    if [[ "$DESIRED_CUDA" == 'cpu' ]]; then
      retry conda install -c pytorch -y cpuonly
    else

      cu_ver="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4}"
      CUDA_PACKAGE="pytorch-cuda"
      retry conda install \${EXTRA_CONDA_FLAGS} -yq -c nvidia -c "pytorch-\${CHANNEL}" "pytorch-cuda=\${cu_ver}"
      PYTORCH_CHANNEL="pytorch"
      if [[ "\${TORCH_CONDA_BUILD_FOLDER}" == "pytorch-nightly" ]]; then
        PYTORCH_CHANNEL="pytorch-nightly"
      fi
      retry conda install \${EXTRA_CONDA_FLAGS} -yq -c nvidia -c pytorch-test "pytorch-cuda=\${cu_ver}"
    fi
    conda install \${EXTRA_CONDA_FLAGS} -y "\$pkg" --offline
  )
elif [[ "$PACKAGE_TYPE" != libtorch ]]; then
  if [[ "\$BUILD_ENVIRONMENT" != *s390x* ]]; then
    if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
      pkg_no_python="$(ls -1 /final_pkgs/torch_no_python* | sort |tail -1)"
      pkg_torch="$(ls -1 /final_pkgs/torch-* | sort |tail -1)"
      # todo: after folder is populated use the pypi_pkg channel instead
      pip install "\$pkg_no_python" "\$pkg_torch" --index-url "https://download.pytorch.org/whl/\${CHANNEL}/${DESIRED_CUDA}_pypi_pkg"
      retry pip install -q numpy protobuf typing-extensions
    else
      pip install "\$pkg" --index-url "https://download.pytorch.org/whl/\${CHANNEL}/${DESIRED_CUDA}"
      retry pip install -q numpy protobuf typing-extensions
    fi
    if [[ "$(uname -m)" == aarch64 ]]; then
      # Using "extra-index-url" until all needed aarch64 dependencies are
      # added to "https://download.pytorch.org/whl/nightly/"
      pip install "\$pkg" --extra-index-url "https://download.pytorch.org/whl/test/${DESIRED_CUDA}"
    else
      pip install "\$pkg"
      retry pip install -q numpy protobuf typing-extensions
      pip install "\$pkg" --index-url "https://download.pytorch.org/whl/test/${DESIRED_CUDA}"
    fi
    retry pip install -q numpy protobuf typing-extensions
  fi
if [[ "$PACKAGE_TYPE" == libtorch ]]; then
  pkg="\$(ls /final_pkgs/*-latest.zip)"
@@ -4,6 +4,10 @@ set -eux -o pipefail
source "${BINARY_ENV_FILE:-/Users/distiller/project/env}"
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR"

if [[ -z "${GITHUB_ACTIONS:-}" ]]; then
  export PATH="${workdir:-${HOME}}/miniconda/bin:${PATH}"
fi

# Build
export USE_PYTORCH_METAL_EXPORT=1
export USE_COREML_DELEGATE=1
@@ -3,9 +3,17 @@ set -eux -o pipefail
export TZ=UTC

tagged_version() {
  GIT_DIR="${workdir}/pytorch/.git"
  # Grabs version from either the env variable CIRCLE_TAG
  # or the pytorch git described version
  if [[ "$OSTYPE" == "msys" && -z "${GITHUB_ACTIONS:-}" ]]; then
    GIT_DIR="${workdir}/p/.git"
  else
    GIT_DIR="${workdir}/pytorch/.git"
  fi
  GIT_DESCRIBE="git --git-dir ${GIT_DIR} describe --tags --match v[0-9]*.[0-9]*.[0-9]*"
  if [[ ! -d "${GIT_DIR}" ]]; then
  if [[ -n "${CIRCLE_TAG:-}" ]]; then
    echo "${CIRCLE_TAG}"
  elif [[ ! -d "${GIT_DIR}" ]]; then
    echo "Abort, abort! Git dir ${GIT_DIR} does not exists!"
    kill $$
  elif ${GIT_DESCRIBE} --exact >/dev/null; then
@@ -33,9 +41,9 @@ if [[ -z "$DOCKER_IMAGE" ]]; then
  if [[ "$PACKAGE_TYPE" == conda ]]; then
    export DOCKER_IMAGE="pytorch/conda-cuda"
  elif [[ "$DESIRED_CUDA" == cpu ]]; then
    export DOCKER_IMAGE="pytorch/manylinux:cpu"
    export DOCKER_IMAGE="pytorch/manylinux-cpu"
  else
    export DOCKER_IMAGE="pytorch/manylinux-builder:${DESIRED_CUDA:2}"
    export DOCKER_IMAGE="pytorch/manylinux-cuda${DESIRED_CUDA:2}"
  fi
fi

@@ -50,8 +58,8 @@ fi
PIP_UPLOAD_FOLDER='nightly/'
# We put this here so that OVERRIDE_PACKAGE_VERSION below can read from it
export DATE="$(date -u +%Y%m%d)"
BASE_BUILD_VERSION="$(cat ${PYTORCH_ROOT}/version.txt|cut -da -f1).dev${DATE}"

#TODO: We should be pulling semver version from the base version.txt
BASE_BUILD_VERSION="2.1.0.dev$DATE"
# Change BASE_BUILD_VERSION to git tag when on a git tag
# Use 'git -C' to make doubly sure we're in the correct directory for checking
# the git tag
@@ -69,35 +77,39 @@ else
  export PYTORCH_BUILD_VERSION="${BASE_BUILD_VERSION}+$DESIRED_CUDA"
fi

export PYTORCH_BUILD_NUMBER=1

# Set triton version as part of PYTORCH_EXTRA_INSTALL_REQUIREMENTS
TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_version.txt)

# Here PYTORCH_EXTRA_INSTALL_REQUIREMENTS is already set for the all the wheel builds hence append TRITON_CONSTRAINT
TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64' and python_version < '3.13'"
if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" ]]; then
  # Only linux Python < 3.13 are supported wheels for triton
  TRITON_REQUIREMENT="triton==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
  if [[ -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*dev.* ]]; then
    TRITON_SHORTHASH=$(cut -c1-10 $PYTORCH_ROOT/.ci/docker/ci_commit_pins/triton.txt)
    TRITON_REQUIREMENT="pytorch-triton==${TRITON_VERSION}+${TRITON_SHORTHASH}; ${TRITON_CONSTRAINT}"
  fi
  export PYTORCH_EXTRA_INSTALL_REQUIREMENTS="${PYTORCH_EXTRA_INSTALL_REQUIREMENTS} | ${TRITON_REQUIREMENT}"
# The build with with-pypi-cudnn suffix is only applicabe to
# pypi small wheel Linux x86 build
if [[ -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" ]] && [[ "$(uname)" == 'Linux' && "$(uname -m)" == "x86_64" ]]; then
  export PYTORCH_BUILD_VERSION="${PYTORCH_BUILD_VERSION}-with-pypi-cudnn"
fi

# Set triton via PYTORCH_EXTRA_INSTALL_REQUIREMENTS for triton rocm package
if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*rocm.* && $(uname) == "Linux" ]]; then
  TRITON_REQUIREMENT="pytorch-triton-rocm==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
  if [[ -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*dev.* ]]; then
    TRITON_SHORTHASH=$(cut -c1-10 $PYTORCH_ROOT/.ci/docker/ci_commit_pins/triton-rocm.txt)
    TRITON_REQUIREMENT="pytorch-triton-rocm==${TRITON_VERSION}+${TRITON_SHORTHASH}; ${TRITON_CONSTRAINT}"
  fi
  if [[ -z "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" ]]; then
    export PYTORCH_EXTRA_INSTALL_REQUIREMENTS="${TRITON_REQUIREMENT}"
  else
    export PYTORCH_EXTRA_INSTALL_REQUIREMENTS="${PYTORCH_EXTRA_INSTALL_REQUIREMENTS} | ${TRITON_REQUIREMENT}"
export PYTORCH_BUILD_NUMBER=1


JAVA_HOME=
BUILD_JNI=OFF
if [[ "$PACKAGE_TYPE" == libtorch ]]; then
  POSSIBLE_JAVA_HOMES=()
  POSSIBLE_JAVA_HOMES+=(/usr/local)
  POSSIBLE_JAVA_HOMES+=(/usr/lib/jvm/java-8-openjdk-amd64)
  POSSIBLE_JAVA_HOMES+=(/Library/Java/JavaVirtualMachines/*.jdk/Contents/Home)
  # Add the Windows-specific JNI path
  POSSIBLE_JAVA_HOMES+=("$PWD/pytorch/.circleci/windows-jni/")
  for JH in "${POSSIBLE_JAVA_HOMES[@]}" ; do
    if [[ -e "$JH/include/jni.h" ]] ; then
      # Skip if we're not on Windows but haven't found a JAVA_HOME
      if [[ "$JH" == "$PWD/pytorch/.circleci/windows-jni/" && "$OSTYPE" != "msys" ]] ; then
        break
      fi
      echo "Found jni.h under $JH"
      JAVA_HOME="$JH"
      BUILD_JNI=ON
      break
    fi
  done
  if [ -z "$JAVA_HOME" ]; then
    echo "Did not find jni.h"
  fi
fi

cat >"$envfile" <<EOL
@@ -110,7 +122,6 @@ export DESIRED_PYTHON="${DESIRED_PYTHON:-}"
export DESIRED_CUDA="$DESIRED_CUDA"
export LIBTORCH_VARIANT="${LIBTORCH_VARIANT:-}"
export BUILD_PYTHONLESS="${BUILD_PYTHONLESS:-}"
export USE_SPLIT_BUILD="${USE_SPLIT_BUILD:-}"
if [[ "${OSTYPE}" == "msys" ]]; then
  export LIBTORCH_CONFIG="${LIBTORCH_CONFIG:-}"
  if [[ "${LIBTORCH_CONFIG:-}" == 'debug' ]]; then
@@ -120,13 +131,12 @@ if [[ "${OSTYPE}" == "msys" ]]; then
else
  export DESIRED_DEVTOOLSET="${DESIRED_DEVTOOLSET:-}"
fi

export PYTORCH_EXTRA_INSTALL_REQUIREMENTS="${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}"
export DATE="$DATE"
export NIGHTLIES_DATE_PREAMBLE=1.14.0.dev
export PYTORCH_BUILD_VERSION="$PYTORCH_BUILD_VERSION"
export PYTORCH_BUILD_NUMBER="$PYTORCH_BUILD_NUMBER"
export OVERRIDE_PACKAGE_VERSION="$PYTORCH_BUILD_VERSION"
export PYTORCH_EXTRA_INSTALL_REQUIREMENTS="${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}"

# TODO: We don't need this anymore IIUC
export TORCH_PACKAGE_NAME='torch'
@@ -134,6 +144,8 @@ export TORCH_CONDA_BUILD_FOLDER='pytorch-nightly'
export ANACONDA_USER='pytorch'

export USE_FBGEMM=1
export JAVA_HOME=$JAVA_HOME
export BUILD_JNI=$BUILD_JNI
export PIP_UPLOAD_FOLDER="$PIP_UPLOAD_FOLDER"
export DOCKER_IMAGE="$DOCKER_IMAGE"

@@ -145,8 +157,8 @@ EOL

# nproc doesn't exist on darwin
if [[ "$(uname)" != Darwin ]]; then
  # This was lowered from 18 to 12 to avoid OOMs when compiling FlashAttentionV2
  MEMORY_LIMIT_MAX_JOBS=12
  # Because most Circle executors only have 20 CPUs, using more causes OOMs w/ Ninja and nvcc parallelization
  MEMORY_LIMIT_MAX_JOBS=18
  NUM_CPUS=$(( $(nproc) - 2 ))

# Defaults here for **binary** linux builds so they can be changed in one place
@@ -157,6 +169,28 @@ if [[ "$(uname)" != Darwin ]]; then
EOL
fi

if [[ -z "${GITHUB_ACTIONS:-}" ]]; then
  cat >>"$envfile" <<EOL
export workdir="$workdir"
export MAC_PACKAGE_WORK_DIR="$workdir"
if [[ "$OSTYPE" == "msys" ]]; then
  export PYTORCH_ROOT="$workdir/p"
  export BUILDER_ROOT="$workdir/b"
else
  export PYTORCH_ROOT="$workdir/pytorch"
  export BUILDER_ROOT="$workdir/builder"
fi
export MINICONDA_ROOT="$workdir/miniconda"
export PYTORCH_FINAL_PACKAGE_DIR="$workdir/final_pkgs"

export CIRCLE_TAG="${CIRCLE_TAG:-}"
export CIRCLE_SHA1="$CIRCLE_SHA1"
export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-}"
export CIRCLE_BRANCH="$CIRCLE_BRANCH"
export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID"
EOL
fi

echo 'retry () {' >> "$envfile"
echo '  $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)' >> "$envfile"
echo '}' >> "$envfile"
29 .circleci/scripts/binary_run_in_docker.sh Executable file
@@ -0,0 +1,29 @@
#!/bin/bash

# This section is used in the binary_test and smoke_test jobs. It expects
# 'binary_populate_env' to have populated /home/circleci/project/env and it
# expects another section to populate /home/circleci/project/ci_test_script.sh
# with the code to run in the docker

# Expect all needed environment variables to be written to this file
source /home/circleci/project/env
echo "Running the following code in Docker"
cat /home/circleci/project/ci_test_script.sh
echo
echo
set -eux -o pipefail

# Expect actual code to be written to this file
chmod +x /home/circleci/project/ci_test_script.sh

VOLUME_MOUNTS="-v /home/circleci/project/:/circleci_stuff -v /home/circleci/project/final_pkgs:/final_pkgs -v ${PYTORCH_ROOT}:/pytorch -v ${BUILDER_ROOT}:/builder"
# Run the docker
if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
  export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --gpus all ${VOLUME_MOUNTS} -t -d "${DOCKER_IMAGE}")
else
  export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined ${VOLUME_MOUNTS} -t -d "${DOCKER_IMAGE}")
fi

# Execute the test script that was populated by an earlier section
export COMMAND='((echo "source /circleci_stuff/env && /circleci_stuff/ci_test_script.sh") | docker exec -i "$id" bash) 2>&1'
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
@@ -16,6 +16,11 @@ UPLOAD_BUCKET="s3://pytorch"
BACKUP_BUCKET="s3://pytorch-backup"
BUILD_NAME=${BUILD_NAME:-}

# this is temporary change to upload pypi-cudnn builds to separate folder
if [[ ${BUILD_NAME} == *with-pypi-cudnn* ]]; then
  UPLOAD_SUBFOLDER="${UPLOAD_SUBFOLDER}_pypi_cudnn"
fi

DRY_RUN=${DRY_RUN:-enabled}
# Don't actually do work unless explicit
ANACONDA="true anaconda"
@@ -25,10 +30,6 @@ if [[ "${DRY_RUN}" = "disabled" ]]; then
  AWS_S3_CP="aws s3 cp"
fi

if [[ "${USE_SPLIT_BUILD:-false}" == "true" ]]; then
  UPLOAD_SUBFOLDER="${UPLOAD_SUBFOLDER}_pypi_pkg"
fi

# Sleep 2 minutes between retries for conda upload
retry () {
  "$@" || (sleep 5m && "$@") || (sleep 5m && "$@") || (sleep 5m && "$@") || (sleep 5m && "$@")
@@ -22,7 +22,7 @@ done < <(find /var/lib/jenkins/.gradle -type f -print0)

# Patch pocketfft (as Android does not have aligned_alloc even if compiled with c++17
if [ -f ~/workspace/third_party/pocketfft/pocketfft_hdronly.h ]; then
  sed -i -e "s/__cplusplus >= 201703L/0/" ~/workspace/third_party/pocketfft/pocketfft_hdronly.h
  sed -i -e "s/#if __cplusplus >= 201703L/#if 0/" ~/workspace/third_party/pocketfft/pocketfft_hdronly.h
fi

export GRADLE_LOCAL_PROPERTIES=~/workspace/android/local.properties
@@ -40,7 +40,7 @@ if [[ "${BUILD_ENVIRONMENT}" == *-gradle-custom-build* ]]; then
  # Install torch & torchvision - used to download & dump used ops from test model.
  retry pip install torch torchvision --progress-bar off

  exec "$(dirname "${BASH_SOURCE[0]}")/../android/build_test_app_custom.sh" armeabi-v7a
  exec "$(dirname "${BASH_SOURCE[0]}")/../../android/build_test_app_custom.sh" armeabi-v7a
fi

# Run default build
111 .circleci/scripts/setup_ci_environment.sh Executable file
@@ -0,0 +1,111 @@
#!/usr/bin/env bash
set -ex -o pipefail

# Remove unnecessary sources
sudo rm -f /etc/apt/sources.list.d/google-chrome.list
sudo rm -f /etc/apt/heroku.list
sudo rm -f /etc/apt/openjdk-r-ubuntu-ppa-xenial.list
sudo rm -f /etc/apt/partner.list

# To increase the network reliability, let apt decide which mirror is best to use
sudo sed -i -e 's/http:\/\/.*archive/mirror:\/\/mirrors/' -e 's/\/ubuntu\//\/mirrors.txt/' /etc/apt/sources.list

retry () {
  $* || $* || $* || $* || $*
}

# Method adapted from here: https://askubuntu.com/questions/875213/apt-get-to-retry-downloading
# (with use of tee to avoid permissions problems)
# This is better than retrying the whole apt-get command
echo "APT::Acquire::Retries \"3\";" | sudo tee /etc/apt/apt.conf.d/80-retries

retry sudo apt-get update -qq
retry sudo apt-get -y install \
  moreutils \
  expect-dev

echo "== DOCKER VERSION =="
docker version

if ! command -v aws >/dev/null; then
  retry sudo pip3 -q install awscli==1.19.64
fi

if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
  DRIVER_FN="NVIDIA-Linux-x86_64-515.76.run"
  wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
  sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
  nvidia-smi

  # Taken directly from https://github.com/NVIDIA/nvidia-docker
  # Add the package repositories
  distribution=$(. /etc/os-release;echo "$ID$VERSION_ID")
  curl -s -L --retry 3 --retry-all-errors https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
  curl -s -L --retry 3 --retry-all-errors "https://nvidia.github.io/nvidia-docker/${distribution}/nvidia-docker.list" | sudo tee /etc/apt/sources.list.d/nvidia-docker.list

  retry sudo apt-get update -qq
  # Necessary to get the `--gpus` flag to function within docker
  retry sudo apt-get install -y nvidia-container-toolkit
  sudo systemctl restart docker
else
  # Explicitly remove nvidia docker apt repositories if not building for cuda
  sudo rm -rf /etc/apt/sources.list.d/nvidia-docker.list
fi

add_to_env_file() {
  local name=$1
  local value=$2
  case "$value" in
    *\ *)
      # BASH_ENV should be set by CircleCI
      echo "${name}='${value}'" >> "${BASH_ENV:-/tmp/env}"
      ;;
    *)
      echo "${name}=${value}" >> "${BASH_ENV:-/tmp/env}"
      ;;
  esac
}

add_to_env_file CI_MASTER "${CI_MASTER:-}"
add_to_env_file COMMIT_SOURCE "${CIRCLE_BRANCH:-}"
add_to_env_file BUILD_ENVIRONMENT "${BUILD_ENVIRONMENT}"
add_to_env_file CIRCLE_PULL_REQUEST "${CIRCLE_PULL_REQUEST}"


if [[ "${BUILD_ENVIRONMENT}" == *-build ]]; then
  add_to_env_file SCCACHE_BUCKET ossci-compiler-cache-circleci-v2

  SCCACHE_MAX_JOBS=$(( $(nproc) - 1 ))
  MEMORY_LIMIT_MAX_JOBS=8  # the "large" resource class on CircleCI has 32 CPU cores, if we use all of them we'll OOM
  MAX_JOBS=$(( ${SCCACHE_MAX_JOBS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${SCCACHE_MAX_JOBS} ))
  add_to_env_file MAX_JOBS "${MAX_JOBS}"

  if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
    add_to_env_file TORCH_CUDA_ARCH_LIST 5.2
  fi

  if [[ "${BUILD_ENVIRONMENT}" == *xla* ]]; then
    # This IAM user allows write access to S3 bucket for sccache & bazels3cache
    set +x
    add_to_env_file XLA_CLANG_CACHE_S3_BUCKET_NAME "${XLA_CLANG_CACHE_S3_BUCKET_NAME:-}"
    add_to_env_file AWS_ACCESS_KEY_ID "${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V2:-}"
    add_to_env_file AWS_SECRET_ACCESS_KEY "${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V2:-}"
    set -x
  else
    # This IAM user allows write access to S3 bucket for sccache
    set +x
    add_to_env_file XLA_CLANG_CACHE_S3_BUCKET_NAME "${XLA_CLANG_CACHE_S3_BUCKET_NAME:-}"
    add_to_env_file AWS_ACCESS_KEY_ID "${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}"
    add_to_env_file AWS_SECRET_ACCESS_KEY "${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}"
    set -x
  fi
fi

# This IAM user only allows read-write access to ECR
set +x
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_WRITE_V4:-}
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_WRITE_V4:-}
export AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
export AWS_REGION=us-east-1
aws ecr get-login-password --region $AWS_REGION|docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
set -x
Some files were not shown because too many files have changed in this diff.