Compare commits


1 Commit

Author SHA1 Message Date
89fb2567e7 Add annotation to assertion nodes 2025-11-05 17:53:13 -08:00
1024 changed files with 10750 additions and 30444 deletions


@@ -36,7 +36,11 @@ case ${DOCKER_TAG_PREFIX} in
     ;;
   rocm*)
     BASE_TARGET=rocm
-    PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201;gfx950;gfx1150;gfx1151"
+    PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
+    # add gfx950, gfx115x conditionally starting in ROCm 7.0
+    if [[ "$ROCM_VERSION" == *"7.0"* ]]; then
+      PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
+    fi
     EXTRA_BUILD_ARGS="${EXTRA_BUILD_ARGS} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}"
     ;;
   *)
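The same ROCm 7.0 gating recurs in two more build scripts later in this compare. As a standalone illustration, a minimal sketch of the pattern (assuming ROCM_VERSION is exported by the surrounding build script, as it is here):

#!/usr/bin/env bash
# Architectures that are built for every supported ROCm release.
PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
# gfx950 and gfx115x are only appended when building against ROCm 7.0.
if [[ "${ROCM_VERSION:-}" == *"7.0"* ]]; then
    PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
fi
echo "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}"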


@@ -168,18 +168,6 @@ case "$tag" in
     VISION=yes
     TRITON=yes
     ;;
-  pytorch-linux-jammy-py3.11-clang12)
-    ANACONDA_PYTHON_VERSION=3.11
-    CLANG_VERSION=12
-    VISION=no
-    TRITON=no
-    ;;
-  pytorch-linux-jammy-py3.12-clang12)
-    ANACONDA_PYTHON_VERSION=3.12
-    CLANG_VERSION=12
-    VISION=no
-    TRITON=no
-    ;;
   pytorch-linux-jammy-rocm-n-py3 | pytorch-linux-jammy-rocm-n-py3-benchmarks | pytorch-linux-noble-rocm-n-py3)
     if [[ $tag =~ "jammy" ]]; then
       ANACONDA_PYTHON_VERSION=3.10
@@ -207,9 +195,9 @@ case "$tag" in
     NINJA_VERSION=1.9.0
     TRITON=yes
     ;;
-  pytorch-linux-noble-xpu-n-py3 | pytorch-linux-noble-xpu-n-py3-inductor-benchmarks)
+  pytorch-linux-jammy-xpu-n-py3 | pytorch-linux-jammy-xpu-n-py3-inductor-benchmarks)
     ANACONDA_PYTHON_VERSION=3.10
-    GCC_VERSION=13
+    GCC_VERSION=11
     VISION=yes
     XPU_VERSION=2025.2
     NINJA_VERSION=1.9.0
@@ -260,12 +248,6 @@ case "$tag" in
     HALIDE=yes
     TRITON=yes
     ;;
-  pytorch-linux-jammy-cuda12.8-py3.12-pallas)
-    CUDA_VERSION=12.8.1
-    ANACONDA_PYTHON_VERSION=3.12
-    GCC_VERSION=11
-    PALLAS=yes
-    ;;
   pytorch-linux-jammy-py3.12-triton-cpu)
     CUDA_VERSION=12.6
     ANACONDA_PYTHON_VERSION=3.12
@@ -279,9 +261,9 @@ case "$tag" in
     PYTHON_VERSION=3.10
     CUDA_VERSION=12.8.1
     ;;
-  pytorch-linux-jammy-aarch64-py3.10-gcc13)
+  pytorch-linux-jammy-aarch64-py3.10-gcc11)
     ANACONDA_PYTHON_VERSION=3.10
-    GCC_VERSION=13
+    GCC_VERSION=11
     ACL=yes
     VISION=yes
     OPENBLAS=yes
@@ -299,9 +281,9 @@ case "$tag" in
     # from pytorch/llvm:9.0.1 is x86 specific
     SKIP_LLVM_SRC_BUILD_INSTALL=yes
     ;;
-  pytorch-linux-jammy-aarch64-py3.10-gcc13-inductor-benchmarks)
+  pytorch-linux-jammy-aarch64-py3.10-gcc11-inductor-benchmarks)
     ANACONDA_PYTHON_VERSION=3.10
-    GCC_VERSION=13
+    GCC_VERSION=11
     ACL=yes
     VISION=yes
     OPENBLAS=yes
@@ -387,7 +369,6 @@ docker build \
   --build-arg "INDUCTOR_BENCHMARKS=${INDUCTOR_BENCHMARKS}" \
   --build-arg "EXECUTORCH=${EXECUTORCH}" \
   --build-arg "HALIDE=${HALIDE}" \
-  --build-arg "PALLAS=${PALLAS}" \
   --build-arg "XPU_VERSION=${XPU_VERSION}" \
   --build-arg "UNINSTALL_DILL=${UNINSTALL_DILL}" \
   --build-arg "ACL=${ACL:-}" \


@@ -1 +0,0 @@
-0.8.0


@@ -7,11 +7,11 @@ if [ -n "$GCC_VERSION" ]; then
   # Need the official toolchain repo to get alternate packages
   add-apt-repository ppa:ubuntu-toolchain-r/test
   apt-get update
-  apt-get install -y g++-$GCC_VERSION gfortran-$GCC_VERSION
+  apt-get install -y g++-$GCC_VERSION
   update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-"$GCC_VERSION" 50
   update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-"$GCC_VERSION" 50
   update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-"$GCC_VERSION" 50
-  update-alternatives --install /usr/bin/gfortran gfortran /usr/bin/gfortran-"$GCC_VERSION" 50
   # Cleanup package manager
   apt-get autoclean && apt-get clean


@@ -1,40 +0,0 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
# Get the pinned JAX version (same for all CUDA versions)
JAX_VERSION=$(get_pinned_commit /ci_commit_pins/jax)
function install_jax_12() {
echo "Installing JAX ${JAX_VERSION} with CUDA 12 support"
pip_install "jax[cuda12]==${JAX_VERSION}" -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
# Verify installation
python -c "import jax" # check for errors
echo "JAX ${JAX_VERSION} installation completed successfully for CUDA 12"
}
function install_jax_13() {
echo "Installing JAX ${JAX_VERSION} with CUDA 13 support"
pip_install "jax[cuda13]==${JAX_VERSION}" -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
# Verify installation
python -c "import jax" # check for errors
echo "JAX ${JAX_VERSION} installation completed successfully for CUDA 13"
}
# idiomatic parameter and option handling in sh
while test $# -gt 0
do
case "$1" in
12.4|12.6|12.6.*|12.8|12.8.*|12.9|12.9.*) install_jax_12;
;;
13.0|13.0.*) install_jax_13;
;;
*) echo "bad argument $1"; exit 1
;;
esac
shift
done


@@ -1,56 +0,0 @@
#!/bin/bash
# Script used only in CD pipeline
set -ex
# install dependencies
dnf -y install gmp-devel libmpc-devel texinfo flex bison
cd /usr/local/src
# fetch source for gcc 13
git clone --depth 1 --single-branch -b releases/gcc-13.3.0 https://github.com/gcc-mirror/gcc.git gcc-13.3.0
mkdir -p gcc-13.3.0/build-gomp
cd gcc-13.3.0/build-gomp
# configure gcc build
# I got these flags by:
# 1. downloading the source rpm for gcc-11 on AlmaLinux 8 container
# dnf install -y dnf-plugins-core rpmdevtools
# dnf download --source libgomp
# 2. extracting the gcc.spec from the source.
# rpmdev-extract gcc-xx.src.rpm
# 3. extracting optflags and ld_flags from gcc.spec:
# rpm --eval '%{optflags}'
# rpm --eval '%{build_ldflags}'
#
# I had to remove the following flags because they didn't compile for this version of libgomp:
# -Werror=format-security
# -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1
# -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1
#
# I added -march=armv8-a -mtune=generic to make them explicit. I don't think they're strictly needed.
OPT_FLAGS='-O2 -march=armv8-a -mtune=generic'\
' -fexceptions -g -grecord-gcc-switches -pipe -Wall'\
' -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS'\
' -fstack-protector-strong -fasynchronous-unwind-tables'\
' -fstack-clash-protection'
LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now'
CFLAGS="$OPT_FLAGS" \
CXXFLAGS="$OPT_FLAGS" \
LDFLAGS="$LDFLAGS" \
../configure \
--prefix=/usr \
--libdir=/usr/lib64 \
--enable-languages=c,c++ \
--disable-multilib \
--disable-bootstrap \
--enable-libgomp
# only build libgomp
make -j$(nproc) all-target-libgomp
make install-target-libgomp
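The comments above record where the configure flags came from; collected in one place, a minimal sketch of reproducing them (assuming an AlmaLinux 8 container, as the comments do):

# Packaging tools needed to download and unpack the source RPM
dnf install -y dnf-plugins-core rpmdevtools
# Fetch the source RPM that provides libgomp (part of gcc)
dnf download --source libgomp
# Unpack it to obtain gcc.spec
rpmdev-extract gcc-*.src.rpm
# Print the distribution's default compile and link flags
rpm --eval '%{optflags}'
rpm --eval '%{build_ldflags}'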


@@ -9,7 +9,7 @@ set -xe
 function install_ubuntu() {
     . /etc/os-release
-    if [[ ! " jammy noble " =~ " ${VERSION_CODENAME} " ]]; then
+    if [[ ! " jammy " =~ " ${VERSION_CODENAME} " ]]; then
         echo "Ubuntu version ${VERSION_CODENAME} not supported"
         exit
     fi
@@ -35,24 +35,25 @@ function install_ubuntu() {
     # The xpu-smi packages
     apt-get install -y flex bison xpu-smi
-    # Compute and Media Runtimes
-    if [[ " ${VERSION_CODENAME} " =~ " noble " ]]; then
+    if [[ "${XPU_DRIVER_TYPE,,}" == "lts" ]]; then
+        # Compute and Media Runtimes
         apt-get install -y \
-            intel-opencl-icd libze-intel-gpu1 libze1 \
-            intel-media-va-driver-non-free libmfx-gen1 libvpl2 \
-            libegl-mesa0 libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
+            intel-opencl-icd intel-level-zero-gpu level-zero \
+            intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 \
+            libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
             libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
-            mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo intel-ocloc
-    else # jammy
+            mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo
+        # Development Packages
+        apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev level-zero-dev
+    else # rolling driver
         apt-get install -y \
             intel-opencl-icd libze-intel-gpu1 libze1 \
             intel-media-va-driver-non-free libmfx-gen1 libvpl2 \
             libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
             libglapi-mesa libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
             mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo intel-ocloc
+        apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev libze-dev
     fi
-    # Development Packages
-    apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev libze-dev
     # Install Intel Support Packages
     apt-get install -y ${XPU_PACKAGES}
@@ -65,7 +66,7 @@ function install_ubuntu() {
 function install_rhel() {
     . /etc/os-release
     if [[ "${ID}" == "rhel" ]]; then
-        if [[ ! " 8.8 8.10 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then
+        if [[ ! " 8.8 8.9 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then
             echo "RHEL version ${VERSION_ID} not supported"
             exit
         fi
@@ -146,7 +147,7 @@ function install_sles() {
 XPU_DRIVER_VERSION=""
 if [[ "${XPU_DRIVER_TYPE,,}" == "lts" ]]; then
     # Use GPU driver LTS releases
-    XPU_DRIVER_VERSION="/lts/2523"
+    XPU_DRIVER_VERSION="/lts/2350"
 fi
 # Default use Intel® oneAPI Deep Learning Essentials 2025.1


@@ -49,7 +49,11 @@ case ${DOCKER_TAG_PREFIX} in
     fi
     BASE_TARGET=rocm
     GPU_IMAGE=rocm/dev-ubuntu-22.04:${GPU_ARCH_VERSION}-complete
-    PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201;gfx950;gfx1150;gfx1151"
+    PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
+    # add gfx950, gfx115x conditionally starting in ROCm 7.0
+    if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
+      PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
+    fi
     DOCKER_GPU_BUILD_ARG="--build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg ROCM_VERSION=${GPU_ARCH_VERSION}"
     ;;
   *)


@@ -50,10 +50,6 @@ RUN rm install_ninja.sh
 ENV PATH=/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/bin:$PATH
 ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH
-# Build a newer version of libgomp than that supported in in Almalinux 8.
-COPY ./common/install_libgomp.sh install_libgomp.sh
-RUN bash ./install_libgomp.sh && rm install_libgomp.sh
 # git236+ would refuse to run git commands in repos owned by other users
 # Which causes version check to fail, as pytorch repo is bind-mounted into the image
 # Override this behaviour by treating every folder as safe


@@ -87,7 +87,11 @@ case ${image} in
     MANY_LINUX_VERSION="2_28"
     DEVTOOLSET_VERSION="11"
     GPU_IMAGE=rocm/dev-almalinux-8:${GPU_ARCH_VERSION}-complete
-    PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201;gfx950;gfx1150;gfx1151"
+    PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
+    # add gfx950, gfx115x conditionally starting in ROCm 7.0
+    if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
+      PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
+    fi
     DOCKER_GPU_BUILD_ARG="--build-arg ROCM_VERSION=${GPU_ARCH_VERSION} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}"
     ;;
   manylinux2_28-builder:xpu)
manylinux2_28-builder:xpu) manylinux2_28-builder:xpu)


@@ -1,11 +1,15 @@
-sphinx==7.2.6
+sphinx==5.3.0
 #Description: This is used to generate PyTorch docs
-#Pinned versions: 7.2.6
+#Pinned versions: 5.3.0
-pytorch_sphinx_theme2==0.2.0
-#Description: This is needed to generate PyTorch docs
-#Pinned versions: 0.2.0
+standard-imghdr==3.13.0; python_version >= "3.13"
+#Description: This is needed by Sphinx, so it needs to be added here.
+# The reasons are as follows:
+# 1) This module has been removed from the Python standard library since Python 3.13(https://peps.python.org/pep-0594/#imghdr);
+# 2) The current version of Sphinx (5.3.0) is not compatible with Python 3.13.
+# Once Sphinx is upgraded to a version compatible with Python 3.13 or later, we can remove this dependency.
+-e git+https://github.com/pytorch/pytorch_sphinx_theme.git@71e55749be14ceb56e7f8211a9fb649866b87ad4#egg=pytorch_sphinx_theme2
 # TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering
 # but it doesn't seem to work and hangs around idly. The initial thought that it is probably
 # something related to Docker setup. We can investigate this later.
@@ -32,17 +36,17 @@ tensorboard==2.18.0 ; python_version >= "3.13"
 #Description: This is used to generate PyTorch docs
 #Pinned versions: 2.13.0
-breathe==4.36.0
+breathe==4.34.0
 #Description: This is used to generate PyTorch C++ docs
-#Pinned versions: 4.36.0
+#Pinned versions: 4.34.0
-exhale==0.3.7
+exhale==0.2.3
 #Description: This is used to generate PyTorch C++ docs
-#Pinned versions: 0.3.7
+#Pinned versions: 0.2.3
-docutils==0.20
+docutils==0.16
 #Description: This is used to generate PyTorch C++ docs
-#Pinned versions: 0.20
+#Pinned versions: 0.16
 bs4==0.0.1
 #Description: This is used to generate PyTorch C++ docs
@@ -52,13 +56,13 @@ IPython==8.12.0
 #Description: This is used to generate PyTorch functorch docs
 #Pinned versions: 8.12.0
-myst-nb==1.3.0
+myst-nb==0.17.2
 #Description: This is used to generate PyTorch functorch and torch.compile docs.
-#Pinned versions: 1.3.0
+#Pinned versions: 0.17.2
 # The following are required to build torch.distributed.elastic.rendezvous.etcd* docs
 python-etcd==0.4.5
 sphinx-copybutton==0.5.0
-sphinx-design==0.6.1
+sphinx-design==0.4.0
 sphinxcontrib-mermaid==1.0.0
-myst-parser==4.0.1
+myst-parser==0.18.1


@@ -143,15 +143,6 @@ COPY ci_commit_pins/halide.txt halide.txt
 RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi
 RUN rm install_halide.sh common_utils.sh halide.txt
-ARG PALLAS
-ARG CUDA_VERSION
-# Install JAX with CUDA support (for Pallas)
-COPY ./common/install_jax.sh install_jax.sh
-COPY ./common/common_utils.sh common_utils.sh
-COPY ./ci_commit_pins/jax.txt /ci_commit_pins/jax.txt
-RUN if [ -n "${PALLAS}" ]; then bash ./install_jax.sh ${CUDA_VERSION}; fi
-RUN rm -f install_jax.sh common_utils.sh /ci_commit_pins/jax.txt
 ARG ONNX
 # Install ONNX dependencies
 COPY ./common/install_onnx.sh ./common/common_utils.sh ./


@@ -8,11 +8,9 @@ from abc import ABC, abstractmethod
 try:
-    from collections.abc import Callable  # Python 3.11+
-    from typing import Any, Required, TypedDict
+    from typing import Any, Callable, Required, TypedDict  # Python 3.11+
 except ImportError:
-    from collections.abc import Callable
-    from typing import Any, TypedDict
+    from typing import Any, Callable, TypedDict
     from typing_extensions import Required  # Fallback for Python <3.11


@@ -30,6 +30,7 @@ into a tarball, with the following structure:
 More specifically, `build_magma.sh` copies over the relevant files from the `package_files` directory depending on the ROCm version.
 Outputted binaries should be in the `output` folder.
 ## Pushing
 Packages can be uploaded to an S3 bucket using:


@@ -168,16 +168,14 @@ if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
   # shellcheck disable=SC1091
   source /opt/intel/oneapi/compiler/latest/env/vars.sh
   # shellcheck disable=SC1091
-  source /opt/intel/oneapi/umf/latest/env/vars.sh
-  # shellcheck disable=SC1091
   source /opt/intel/oneapi/ccl/latest/env/vars.sh
   # shellcheck disable=SC1091
   source /opt/intel/oneapi/mpi/latest/env/vars.sh
-  # shellcheck disable=SC1091
-  source /opt/intel/oneapi/pti/latest/env/vars.sh
   # Enable XCCL build
   export USE_XCCL=1
   export USE_MPI=0
+  # XPU kineto feature dependencies are not fully ready, disable kineto build as temp WA
+  export USE_KINETO=0
   export TORCH_XPU_ARCH_LIST=pvc
 fi


@@ -96,6 +96,7 @@ function pip_build_and_install() {
     python3 -m pip wheel \
       --no-build-isolation \
       --no-deps \
+      --no-use-pep517 \
      -w "${wheel_dir}" \
      "${build_target}"
   fi
@@ -307,28 +308,6 @@ function install_torchao() {
   pip_build_and_install "git+https://github.com/pytorch/ao.git@${commit}" dist/ao
 }
-function install_flash_attn_cute() {
-  echo "Installing FlashAttention CuTe from GitHub..."
-  # Grab latest main til we have a pinned commit
-  local flash_attn_commit
-  flash_attn_commit=$(git ls-remote https://github.com/Dao-AILab/flash-attention.git HEAD | cut -f1)
-  # Clone the repo to a temporary directory
-  rm -rf flash-attention-build
-  git clone --depth 1 --recursive https://github.com/Dao-AILab/flash-attention.git flash-attention-build
-  pushd flash-attention-build
-  git checkout "${flash_attn_commit}"
-  # Install only the 'cute' sub-directory
-  pip_install -e flash_attn/cute/
-  popd
-  # remove the local repo
-  rm -rf flash-attention-build
-  echo "FlashAttention CuTe installation complete."
-}
 function print_sccache_stats() {
   echo 'PyTorch Build Statistics'
   sccache --show-stats


@@ -89,41 +89,23 @@ if [ "$is_main_doc" = true ]; then
   make coverage
   # Now we have the coverage report, we need to make sure it is empty.
-  # Sphinx 7.2.6+ format: python.txt contains a statistics table with a TOTAL row
-  # showing the undocumented count in the third column.
-  # Example: | TOTAL | 99.83% | 2 |
+  # Count the number of lines in the file and turn that number into a variable
+  # $lines. The `cut -f1 ...` is to only parse the number, not the filename
+  # Skip the report header by subtracting 2: the header will be output even if
+  # there are no undocumented items.
   #
   # Also: see docs/source/conf.py for "coverage_ignore*" items, which should
   # be documented then removed from there.
-  # Extract undocumented count from TOTAL row in Sphinx 7.2.6 statistics table
-  # The table format is: | Module | Coverage | Undocumented |
-  # Extract the third column (undocumented count) from the TOTAL row
-  undocumented=$(grep "| TOTAL" build/coverage/python.txt | awk -F'|' '{print $4}' | tr -d ' ')
-  if [ -z "$undocumented" ] || ! [[ "$undocumented" =~ ^[0-9]+$ ]]; then
+  lines=$(wc -l build/coverage/python.txt 2>/dev/null |cut -f1 -d' ')
+  undocumented=$((lines - 2))
+  if [ $undocumented -lt 0 ]; then
     echo coverage output not found
     exit 1
-  elif [ "$undocumented" -gt 0 ]; then
-    set +x  # Disable command echoing for cleaner output
-    echo ""
-    echo "====================="
-    echo "UNDOCUMENTED OBJECTS:"
-    echo "====================="
-    echo ""
-    # Find the line number of the TOTAL row and print only what comes after it
-    total_line=$(grep -n "| TOTAL" build/coverage/python.txt | cut -d: -f1)
-    if [ -n "$total_line" ]; then
-      # Print only the detailed list (skip the statistics table)
-      tail -n +$((total_line + 2)) build/coverage/python.txt
-    else
-      # Fallback to showing entire file if TOTAL line not found
-      cat build/coverage/python.txt
-    fi
-    echo ""
+  elif [ $undocumented -gt 0 ]; then
+    echo undocumented objects found:
+    cat build/coverage/python.txt
     echo "Make sure you've updated relevant .rsts in docs/source!"
-    echo "You can reproduce locally by running 'cd docs && make coverage && tail -n +\$((grep -n \"| TOTAL\" build/coverage/python.txt | cut -d: -f1) + 2)) build/coverage/python.txt'"
-    set -x  # Re-enable command echoing
+    echo "You can reproduce locally by running 'cd docs && make coverage && cat build/coverage/python.txt'"
     exit 1
   fi
 else
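The removed (left-hand) branch parses Sphinx's coverage statistics table rather than counting lines. As a standalone illustration, a minimal sketch of that extraction (assuming build/coverage/python.txt contains a "| TOTAL | ... | <undocumented> |" row, as the removed comments describe):

# The third pipe-delimited column of the TOTAL row is the undocumented count.
undocumented=$(grep "| TOTAL" build/coverage/python.txt | awk -F'|' '{print $4}' | tr -d ' ')
if ! [[ "$undocumented" =~ ^[0-9]+$ ]]; then
    echo "coverage output not found"
elif [ "$undocumented" -gt 0 ]; then
    # The detailed list of undocumented objects starts two lines after the TOTAL row.
    total_line=$(grep -n "| TOTAL" build/coverage/python.txt | cut -d: -f1)
    tail -n +$((total_line + 2)) build/coverage/python.txt
fi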


@@ -100,337 +100,6 @@ def check_lib_statically_linked_libstdc_cxx_abi_symbols(lib: str) -> None:
     )
def _compile_and_extract_symbols(
cpp_content: str, compile_flags: list[str], exclude_list: list[str] | None = None
) -> list[str]:
"""
Helper to compile a C++ file and extract all symbols.
Args:
cpp_content: C++ source code to compile
compile_flags: Compilation flags
exclude_list: List of symbol names to exclude. Defaults to ["main"].
Returns:
List of all symbols found in the object file (excluding those in exclude_list).
"""
import subprocess
import tempfile
if exclude_list is None:
exclude_list = ["main"]
with tempfile.TemporaryDirectory() as tmpdir:
tmppath = Path(tmpdir)
cpp_file = tmppath / "test.cpp"
obj_file = tmppath / "test.o"
cpp_file.write_text(cpp_content)
result = subprocess.run(
compile_flags + [str(cpp_file), "-o", str(obj_file)],
capture_output=True,
text=True,
timeout=60,
)
if result.returncode != 0:
raise RuntimeError(f"Compilation failed: {result.stderr}")
symbols = get_symbols(str(obj_file))
# Return all symbol names, excluding those in the exclude list
return [name for _addr, _stype, name in symbols if name not in exclude_list]
def check_stable_only_symbols(install_root: Path) -> None:
"""
Test TORCH_STABLE_ONLY and TORCH_TARGET_VERSION by compiling test code and comparing symbol counts.
This approach tests:
1. WITHOUT macros -> many torch symbols exposed
2. WITH TORCH_STABLE_ONLY -> zero torch symbols (all hidden)
3. WITH TORCH_TARGET_VERSION -> zero torch symbols (all hidden)
4. WITH both macros -> zero torch symbols (all hidden)
"""
include_dir = install_root / "include"
assert include_dir.exists(), f"Expected {include_dir} to be present"
test_cpp_content = """
// Main torch C++ API headers
#include <torch/torch.h>
#include <torch/all.h>
// ATen tensor library
#include <ATen/ATen.h>
// Core c10 headers (commonly used)
#include <c10/core/Device.h>
#include <c10/core/DeviceType.h>
#include <c10/core/ScalarType.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Optional.h>
int main() { return 0; }
"""
base_compile_flags = [
"g++",
"-std=c++17",
f"-I{include_dir}",
f"-I{include_dir}/torch/csrc/api/include",
"-c", # Compile only, don't link
]
# Compile WITHOUT any macros
symbols_without = _compile_and_extract_symbols(
cpp_content=test_cpp_content,
compile_flags=base_compile_flags,
)
# We expect constexpr symbols, inline functions used by other headers etc.
# to produce symbols
num_symbols_without = len(symbols_without)
print(f"Found {num_symbols_without} symbols without any macros defined")
assert num_symbols_without != 0, (
"Expected a non-zero number of symbols without any macros"
)
# Compile WITH TORCH_STABLE_ONLY (expect 0 symbols)
compile_flags_with_stable_only = base_compile_flags + ["-DTORCH_STABLE_ONLY"]
symbols_with_stable_only = _compile_and_extract_symbols(
cpp_content=test_cpp_content,
compile_flags=compile_flags_with_stable_only,
)
num_symbols_with_stable_only = len(symbols_with_stable_only)
assert num_symbols_with_stable_only == 0, (
f"Expected no symbols with TORCH_STABLE_ONLY macro, but found {num_symbols_with_stable_only}"
)
# Compile WITH TORCH_TARGET_VERSION (expect 0 symbols)
compile_flags_with_target_version = base_compile_flags + [
"-DTORCH_TARGET_VERSION=1"
]
symbols_with_target_version = _compile_and_extract_symbols(
cpp_content=test_cpp_content,
compile_flags=compile_flags_with_target_version,
)
num_symbols_with_target_version = len(symbols_with_target_version)
assert num_symbols_with_target_version == 0, (
f"Expected no symbols with TORCH_TARGET_VERSION macro, but found {num_symbols_with_target_version}"
)
# Compile WITH both macros (expect 0 symbols)
compile_flags_with_both = base_compile_flags + [
"-DTORCH_STABLE_ONLY",
"-DTORCH_TARGET_VERSION=1",
]
symbols_with_both = _compile_and_extract_symbols(
cpp_content=test_cpp_content,
compile_flags=compile_flags_with_both,
)
num_symbols_with_both = len(symbols_with_both)
assert num_symbols_with_both == 0, (
f"Expected no symbols with both macros, but found {num_symbols_with_both}"
)
def check_stable_api_symbols(install_root: Path) -> None:
"""
Test that stable API headers still expose symbols with TORCH_STABLE_ONLY.
The torch/csrc/stable/c/shim.h header is tested in check_stable_c_shim_symbols
"""
include_dir = install_root / "include"
assert include_dir.exists(), f"Expected {include_dir} to be present"
stable_dir = include_dir / "torch" / "csrc" / "stable"
assert stable_dir.exists(), f"Expected {stable_dir} to be present"
stable_headers = list(stable_dir.rglob("*.h"))
if not stable_headers:
raise RuntimeError("Could not find any stable headers")
includes = []
for header in stable_headers:
rel_path = header.relative_to(include_dir)
includes.append(f"#include <{rel_path.as_posix()}>")
includes_str = "\n".join(includes)
test_stable_content = f"""
{includes_str}
int main() {{ return 0; }}
"""
compile_flags = [
"g++",
"-std=c++17",
f"-I{include_dir}",
f"-I{include_dir}/torch/csrc/api/include",
"-c",
"-DTORCH_STABLE_ONLY",
]
symbols_stable = _compile_and_extract_symbols(
cpp_content=test_stable_content,
compile_flags=compile_flags,
)
num_symbols_stable = len(symbols_stable)
print(f"Found {num_symbols_stable} symbols in torch/csrc/stable")
assert num_symbols_stable > 0, (
f"Expected stable headers to expose symbols with TORCH_STABLE_ONLY, "
f"but found {num_symbols_stable} symbols"
)
def check_headeronly_symbols(install_root: Path) -> None:
"""
Test that header-only utility headers still expose symbols with TORCH_STABLE_ONLY.
"""
include_dir = install_root / "include"
assert include_dir.exists(), f"Expected {include_dir} to be present"
# Find all headers in torch/headeronly
headeronly_dir = include_dir / "torch" / "headeronly"
assert headeronly_dir.exists(), f"Expected {headeronly_dir} to be present"
headeronly_headers = list(headeronly_dir.rglob("*.h"))
if not headeronly_headers:
raise RuntimeError("Could not find any headeronly headers")
# Filter out platform-specific headers that may not compile everywhere
platform_specific_keywords = [
"cpu/vec",
]
filtered_headers = []
for header in headeronly_headers:
rel_path = header.relative_to(include_dir).as_posix()
if not any(
keyword in rel_path.lower() for keyword in platform_specific_keywords
):
filtered_headers.append(header)
includes = []
for header in filtered_headers:
rel_path = header.relative_to(include_dir)
includes.append(f"#include <{rel_path.as_posix()}>")
includes_str = "\n".join(includes)
test_headeronly_content = f"""
{includes_str}
int main() {{ return 0; }}
"""
compile_flags = [
"g++",
"-std=c++17",
f"-I{include_dir}",
f"-I{include_dir}/torch/csrc/api/include",
"-c",
"-DTORCH_STABLE_ONLY",
]
symbols_headeronly = _compile_and_extract_symbols(
cpp_content=test_headeronly_content,
compile_flags=compile_flags,
)
num_symbols_headeronly = len(symbols_headeronly)
print(f"Found {num_symbols_headeronly} symbols in torch/headeronly")
assert num_symbols_headeronly > 0, (
f"Expected headeronly headers to expose symbols with TORCH_STABLE_ONLY, "
f"but found {num_symbols_headeronly} symbols"
)
def check_aoti_shim_symbols(install_root: Path) -> None:
"""
Test that AOTI shim headers still expose symbols with TORCH_STABLE_ONLY.
"""
include_dir = install_root / "include"
assert include_dir.exists(), f"Expected {include_dir} to be present"
# There are no constexpr symbols etc., so we need to actually use functions
# so that some symbols are found.
test_shim_content = """
#include <torch/csrc/inductor/aoti_torch/c/shim.h>
int main() {
int32_t (*fp1)() = &aoti_torch_device_type_cpu;
int32_t (*fp2)() = &aoti_torch_dtype_float32;
(void)fp1; (void)fp2;
return 0;
}
"""
compile_flags = [
"g++",
"-std=c++17",
f"-I{include_dir}",
f"-I{include_dir}/torch/csrc/api/include",
"-c",
"-DTORCH_STABLE_ONLY",
]
symbols_shim = _compile_and_extract_symbols(
cpp_content=test_shim_content,
compile_flags=compile_flags,
)
num_symbols_shim = len(symbols_shim)
assert num_symbols_shim > 0, (
f"Expected shim headers to expose symbols with TORCH_STABLE_ONLY, "
f"but found {num_symbols_shim} symbols"
)
def check_stable_c_shim_symbols(install_root: Path) -> None:
"""
Test that stable C shim headers still expose symbols with TORCH_STABLE_ONLY.
"""
include_dir = install_root / "include"
assert include_dir.exists(), f"Expected {include_dir} to be present"
# Check if the stable C shim exists
stable_shim = include_dir / "torch" / "csrc" / "stable" / "c" / "shim.h"
if not stable_shim.exists():
raise RuntimeError("Could not find stable c shim")
# There are no constexpr symbols etc., so we need to actually use functions
# so that some symbols are found.
test_stable_shim_content = """
#include <torch/csrc/stable/c/shim.h>
int main() {
// Reference stable C API functions to create undefined symbols
AOTITorchError (*fp1)(const char*, uint32_t*, int32_t*) = &torch_parse_device_string;
AOTITorchError (*fp2)(uint32_t*) = &torch_get_num_threads;
(void)fp1; (void)fp2;
return 0;
}
"""
compile_flags = [
"g++",
"-std=c++17",
f"-I{include_dir}",
f"-I{include_dir}/torch/csrc/api/include",
"-c",
"-DTORCH_STABLE_ONLY",
]
symbols_stable_shim = _compile_and_extract_symbols(
cpp_content=test_stable_shim_content,
compile_flags=compile_flags,
)
num_symbols_stable_shim = len(symbols_stable_shim)
assert num_symbols_stable_shim > 0, (
f"Expected stable C shim headers to expose symbols with TORCH_STABLE_ONLY, "
f"but found {num_symbols_stable_shim} symbols"
)
 def check_lib_symbols_for_abi_correctness(lib: str) -> None:
     print(f"lib: {lib}")
     cxx11_symbols = grep_symbols(lib, LIBTORCH_CXX11_PATTERNS)
@@ -460,13 +129,6 @@ def main() -> None:
     check_lib_symbols_for_abi_correctness(libtorch_cpu_path)
     check_lib_statically_linked_libstdc_cxx_abi_symbols(libtorch_cpu_path)
-    # Check symbols when TORCH_STABLE_ONLY is defined
-    check_stable_only_symbols(install_root)
-    check_stable_api_symbols(install_root)
-    check_headeronly_symbols(install_root)
-    check_aoti_shim_symbols(install_root)
-    check_stable_c_shim_symbols(install_root)
 if __name__ == "__main__":
     main()


@@ -353,17 +353,6 @@ def test_linalg(device="cpu") -> None:
     torch.linalg.svd(A)
-def test_sdpa(device="cpu", dtype=torch.float16) -> None:
-    """Regression test for https://github.com/pytorch/pytorch/issues/167602
-    Without nvrtc_builtins on CuDNN-9.13 on CUDA-13 fails with ` No valid execution plans built.`
-    """
-    print(f"Testing SDPA on {device} using type {dtype}")
-    k, q, v = torch.rand(3, 1, 16, 77, 64, dtype=dtype, device=device).unbind(0)
-    attn = torch.rand(1, 1, 77, 77, dtype=dtype, device=device)
-    rc = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn)
-    assert rc.isnan().any().item() is False
 def smoke_test_compile(device: str = "cpu") -> None:
     supported_dtypes = [torch.float16, torch.float32, torch.float64]
@@ -500,12 +489,10 @@ def main() -> None:
     smoke_test_conv2d()
     test_linalg()
     test_numpy()
-    test_sdpa()
     if is_cuda_system:
         test_linalg("cuda")
         test_cuda_gds_errors_captured()
-        test_sdpa("cuda")
     if options.package == "all":
         smoke_test_modules()


@@ -208,8 +208,6 @@ if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
   source /opt/intel/oneapi/ccl/latest/env/vars.sh
   # shellcheck disable=SC1091
   source /opt/intel/oneapi/mpi/latest/env/vars.sh
-  # shellcheck disable=SC1091
-  source /opt/intel/oneapi/pti/latest/env/vars.sh
   # Check XPU status before testing
   timeout 30 xpu-smi discovery || true
 fi
@@ -339,23 +337,13 @@ test_python() {
 test_python_smoke() {
   # Smoke tests for H100/B200
-  time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 inductor/test_max_autotune inductor/test_cutedsl_grouped_mm $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
+  time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 inductor/test_max_autotune $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
   assert_git_not_dirty
 }
 test_python_smoke_b200() {
-  # Targeted smoke tests for B200 including FlashAttention CuTe coverage
-  install_flash_attn_cute
-  time python test/run_test.py \
-    --include \
-    test_matmul_cuda \
-    test_scaled_matmul_cuda \
-    inductor/test_fp8 \
-    nn/attention/test_fa4 \
-    nn/attention/test_open_registry \
-    inductor/test_flex_flash \
-    $PYTHON_TEST_EXTRA_OPTION \
-    --upload-artifacts-while-running
+  # Targeted smoke tests for B200 - staged approach to avoid too many failures
+  time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
   assert_git_not_dirty
 }
@@ -836,11 +824,6 @@ test_inductor_halide() {
   assert_git_not_dirty
 }
-test_inductor_pallas() {
-  python test/run_test.py --include inductor/test_pallas.py --verbose
-  assert_git_not_dirty
-}
 test_inductor_triton_cpu() {
   python test/run_test.py --include inductor/test_triton_cpu_backend.py inductor/test_torchinductor_strided_blocks.py --verbose
   assert_git_not_dirty
@@ -1680,22 +1663,6 @@ test_operator_microbenchmark() {
   done
 }
-test_attention_microbenchmark() {
-  TEST_REPORTS_DIR=$(pwd)/test/test-reports
-  mkdir -p "$TEST_REPORTS_DIR"
-  TEST_DIR=$(pwd)
-  # Install attention-gym dependency
-  echo "Installing attention-gym..."
-  python -m pip install git+https://github.com/meta-pytorch/attention-gym.git@main
-  pip show triton
-  cd "${TEST_DIR}"/benchmarks/transformer
-  $TASKSET python score_mod.py --config configs/config_basic.yaml \
-    --output-json-for-dashboard "${TEST_REPORTS_DIR}/attention_microbenchmark.json"
-}
 if ! [[ "${BUILD_ENVIRONMENT}" == *libtorch* || "${BUILD_ENVIRONMENT}" == *-bazel-* ]]; then
   (cd test && python -c "import torch; print(torch.__config__.show())")
   (cd test && python -c "import torch; print(torch.__config__.parallel_info())")
@@ -1753,14 +1720,10 @@ elif [[ "${TEST_CONFIG}" == *operator_benchmark* ]]; then
   fi
 elif [[ "${TEST_CONFIG}" == *operator_microbenchmark* ]]; then
   test_operator_microbenchmark
-elif [[ "${TEST_CONFIG}" == *attention_microbenchmark* ]]; then
-  test_attention_microbenchmark
 elif [[ "${TEST_CONFIG}" == *inductor_distributed* ]]; then
   test_inductor_distributed
 elif [[ "${TEST_CONFIG}" == *inductor-halide* ]]; then
   test_inductor_halide
-elif [[ "${TEST_CONFIG}" == *inductor-pallas* ]]; then
-  test_inductor_pallas
 elif [[ "${TEST_CONFIG}" == *inductor-triton-cpu* ]]; then
   test_inductor_triton_cpu
 elif [[ "${TEST_CONFIG}" == *inductor-micro-benchmark* ]]; then


@@ -70,7 +70,7 @@ sccache --zero-stats
 sccache --show-stats
 # Build the wheel
-python -m build --wheel --no-isolation
+python -m build --wheel --no-build-isolation
 if ($LASTEXITCODE -ne 0) { exit 1 }
 # Install the wheel locally


@@ -1,11 +1,11 @@
-name: 🚀 New Feature for Release
+name: 🚀 Release highlight for proposed Feature
 description: Submit a Release highlight for proposed Feature
 labels: ["release-feature-request"]
 body:
   - type: textarea
     attributes:
-      label: New Feature for Release
+      label: Release highlight for proposed Feature
       description: >
         Example: “A torch.special module, analogous to SciPy's special module.”
   - type: input


@@ -63,7 +63,7 @@ self-hosted-runner:
   - linux.rocm.gpu.gfx942.1
   - linux.rocm.gpu.gfx942.2
   - linux.rocm.gpu.gfx942.4
-  - linux.rocm.gfx942.docker-cache
+  - rocm-docker
   # Org wise AWS `mac2.metal` runners (2020 Mac mini hardware powered by Apple silicon M1 processors)
   - macos-m1-stable
   - macos-m1-14


@@ -1 +1 @@
-07b6cbde121417a70e4dc871adb6d27030e0ce3f
+3b0e7a6f192ca2715e7e6cbe5db007aea7165fe2


@@ -1 +1 @@
-acccf86477759b2d3500f1ae1be065f7b1e409ec
+cfbc5c2f1c798991715a6b06bb3ce46478c4487c


@@ -1 +1 @@
-e4d25697f9dc5eedaf8f0a5bf085c62c5455a53a
+c8b09f5f77d6bf6fb7ed7a9aa83e5d8156b3a5e9

.github/labeler.yml vendored

@@ -138,8 +138,7 @@
   - test/test_matmul_cuda.py
   - test/test_scaled_matmul_cuda.py
   - test/inductor/test_fp8.py
-  - aten/src/ATen/native/cuda/*Blas.cpp
-  - aten/src/ATen/cuda/CUDA*Blas.*
+  - aten/src/ATen/native/cuda/Blas.cpp
   - torch/**/*cublas*
   - torch/_inductor/kernel/mm.py
   - test/inductor/test_max_autotune.py
@@ -149,8 +148,7 @@
   - test/test_matmul_cuda.py
   - test/test_scaled_matmul_cuda.py
   - test/inductor/test_fp8.py
-  - aten/src/ATen/native/cuda/*Blas.cpp
-  - aten/src/ATen/cuda/CUDA*Blas.*
+  - aten/src/ATen/native/cuda/Blas.cpp
   - torch/**/*cublas*
   - torch/_inductor/kernel/mm.py
   - test/inductor/test_max_autotune.py
@@ -160,21 +158,7 @@
   - test/test_matmul_cuda.py
   - test/test_scaled_matmul_cuda.py
   - test/inductor/test_fp8.py
-  - aten/src/ATen/native/cuda/*Blas.cpp
-  - aten/src/ATen/cuda/CUDA*Blas.*
+  - aten/src/ATen/native/cuda/Blas.cpp
   - torch/_inductor/kernel/mm.py
   - test/inductor/test_max_autotune.py
   - third_party/fbgemm
-"ciflow/mps":
-  - aten/src/ATen/mps/**
-  - aten/src/ATen/native/mps/**
-  - torch/_inductor/codegen/mps.py
-  - test/test_mps.py
-  - test/inductor/test_mps_basic.py
-"ciflow/h100-symm-mem":
-  - torch/csrc/distributed/c10d/symm_mem/**
-  - torch/distributed/_symmetric_memory/**
-  - test/distributed/**/*mem*
-  - test/distributed/**/*mem*/**


@@ -10,4 +10,3 @@
 pathFilter:
   - 'torch/csrc/inductor/aoti_torch/c/*'
   - 'torch/csrc/inductor/aoti_torch/generated/*'
-  - 'torch/csrc/stable/c/*'


@@ -2,8 +2,8 @@ tracking_issue: 24422
 ciflow_tracking_issue: 64124
 ciflow_push_tags:
 - ciflow/b200
-- ciflow/b200-distributed
 - ciflow/b200-symm-mem
+- ciflow/b200-distributed
 - ciflow/binaries
 - ciflow/binaries_libtorch
 - ciflow/binaries_wheel
@@ -22,8 +22,6 @@ ciflow_push_tags:
 - ciflow/inductor-perf-test-nightly-xpu
 - ciflow/inductor-periodic
 - ciflow/inductor-rocm
-- ciflow/inductor-rocm-mi200
-- ciflow/inductor-rocm-mi300
 - ciflow/linux-aarch64
 - ciflow/mps
 - ciflow/nightly
@@ -35,13 +33,11 @@ ciflow_push_tags:
 - ciflow/quantization-periodic
 - ciflow/riscv64
 - ciflow/rocm
-- ciflow/rocm-mi200
 - ciflow/rocm-mi300
 - ciflow/rocm-mi355
 - ciflow/rocm-navi31
 - ciflow/s390
 - ciflow/slow
-- ciflow/slow-rocm-mi200
 - ciflow/torchbench
 - ciflow/triton_binaries
 - ciflow/trunk


@@ -1,11 +1,10 @@
 # Delete old branches
 import os
 import re
-from collections.abc import Callable
 from datetime import datetime
 from functools import lru_cache
 from pathlib import Path
-from typing import Any
+from typing import Any, Callable
 from github_utils import gh_fetch_json_dict, gh_graphql
 from gitutils import GitRepo


@@ -8,11 +8,10 @@ import re
 import subprocess
 import sys
 import warnings
-from collections.abc import Callable
 from enum import Enum
 from functools import cache
 from logging import info
-from typing import Any, Optional
+from typing import Any, Callable, Optional
 from urllib.request import Request, urlopen
 import yaml


@@ -11,8 +11,7 @@ import sys
 import time
 import urllib
 import urllib.parse
-from collections.abc import Callable
-from typing import Any, Optional
+from typing import Any, Callable, Optional
 from urllib.request import Request, urlopen


@@ -3,9 +3,8 @@
 import json
 import os
 import warnings
-from collections.abc import Callable
 from dataclasses import dataclass
-from typing import Any, cast, Optional, Union
+from typing import Any, Callable, cast, Optional, Union
 from urllib.error import HTTPError
 from urllib.parse import quote
 from urllib.request import Request, urlopen


@@ -4,10 +4,10 @@ import os
 import re
 import tempfile
 from collections import defaultdict
-from collections.abc import Callable, Iterator
+from collections.abc import Iterator
 from datetime import datetime
 from functools import wraps
-from typing import Any, cast, Optional, TypeVar, Union
+from typing import Any, Callable, cast, Optional, TypeVar, Union
 T = TypeVar("T")


@@ -34,9 +34,6 @@ python3 torch/utils/data/datapipes/gen_pyi.py
 # Also check generated pyi files
 find torch -name '*.pyi' -exec git add --force -- "{}" +
-# Print current environment
-python3 -m pip freeze
 RC=0
 # Run lintrunner on all files
 if ! lintrunner --force-color --tee-json=lint.json ${ADDITIONAL_LINTRUNNER_ARGS} 2> /dev/null; then


@@ -17,12 +17,12 @@ import re
 import time
 import urllib.parse
 from collections import defaultdict
-from collections.abc import Callable, Iterable
+from collections.abc import Iterable
 from dataclasses import dataclass
 from functools import cache
 from pathlib import Path
 from re import Pattern
-from typing import Any, cast, NamedTuple, Optional
+from typing import Any, Callable, cast, NamedTuple, Optional
 from warnings import warn
 import yaml


@@ -1,73 +0,0 @@
name: attention_op_microbenchmark
on:
push:
tags:
- ciflow/op-benchmark/*
workflow_dispatch:
schedule:
# Run at 06:00 UTC everyday
- cron: 0 7 * * *
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
cancel-in-progress: true
permissions:
id-token: write
contents: read
jobs:
attn-microbenchmark-build:
if: github.repository_owner == 'pytorch'
uses: ./.github/workflows/_linux-build.yml
with:
runner: linux.12xlarge.memory
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
cuda-arch-list: '8.0 9.0'
test-matrix: |
{ include: [
{ config: "attention_microbenchmark_test", shard: 1, num_shards: 1, runner: "linux.aws.a100" },
{ config: "attention_microbenchmark_test", shard: 1, num_shards: 1, runner: "linux.aws.h100" },
]}
secrets: inherit
attn-microbenchmark-test:
name: attn-microbenchmark-test
uses: ./.github/workflows/_linux-test.yml
needs: attn-microbenchmark-build
with:
timeout-minutes: 500
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
docker-image: ${{ needs.attn-microbenchmark-build.outputs.docker-image }}
test-matrix: ${{ needs.attn-microbenchmark-build.outputs.test-matrix }}
secrets: inherit
# B200 runner
opmicrobenchmark-build-b200:
if: github.repository_owner == 'pytorch'
name: opmicrobenchmark-build-b200
uses: ./.github/workflows/_linux-build.yml
with:
runner: linux.12xlarge.memory
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm100
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
cuda-arch-list: '10.0'
test-matrix: |
{ include: [
{ config: "operator_microbenchmark_test", shard: 1, num_shards: 1, runner: "linux.dgx.b200" },
]}
secrets: inherit
opmicrobenchmark-test-b200:
name: opmicrobenchmark-test-b200
uses: ./.github/workflows/_linux-test.yml
needs: opmicrobenchmark-build-b200
with:
timeout-minutes: 500
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm100
docker-image: ${{ needs.opmicrobenchmark-build-b200.outputs.docker-image }}
test-matrix: ${{ needs.opmicrobenchmark-build-b200.outputs.test-matrix }}
aws-role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
secrets: inherit


@@ -56,8 +56,6 @@ jobs:
           pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9,
           pytorch-linux-jammy-cuda12.4-cudnn9-py3-gcc11,
           pytorch-linux-jammy-py3.10-clang12,
-          pytorch-linux-jammy-py3.11-clang12,
-          pytorch-linux-jammy-py3.12-clang12,
           pytorch-linux-jammy-py3.13-clang12,
           pytorch-linux-jammy-py3.14-clang12,
           pytorch-linux-jammy-rocm-n-py3,
@@ -67,10 +65,9 @@ jobs:
           pytorch-linux-jammy-py3.10-gcc11,
           pytorch-linux-jammy-py3-gcc11-inductor-benchmarks,
           pytorch-linux-jammy-py3.12-halide,
-          pytorch-linux-jammy-cuda12.8-py3.12-pallas,
           pytorch-linux-jammy-xpu-n-1-py3,
-          pytorch-linux-noble-xpu-n-py3,
-          pytorch-linux-noble-xpu-n-py3-inductor-benchmarks,
+          pytorch-linux-jammy-xpu-n-py3,
+          pytorch-linux-jammy-xpu-n-py3-inductor-benchmarks,
           pytorch-linux-jammy-py3-clang18-asan,
           pytorch-linux-jammy-py3-clang12-onnx,
           pytorch-linux-jammy-linter,
@@ -80,11 +77,11 @@ jobs:
           pytorch-linux-noble-riscv64-py3.12-gcc14
         ]
         include:
-          - docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc13
+          - docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc11
             runner: linux.arm64.m7g.4xlarge
           - docker-image-name: pytorch-linux-jammy-aarch64-py3.10-clang21
             runner: linux.arm64.m7g.4xlarge
-          - docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc13-inductor-benchmarks
+          - docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc11-inductor-benchmarks
            runner: linux.arm64.m7g.4xlarge
     timeout-minutes: 600
     # Docker uploads fail from LF runners, see https://github.com/pytorch/pytorch/pull/137358
@@ -119,22 +116,6 @@ jobs:
        with:
          docker-image: ${{ steps.build-docker-image.outputs.docker-image }}
-      - name: Generate output
-        if: contains(matrix.docker-image-name, 'rocm')
-        id: generate_output
-        run: |
-          docker_image_name="${{ matrix.docker-image-name }}"
-          docker_image_tag="${{ steps.build-docker-image.outputs.docker-image }}"
-          echo "${docker_image_name}=${docker_image_tag}" >> docker-builds-output-${docker_image_name}.txt
-      - name: Upload artifacts
-        uses: actions/upload-artifact@v4.4.0
-        if: contains(matrix.docker-image-name, 'rocm')
-        with:
-          name: docker-builds-artifacts-${{ matrix.docker-image-name }}
-          retention-days: 14
-          path: ./docker-builds-output-${{ matrix.docker-image-name }}.txt
      - uses: nick-fields/retry@7152eba30c6575329ac0576536151aca5a72780e # v3.0.0
        name: Push to https://ghcr.io/
        id: push-to-ghcr-io


@@ -0,0 +1,55 @@
name: docker-cache-mi300
on:
# run every 6 hours
schedule:
- cron: 0 0,6,12,18 * * *
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name }}
cancel-in-progress: true
permissions:
id-token: write
contents: read
jobs:
docker-cache:
if: github.repository_owner == 'pytorch'
runs-on: rocm-docker
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
no-sudo: true
- name: configure aws credentials
id: aws_creds
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
with:
role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
aws-region: us-east-1
role-duration-seconds: 18000
- name: Login to Amazon ECR
id: login-ecr
continue-on-error: false
uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
- name: Calculate docker image
id: calculate-docker-image
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
push: false
- name: Pull docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
- name: Tar and upload to S3 bucket
run: |
sudo docker save -o ~/docker-data/pytorch/pytorch_docker_image.tar ${{ steps.calculate-docker-image.outputs.docker-image }}
sudo rclone copy -P --s3-upload-concurrency 64 --s3-chunk-size 200M --s3-upload-cutoff 300M ~/docker-data/pytorch/pytorch_docker_image.tar oci:pytorchbucket0002/pytorch_docker_image --progress


@@ -1,105 +0,0 @@
name: docker-cache-rocm
on:
workflow_run:
workflows: [docker-builds]
branches: [main, release]
types:
- completed
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name }}
cancel-in-progress: true
permissions:
id-token: write
contents: read
actions: read
jobs:
download-docker-builds-artifacts:
if: github.repository_owner == 'pytorch'
name: download-docker-builds-artifacts
runs-on: ubuntu-latest
outputs:
pytorch-linux-jammy-rocm-n-py3: ${{ steps.process-artifacts.outputs.pytorch-linux-jammy-rocm-n-py3 }}
pytorch-linux-noble-rocm-n-py3: ${{ steps.process-artifacts.outputs.pytorch-linux-noble-rocm-n-py3 }}
pytorch-linux-jammy-rocm-n-py3-benchmarks: ${{ steps.process-artifacts.outputs.pytorch-linux-jammy-rocm-n-py3-benchmarks }}
steps:
- name: Download artifacts
uses: actions/download-artifact@v4.1.7
with:
run-id: ${{ github.event.workflow_run.id }}
path: ./docker-builds-artifacts
merge-multiple: true
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Process artifacts
id: process-artifacts
run: |
ls -R ./docker-builds-artifacts
cat ./docker-builds-artifacts/*txt >> "${GITHUB_OUTPUT}"
cat "${GITHUB_OUTPUT}"
docker-cache:
if: github.repository_owner == 'pytorch'
needs: download-docker-builds-artifacts
strategy:
fail-fast: false
matrix:
runner: [linux.rocm.gfx942.docker-cache]
docker-image: [
"${{ needs.download-docker-builds-artifacts.outputs.pytorch-linux-jammy-rocm-n-py3 }}",
"${{ needs.download-docker-builds-artifacts.outputs.pytorch-linux-noble-rocm-n-py3 }}",
"${{ needs.download-docker-builds-artifacts.outputs.pytorch-linux-jammy-rocm-n-py3-benchmarks }}"
]
runs-on: "${{ matrix.runner }}"
steps:
- name: debug
run: |
JSON_STRINGIFIED="${{ toJSON(needs.download-docker-builds-artifacts.outputs) }}"
echo "Outputs of download-docker-builds-artifacts job: ${JSON_STRINGIFIED}"
- name: configure aws credentials
id: aws_creds
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
with:
role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
aws-region: us-east-1
role-duration-seconds: 18000
- name: Login to Amazon ECR
id: login-ecr
continue-on-error: false
uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
- name: Generate ghrc.io tag
id: ghcr-io-tag
run: |
ecr_image="${{ matrix.docker-image }}"
ghcr_image="ghcr.io/pytorch/ci-image:${ecr_image##*:}"
echo "ghcr_image=${ghcr_image}" >> "$GITHUB_OUTPUT"
- name: Pull docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.ghcr-io-tag.outputs.ghcr_image }}
- name: Save as tarball
run: |
docker_image_tag=${{ matrix.docker-image }}
docker_image_tag="${docker_image_tag#*:}" # Remove everything before and including first ":"
docker_image_tag="${docker_image_tag%-*}" # Remove everything after and including last "-"
ref_name=${{ github.event.workflow_run.head_branch }}
if [[ $ref_name =~ "release/" ]]; then
ref_suffix="release"
elif [[ $ref_name == "main" ]]; then
ref_suffix="main"
else
echo "Unexpected branch in ref_name: ${ref_name}" && exit 1
fi
docker tag ${{ steps.ghcr-io-tag.outputs.ghcr_image }} ${{ matrix.docker-image }}
# mv is atomic operation, so we use intermediate tar.tmp file to prevent read-write contention
docker save -o ~/pytorch-data/docker/${docker_image_tag}.tar.tmp ${{ matrix.docker-image }}
mv ~/pytorch-data/docker/${docker_image_tag}.tar.tmp ~/pytorch-data/docker/${docker_image_tag}_${ref_suffix}.tar


@ -37,6 +37,7 @@ jobs:
needs: get-label-type needs: get-label-type
with: with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runner: "linux.c7i.12xlarge"
build-environment: linux-jammy-cuda12.8-py3.10-gcc11-sm90-dist build-environment: linux-jammy-cuda12.8-py3.10-gcc11-sm90-dist
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11 docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
cuda-arch-list: '9.0' cuda-arch-list: '9.0'


@ -72,7 +72,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runner: linux.arm64.m7g.4xlarge runner: linux.arm64.m7g.4xlarge
build-environment: linux-jammy-aarch64-py3.10 build-environment: linux-jammy-aarch64-py3.10
docker-image-name: ci-image:pytorch-linux-jammy-aarch64-py3.10-gcc13-inductor-benchmarks docker-image-name: ci-image:pytorch-linux-jammy-aarch64-py3.10-gcc11-inductor-benchmarks
test-matrix: | test-matrix: |
{ include: [ { include: [
{ config: "inductor_huggingface_perf_cpu_aarch64", shard: 1, num_shards: 9, runner: "linux.arm64.m7g.metal" }, { config: "inductor_huggingface_perf_cpu_aarch64", shard: 1, num_shards: 9, runner: "linux.arm64.m7g.metal" },


@ -83,8 +83,8 @@ jobs:
needs: get-label-type needs: get-label-type
with: with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build-environment: linux-noble-xpu-n-py3.10 build-environment: linux-jammy-xpu-n-py3.10
docker-image-name: ci-image:pytorch-linux-noble-xpu-n-py3-inductor-benchmarks docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-py3-inductor-benchmarks
runner: linux.c7i.12xlarge runner: linux.c7i.12xlarge
test-matrix: | test-matrix: |
{ include: [ { include: [
@ -117,7 +117,7 @@ jobs:
uses: ./.github/workflows/_xpu-test.yml uses: ./.github/workflows/_xpu-test.yml
needs: xpu-n-py3_10-inductor-benchmark-build needs: xpu-n-py3_10-inductor-benchmark-build
with: with:
build-environment: linux-noble-xpu-n-py3.10 build-environment: linux-jammy-xpu-n-py3.10
dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-false-cppwrapper-true-aotinductor-true-freezing_cudagraphs-false-cudagraphs_low_precision-false dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-false-cppwrapper-true-aotinductor-true-freezing_cudagraphs-false-cudagraphs_low_precision-false
docker-image: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.docker-image }} docker-image: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.docker-image }}
test-matrix: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.test-matrix }} test-matrix: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.test-matrix }}
@ -137,7 +137,7 @@ jobs:
uses: ./.github/workflows/_xpu-test.yml uses: ./.github/workflows/_xpu-test.yml
needs: xpu-n-py3_10-inductor-benchmark-build needs: xpu-n-py3_10-inductor-benchmark-build
with: with:
build-environment: linux-noble-xpu-n-py3.10 build-environment: linux-jammy-xpu-n-py3.10
dashboard-tag: training-${{ inputs.training }}-inference-${{ inputs.inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-cudagraphs-${{ inputs.cudagraphs }}-cppwrapper-${{ inputs.cppwrapper }}-aotinductor-${{ inputs.aotinductor }}-maxautotune-${{ inputs.maxautotune }}-freezing_cudagraphs-${{ inputs.freezing_cudagraphs }}-cudagraphs_low_precision-${{ inputs.cudagraphs }} dashboard-tag: training-${{ inputs.training }}-inference-${{ inputs.inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-cudagraphs-${{ inputs.cudagraphs }}-cppwrapper-${{ inputs.cppwrapper }}-aotinductor-${{ inputs.aotinductor }}-maxautotune-${{ inputs.maxautotune }}-freezing_cudagraphs-${{ inputs.freezing_cudagraphs }}-cudagraphs_low_precision-${{ inputs.cudagraphs }}
docker-image: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.docker-image }} docker-image: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.docker-image }}
test-matrix: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.test-matrix }} test-matrix: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.test-matrix }}


@ -7,7 +7,6 @@ on:
- release/* - release/*
tags: tags:
- ciflow/inductor-rocm/* - ciflow/inductor-rocm/*
- ciflow/inductor-rocm-mi300/*
workflow_dispatch: workflow_dispatch:
concurrency: concurrency:


@ -1,13 +1,13 @@
name: inductor-rocm-mi200 name: inductor-rocm
on: on:
schedule: schedule:
- cron: 0 */3 * * * - cron: 0 * * * *
push: push:
branches: branches:
- release/* - release/*
tags: tags:
- ciflow/inductor-rocm-mi200/* - ciflow/inductor-rocm/*
workflow_dispatch: workflow_dispatch:
concurrency: concurrency:


@ -81,32 +81,6 @@ jobs:
test-matrix: ${{ needs.inductor-halide-build.outputs.test-matrix }} test-matrix: ${{ needs.inductor-halide-build.outputs.test-matrix }}
secrets: inherit secrets: inherit
inductor-pallas-build:
name: inductor-pallas-build
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
build-environment: linux-jammy-cuda12.8-py3.12-gcc11
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-py3.12-pallas
cuda-arch-list: '8.9'
runner: linux.8xlarge.memory
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
test-matrix: |
{ include: [
{ config: "inductor-pallas", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.12xlarge.nvidia.gpu" },
]}
secrets: inherit
inductor-pallas-test:
name: inductor-pallas-test
uses: ./.github/workflows/_linux-test.yml
needs: inductor-pallas-build
with:
build-environment: linux-jammy-py3.12-gcc11
docker-image: ${{ needs.inductor-pallas-build.outputs.docker-image }}
test-matrix: ${{ needs.inductor-pallas-build.outputs.test-matrix }}
secrets: inherit
inductor-triton-cpu-build: inductor-triton-cpu-build:
name: inductor-triton-cpu-build name: inductor-triton-cpu-build
uses: ./.github/workflows/_linux-build.yml uses: ./.github/workflows/_linux-build.yml


@ -33,7 +33,7 @@ jobs:
with: with:
runner_prefix: ${{ needs.get-label-type.outputs.label-type }} runner_prefix: ${{ needs.get-label-type.outputs.label-type }}
build-environment: linux-jammy-aarch64-py3.10 build-environment: linux-jammy-aarch64-py3.10
docker-image-name: ci-image:pytorch-linux-jammy-aarch64-py3.10-gcc13 docker-image-name: ci-image:pytorch-linux-jammy-aarch64-py3.10-gcc11
runner: linux.arm64.m7g.4xlarge runner: linux.arm64.m7g.4xlarge
test-matrix: | test-matrix: |
{ include: [ { include: [


@ -5,11 +5,9 @@ on:
- cron: 0 0 * * * - cron: 0 0 * * *
push: push:
tags: tags:
# NOTE: Doc build pipelines should only get triggered on: # NOTE: Doc build pipelines should only get triggered on release candidate builds
# Major or minor release candidates builds # Release candidate tags look like: v1.11.0-rc1
- v[0-9]+.[0-9]+.0+-rc[0-9]+ - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
# Final RC for major, minor and patch releases
- v[0-9]+.[0-9]+.[0-9]+
- ciflow/nightly/* - ciflow/nightly/*
workflow_dispatch: workflow_dispatch:


@ -60,7 +60,7 @@ jobs:
with: with:
build-environment: linux-jammy-aarch64-py3.10 build-environment: linux-jammy-aarch64-py3.10
runner: linux.arm64.m7g.4xlarge runner: linux.arm64.m7g.4xlarge
docker-image-name: ci-image:pytorch-linux-jammy-aarch64-py3.10-gcc13 docker-image-name: ci-image:pytorch-linux-jammy-aarch64-py3.10-gcc11
test-matrix: | test-matrix: |
{ include: [ { include: [
{ config: "cpu_operator_benchmark_short", shard: 1, num_shards: 1, runner: "linux.arm64.m8g.4xlarge" }, { config: "cpu_operator_benchmark_short", shard: 1, num_shards: 1, runner: "linux.arm64.m8g.4xlarge" },


@ -11,6 +11,7 @@ on:
- cron: 29 8 * * * # about 1:29am PDT, for mem leak check and rerun disabled tests - cron: 29 8 * * * # about 1:29am PDT, for mem leak check and rerun disabled tests
push: push:
tags: tags:
- ciflow/periodic/*
- ciflow/periodic-rocm-mi200/* - ciflow/periodic-rocm-mi200/*
branches: branches:
- release/* - release/*


@ -11,7 +11,6 @@ on:
- cron: 29 8 * * * # about 1:29am PDT, for mem leak check and rerun disabled tests - cron: 29 8 * * * # about 1:29am PDT, for mem leak check and rerun disabled tests
push: push:
tags: tags:
- ciflow/periodic/*
- ciflow/periodic-rocm-mi300/* - ciflow/periodic-rocm-mi300/*
branches: branches:
- release/* - release/*


@ -342,16 +342,16 @@ jobs:
test-matrix: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc9-inductor-build.outputs.test-matrix }} test-matrix: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc9-inductor-build.outputs.test-matrix }}
secrets: inherit secrets: inherit
linux-noble-xpu-n-py3_10-build: linux-jammy-xpu-n-py3_10-build:
name: linux-noble-xpu-n-py3.10 name: linux-jammy-xpu-n-py3.10
uses: ./.github/workflows/_linux-build.yml uses: ./.github/workflows/_linux-build.yml
needs: get-label-type needs: get-label-type
with: with:
# This should sync with the build in xpu.yml but xpu uses a larger runner # This should sync with the build in xpu.yml but xpu uses a larger runner
# sync-tag: linux-xpu-n-build # sync-tag: linux-xpu-n-build
runner_prefix: ${{ needs.get-label-type.outputs.label-type }} runner_prefix: ${{ needs.get-label-type.outputs.label-type }}
build-environment: linux-noble-xpu-n-py3.10 build-environment: linux-jammy-xpu-n-py3.10
docker-image-name: ci-image:pytorch-linux-noble-xpu-n-py3 docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-py3
test-matrix: | test-matrix: |
{ include: [ { include: [
{ config: "default", shard: 1, num_shards: 4, runner: "linux.idc.xpu" }, { config: "default", shard: 1, num_shards: 4, runner: "linux.idc.xpu" },


@ -6,7 +6,6 @@ on:
- main - main
- release/* - release/*
tags: tags:
- ciflow/rocm/*
- ciflow/rocm-mi300/* - ciflow/rocm-mi300/*
workflow_dispatch: workflow_dispatch:
schedule: schedule:


@ -1,16 +1,15 @@
name: rocm-mi200 name: rocm
on: on:
push: push:
branches: branches:
- release/* - release/*
tags: tags:
- ciflow/rocm-mi200/* - ciflow/rocm/*
workflow_dispatch: workflow_dispatch:
schedule: schedule:
- cron: 29 8 * * * # about 1:29am PDT - cron: 29 8 * * * # about 1:29am PDT
- cron: 0 */3 * * * - cron: 0 * * * *
concurrency: concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}


@ -1,81 +0,0 @@
# This workflow is dedicated to host slow jobs that are run only periodically because
# they are too slow to run in every commit. The list of slow tests can be found in
# https://github.com/pytorch/test-infra/blob/generated-stats/stats/slow-tests.json
name: slow-rocm-mi200
on:
push:
branches:
- release/*
tags:
- ciflow/slow/*
- ciflow/slow-rocm-mi200/*
schedule:
- cron: 0 */3 * * *
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}-${{ github.event.schedule }}
cancel-in-progress: true
permissions:
id-token: write
contents: read
jobs:
llm-td:
if: github.repository_owner == 'pytorch'
name: before-test
uses: ./.github/workflows/llm_td_retrieval.yml
permissions:
id-token: write
contents: read
target-determination:
name: before-test
uses: ./.github/workflows/target_determination.yml
needs: llm-td
permissions:
id-token: write
contents: read
get-label-type:
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
curr_branch: ${{ github.head_ref || github.ref_name }}
curr_ref_type: ${{ github.ref_type }}
linux-jammy-rocm-py3_10-build:
name: linux-jammy-rocm-py3.10
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build-environment: linux-jammy-rocm-py3.10
docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
sync-tag: rocm-build
test-matrix: |
{ include: [
{ config: "slow", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.2", owners: ["module:rocm"] },
{ config: "slow", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.2", owners: ["module:rocm"] },
]}
secrets: inherit
linux-jammy-rocm-py3_10-test:
permissions:
id-token: write
contents: read
name: linux-jammy-rocm-py3.10
uses: ./.github/workflows/_rocm-test.yml
needs:
- linux-jammy-rocm-py3_10-build
- target-determination
with:
build-environment: linux-jammy-rocm-py3.10
docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }}
secrets: inherit


@ -105,6 +105,36 @@ jobs:
test-matrix: ${{ needs.linux-jammy-py3_10-clang12-build.outputs.test-matrix }} test-matrix: ${{ needs.linux-jammy-py3_10-clang12-build.outputs.test-matrix }}
secrets: inherit secrets: inherit
linux-jammy-rocm-py3_10-build:
name: linux-jammy-rocm-py3.10
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build-environment: linux-jammy-rocm-py3.10
docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
test-matrix: |
{ include: [
{ config: "slow", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.2", owners: ["module:rocm"] },
{ config: "slow", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.2", owners: ["module:rocm"] },
]}
secrets: inherit
linux-jammy-rocm-py3_10-test:
permissions:
id-token: write
contents: read
name: linux-jammy-rocm-py3.10
uses: ./.github/workflows/_rocm-test.yml
needs:
- linux-jammy-rocm-py3_10-build
- target-determination
with:
build-environment: linux-jammy-rocm-py3.10
docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }}
secrets: inherit
linux-jammy-py3_10-clang18-asan-build: linux-jammy-py3_10-clang18-asan-build:
name: linux-jammy-py3.10-clang18-asan name: linux-jammy-py3.10-clang18-asan
uses: ./.github/workflows/_linux-build.yml uses: ./.github/workflows/_linux-build.yml


@ -5,9 +5,7 @@
# Flow: # Flow:
# 1. Builds PyTorch with CUDA 12.8+ and sm100 architecture for B200 # 1. Builds PyTorch with CUDA 12.8+ and sm100 architecture for B200
# 2. Runs smoke tests on linux.dgx.b200 runner # 2. Runs smoke tests on linux.dgx.b200 runner
# 3. Tests executed are defined in .ci/pytorch/test.sh -> test_python_smoke_b200() function # 3. Tests executed are defined in .ci/pytorch/test.sh -> test_python_smoke() function
# - Includes matmul, scaled_matmul, FP8, and FlashAttention CuTe tests
# - FlashAttention CuTe DSL is installed as part of test execution
# #
# Triggered by: # Triggered by:
# - Pull requests modifying this workflow file # - Pull requests modifying this workflow file


@ -41,6 +41,7 @@ jobs:
needs: get-label-type needs: get-label-type
with: with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runner: linux.12xlarge.memory
build-environment: linux-jammy-cuda12.8-py3.10-gcc11-sm90 build-environment: linux-jammy-cuda12.8-py3.10-gcc11-sm90
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11 docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
cuda-arch-list: '9.0' cuda-arch-list: '9.0'


@ -1,83 +0,0 @@
name: trunk-rocm-mi300
on:
push:
branches:
- main
- release/*
workflow_dispatch:
schedule:
- cron: 29 8 * * * # about 1:29am PDT
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
cancel-in-progress: true
permissions:
id-token: write
contents: read
jobs:
llm-td:
if: github.repository_owner == 'pytorch'
name: before-test
uses: ./.github/workflows/llm_td_retrieval.yml
permissions:
id-token: write
contents: read
target-determination:
name: before-test
uses: ./.github/workflows/target_determination.yml
needs: llm-td
permissions:
id-token: write
contents: read
get-label-type:
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
curr_branch: ${{ github.head_ref || github.ref_name }}
curr_ref_type: ${{ github.ref_type }}
linux-jammy-rocm-py3_10-build:
name: linux-jammy-rocm-py3.10
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build-environment: linux-jammy-rocm-py3.10
docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
sync-tag: rocm-build
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 2, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 3, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 4, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 5, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 6, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "distributed", shard: 1, num_shards: 3, runner: "linux.rocm.gpu.gfx942.4.b" },
{ config: "distributed", shard: 2, num_shards: 3, runner: "linux.rocm.gpu.gfx942.4.b" },
{ config: "distributed", shard: 3, num_shards: 3, runner: "linux.rocm.gpu.gfx942.4.b" },
]}
secrets: inherit
linux-jammy-rocm-py3_10-test:
permissions:
id-token: write
contents: read
name: linux-jammy-rocm-py3.10
uses: ./.github/workflows/_rocm-test.yml
needs:
- linux-jammy-rocm-py3_10-build
- target-determination
with:
build-environment: linux-jammy-rocm-py3.10
docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }}
secrets: inherit


@ -5,23 +5,21 @@ on:
workflows: workflows:
- pull - pull
- trunk - trunk
- trunk-rocm-mi300
- periodic - periodic
- periodic-rocm-mi200 - periodic-rocm-mi200
- periodic-rocm-mi300 - periodic-rocm-mi300
- inductor - inductor
- unstable - unstable
- slow - slow
- slow-rocm-mi200
- unstable-periodic - unstable-periodic
- inductor-periodic - inductor-periodic
- rocm-mi200 - rocm
- rocm-mi300 - rocm-mi300
- rocm-mi355 - rocm-mi355
- inductor-micro-benchmark - inductor-micro-benchmark
- inductor-micro-benchmark-x86 - inductor-micro-benchmark-x86
- inductor-cu124 - inductor-cu124
- inductor-rocm-mi200 - inductor-rocm
- inductor-rocm-mi300 - inductor-rocm-mi300
- mac-mps - mac-mps
- linux-aarch64 - linux-aarch64


@ -47,15 +47,15 @@ jobs:
]} ]}
secrets: inherit secrets: inherit
linux-noble-xpu-n-py3_10-build: linux-jammy-xpu-n-py3_10-build:
name: linux-noble-xpu-n-py3.10 name: linux-jammy-xpu-n-py3.10
uses: ./.github/workflows/_linux-build.yml uses: ./.github/workflows/_linux-build.yml
needs: get-label-type needs: get-label-type
with: with:
sync-tag: linux-xpu-n-build sync-tag: linux-xpu-n-build
runner_prefix: ${{ needs.get-label-type.outputs.label-type }} runner_prefix: ${{ needs.get-label-type.outputs.label-type }}
build-environment: linux-noble-xpu-n-py3.10 build-environment: linux-jammy-xpu-n-py3.10
docker-image-name: ci-image:pytorch-linux-noble-xpu-n-py3 docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-py3
runner: linux.c7i.12xlarge runner: linux.c7i.12xlarge
test-matrix: | test-matrix: |
{ include: [ { include: [
@ -74,17 +74,17 @@ jobs:
]} ]}
secrets: inherit secrets: inherit
linux-noble-xpu-n-py3_10-test: linux-jammy-xpu-n-py3_10-test:
name: linux-noble-xpu-n-py3.10 name: linux-jammy-xpu-n-py3.10
uses: ./.github/workflows/_xpu-test.yml uses: ./.github/workflows/_xpu-test.yml
needs: linux-noble-xpu-n-py3_10-build needs: linux-jammy-xpu-n-py3_10-build
permissions: permissions:
id-token: write id-token: write
contents: read contents: read
with: with:
build-environment: linux-noble-xpu-n-py3.10 build-environment: linux-jammy-xpu-n-py3.10
docker-image: ${{ needs.linux-noble-xpu-n-py3_10-build.outputs.docker-image }} docker-image: ${{ needs.linux-jammy-xpu-n-py3_10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-noble-xpu-n-py3_10-build.outputs.test-matrix }} test-matrix: ${{ needs.linux-jammy-xpu-n-py3_10-build.outputs.test-matrix }}
secrets: inherit secrets: inherit
windows-xpu-n-1-build: windows-xpu-n-1-build:

.gitignore

@ -127,7 +127,6 @@ torch/test/
torch/utils/benchmark/utils/valgrind_wrapper/callgrind.h torch/utils/benchmark/utils/valgrind_wrapper/callgrind.h
torch/utils/benchmark/utils/valgrind_wrapper/valgrind.h torch/utils/benchmark/utils/valgrind_wrapper/valgrind.h
torch/version.py torch/version.py
torch/_inductor/kernel/vendored_templates/*
minifier_launcher.py minifier_launcher.py
aten/src/ATen/native/transformers/hip/flash_attn/ck/fmha_fwd_d* aten/src/ATen/native/transformers/hip/flash_attn/ck/fmha_fwd_d*
aten/src/ATen/native/transformers/hip/flash_attn/ck/fmha_bwd_d* aten/src/ATen/native/transformers/hip/flash_attn/ck/fmha_bwd_d*


@ -143,8 +143,7 @@ init_command = [
'tools/linter/adapters/pip_init.py', 'tools/linter/adapters/pip_init.py',
'--dry-run={{DRYRUN}}', '--dry-run={{DRYRUN}}',
'numpy==1.26.4 ; python_version >= "3.10" and python_version <= "3.11"', 'numpy==1.26.4 ; python_version >= "3.10" and python_version <= "3.11"',
'numpy==2.1.0 ; python_version >= "3.12" and python_version <= "3.13"', 'numpy==2.1.0 ; python_version >= "3.12"',
'numpy==2.3.4 ; python_version >= "3.14"',
'expecttest==0.3.0', 'expecttest==0.3.0',
'pyrefly==0.36.2', 'pyrefly==0.36.2',
'sympy==1.13.3', 'sympy==1.13.3',
@ -186,8 +185,6 @@ include_patterns = [
'aten/src/ATen/native/nested/cuda/*.h', 'aten/src/ATen/native/nested/cuda/*.h',
'aten/src/ATen/native/nested/*.cpp', 'aten/src/ATen/native/nested/*.cpp',
'aten/src/ATen/native/nested/*.h', 'aten/src/ATen/native/nested/*.h',
'aten/src/ATen/xpu/**/*.h',
'aten/src/ATen/xpu/**/*.cpp',
'c10/**/*.cpp', 'c10/**/*.cpp',
'c10/**/*.h', 'c10/**/*.h',
'torch/*.h', 'torch/*.h',
@ -1404,7 +1401,7 @@ init_command = [
'--dry-run={{DRYRUN}}', '--dry-run={{DRYRUN}}',
'usort==1.0.8.post1', 'usort==1.0.8.post1',
'isort==6.0.1', 'isort==6.0.1',
'ruff==0.14.4', # sync with RUFF 'ruff==0.13.1', # sync with RUFF
] ]
is_formatter = true is_formatter = true
@ -1539,7 +1536,7 @@ init_command = [
'python3', 'python3',
'tools/linter/adapters/pip_init.py', 'tools/linter/adapters/pip_init.py',
'--dry-run={{DRYRUN}}', '--dry-run={{DRYRUN}}',
'ruff==0.14.4', # sync with PYFMT 'ruff==0.13.1', # sync with PYFMT
] ]
is_formatter = true is_formatter = true


@ -736,44 +736,6 @@ if(NOT DEFINED USE_BLAS)
set(USE_BLAS ON) set(USE_BLAS ON)
endif() endif()
# Prioritized Text Linker Optimization
if(USE_PRIORITIZED_TEXT_FOR_LD)
set(LINKER_SCRIPT_FILE_IN "${CMAKE_SOURCE_DIR}/cmake/prioritized_text.txt")
set(LINKER_SCRIPT_FILE_OUT "${CMAKE_SOURCE_DIR}/cmake/linker_script.ld")
execute_process(
COMMAND ${Python_EXECUTABLE}
${CMAKE_SOURCE_DIR}/tools/setup_helpers/generate_linker_script.py
--filein "${LINKER_SCRIPT_FILE_IN}"
--fout "${LINKER_SCRIPT_FILE_OUT}"
RESULT_VARIABLE _gen_result
OUTPUT_VARIABLE _gen_output
ERROR_VARIABLE _gen_error
)
if(NOT _gen_result EQUAL 0)
message(FATAL_ERROR
"Failed to generate linker script:\n${_gen_output}\n${_gen_error}")
endif()
append_cxx_flag_if_supported("-ffunction-sections" CMAKE_CXX_FLAGS)
append_cxx_flag_if_supported("-fdata-sections" CMAKE_CXX_FLAGS)
append_c_flag_if_supported("-ffunction-sections" CMAKE_C_FLAGS)
append_c_flag_if_supported("-fdata-sections" CMAKE_C_FLAGS)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -T${LINKER_SCRIPT_FILE_OUT}")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -T${LINKER_SCRIPT_FILE_OUT}")
else()
if(LINUX AND CPU_AARCH64)
message(WARNING [[
It is strongly recommend to enable linker script optimization for all AArch64 Linux builds.
To do so please export USE_PRIORITIZED_TEXT_FOR_LD=1
]])
endif()
endif()
# Build libtorch mobile library, which contains ATen/TH ops and native support # Build libtorch mobile library, which contains ATen/TH ops and native support
# for TorchScript model, but doesn't contain not-yet-unified caffe2 ops; # for TorchScript model, but doesn't contain not-yet-unified caffe2 ops;
if(INTERN_BUILD_MOBILE) if(INTERN_BUILD_MOBILE)
@ -1440,6 +1402,9 @@ if(BUILD_JNI)
add_subdirectory(android/pytorch_android) add_subdirectory(android/pytorch_android)
endif() endif()
include(cmake/Summary.cmake)
caffe2_print_configuration_summary()
# Parse custom debug info # Parse custom debug info
if(DEFINED USE_CUSTOM_DEBINFO) if(DEFINED USE_CUSTOM_DEBINFO)
string(REPLACE ";" " " SOURCE_FILES "${USE_CUSTOM_DEBINFO}") string(REPLACE ";" " " SOURCE_FILES "${USE_CUSTOM_DEBINFO}")
@ -1479,5 +1444,56 @@ if(BUILD_BUNDLE_PTXAS AND USE_CUDA)
DESTINATION "${CMAKE_INSTALL_BINDIR}") DESTINATION "${CMAKE_INSTALL_BINDIR}")
endif() endif()
include(cmake/Summary.cmake) if(USE_PRIORITIZED_TEXT_FOR_LD)
caffe2_print_configuration_summary() add_compile_options(
$<$<COMPILE_LANGUAGE:C,CXX>:-ffunction-sections>
$<$<COMPILE_LANGUAGE:C,CXX>:-fdata-sections>
)
set(LINKER_SCRIPT_FILE_OUT "${CMAKE_SOURCE_DIR}/cmake/linker_script.ld")
set(LINKER_SCRIPT_FILE_IN "${CMAKE_SOURCE_DIR}/cmake/prioritized_text.txt")
add_custom_command(
OUTPUT "${LINKER_SCRIPT_FILE_OUT}"
COMMAND ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tools/setup_helpers/generate_linker_script.py --filein "${LINKER_SCRIPT_FILE_IN}" --fout "${LINKER_SCRIPT_FILE_OUT}"
DEPENDS ${CMAKE_SOURCE_DIR}/tools/setup_helpers/generate_linker_script.py "${LINKER_SCRIPT_FILE_IN}"
COMMENT "Generating prioritized text linker files"
VERBATIM
)
add_custom_target(generate_linker_script DEPENDS "${LINKER_SCRIPT_FILE_OUT}")
if(BUILD_PYTHON)
set(LINKER_OPT_TARGETS torch_python)
endif()
if(NOT BUILD_LIBTORCHLESS)
list(APPEND LINKER_OPT_TARGETS torch_cpu c10)
if(USE_CUDA)
list(APPEND LINKER_OPT_TARGETS torch_cuda c10_cuda)
endif()
if(USE_XPU)
list(APPEND LINKER_OPT_TARGETS torch_xpu c10_xpu)
endif()
if(USE_ROCM)
list(APPEND LINKER_OPT_TARGETS torch_hip c10_hip)
endif()
endif()
foreach(tgt IN LISTS LINKER_OPT_TARGETS)
if(TARGET ${tgt})
add_dependencies("${tgt}" generate_linker_script)
target_link_options_if_supported(${tgt} "-T,${LINKER_SCRIPT_FILE_OUT}")
set_property(TARGET ${tgt} APPEND PROPERTY LINK_DEPENDS "${LINKER_SCRIPT_FILE_OUT}")
else()
message(WARNING "Requested target '${tgt}' for linker script optimization was not found.")
endif()
endforeach()
else()
if(LINUX AND CPU_AARCH64)
message(WARNING [[
It is strongly recommend to enable linker script optimization for all AArch64 Linux builds.
To do so please export USE_PRIORITIZED_TEXT_FOR_LD=1
]])
endif()
endif()


@ -210,12 +210,8 @@ torch/backends/cudnn/ @eqy @syed-ahmed @Aidyn-A
/test/inductor/test_flex_attention.py @drisspg /test/inductor/test_flex_attention.py @drisspg
/test/inductor/test_flex_decoding.py @drisspg /test/inductor/test_flex_decoding.py @drisspg
# Low Precision & Grouped GEMMs # Low Precision GEMMs
/aten/src/ATen/native/cuda/Blas.cpp @drisspg @slayton58 /aten/src/ATen/native/cuda/Blas.cpp @drisspg @slayton58
/aten/src/ATen/native/cuda/GroupedBlas.cpp @drisspg @slayton58
/aten/src/ATen/native/cuda/ScaledBlas.cpp @drisspg @slayton58
/aten/src/ATen/cuda/CUDABlas.cpp @drisspg @slayton58 /aten/src/ATen/cuda/CUDABlas.cpp @drisspg @slayton58
/aten/src/ATen/cuda/CUDABlas.h @drisspg @slayton58 /aten/src/ATen/cuda/CUDABlas.h @drisspg @slayton58
/aten/src/ATen/cuda/CUDAScaledBlas.cpp @drisspg @slayton58
/aten/src/ATen/cuda/CUDAScaledBlas.h @drisspg @slayton58
/test/test_scaled_matmul_cuda.py @drisspg @slayton58 /test/test_scaled_matmul_cuda.py @drisspg @slayton58


@ -37,7 +37,7 @@ Copyright (c) 2024 Tri Dao.
All rights reserved. All rights reserved.
All contributions by Arm: All contributions by Arm:
Copyright (c) 2021, 2023-2025 Arm Limited and/or its affiliates Copyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates
All contributions from Caffe: All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors Copyright(c) 2013, 2014, 2015, the respective contributors


@ -18,8 +18,6 @@ Please report security issues using https://github.com/pytorch/pytorch/security/
All reports submitted through the security advisories mechanism would **either be made public or dismissed by the team within 90 days of the submission**. If advisory has been closed on the grounds that it is not a security issue, please do not hesitate to create an [new issue](https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml) as it is still likely a valid issue within the framework. All reports submitted through the security advisories mechanism would **either be made public or dismissed by the team within 90 days of the submission**. If advisory has been closed on the grounds that it is not a security issue, please do not hesitate to create an [new issue](https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml) as it is still likely a valid issue within the framework.
**Note on crashes and out of bounds access**: PyTorch is a computational framework that performs operations on behalf of the caller. Like many low-level libraries, PyTorch generally does not validate all inputs to every function—the responsibility for providing valid arguments lies with the calling code. While crashes and out of bounds memory access should be reported as bugs, they are generally not considered security vulnerabilities in PyTorch's threat model.
Please refer to the following page for our responsible disclosure policy, reward guidelines, and those things that should not be reported: Please refer to the following page for our responsible disclosure policy, reward guidelines, and those things that should not be reported:
https://www.facebook.com/whitehat https://www.facebook.com/whitehat


@ -174,12 +174,6 @@ class TORCH_API Context {
static long versionCuDNN() { static long versionCuDNN() {
return detail::getCUDAHooks().versionCuDNN(); return detail::getCUDAHooks().versionCuDNN();
} }
static long versionRuntimeCuDNN() {
return detail::getCUDAHooks().versionRuntimeCuDNN();
}
static long versionCuDNNFrontend() {
return detail::getCUDAHooks().versionCuDNNFrontend();
}
static bool hasCuSOLVER() { static bool hasCuSOLVER() {
return detail::getCUDAHooks().hasCuSOLVER(); return detail::getCUDAHooks().hasCuSOLVER();
} }


@ -94,11 +94,6 @@ TORCH_API inline void resetPeakStats(c10::DeviceIndex device_index) {
at::getDeviceAllocator(device_type)->resetPeakStats(device_index); at::getDeviceAllocator(device_type)->resetPeakStats(device_index);
} }
TORCH_API inline std::pair<size_t, size_t> getMemoryInfo(
c10::DeviceIndex device_index) {
const auto device_type = getAccelerator(true).value();
return at::getDeviceAllocator(device_type)->getMemoryInfo(device_index);
}
} // namespace at::accelerator } // namespace at::accelerator
namespace at { namespace at {


@ -6,7 +6,6 @@
#include <c10/util/Half.h> #include <c10/util/Half.h>
#include <c10/util/Metaprogramming.h> #include <c10/util/Metaprogramming.h>
#include <c10/util/complex.h> #include <c10/util/complex.h>
#include <torch/headeronly/core/Dispatch.h>
#ifdef __CUDACC__ #ifdef __CUDACC__
#include <cuda.h> // For CUDA_VERSION #include <cuda.h> // For CUDA_VERSION
@ -62,9 +61,12 @@ TORCH_API void record_kernel_function_dtype(std::string name);
} \ } \
} while (0) } while (0)
#define AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, HINT, ...) \ #define AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, HINT, ...) \
THO_PRIVATE_CASE_TYPE_USING_HINT_TMPL( \ case enum_type: { \
AT_PRIVATE_CHECK_SELECTIVE_BUILD, enum_type, HINT, __VA_ARGS__) AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \
using HINT [[maybe_unused]] = c10::impl::ScalarTypeToCPPTypeT<enum_type>; \
return __VA_ARGS__(); \
}
#define AT_DISPATCH_CASE(enum_type, ...) \ #define AT_DISPATCH_CASE(enum_type, ...) \
AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__) AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
@ -93,6 +95,14 @@ TORCH_API void record_kernel_function_dtype(std::string name);
return __VA_ARGS__(); \ return __VA_ARGS__(); \
} }
namespace detail {
inline at::ScalarType scalar_type(at::ScalarType s) {
return s;
}
} // namespace detail
// The AT_DISPATCH_* family of macros provides the ability to // The AT_DISPATCH_* family of macros provides the ability to
// conveniently generate specializations of a kernel over all of the // conveniently generate specializations of a kernel over all of the
// dtypes we care about in PyTorch. We call it "dispatch" because // dtypes we care about in PyTorch. We call it "dispatch" because
@ -180,13 +190,27 @@ TORCH_API void record_kernel_function_dtype(std::string name);
// but we're just being safe (and it doesn't hurt.) Note we must // but we're just being safe (and it doesn't hurt.) Note we must
// use it to shut up warnings about unused store. // use it to shut up warnings about unused store.
#define AT_DISPATCH_SWITCH(TYPE, NAME, ...) \ #define AT_DISPATCH_SWITCH(TYPE, NAME, ...) \
THO_DISPATCH_SWITCH_TMPL( \ [&] { \
RECORD_KERNEL_FUNCTION_DTYPE, \ const auto& the_type = TYPE; \
TORCH_CHECK_NOT_IMPLEMENTED, \ constexpr const char* at_dispatch_name = NAME; \
TYPE, \ /* don't use TYPE again in case it is an expensive or side-effect op */ \
NAME, \ at::ScalarType _st = ::detail::scalar_type(the_type); \
__VA_ARGS__) RECORD_KERNEL_FUNCTION_DTYPE(at_dispatch_name, _st); \
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-enum") \
switch (_st) { \
__VA_ARGS__ \
default: \
TORCH_CHECK_NOT_IMPLEMENTED( \
false, \
'"', \
at_dispatch_name, \
"\" not implemented for '", \
toString(_st), \
"'"); \
} \
C10_DIAGNOSTIC_POP() \
}()
#define AT_DISPATCH_CASE_FLOATING_TYPES(...) \ #define AT_DISPATCH_CASE_FLOATING_TYPES(...) \
AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \ AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
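For orientation, a minimal hedged sketch of how the AT_DISPATCH macro family touched above is typically consumed by a kernel; the function and tensor names are made up for illustration and are not part of this diff. AT_DISPATCH_FLOATING_TYPES expands to AT_DISPATCH_SWITCH plus the floating-point AT_DISPATCH_CASE entries, binding scalar_t to the concrete C++ type in each case.

#include <ATen/ATen.h>
#include <ATen/Dispatch.h>

// Fill a floating-point tensor with ones, dispatching on its dtype.
void fill_one(at::Tensor& t) {
  AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "fill_one", [&] {
    // scalar_t is float or double here, depending on t's dtype.
    auto* data = t.data_ptr<scalar_t>();
    for (int64_t i = 0; i < t.numel(); ++i) {
      data[i] = static_cast<scalar_t>(1);
    }
  });
}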


@ -1,8 +1,3 @@
#pragma once
#include <torch/headeronly/core/Dispatch_v2.h>
// Get AT_DISPATCH_SWITCH and AT_DISPATCH_CASE:
#include <ATen/Dispatch.h> #include <ATen/Dispatch.h>
// This is a new implementation of the AT_DISPATCH macro family from // This is a new implementation of the AT_DISPATCH macro family from
@ -79,19 +74,41 @@
// macro expansion occurs, mediated with AT_EXPAND and AT_GUARD. I mostly // macro expansion occurs, mediated with AT_EXPAND and AT_GUARD. I mostly
// relied on GPT4 to help me get it right. // relied on GPT4 to help me get it right.
// Public API macros
// See documentation above // See documentation above
#define AT_DISPATCH_V2(TYPE, NAME, BODY, ...) \ #define AT_DISPATCH_V2(TYPE, NAME, BODY, ...) \
THO_DISPATCH_V2_TMPL( \ AT_DISPATCH_SWITCH(TYPE, NAME, AT_AP_VAR(AT_WRAP(BODY), TYPE, __VA_ARGS__))
AT_DISPATCH_SWITCH, \
AT_DISPATCH_CASE, \ // This macro lets you pass an arbitrary expression that may contain internal
TYPE, \ // commas to another macro without having the commas causing the expression
NAME, \ // to be interpreted as being multiple arguments
AT_WRAP(BODY), \ #define AT_WRAP(...) __VA_ARGS__
__VA_ARGS__)
#define AT_FLOAT8_TYPES \
c10::kFloat8_e5m2, c10::kFloat8_e5m2fnuz, c10::kFloat8_e4m3fn, \
c10::kFloat8_e4m3fnuz, c10::kFloat8_e8m0fnu
#define AT_INTEGRAL_TYPES \
c10::kByte, c10::kChar, c10::kInt, c10::kLong, c10::kShort
#define AT_FLOATING_TYPES c10::kDouble, c10::kFloat
#define AT_BAREBONES_UNSIGNED_TYPES c10::kUInt16, c10::kUInt32, c10::kUInt64
#define AT_INTEGRAL_TYPES_V2 \
AT_EXPAND(AT_INTEGRAL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES)
#define AT_COMPLEX_TYPES c10::kComplexDouble, c10::kComplexFloat
#define AT_QINT_TYPES c10::kQInt8, c10::kQUInt8, c10::kQInt32
// NB: not *actually* all types
#define AT_ALL_TYPES AT_EXPAND(AT_INTEGRAL_TYPES), AT_EXPAND(AT_FLOATING_TYPES)
#define AT_ALL_TYPES_AND_COMPLEX \
AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_COMPLEX_TYPES)
// Helper macros
// Unused helper macros, kept for BC:
#define AT_AP_VAR(N, T, ...) \ #define AT_AP_VAR(N, T, ...) \
AT_EXPAND(AT_CONCAT(AT_AP, AT_NUM_ARGS(__VA_ARGS__))(AT_WRAP(N), __VA_ARGS__)) AT_EXPAND(AT_CONCAT(AT_AP, AT_NUM_ARGS(__VA_ARGS__))(AT_WRAP(N), __VA_ARGS__))
#define AT_CONCAT(a, b) AT_CONCAT_AUX(a, b)
#define AT_CONCAT_AUX(a, b) a##b
#define AT_EXPAND(X) X
// Ensure we never have too many scalar types for the expansion here to // Ensure we never have too many scalar types for the expansion here to
// support. To bump this, you must regenerate the macros below. // support. To bump this, you must regenerate the macros below.
@ -102,6 +119,12 @@ static_assert(static_cast<int>(c10::ScalarType::NumOptions) < 60);
num_args = 60 num_args = 60
nums = ', '.join(str(i) for i in reversed(range(num_args+1)))
args = ', '.join(f'_{i}' for i in range(1, num_args+1))
print(f'#define AT_NUM_ARGS(...) AT_EXPAND(AT_NUM_ARGS_AUX(__VA_ARGS__, {nums}))')
print(f'#define AT_NUM_ARGS_AUX({args}, N, ...) N')
for i in range(1, num_args+1): for i in range(1, num_args+1):
args = ', '.join(f'_{i}' for i in range(1, i+1)) args = ', '.join(f'_{i}' for i in range(1, i+1))
cases = ' '.join([f'AT_DISPATCH_CASE(_{j}, N)' for j in range(1, i+1)]) cases = ' '.join([f'AT_DISPATCH_CASE(_{j}, N)' for j in range(1, i+1)])
@ -112,6 +135,8 @@ for i in range(1, num_args+1):
// Begin generated code // Begin generated code
// clang-format off // clang-format off
#define AT_NUM_ARGS(...) AT_EXPAND(AT_NUM_ARGS_AUX(__VA_ARGS__, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0))
#define AT_NUM_ARGS_AUX(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, N, ...) N
#define AT_AP1(N, _1) AT_DISPATCH_CASE(_1, N) #define AT_AP1(N, _1) AT_DISPATCH_CASE(_1, N)
#define AT_AP2(N, _1, _2) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) #define AT_AP2(N, _1, _2) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N)
#define AT_AP3(N, _1, _2, _3) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N) #define AT_AP3(N, _1, _2, _3) AT_DISPATCH_CASE(_1, N) AT_DISPATCH_CASE(_2, N) AT_DISPATCH_CASE(_3, N)
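As a rough usage sketch (call shape assumed from the documentation in this header, not taken from the diff itself), AT_DISPATCH_V2 takes the dtype, a name, an AT_WRAP-wrapped body, and then a variadic list of scalar types or AT_EXPAND'ed type groups. The helper below is hypothetical.

#include <ATen/ATen.h>
#include <ATen/Dispatch_v2.h>

// Hypothetical helper: zero the first element for any of the listed dtypes.
void zero_first_element(at::Tensor& self) {
  AT_DISPATCH_V2(self.scalar_type(), "zero_first_element",
      AT_WRAP([&] {
        // scalar_t is bound to the concrete type for each listed dtype.
        self.data_ptr<scalar_t>()[0] = static_cast<scalar_t>(0);
      }),
      AT_EXPAND(AT_ALL_TYPES), at::kHalf, at::kBFloat16);
}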


@ -226,8 +226,8 @@ template <
typename B = HostBlock<S>> typename B = HostBlock<S>>
struct CachingHostAllocatorImpl { struct CachingHostAllocatorImpl {
virtual ~CachingHostAllocatorImpl() { virtual ~CachingHostAllocatorImpl() {
if (active_) { active_ = false;
active_ = false; if (pinned_use_background_threads()) {
getBackgroundThreadPool()->waitWorkComplete(); getBackgroundThreadPool()->waitWorkComplete();
} }
} }
@ -260,7 +260,6 @@ struct CachingHostAllocatorImpl {
if (pinned_use_background_threads()) { if (pinned_use_background_threads()) {
// Launch the background thread and process events in a loop. // Launch the background thread and process events in a loop.
static bool background_thread_flag [[maybe_unused]] = [this] { static bool background_thread_flag [[maybe_unused]] = [this] {
active_ = true;
getBackgroundThreadPool()->run([&]() { getBackgroundThreadPool()->run([&]() {
while (active_) { while (active_) {
process_events(); process_events();
@ -684,9 +683,9 @@ struct CachingHostAllocatorImpl {
alignas(hardware_destructive_interference_size) std::mutex events_mutex_; alignas(hardware_destructive_interference_size) std::mutex events_mutex_;
std::deque<std::pair<E, B*>> events_; // event queue paired with block std::deque<std::pair<E, B*>> events_; // event queue paired with block
// Indicates whether the event-processing thread pool is active. // Indicates whether the object is active.
// Set to false in the destructor to signal background threads to stop. // Set to false in the destructor to signal background threads to stop.
std::atomic<bool> active_{false}; std::atomic<bool> active_{true};
protected: protected:
alignas(hardware_destructive_interference_size) HostStatsStaged stats_; alignas(hardware_destructive_interference_size) HostStatsStaged stats_;
}; };
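The active_ change above is about thread lifecycle. As a self-contained, hedged sketch (not the PyTorch implementation): the flag starts false, is set only once the background worker is actually launched, and is cleared in the destructor so the worker loop exits before being joined.

#include <atomic>
#include <chrono>
#include <thread>

struct Worker {
  std::atomic<bool> active_{false};
  std::thread thread_;

  void start() {
    active_ = true;  // mark active only once a thread really exists
    thread_ = std::thread([this] {
      while (active_) {
        // process_events();
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
      }
    });
  }

  ~Worker() {
    if (active_) {  // if start() never ran, there is nothing to stop
      active_ = false;
      if (thread_.joinable()) {
        thread_.join();
      }
    }
  }
};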


@ -18,8 +18,6 @@
#include <unordered_set> #include <unordered_set>
#include <utility> #include <utility>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-default")
namespace torch { namespace torch {
class TORCH_API CustomClassHolder : public c10::intrusive_ptr_target {}; class TORCH_API CustomClassHolder : public c10::intrusive_ptr_target {};
namespace jit { namespace jit {
@ -1632,6 +1630,4 @@ struct TORCH_API WeakOrStrongTypePtr {
} // namespace c10 } // namespace c10
C10_DIAGNOSTIC_POP()
#include <ATen/core/ivalue_inl.h> // IWYU pragma: keep #include <ATen/core/ivalue_inl.h> // IWYU pragma: keep
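For readers unfamiliar with the C10_DIAGNOSTIC_PUSH/POP macros removed in this hunk: they wrap the standard compiler diagnostic pragmas. A generic, hedged illustration of what such a guard expands to on GCC/Clang (the real c10 macros also handle MSVC and compilers that lack the named warning):

// Generic sketch only, not the exact macro expansion.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wswitch-default"
// ... declarations whose switch statements intentionally omit a default: case ...
#pragma GCC diagnostic pop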


@ -29,8 +29,6 @@
#include <c10/util/intrusive_ptr.h> #include <c10/util/intrusive_ptr.h>
#include <c10/util/irange.h> #include <c10/util/irange.h>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-default")
namespace torch { namespace torch {
namespace jit { namespace jit {
struct Function; struct Function;
@ -2569,5 +2567,3 @@ TypePtr IValue::type() const {
} }
} // namespace c10 } // namespace c10
C10_DIAGNOSTIC_POP()


@ -11,8 +11,6 @@
#include <sleef.h> #include <sleef.h>
#endif #endif
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-default")
// Sleef offers vectorized versions of some transcedentals // Sleef offers vectorized versions of some transcedentals
// such as sin, cos, tan etc.. // such as sin, cos, tan etc..
// However for now opting for STL, since we are not building // However for now opting for STL, since we are not building
@ -652,5 +650,3 @@ inline Vectorized<float> Vectorized<float>::erf() const {
} // namespace CPU_CAPABILITY } // namespace CPU_CAPABILITY
} // namespace at::vec } // namespace at::vec
C10_DIAGNOSTIC_POP()


@ -1597,7 +1597,7 @@ bool gemm_and_bias(
} }
using opmath_t = at::opmath_type<Dtype>; using opmath_t = at::opmath_type<Dtype>;
opmath_t beta_val = bias ? 0 : 1; // bias is added in epilogue unless nullptr opmath_t beta_val = 0; // bias is added in epilogue
cudaDataType_t abType = CUDA_R_32F; cudaDataType_t abType = CUDA_R_32F;
cudaDataType_t cType = CUDA_R_32F; cudaDataType_t cType = CUDA_R_32F;
@ -1686,22 +1686,15 @@ bool gemm_and_bias(
_syncCurrentWithCarveoutStream(stream, true); _syncCurrentWithCarveoutStream(stream, true);
} }
#endif #endif
const auto epilogue = [&]() -> cublasLtEpilogue_t { cublasLtEpilogue_t epilogue = CUBLASLT_EPILOGUE_BIAS;
// The cuBLAS documentation indicates that if (activation == GEMMAndBiasActivationEpilogue::RELU) {
// *_<ACTIVATION>_BIAS = *_<ACTIVATION>, epilogue = CUBLASLT_EPILOGUE_RELU_BIAS;
// but we keep it verbose here for clarity. } else if (activation == GEMMAndBiasActivationEpilogue::GELU) {
switch (activation) { epilogue = CUBLASLT_EPILOGUE_GELU_BIAS;
case GEMMAndBiasActivationEpilogue::RELU: }
return bias ? CUBLASLT_EPILOGUE_RELU_BIAS : CUBLASLT_EPILOGUE_RELU;
case GEMMAndBiasActivationEpilogue::GELU:
return bias ? CUBLASLT_EPILOGUE_GELU_BIAS : CUBLASLT_EPILOGUE_GELU;
default:
return bias ? CUBLASLT_EPILOGUE_BIAS : CUBLASLT_EPILOGUE_DEFAULT;
}
}();
computeDesc.setAttribute(CUBLASLT_MATMUL_DESC_EPILOGUE, epilogue);
if (bias) { if (bias != nullptr) {
computeDesc.setAttribute(CUBLASLT_MATMUL_DESC_EPILOGUE, epilogue);
computeDesc.setAttribute(CUBLASLT_MATMUL_DESC_BIAS_POINTER, bias); computeDesc.setAttribute(CUBLASLT_MATMUL_DESC_BIAS_POINTER, bias);
} }
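The left-hand version above selects the epilogue with an immediately-invoked lambda; a generic, hedged sketch of that idiom follows (the enum names are stand-ins for illustration, not the cuBLASLt API). The payoff is that the chosen value can be const and every branch is handled in one expression.

enum class Activation { None, RELU, GELU };
enum class Epilogue { Default, Bias, Relu, ReluBias, Gelu, GeluBias };

// Pick an epilogue from the activation and bias presence in a single const init.
Epilogue pick_epilogue(Activation act, bool has_bias) {
  const auto epilogue = [&]() -> Epilogue {
    switch (act) {
      case Activation::RELU: return has_bias ? Epilogue::ReluBias : Epilogue::Relu;
      case Activation::GELU: return has_bias ? Epilogue::GeluBias : Epilogue::Gelu;
      default:               return has_bias ? Epilogue::Bias     : Epilogue::Default;
    }
  }();
  return epilogue;
}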


@ -1,7 +1,6 @@
#include <ATen/cuda/CUDAGeneratorImpl.h> #include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/cuda/CUDAGraph.h> #include <ATen/cuda/CUDAGraph.h>
#include <ATen/cuda/Exceptions.h> #include <ATen/cuda/Exceptions.h>
#include <ATen/cuda/MemPool.h>
#include <ATen/Functions.h> #include <ATen/Functions.h>
#include <c10/cuda/CUDAFunctions.h> #include <c10/cuda/CUDAFunctions.h>
@ -14,7 +13,7 @@ static bool _cuda_graphs_debug = false;
MempoolId_t graph_pool_handle() { MempoolId_t graph_pool_handle() {
// Sets just the second value, to distinguish it from MempoolId_ts created from // Sets just the second value, to distinguish it from MempoolId_ts created from
// cudaStreamGetCaptureInfo id_s in capture_begin. // cudaStreamGetCaptureInfo id_s in capture_begin.
return at::cuda::MemPool::graph_pool_handle(); return c10::cuda::MemPool::graph_pool_handle();
} }
/** /**
@ -91,7 +90,7 @@ void CUDAGraph::capture_begin(MempoolId_t pool/*=0*/, cudaStreamCaptureMode capt
} else { } else {
// User did not ask us to share a mempool. Create graph pool handle using is_user_created=false. // User did not ask us to share a mempool. Create graph pool handle using is_user_created=false.
// Sets just the first value, to distinguish it from MempoolId_ts created by graph_pool_handle(). // Sets just the first value, to distinguish it from MempoolId_ts created by graph_pool_handle().
mempool_id_ = at::cuda::MemPool::graph_pool_handle(false); mempool_id_ = c10::cuda::MemPool::graph_pool_handle(false);
TORCH_INTERNAL_ASSERT(mempool_id_.first > 0); TORCH_INTERNAL_ASSERT(mempool_id_.first > 0);
} }


@ -1,69 +0,0 @@
#include <ATen/core/CachingHostAllocator.h>
#include <ATen/cuda/MemPool.h>
namespace at::cuda {
// uid_ is incremented when a user creates a MemPool,
// for example: using graph_pool_handle() or c10::cuda::MemPool().
//
// uuid_ is incremented when CUDAGraph creates a MemPool
// as a result of a user not providing a pool.
//
// MempoolId_t of {0, 0} is used to denote when no MemPool has been
// passed to a function, either by user or CUDAGraphs. For example,
// default value of MempoolId_t for capture_begin function is {0, 0}.
// That's why uid_ and uuid_ start at 1.
std::atomic<CaptureId_t> MemPool::uid_{1};
std::atomic<CaptureId_t> MemPool::uuid_{1};
MemPool::MemPool(
CUDACachingAllocator::CUDAAllocator* allocator,
bool is_user_created,
bool use_on_oom)
: allocator_(allocator), is_user_created_(is_user_created) {
if (is_user_created_) {
id_ = {0, uid_++};
} else {
id_ = {uuid_++, 0};
}
device_ = c10::cuda::current_device();
CUDACachingAllocator::createOrIncrefPool(device_, id_, allocator);
if (use_on_oom) {
CUDACachingAllocator::setUseOnOOM(device_, id_);
}
}
MemPool::~MemPool() {
// TORCH_INTERNAL_ASSERT(use_count() == 1);
// We used to assert that TORCH_INTERNAL_ASSERT(use_count() == 1);
// However, this assertion is not true if a memory pool is shared
// with a cuda graph. That CUDAGraph will increase the use count
// until it is reset.
CUDACachingAllocator::releasePool(device_, id_);
c10::cuda::CUDACachingAllocator::emptyCache(id_);
}
MempoolId_t MemPool::id() {
return id_;
}
CUDACachingAllocator::CUDAAllocator* MemPool::allocator() {
return allocator_;
}
int MemPool::use_count() {
return CUDACachingAllocator::getPoolUseCount(device_, id_);
}
c10::DeviceIndex MemPool::device() {
return device_;
}
MempoolId_t MemPool::graph_pool_handle(bool is_user_created) {
if (is_user_created) {
return {0, uid_++};
}
return {uuid_++, 0};
}
} // namespace at::cuda


@ -1,44 +0,0 @@
#pragma once
#include <c10/core/Allocator.h>
#include <c10/cuda/CUDACachingAllocator.h>
namespace at::cuda {
// Keep BC only
using c10::CaptureId_t;
using c10::MempoolId_t;
// MemPool represents a pool of memory in a caching allocator. Currently,
// it's just the ID of the pool object maintained in the CUDACachingAllocator.
//
// An allocator pointer can be passed to the MemPool to define how the
// allocations should be done in the pool. For example: using a different
// system allocator such as ncclMemAlloc.
struct TORCH_CUDA_CPP_API MemPool {
MemPool(
c10::cuda::CUDACachingAllocator::CUDAAllocator* allocator = nullptr,
bool is_user_created = true,
bool use_on_oom = false);
MemPool(const MemPool&) = delete;
MemPool(MemPool&&) = default;
MemPool& operator=(const MemPool&) = delete;
MemPool& operator=(MemPool&&) = default;
~MemPool();
MempoolId_t id();
c10::cuda::CUDACachingAllocator::CUDAAllocator* allocator();
int use_count();
c10::DeviceIndex device();
static MempoolId_t graph_pool_handle(bool is_user_created = true);
private:
static std::atomic<CaptureId_t> uid_;
static std::atomic<CaptureId_t> uuid_;
c10::cuda::CUDACachingAllocator::CUDAAllocator* allocator_;
bool is_user_created_;
MempoolId_t id_;
c10::DeviceIndex device_;
};
} // namespace at::cuda
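For context on what is being removed, a hedged sketch of how this header was typically consumed together with CUDAGraph, based only on the declarations shown above; the helper name is hypothetical and signatures may differ in other versions.

#include <ATen/cuda/CUDAGraph.h>
#include <ATen/cuda/MemPool.h>

// Hypothetical capture helper: share a user-created pool with a graph capture.
void capture_into_user_pool(at::cuda::CUDAGraph& graph) {
  at::cuda::MemPool pool;           // registers the pool with the caching allocator
  graph.capture_begin(pool.id());   // MempoolId_t overload shown in the diff above
  // ... launch the work to be captured ...
  graph.capture_end();
}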


@ -55,6 +55,14 @@ struct numeric_limits<int8_t> {
static inline __host__ __device__ int8_t upper_bound() { return INT8_MAX; } static inline __host__ __device__ int8_t upper_bound() { return INT8_MAX; }
}; };
template <>
struct numeric_limits<uint16_t> {
static inline __host__ __device__ uint16_t lowest() { return 0; }
static inline __host__ __device__ uint16_t max() { return UINT16_MAX; }
static inline __host__ __device__ uint16_t lower_bound() { return 0; }
static inline __host__ __device__ uint16_t upper_bound() { return UINT16_MAX; }
};
template <> template <>
struct numeric_limits<int16_t> { struct numeric_limits<int16_t> {
static inline __host__ __device__ int16_t lowest() { return INT16_MIN; } static inline __host__ __device__ int16_t lowest() { return INT16_MIN; }
@ -63,6 +71,14 @@ struct numeric_limits<int16_t> {
static inline __host__ __device__ int16_t upper_bound() { return INT16_MAX; } static inline __host__ __device__ int16_t upper_bound() { return INT16_MAX; }
}; };
template <>
struct numeric_limits<uint32_t> {
static inline __host__ __device__ uint32_t lowest() { return 0; }
static inline __host__ __device__ uint32_t max() { return UINT32_MAX; }
static inline __host__ __device__ uint32_t lower_bound() { return 0; }
static inline __host__ __device__ uint32_t upper_bound() { return UINT32_MAX; }
};
template <> template <>
struct numeric_limits<int32_t> { struct numeric_limits<int32_t> {
static inline __host__ __device__ int32_t lowest() { return INT32_MIN; } static inline __host__ __device__ int32_t lowest() { return INT32_MIN; }
@ -71,6 +87,21 @@ struct numeric_limits<int32_t> {
static inline __host__ __device__ int32_t upper_bound() { return INT32_MAX; } static inline __host__ __device__ int32_t upper_bound() { return INT32_MAX; }
}; };
template <>
struct numeric_limits<uint64_t> {
#ifdef _MSC_VER
static inline __host__ __device__ uint64_t lowest() { return 0; }
static inline __host__ __device__ uint64_t max() { return _UI64_MAX; }
static inline __host__ __device__ uint64_t lower_bound() { return 0; }
static inline __host__ __device__ uint64_t upper_bound() { return _UI64_MAX; }
#else
static inline __host__ __device__ uint64_t lowest() { return 0; }
static inline __host__ __device__ uint64_t max() { return UINT64_MAX; }
static inline __host__ __device__ uint64_t lower_bound() { return 0; }
static inline __host__ __device__ uint64_t upper_bound() { return UINT64_MAX; }
#endif
};
template <> template <>
struct numeric_limits<int64_t> { struct numeric_limits<int64_t> {
#ifdef _MSC_VER #ifdef _MSC_VER


@ -21,7 +21,6 @@
#if AT_CUDNN_ENABLED() #if AT_CUDNN_ENABLED()
#include <ATen/cudnn/cudnn-wrapper.h> #include <ATen/cudnn/cudnn-wrapper.h>
#include <cudnn_frontend.h>
#endif #endif
#if AT_MAGMA_ENABLED() #if AT_MAGMA_ENABLED()
@ -352,26 +351,6 @@ long CUDAHooks::versionCuDNN() const {
#endif #endif
} }
long CUDAHooks::versionRuntimeCuDNN() const {
#if AT_CUDNN_ENABLED()
#ifndef USE_STATIC_CUDNN
return cudnnGetVersion();
#else
return CUDNN_VERSION;
#endif
#else
TORCH_CHECK(false, "Cannot query CuDNN version if ATen_cuda is not built with CuDNN");
#endif
}
long CUDAHooks::versionCuDNNFrontend() const {
#if AT_CUDNN_ENABLED()
return CUDNN_FRONTEND_VERSION;
#else
TORCH_CHECK(false, "Cannot query CuDNN Frontend version if ATen_cuda is not built with CuDNN");
#endif
}
long CUDAHooks::versionMIOpen() const { long CUDAHooks::versionMIOpen() const {
#if AT_ROCM_ENABLED() #if AT_ROCM_ENABLED()
return MIOPEN_VERSION_MAJOR * 10000 + return MIOPEN_VERSION_MAJOR * 10000 +


@ -49,8 +49,6 @@ struct CUDAHooks : public at::CUDAHooksInterface {
bool hasCUDART() const override; bool hasCUDART() const override;
long versionCUDART() const override; long versionCUDART() const override;
long versionCuDNN() const override; long versionCuDNN() const override;
long versionRuntimeCuDNN() const override;
long versionCuDNNFrontend() const override;
long versionMIOpen() const override; long versionMIOpen() const override;
std::string showConfig() const override; std::string showConfig() const override;
double batchnormMinEpsilonCuDNN() const override; double batchnormMinEpsilonCuDNN() const override;


@ -174,14 +174,6 @@ struct TORCH_API CUDAHooksInterface : AcceleratorHooksInterface {
TORCH_CHECK(false, "Cannot query cuDNN version without ATen_cuda library. ", CUDA_HELP); TORCH_CHECK(false, "Cannot query cuDNN version without ATen_cuda library. ", CUDA_HELP);
} }
virtual long versionRuntimeCuDNN() const {
TORCH_CHECK(false, "Cannot query cuDNN version without ATen_cuda library. ", CUDA_HELP);
}
virtual long versionCuDNNFrontend() const {
TORCH_CHECK(false, "Cannot query cuDNN Frontend version without ATen_cuda library. ", CUDA_HELP);
}
virtual long versionMIOpen() const { virtual long versionMIOpen() const {
TORCH_CHECK(false, "Cannot query MIOpen version without ATen_cuda library. ", CUDA_HELP); TORCH_CHECK(false, "Cannot query MIOpen version without ATen_cuda library. ", CUDA_HELP);
} }

View File

@ -157,8 +157,6 @@ constexpr DispatchKeySet kKeysToPropagateToWrapper({
DispatchKey::Negative, DispatchKey::Negative,
DispatchKey::Conjugate, DispatchKey::Conjugate,
DispatchKey::XLA, DispatchKey::XLA,
DispatchKey::XPU,
DispatchKey::HPU,
DispatchKey::CUDA, DispatchKey::CUDA,
DispatchKey::CPU, DispatchKey::CPU,
DispatchKey::PrivateUse1, DispatchKey::PrivateUse1,

View File

@ -440,7 +440,7 @@ bool MPSHeapAllocatorImpl::release_cached_buffers() {
// we need to release the lock temporarily as synchronizing may cause deadlock with completion handlers. // we need to release the lock temporarily as synchronizing may cause deadlock with completion handlers.
m_mutex.unlock(); m_mutex.unlock();
auto stream = getDefaultMPSStream(); auto stream = getDefaultMPSStream();
dispatch_sync_with_rethrow(stream->queue(), ^() { dispatch_sync(stream->queue(), ^() {
stream->synchronize(SyncType::COMMIT_AND_WAIT); stream->synchronize(SyncType::COMMIT_AND_WAIT);
}); });
m_mutex.lock(); m_mutex.lock();

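The comment above ("release the lock temporarily as synchronizing may cause deadlock with completion handlers") describes a standard unlock, wait, relock ordering. A generic C++ sketch of that ordering follows; the mutex name and the no-op synchronize are hypothetical stand-ins for the allocator's real members.

#include <mutex>

std::mutex m_mutex;  // hypothetical stand-in for the allocator's mutex

// Stand-in for a blocking wait whose completion handlers may themselves
// need m_mutex; holding the lock across this call could deadlock.
void blocking_synchronize() {}

void release_cached_buffers_sketch() {
  std::unique_lock<std::mutex> lock(m_mutex);
  // ... decide what to release while holding the lock ...
  lock.unlock();           // let completion handlers acquire the mutex
  blocking_synchronize();  // wait for in-flight work
  lock.lock();             // reacquire before touching shared state again
}

int main() {
  release_cached_buffers_sketch();
}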
View File

@ -110,9 +110,6 @@ class TORCH_API MPSStream {
return _stream; return _stream;
} }
MTLBuffer_t getErrorBuffer();
void checkLastError();
private: private:
Stream _stream; Stream _stream;
MTLCommandQueue_t _commandQueue = nil; MTLCommandQueue_t _commandQueue = nil;
@ -124,8 +121,6 @@ class TORCH_API MPSStream {
dispatch_queue_t _serialQueue = nullptr; dispatch_queue_t _serialQueue = nullptr;
// CommitAndContinue is enabled by default // CommitAndContinue is enabled by default
bool _enableCommitAndContinue = true; bool _enableCommitAndContinue = true;
// Buffer that contains last raised error
MTLBuffer_t _errorBuffer = nil;
// use synchronize() to access any of these commit functions outside MPSStream // use synchronize() to access any of these commit functions outside MPSStream
void commit(); void commit();
@ -160,7 +155,4 @@ class TORCH_API MPSStreamImpl {
MPSStreamImpl(); MPSStreamImpl();
}; };
#ifdef __OBJC__
void dispatch_sync_with_rethrow(dispatch_queue_t queue, void (^block)());
#endif
} // namespace at::mps } // namespace at::mps

View File

@ -3,13 +3,13 @@
#include <ATen/mps/MPSAllocatorInterface.h> #include <ATen/mps/MPSAllocatorInterface.h>
#include <ATen/mps/MPSProfiler.h> #include <ATen/mps/MPSProfiler.h>
#include <ATen/mps/MPSStream.h> #include <ATen/mps/MPSStream.h>
#include <c10/metal/error.h>
@interface MPSGraphExecutionDescriptor () @interface MPSGraphExecutionDescriptor ()
@property(readwrite, atomic) BOOL enableCommitAndContinue; @property(readwrite, atomic) BOOL enableCommitAndContinue;
@end @end
namespace at::mps { namespace at::mps {
//----------------------------------------------------------------- //-----------------------------------------------------------------
// MPSStream // MPSStream
//----------------------------------------------------------------- //-----------------------------------------------------------------
@ -30,10 +30,6 @@ MPSStream::MPSStream(Stream stream) : _stream(stream) {
// Choose level which optimizes for GPU // Choose level which optimizes for GPU
_compilationDescriptor.optimizationLevel = MPSGraphOptimizationLevel0; _compilationDescriptor.optimizationLevel = MPSGraphOptimizationLevel0;
_executionDescriptor.compilationDescriptor = _compilationDescriptor; _executionDescriptor.compilationDescriptor = _compilationDescriptor;
_errorBuffer = [MPSDevice::getInstance()->device() newBufferWithLength:sizeof(c10::metal::ErrorMessages)
options:MTLResourceStorageModeShared];
std::memset([_errorBuffer contents], 0, 1024);
} }
MPSStream::~MPSStream() { MPSStream::~MPSStream() {
@ -42,8 +38,6 @@ MPSStream::~MPSStream() {
[_executionDescriptor release]; [_executionDescriptor release];
[_compilationDescriptor release]; [_compilationDescriptor release];
_executionDescriptor = nil; _executionDescriptor = nil;
[_errorBuffer release];
_errorBuffer = nil;
_compilationDescriptor = nil; _compilationDescriptor = nil;
assert(_commandBuffer == nil); assert(_commandBuffer == nil);
@ -110,7 +104,6 @@ void MPSStream::commitAndWait() {
[_prevCommandBuffer waitUntilCompleted]; [_prevCommandBuffer waitUntilCompleted];
[_prevCommandBuffer release]; [_prevCommandBuffer release];
_prevCommandBuffer = nil; _prevCommandBuffer = nil;
checkLastError();
} }
if (_commandBuffer) { if (_commandBuffer) {
@ -118,7 +111,6 @@ void MPSStream::commitAndWait() {
[_commandBuffer waitUntilCompleted]; [_commandBuffer waitUntilCompleted];
[_commandBuffer release]; [_commandBuffer release];
_commandBuffer = nil; _commandBuffer = nil;
checkLastError();
} }
} }
@ -161,7 +153,7 @@ void MPSStream::fill(id<MTLBuffer> buffer, uint8_t value, size_t length, size_t
if (length == 0) { if (length == 0) {
return; return;
} }
dispatch_sync_with_rethrow(_serialQueue, ^() { dispatch_sync(_serialQueue, ^() {
@autoreleasepool { @autoreleasepool {
endKernelCoalescing(); endKernelCoalescing();
id<MTLBlitCommandEncoder> blitEncoder = [commandBuffer() blitCommandEncoder]; id<MTLBlitCommandEncoder> blitEncoder = [commandBuffer() blitCommandEncoder];
@ -191,7 +183,7 @@ void MPSStream::copy(id<MTLBuffer> srcBuffer,
size_t dstOffset, size_t dstOffset,
uint64_t profileId, uint64_t profileId,
SyncType syncType) { SyncType syncType) {
dispatch_sync_with_rethrow(_serialQueue, ^() { dispatch_sync(_serialQueue, ^() {
@autoreleasepool { @autoreleasepool {
endKernelCoalescing(); endKernelCoalescing();
id<MTLBlitCommandEncoder> blitEncoder = [commandBuffer() blitCommandEncoder]; id<MTLBlitCommandEncoder> blitEncoder = [commandBuffer() blitCommandEncoder];
@ -244,7 +236,7 @@ void MPSStream::executeMPSGraph(MPSGraph* mpsGraph, NSDictionary* feeds, NSDicti
auto& profiler = getMPSProfiler(); auto& profiler = getMPSProfiler();
const bool isGraphProfilingEnabled = profiler.isOperationProfilingEnabled(); const bool isGraphProfilingEnabled = profiler.isOperationProfilingEnabled();
dispatch_sync_with_rethrow(_serialQueue, ^() { dispatch_sync(_serialQueue, ^() {
endKernelCoalescing(); endKernelCoalescing();
if (isGraphProfilingEnabled) { if (isGraphProfilingEnabled) {
// this function call is only relevant for interval-based Signposts // this function call is only relevant for interval-based Signposts
@ -274,24 +266,6 @@ void MPSStream::executeMPSGraph(MPSGraph* mpsGraph, NSDictionary* feeds, NSDicti
}); });
} }
id<MTLBuffer> MPSStream::getErrorBuffer() {
return _errorBuffer;
}
void MPSStream::checkLastError() {
auto msgs = reinterpret_cast<c10::metal::ErrorMessages*>([_errorBuffer contents]);
if (!msgs) {
return;
}
const auto& msg = msgs->msg[0];

unsigned int count = 0;
std::swap(count, msgs->count);
if (!count) {
return;
}
throw c10::AcceleratorError({msg.func, msg.file, msg.line}, 1, msg.message);
}
//----------------------------------------------------------------- //-----------------------------------------------------------------
// MPSStreamImpl // MPSStreamImpl
//----------------------------------------------------------------- //-----------------------------------------------------------------
@ -315,19 +289,4 @@ MPSStream* getDefaultMPSStream() {
return MPSStreamImpl::getInstance(); return MPSStreamImpl::getInstance();
} }
// Helper methods
void dispatch_sync_with_rethrow(dispatch_queue_t queue, void (^block)()) {
__block std::optional<std::exception_ptr> block_exception;
dispatch_sync(queue, ^() {
try {
block();
} catch (...) {
block_exception = std::current_exception();
}
});
if (block_exception) {
std::rethrow_exception(*block_exception);
}
}
} // namespace at::mps } // namespace at::mps

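The dispatch_sync_with_rethrow helper removed above parks any C++ exception thrown inside the dispatched block in a std::exception_ptr and rethrows it on the calling thread, since an exception escaping a GCD block would otherwise terminate the process. Below is a portable sketch of the same technique, with a plain callable standing in for dispatch_sync and the Objective-C block; names are my own, not the MPS code.

#include <cstdio>
#include <exception>
#include <functional>
#include <optional>
#include <stdexcept>

// Stand-in for dispatch_sync: here it simply runs the callable inline.
void run_synchronously(const std::function<void()>& fn) { fn(); }

void run_with_rethrow(const std::function<void()>& fn) {
  std::optional<std::exception_ptr> pending;
  run_synchronously([&] {
    try {
      fn();
    } catch (...) {
      // Park the exception so it does not escape the "queue".
      pending = std::current_exception();
    }
  });
  if (pending) {
    std::rethrow_exception(*pending);  // rethrow on the calling thread
  }
}

int main() {
  try {
    run_with_rethrow([] { throw std::runtime_error("raised inside the queue"); });
  } catch (const std::exception& e) {
    std::printf("caught on caller: %s\n", e.what());
  }
}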
View File

@ -1009,25 +1009,12 @@ static Device correct_out_device(const Tensor& self, const Tensor& other) {
} }
} }
static Tensor send_to_meta(const Tensor& self, const Device& device) {
Tensor out_meta;
if (self._is_zerotensor() && self.unsafeGetTensorImpl()->is_wrapped_number()) {
out_meta = at::_efficientzerotensor(self.sizes(), self.options().device(device));
out_meta.unsafeGetTensorImpl()->set_wrapped_number(true);
} else {
out_meta = self.to(device);
}
return out_meta;
}
Tensor mul_zerotensor(const Tensor& self, const Tensor& other) { Tensor mul_zerotensor(const Tensor& self, const Tensor& other) {
auto out_device = correct_out_device(self, other); auto out_device = correct_out_device(self, other);
// hack to use the TensorIterator to get the correct broadcasting and type promotion logic // hack to use the TensorIterator to get the correct broadcasting and type promotion logic
auto device_ = Device(DeviceType::Meta); auto device_ = Device(DeviceType::Meta);
constexpr c10::DispatchKeySet meta_dks(at::DispatchKey::Meta); constexpr c10::DispatchKeySet meta_dks(at::DispatchKey::Meta);
auto self_meta = send_to_meta(self, device_); auto meta_out = at::_ops::mul_Tensor::redispatch(meta_dks, self.to(device_), other.to(device_));
auto other_meta = send_to_meta(other, device_);
auto meta_out = at::_ops::mul_Tensor::redispatch(meta_dks, self_meta, other_meta);
return at::_efficientzerotensor(meta_out.sizes(), meta_out.options().device(out_device)); return at::_efficientzerotensor(meta_out.sizes(), meta_out.options().device(out_device));
} }
@ -1036,9 +1023,7 @@ Tensor div_zerotensor(const Tensor& self, const Tensor& other) {
// hack to use the TensorIterator to get the correct broadcasting and type promotion logic // hack to use the TensorIterator to get the correct broadcasting and type promotion logic
auto device_ = Device(DeviceType::Meta); auto device_ = Device(DeviceType::Meta);
constexpr c10::DispatchKeySet meta_dks(at::DispatchKey::Meta); constexpr c10::DispatchKeySet meta_dks(at::DispatchKey::Meta);
auto self_meta = send_to_meta(self, device_); auto meta_out = at::_ops::div_Tensor::redispatch(meta_dks, self.to(device_), other.to(device_));
auto other_meta = send_to_meta(other, device_);
auto meta_out = at::_ops::div_Tensor::redispatch(meta_dks, self_meta, other_meta);
if (self._is_zerotensor()) { if (self._is_zerotensor()) {
if (other._is_zerotensor()) { if (other._is_zerotensor()) {
@ -1067,9 +1052,8 @@ static Tensor maybe_add_maybe_sub(const Tensor& self, const Tensor& other, const
// hack to use the TensorIterator to get the correct broadcasting and type promotion logic // hack to use the TensorIterator to get the correct broadcasting and type promotion logic
auto device_ = Device(DeviceType::Meta); auto device_ = Device(DeviceType::Meta);
constexpr c10::DispatchKeySet meta_dks(at::DispatchKey::Meta); constexpr c10::DispatchKeySet meta_dks(at::DispatchKey::Meta);
auto self_meta = send_to_meta(self, device_); auto meta_out = at::_ops::add_Tensor::redispatch(
auto other_meta = send_to_meta(other, device_); meta_dks, self.to(device_), other.to(device_), alpha);
auto meta_out = at::_ops::add_Tensor::redispatch(meta_dks, self_meta, other_meta, alpha);
auto get_out_like = [&] (const Tensor& tensor) auto get_out_like = [&] (const Tensor& tensor)
{ {

View File
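The zerotensor kernels above redispatch to the Meta backend purely to compute the broadcasted output shape and promoted dtype without touching data. A sketch of the same idea using only public ATen API, assuming a libtorch build where at::mul has a Meta kernel registered; the internal _ops::...::redispatch and _efficientzerotensor calls are replaced here by at::mul and at::zeros.

#include <ATen/ATen.h>
#include <iostream>

int main() {
  // Meta tensors carry shape and dtype but allocate no data, so broadcasting
  // and type promotion can be "dry-run" cheaply.
  auto a = at::empty({4, 1}, at::device(at::kMeta).dtype(at::kFloat));
  auto b = at::empty({1, 5}, at::device(at::kMeta).dtype(at::kDouble));
  auto meta_out = at::mul(a, b);  // shape [4, 5], dtype double, still on meta

  std::cout << meta_out.sizes() << " " << meta_out.dtype() << std::endl;

  // Materialize a real zero tensor with the inferred metadata.
  auto out = at::zeros(meta_out.sizes(), meta_out.options().device(at::kCPU));
  std::cout << out.sizes() << std::endl;
}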

@ -409,7 +409,7 @@ struct ConvParams {
if (!detail::getCUDAHooks().compiledWithCuDNN() || !input.is_cuda() || !cudnn_enabled) { if (!detail::getCUDAHooks().compiledWithCuDNN() || !input.is_cuda() || !cudnn_enabled) {
return false; return false;
} }
static long cudnn_version = detail::getCUDAHooks().versionRuntimeCuDNN(); static long cudnn_version = detail::getCUDAHooks().versionCuDNN();
// broken on cuDNN 9.8 - 9.14 // broken on cuDNN 9.8 - 9.14
if (cudnn_version >= 90800 && cudnn_version < 91500) { if (cudnn_version >= 90800 && cudnn_version < 91500) {
if (cudnn_conv_suggest_memory_format(input, weight) == at::MemoryFormat::Contiguous && if (cudnn_conv_suggest_memory_format(input, weight) == at::MemoryFormat::Contiguous &&
@ -453,7 +453,7 @@ struct ConvParams {
} }
// native kernel doesn't support 64-bit non-splittable case // native kernel doesn't support 64-bit non-splittable case
if (!(canUse32BitIndexMath(input) && canUse32BitIndexMath(weight))) { if (!(canUse32BitIndexMath(input) && canUse32BitIndexMath(weight))) {
static long cudnn_version = detail::getCUDAHooks().compiledWithCuDNN() ? detail::getCUDAHooks().versionRuntimeCuDNN() : -1; static long cudnn_version = detail::getCUDAHooks().compiledWithCuDNN() ? detail::getCUDAHooks().versionCuDNN() : -1;
// TODO(eqy): remove this once cuDNN fixes 64-bit depthwise support, first broken in 9.11x // TODO(eqy): remove this once cuDNN fixes 64-bit depthwise support, first broken in 9.11x
if (cudnn_conv_suggest_memory_format(input, weight) != at::MemoryFormat::Contiguous) { if (cudnn_conv_suggest_memory_format(input, weight) != at::MemoryFormat::Contiguous) {
if (cudnn_version < 0 || cudnn_version > 91000) { if (cudnn_version < 0 || cudnn_version > 91000) {

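The thresholds above (90800, 91500, 91000) appear to follow the cuDNN 9.x integer encoding of major * 10000 + minor * 100 + patchlevel; that encoding is an assumption about the scheme, not something stated in the diff. A tiny decoder makes the gated ranges readable.

#include <cstdio>

// Assumed encoding: major * 10000 + minor * 100 + patchlevel.
void print_cudnn_version(long v) {
  std::printf("%ld -> %ld.%ld.%ld\n", v, v / 10000, (v / 100) % 100, v % 100);
}

int main() {
  print_cudnn_version(90800);  // 9.8.0, start of the range noted as broken
  print_cudnn_version(91500);  // 9.15.0, first version outside that range
  print_cudnn_version(91000);  // 9.10.0, threshold for the 64-bit depthwise gate
}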
View File

@ -50,35 +50,18 @@ static inline bool parseLinearFlatten3d() {
// `_flatten_nd_linear` flattens all but the last dimension of the input tensor // `_flatten_nd_linear` flattens all but the last dimension of the input tensor
// before passing it to linear operation // before passing it to linear operation
static inline Tensor _flatten_nd_linear(const Tensor& input, const Tensor& weight, const Tensor& bias) { static inline Tensor _flatten_nd_linear(const Tensor& input, const Tensor& weight, const Tensor& bias) {
const auto input_sizes = input.sym_sizes(); const auto input_sizes = input.sym_sizes();
// can't use -1 in reshape because it errors when a dimension is 0
const auto result_flattened = [&]() -> Tensor { c10::SymInt flattened_dim = 1;
const auto input_ncols = input_sizes.back(); for (int64_t i = 0, ndim = input_sizes.size(); i < ndim - 1; ++i) {
const auto input_flattened_nrows = [&]() -> c10::SymInt { flattened_dim = flattened_dim * input_sizes[i];
// can't use -1 in reshape because it errors when a dimension is 0
auto flattened_nrows = c10::SymInt{1};
for (const auto& size : input_sizes.slice(0, input_sizes.size() - 1)) {
flattened_nrows *= size;
}
return flattened_nrows;
}();
const auto input_flattened = input.view_symint({input_flattened_nrows, input_ncols});
if (weight.layout() == c10::kStrided) {
return at::addmm(bias, input_flattened, weight.t());
} else {
// weight is sparse, and addmm for sparse expects matmul lhs to be sparse,
// so we transpose the problem.
// NOTE: at::matmul handles (dense @ sparse) similarly.
const auto bias_t = (bias.dim() >= 2) ? bias.mT() : bias.unsqueeze(-1);
return at::addmm(bias_t, weight, input_flattened.t()).t();
} }
}(); auto inp_reshape = input.reshape_symint({flattened_dim, input_sizes.at(input_sizes.size() -1)});
const auto result = at::addmm(bias, inp_reshape, weight.t());
// Unflatten flattened row dims auto new_size = input_sizes.slice(0, input_sizes.size() - 1);
auto result_sizes = c10::SymDimVector{input_sizes.begin(), input_sizes.end()}; c10::SymDimVector sizes_vec(new_size.begin(), new_size.end());
result_sizes.back() = result_flattened.sym_size(1); sizes_vec.push_back(result.sym_size(1));
return result_flattened.view_symint(result_sizes); return result.view_symint(sizes_vec);
} }
@ -107,23 +90,15 @@ Tensor linear(const Tensor& input, const Tensor& weight, const std::optional<Ten
// Fused op is marginally faster. // Fused op is marginally faster.
return at::addmm(*bias, input, weight.t()); return at::addmm(*bias, input, weight.t());
} }
if (bias->defined() && !input.is_xla()) {
const auto is_bias_likely_fusable = ( // Also hit the fused path for contiguous 3D input, if not using xla
bias->defined() &&
// cuBLASLt: will fuse in the epilogue without copies
// when input/weight/bias are all strided.
// When weight is not strided, bias will not be fused,
// but we can still dispatch here to avoid at::matmul
// path which will probably use a very similar
// flattening optimization.
((bias->dim() == 1 || bias->squeeze().dim() == 1) && bias->is_contiguous_or_false())
);
if (is_bias_likely_fusable && !input.is_xla()) {
// Also hit the fused path for contiguous nD input, if not using xla
// backend. Reshaping/flattening has some performance implications on xla. // backend. Reshaping/flattening has some performance implications on xla.
if (input.is_contiguous_or_false()) { bool is_contiguous = input.is_contiguous_or_false();
if (is_contiguous && input_dim == 3) {
return _flatten_nd_linear(input, weight, *bias); return _flatten_nd_linear(input, weight, *bias);
} else if (parseLinearFlatten3d()) { } else if (is_contiguous && input.layout() == c10::kStrided && weight.layout() == c10::kStrided && bias->dim() == 1) {
return _flatten_nd_linear(input, weight, *bias);
} else if (parseLinearFlatten3d() && input_dim == 3) {
// If user forces flattening via env var // If user forces flattening via env var
const Tensor input_cont = input.contiguous(); const Tensor input_cont = input.contiguous();
return _flatten_nd_linear(input_cont, weight, *bias); return _flatten_nd_linear(input_cont, weight, *bias);

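Shape-wise, the _flatten_nd_linear path above collapses every leading dimension of the input into rows, runs a single addmm, and restores the leading dimensions on the result. Here is a runnable sketch of that flatten, addmm, unflatten sequence for a contiguous strided input, using plain int64 sizes rather than SymInt; it is illustrative only, not the dispatcher's code path.

#include <ATen/ATen.h>
#include <iostream>

at::Tensor flatten_nd_linear_sketch(const at::Tensor& input,
                                    const at::Tensor& weight,
                                    const at::Tensor& bias) {
  auto sizes = input.sizes().vec();              // e.g. [b, t, in_features]
  const int64_t ncols = sizes.back();
  int64_t nrows = 1;                             // avoid -1: still works when a dim is 0
  for (size_t i = 0; i + 1 < sizes.size(); ++i) {
    nrows *= sizes[i];
  }
  auto flat = input.view({nrows, ncols});        // [b*t, in_features]
  auto out = at::addmm(bias, flat, weight.t());  // [b*t, out_features]
  sizes.back() = out.size(1);
  return out.view(sizes);                        // [b, t, out_features]
}

int main() {
  auto x = at::randn({2, 3, 4});
  auto w = at::randn({5, 4});
  auto b = at::randn({5});
  std::cout << flatten_nd_linear_sketch(x, w, b).sizes() << std::endl;  // [2, 3, 5]
}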
View File

@ -1936,7 +1936,7 @@ static bool should_fold(const Tensor& tensor1, const Tensor& tensor2, bool has_o
// We order the tensors. t1 will be the larger tensor // We order the tensors. t1 will be the larger tensor
// We can always transpose tensor2 as the dimensions are always >= 1 (precondition from matmul) // We can always transpose tensor2 as the dimensions are always >= 1 (precondition from matmul)
// and tensor1_larger iff tensor2.dim() > tensor1.dim() // and tensor1_larger iff tensor2.dim() > tensor1.dim()
const auto t1 = tensor1_larger ? MaybeOwned<Tensor>::borrowed(tensor1) const auto t1 = tensor1_larger ? MaybeOwned<Tensor>::borrowed(tensor1)
: MaybeOwned<Tensor>::owned(tensor2.mT()); : MaybeOwned<Tensor>::owned(tensor2.mT());
const int64_t dim_t1 = t1->dim(); const int64_t dim_t1 = t1->dim();
@ -1948,11 +1948,20 @@ static bool should_fold(const Tensor& tensor1, const Tensor& tensor2, bool has_o
return false; return false;
} }
// If we require a gradient, we should fold to minimize backward memory usage - even if this // In this case we *do* incur an extra copy to avoid creating an unnecessarily large tensor in the backward
// leads to a copy in forward because it is needed in backward, // Suppose we don't fold here. Let t1.shape = [b, m, n] and t2.shape = [n, k], as in a transformer
// the only time we avoid this is with strict pre-allocated memory usage (has_out = True) // t2 will be expanded to a tensor of shape [b, n, k] and then we do t1.bmm(t2_expanded)
bool requires_grad = tensor1.requires_grad() || tensor2.requires_grad(); // The issue appears in the backward.
if (requires_grad && !has_out) { // The output gradient g of this operation would have shape [b, m, k]
// The backward wrt. t2 of bmm would be given by t1.mH @ g, which has shape [b, n, k]
// Then, the backward of expand is simply `sum(0)`. As such, we are instantiating a tensor
// of shape [b, n, k] unnecessarily, which may cause a large memory footprint, and in the
// worst case, an OOM
bool t2_requires_grad = tensor1_larger ? tensor2.requires_grad() : tensor1.requires_grad();
if (t2_requires_grad && !has_out) {
// We should be checking !at::GradMode::is_enabled(), but apparently
// this regresses performance in some cases:
// https://github.com/pytorch/pytorch/issues/118548#issuecomment-1916022394
return true; return true;
} }

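The comment block above is about whether to "fold" a batched matmul into a single 2D mm. For intuition: a [b, m, n] @ [n, k] product can be computed either by expanding the right operand to [b, n, k] and calling bmm, or by folding the left operand to [b*m, n] and calling mm; the folded form avoids materializing a [b, n, k] gradient in the backward. A small sketch checking that the folded computation matches at::matmul, with illustrative shapes rather than the actual decision logic:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  auto t1 = at::randn({4, 8, 16});  // [b, m, n]
  auto t2 = at::randn({16, 32});    // [n, k]

  // Fold the batch and row dimensions into one, do a single 2D mm, unfold.
  auto folded = at::mm(t1.reshape({4 * 8, 16}), t2).view({4, 8, 32});
  auto reference = at::matmul(t1, t2);

  std::cout << at::allclose(folded, reference) << std::endl;  // 1
}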
View File

@ -142,7 +142,6 @@ Tensor _pack_padded_sequence_backward_symint(const Tensor& grad, c10::SymIntArra
std::tuple<Tensor, Tensor> _pad_packed_sequence(const Tensor& data, const Tensor& _batch_sizes, bool batch_first, const Scalar& padding_value, int64_t total_length) { std::tuple<Tensor, Tensor> _pad_packed_sequence(const Tensor& data, const Tensor& _batch_sizes, bool batch_first, const Scalar& padding_value, int64_t total_length) {
auto batch_sizes_t = _batch_sizes.contiguous(); auto batch_sizes_t = _batch_sizes.contiguous();
checkLongTensor(batch_sizes_t); checkLongTensor(batch_sizes_t);
TORCH_CHECK(batch_sizes_t.numel() > 0, "batch_sizes can not be empty");
int64_t * batch_sizes = batch_sizes_t.data_ptr<int64_t>(); int64_t * batch_sizes = batch_sizes_t.data_ptr<int64_t>();
int64_t max_batch_size = batch_sizes[0]; int64_t max_batch_size = batch_sizes[0];

View File

@ -23,7 +23,6 @@
#include <ATen/ops/_aminmax_native.h> #include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/_assert_async_native.h> #include <ATen/ops/_assert_async_native.h>
#include <ATen/ops/_assert_scalar_native.h> #include <ATen/ops/_assert_scalar_native.h>
#include <ATen/ops/_async_error_native.h>
#include <ATen/ops/_functional_assert_async_native.h> #include <ATen/ops/_functional_assert_async_native.h>
#include <ATen/ops/_functional_assert_scalar_native.h> #include <ATen/ops/_functional_assert_scalar_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor.h> #include <ATen/ops/_make_per_tensor_quantized_tensor.h>
@ -480,14 +479,6 @@ Tensor isfinite(const Tensor& self) {
}); });
} }
void _async_error(std::string_view msg) {
TORCH_CHECK(0, msg);
}
void _async_error_meta(std::string_view msg) {
// Do NOT error, it's an async error!
}
void _assert_async_cpu(const Tensor& self) { void _assert_async_cpu(const Tensor& self) {
TORCH_CHECK( TORCH_CHECK(
native::is_nonzero(self), native::is_nonzero(self),

View File

@ -1784,8 +1784,8 @@ Tensor narrow_tensor_symint(
start.dim() == 0 && start.dim() == 0 &&
isIntegralType(start.scalar_type(), /*includeBool=*/false), isIntegralType(start.scalar_type(), /*includeBool=*/false),
"start must be an 0-dim integral Tensor."); "start must be an 0-dim integral Tensor.");
c10::SymInt st = start.item().toSymInt(); int64_t st = start.item<int64_t>();
return at::narrow_symint(self, dim, std::move(st), std::move(length)); return at::narrow_symint(self, dim, c10::SymInt(st), std::move(length));
} }
std:: std::

View File

@ -1,8 +1,6 @@
#pragma once #pragma once
#include <c10/util/Exception.h> #include <c10/util/Exception.h>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-default")
namespace at::native { namespace at::native {
// Used as an interface between the different BLAS-like libraries // Used as an interface between the different BLAS-like libraries
@ -23,5 +21,3 @@ static inline char to_blas(TransposeType trans) {
} }
} // namespace at::native } // namespace at::native
C10_DIAGNOSTIC_POP()

View File

@ -247,8 +247,8 @@ void binary_kernel_reduce(TensorIteratorBase& iter, ops_t ops, init_t init) {
}); });
} }
template <typename func_t, typename vec_func_t> template <typename func_t, typename vec_func_t, typename ident_t = double>
void binary_kernel_reduce_vec(TensorIteratorBase& iter, func_t op, vec_func_t vop, double ident = 0) { void binary_kernel_reduce_vec(TensorIteratorBase& iter, func_t op, vec_func_t vop, ident_t ident = static_cast<ident_t>(0)) {
using traits = binary_function_traits<func_t>; using traits = binary_function_traits<func_t>;
static_assert( static_assert(
all_same< all_same<

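The template change above lets the reduction identity keep its own type instead of being funneled through a double parameter. This matters for 64-bit integer reductions because large int64 values are not exactly representable as double, so the identity would silently change. A standalone demonstration:

#include <cstdint>
#include <cstdio>
#include <limits>

int main() {
  const int64_t i_max = std::numeric_limits<int64_t>::max();  // 2^63 - 1
  const double as_double = static_cast<double>(i_max);        // rounds to 2^63

  // 1: the value changed (it now exceeds INT64_MAX).
  std::printf("%d\n", as_double == 9223372036854775808.0);
  // 1: distinct int64 values collapse to the same double.
  std::printf("%d\n", static_cast<double>(i_max) == static_cast<double>(i_max - 1));
}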
View File

@ -5,6 +5,7 @@
#include <ATen/native/ReduceOpsUtils.h> #include <ATen/native/ReduceOpsUtils.h>
#include <ATen/Dispatch.h> #include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/Parallel.h> #include <ATen/Parallel.h>
#include <ATen/TensorIterator.h> #include <ATen/TensorIterator.h>
#include <ATen/OpMathType.h> #include <ATen/OpMathType.h>
@ -78,12 +79,12 @@ void min_all_kernel_impl(Tensor& result, const Tensor& input) {
reduce_all_impl<int64_t>(result, input, upper_bound<int64_t>(), reduce_all_impl<int64_t>(result, input, upper_bound<int64_t>(),
[=](int64_t a, int64_t b) -> int64_t { return min_impl(a, b); }); [=](int64_t a, int64_t b) -> int64_t { return min_impl(a, b); });
} else { } else {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "min_all", [&] { AT_DISPATCH_V2(input.scalar_type(), "min_all", AT_WRAP([&] {
using Vec = Vectorized<opmath_type<scalar_t>>; using Vec = Vectorized<opmath_type<scalar_t>>;
reduce_all_impl_vec<scalar_t>(result, input, upper_bound<scalar_t>(), reduce_all_impl_vec<scalar_t>(result, input, upper_bound<scalar_t>(),
[=] (scalar_t a , scalar_t b) -> scalar_t { return min_impl(a, b); }, [=] (scalar_t a , scalar_t b) -> scalar_t { return min_impl(a, b); },
[=](Vec a, Vec b) -> Vec { return minimum(a, b); }); [=](Vec a, Vec b) -> Vec { return minimum(a, b); });
}); }), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kHalf, kBFloat16);
} }
} }
@ -103,12 +104,12 @@ void max_all_kernel_impl(Tensor& result, const Tensor& input) {
reduce_all_impl<int64_t>(result, input, lower_bound<int64_t>(), reduce_all_impl<int64_t>(result, input, lower_bound<int64_t>(),
[=](int64_t a, int64_t b) -> int64_t { return max_impl(a, b); }); [=](int64_t a, int64_t b) -> int64_t { return max_impl(a, b); });
} else { } else {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_all", [&] { AT_DISPATCH_V2(input.scalar_type(), "max_all", AT_WRAP([&] {
using Vec = Vectorized<opmath_type<scalar_t>>; using Vec = Vectorized<opmath_type<scalar_t>>;
reduce_all_impl_vec<scalar_t>(result, input, lower_bound<scalar_t>(), reduce_all_impl_vec<scalar_t>(result, input, lower_bound<scalar_t>(),
[=] (scalar_t a , scalar_t b) -> scalar_t { return max_impl(a, b); }, [=] (scalar_t a , scalar_t b) -> scalar_t { return max_impl(a, b); },
[=](Vec a, Vec b) -> Vec { return maximum(a, b); }); [=](Vec a, Vec b) -> Vec { return maximum(a, b); });
}); }), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kHalf, kBFloat16);
} }
} }
@ -199,7 +200,7 @@ void aminmax_allreduce_kernel(
} }
); );
} else { } else {
AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, input.scalar_type(), "aminmax_cpu", [&] { AT_DISPATCH_V2(input.scalar_type(), "aminmax_cpu", AT_WRAP([&] {
using Vec = Vectorized<opmath_type<scalar_t>>; using Vec = Vectorized<opmath_type<scalar_t>>;
using scalar_t_pair = std::pair<scalar_t, scalar_t>; using scalar_t_pair = std::pair<scalar_t, scalar_t>;
reduce_all_impl_vec_two_outputs<scalar_t>( reduce_all_impl_vec_two_outputs<scalar_t>(
@ -214,7 +215,7 @@ void aminmax_allreduce_kernel(
[=](Vec a, Vec b) -> Vec { return minimum(a, b); }, [=](Vec a, Vec b) -> Vec { return minimum(a, b); },
[=](Vec a, Vec b) -> Vec { return maximum(a, b); } [=](Vec a, Vec b) -> Vec { return maximum(a, b); }
); );
}); }), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf);
} }
} }

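For readers unfamiliar with the AT_DISPATCH_V2 form used in this conversion: the lambda body goes through AT_WRAP so commas inside it survive macro expansion, and the accepted dtypes follow as a trailing list, with AT_EXPAND splicing in predefined groups such as AT_ALL_TYPES. A minimal usage sketch against libtorch, mirroring the pattern above; the body just reports sizeof(scalar_t).

#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <iostream>

int main() {
  auto t = at::arange(5, at::kInt);
  AT_DISPATCH_V2(t.scalar_type(), "dispatch_sketch", AT_WRAP([&] {
    // scalar_t is the C++ type selected for t.scalar_type() at run time.
    std::cout << "element size: " << sizeof(scalar_t) << std::endl;  // 4 for kInt
  }), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), at::kHalf, at::kBFloat16);
}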
View File

@ -3,6 +3,7 @@
#include <ATen/core/Tensor.h> #include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h> #include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/OpMathType.h> #include <ATen/OpMathType.h>
#include <ATen/cpu/vec/vec.h> #include <ATen/cpu/vec/vec.h>
#include <ATen/cpu/vec/functional.h> #include <ATen/cpu/vec/functional.h>
@ -338,43 +339,24 @@ void or_kernel_impl(TensorIterator& iter) {
} }
} }
template<typename scalar_t>
struct MinValuesOps: public at::native::MinOps<scalar_t> {
using arg_t = typename MinOps<scalar_t>::arg_t;
static scalar_t project(arg_t arg) {
return arg.first;
}
};
void min_values_kernel_impl(TensorIterator& iter) { void min_values_kernel_impl(TensorIterator& iter) {
if (iter.dtype() == kLong) { AT_DISPATCH_V2(iter.dtype(), "min_values_cpu", AT_WRAP([&iter] {
// This case is special because of Vectorized<int64_t> does not
// handle upper_bound<int64_t>().
// See: https://github.com/pytorch/pytorch/issues/43254
using scalar_t = int64_t;
binary_kernel_reduce(
iter,
MinValuesOps<scalar_t>{},
std::pair<scalar_t, int64_t>(upper_bound<scalar_t>(), -1));
return;
}
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "min_values_cpu", [&iter] {
binary_kernel_reduce_vec( binary_kernel_reduce_vec(
iter, iter,
[](scalar_t a, scalar_t b) -> scalar_t { return min_impl(a, b); }, [](scalar_t a, scalar_t b) -> scalar_t { return min_impl(a, b); },
[](Vectorized<scalar_t> a, Vectorized<scalar_t> b) { return minimum(a, b); }, [](Vectorized<scalar_t> a, Vectorized<scalar_t> b) { return minimum(a, b); },
static_cast<double>(upper_bound<scalar_t>())); upper_bound<scalar_t>());
}); }), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
} }
void max_values_kernel_impl(TensorIterator& iter) { void max_values_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "max_values_cpu", [&iter] { AT_DISPATCH_V2(iter.dtype(), "max_values_cpu", AT_WRAP([&iter] {
binary_kernel_reduce_vec( binary_kernel_reduce_vec(
iter, iter,
[](scalar_t a, scalar_t b) -> scalar_t { return max_impl(a, b); }, [](scalar_t a, scalar_t b) -> scalar_t { return max_impl(a, b); },
[](Vectorized<scalar_t> a, Vectorized<scalar_t> b) { return maximum(a, b); }, [](Vectorized<scalar_t> a, Vectorized<scalar_t> b) { return maximum(a, b); },
lower_bound<scalar_t>()); lower_bound<scalar_t>());
}); }), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
} }
void argmax_kernel_impl(TensorIterator &iter) { void argmax_kernel_impl(TensorIterator &iter) {

View File

@ -11,6 +11,7 @@
#include <vector> #include <vector>
#include <ATen/Dispatch.h> #include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/Parallel.h> #include <ATen/Parallel.h>
#include <ATen/NumericUtils.h> #include <ATen/NumericUtils.h>
#include <ATen/TensorIterator.h> #include <ATen/TensorIterator.h>
@ -106,7 +107,7 @@ void min_kernel_impl(
bool keepdim) { bool keepdim) {
int64_t self_dim_size = ensure_nonempty_size(self, dim); int64_t self_dim_size = ensure_nonempty_size(self, dim);
AT_DISPATCH_ALL_TYPES_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, self.scalar_type(), "min_cpu", [&] { AT_DISPATCH_V2(self.scalar_type(), "min_cpu", AT_WRAP([&] {
compare_base_kernel<scalar_t>(result, indice, self, dim, keepdim, [&] ( compare_base_kernel<scalar_t>(result, indice, self, dim, keepdim, [&] (
scalar_t* result_data, int64_t* indice_data, scalar_t* result_data, int64_t* indice_data,
const scalar_t* self_data, auto self_dim_stride) { const scalar_t* self_data, auto self_dim_stride) {
@ -128,7 +129,7 @@ void min_kernel_impl(
*indice_data = index; *indice_data = index;
} }
); );
}); }), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool);
} }
void max_kernel_impl( void max_kernel_impl(
@ -139,7 +140,7 @@ void max_kernel_impl(
bool keepdim) { bool keepdim) {
int64_t self_dim_size = ensure_nonempty_size(self, dim); int64_t self_dim_size = ensure_nonempty_size(self, dim);
AT_DISPATCH_ALL_TYPES_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, self.scalar_type(), "max_cpu", [&] { AT_DISPATCH_V2(self.scalar_type(), "max_cpu", AT_WRAP([&] {
compare_base_kernel<scalar_t>(result, indice, self, dim, keepdim, [&] ( compare_base_kernel<scalar_t>(result, indice, self, dim, keepdim, [&] (
scalar_t* result_data, int64_t* indice_data, scalar_t* result_data, int64_t* indice_data,
const scalar_t* self_data, auto self_dim_stride) { const scalar_t* self_data, auto self_dim_stride) {
@ -161,7 +162,7 @@ void max_kernel_impl(
*indice_data = index; *indice_data = index;
} }
); );
}); }), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool);
} }
void aminmax_kernel( void aminmax_kernel(
@ -186,7 +187,7 @@ void aminmax_kernel(
return; return;
} }
AT_DISPATCH_ALL_TYPES_AND3(ScalarType::Bool, ScalarType::BFloat16, ScalarType::Half, self.scalar_type(), "aminmax_cpu", [&] { AT_DISPATCH_V2(self.scalar_type(), "aminmax_cpu", AT_WRAP([&] {
compare_base_kernel<scalar_t, scalar_t>(min_result, max_result, self, wrap_dim, keepdim, [&] ( compare_base_kernel<scalar_t, scalar_t>(min_result, max_result, self, wrap_dim, keepdim, [&] (
scalar_t* min_result_data, scalar_t* max_result_data, scalar_t* min_result_data, scalar_t* max_result_data,
const scalar_t* self_data, auto self_dim_stride) { const scalar_t* self_data, auto self_dim_stride) {
@ -209,7 +210,7 @@ void aminmax_kernel(
*max_result_data = max_number; *max_result_data = max_number;
} }
); );
}); }), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), ScalarType::Bool, ScalarType::BFloat16, ScalarType::Half);
} }
void where_kernel_impl(TensorIterator &iter) { void where_kernel_impl(TensorIterator &iter) {

View File

@ -147,24 +147,14 @@ static bool isGloballyDisabledAddmmCudaLt(const at::Device& device) {
/* /*
* Check whether for the given input we want to enable the Lt interface * Check whether for the given input we want to enable the Lt interface
*/ */
static bool isInputCompliesAddmmCudaLt( static bool isInputCompliesAddmmCudaLt(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha) {
Tensor& result,
const Tensor& self,
const Tensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha,
Activation activation
) {
#ifdef USE_ROCM
// Implies 2D bias which we currently not send through Lt. // Implies 2D bias which we currently not send through Lt.
// TODO: this check is done pre col-major input preparation, // TODO: this check is done pre col-major input preparation,
// so, this condition can be relaxed in cases when a col-major // so, this condition can be relaxed in cases when a col-major
// copy of result is needed. // copy of result is needed.
if (self.is_same(result) || self.dim() == 2) { if (result.is_same(self)) {
return false; return false;
} }
#endif
#if defined(USE_ROCM) && ROCM_VERSION == 60400 #if defined(USE_ROCM) && ROCM_VERSION == 60400
// hipblaslt TT fp32 regression on ROCm 6.4, cannot use // hipblaslt TT fp32 regression on ROCm 6.4, cannot use
@ -179,33 +169,13 @@ static bool isInputCompliesAddmmCudaLt(
#if defined(CUDA_VERSION) || defined(USE_ROCM) #if defined(CUDA_VERSION) || defined(USE_ROCM)
const auto scalar_type = mat1.scalar_type(); const auto scalar_type = mat1.scalar_type();
return (beta.toComplexDouble() == 1.0 return (beta.toComplexDouble() == 1.0
// NOTE: row-major result is important when bias is 1D.
// This is because Lt broadcasts 1D bias over the columns
// while the aten::addmm API broadcasts it over the rows,
// and this is in conjunction with the data preparation
// procedure that does not transpose arguments with
// col-major result. For col-major result we need
// to explicitly transpose the problem so that bias is
// correctly applied.
// TODO: enable col-major result if needed.
// TODO: no need to check result's layout when
// !result.is_same(self) and self.dim() == 2, because
// self needs to be copied into result and the bias ptr
// will be ignored.
&& result.dim() == 2 && result.is_contiguous() && result.dim() == 2 && result.is_contiguous()
// Conditions for bias to be fusable
&& ( && (
( // Conditions for bias to be fusable -- implies direct Lt path without copies. self.is_contiguous() &&
self.is_contiguous() && // NOTE: fine to have 1-len dims to the left from the right-most one
// NOTE: fine to have 1-len dims to the left from the right-most one (self.dim() == 1 || self.squeeze().dim() == 1) &&
(self.dim() == 1 || self.squeeze().dim() == 1) && self.sizes().back() == mat2_sizes[1]
self.sizes().back() == mat2_sizes[1]
)
|| ( // 2D bias restrictions. self.is_contiguous() is implicit when result.is_same(self),
// and we need to copy self into result otherwise, so the self's layout becomes irrelevant.
// See also TODO from above.
activation != Activation::None && // Lt is faster when activation is fused
(self.dim() == 2 && at::is_expandable_to(self.sizes(), {mat1_sizes[0], mat2_sizes[1]}))
)
) )
&& ( // some dtype restrictions && ( // some dtype restrictions
#ifndef USE_ROCM #ifndef USE_ROCM
@ -300,16 +270,7 @@ bool launchGemmAndBiasCublasLt(
const Scalar& alpha, const Scalar& alpha,
Activation activation = Activation::None Activation activation = Activation::None
) { ) {
// We apply bias in the epilogue only when it is 1D, const auto* self_ptr = self.const_data_ptr<scalar_t>();
// or when it can be squeezed to 1D.
// self_ptr == nullptr implies ignore bias epilogue
// and use standard gemm-like API.
const auto* self_ptr = [&]() -> auto {
if (self.dim() == 1 || self.squeeze().dim() == 1) {
return self.const_data_ptr<scalar_t>();
}
return static_cast<const scalar_t*>(nullptr);
}();
const auto tuning_ctx = at::cuda::tunable::getTuningContext(); const auto tuning_ctx = at::cuda::tunable::getTuningContext();
if (tuning_ctx->IsTunableOpEnabled()) { if (tuning_ctx->IsTunableOpEnabled()) {
@ -395,7 +356,7 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& ma
disable_addmm_cuda_lt = isGloballyDisabledAddmmCudaLt(self.device()) || disable_addmm_cuda_lt; disable_addmm_cuda_lt = isGloballyDisabledAddmmCudaLt(self.device()) || disable_addmm_cuda_lt;
#endif #endif
// Condition on the input // Condition on the input
disable_addmm_cuda_lt = !isInputCompliesAddmmCudaLt(result, self, mat1, mat2, beta, alpha, activation) || disable_addmm_cuda_lt; disable_addmm_cuda_lt = !isInputCompliesAddmmCudaLt(result, self, mat1, mat2, beta, alpha) || disable_addmm_cuda_lt;
// } // }
at::ScalarType scalar_type = mat1.scalar_type(); at::ScalarType scalar_type = mat1.scalar_type();
@ -405,20 +366,19 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& ma
if (!result.is_same(self)) { if (!result.is_same(self)) {
at::native::resize_output(result, {mat1.sizes()[0], mat2.sizes()[1]}); at::native::resize_output(result, {mat1.sizes()[0], mat2.sizes()[1]});
// We use bias ptr in the Lt path only when bias is 1D
const auto use_bias_ptr_lt = (self.dim() == 1) && !disable_addmm_cuda_lt;
const auto self_maybe_expanded = [&]() -> c10::MaybeOwned<Tensor> { const auto self_maybe_expanded = [&]() -> c10::MaybeOwned<Tensor> {
if (!use_bias_ptr_lt) { if (disable_addmm_cuda_lt) {
// We do expand self even before // When in non-Lt path we do expand self even before
// check for beta != 0.0 to make sure that // check for beta != 0.0 to make sure that
// test_sparse_csr.py::TestSparseCSRCUDA::test_addmm_errors_* // test_sparse_csr.py::TestSparseCSRCUDA::test_addmm_errors_*
// runs green. // runs green.
return expand_size(self, result.sizes(), "addmm"); return expand_size(self, result.sizes(), "addmm");
} }
// copy next, should broadcast
return c10::MaybeOwned<Tensor>::borrowed(self); return c10::MaybeOwned<Tensor>::borrowed(self);
}(); }();
// We do not copy bias only when we need the bias ptr // We copy bias when in the non-Lt path
if (beta.toComplexDouble() != 0.0 && !use_bias_ptr_lt) { if (beta.toComplexDouble() != 0.0 && disable_addmm_cuda_lt) {
// NOTE: self should broadcast over result // NOTE: self should broadcast over result
at::native::copy_(result, *self_maybe_expanded); at::native::copy_(result, *self_maybe_expanded);
} }

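The comments above say the cuBLASLt epilogue fuses the bias only when it is effectively 1D (or squeezable to 1D), contiguous, and as long as the output has columns. Here is a hedged sketch of that shape test with public API only; it mirrors the condition as described, using is_contiguous() in place of the internal is_contiguous_or_false(), and does not perform any cuBLASLt call.

#include <ATen/ATen.h>
#include <iostream>

bool bias_is_epilogue_fusable_sketch(const at::Tensor& bias,
                                     const at::Tensor& mat2) {
  const bool vector_like = bias.dim() == 1 || bias.squeeze().dim() == 1;
  return bias.is_contiguous() && vector_like &&
         bias.sizes().back() == mat2.sizes()[1];
}

int main() {
  auto mat2 = at::randn({16, 32});
  std::cout << bias_is_epilogue_fusable_sketch(at::randn({32}), mat2) << "\n";     // 1
  std::cout << bias_is_epilogue_fusable_sketch(at::randn({1, 32}), mat2) << "\n";  // 1 (squeezes to 1D)
  std::cout << bias_is_epilogue_fusable_sketch(at::randn({8, 32}), mat2) << "\n";  // 0 (2D bias)
}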
Some files were not shown because too many files have changed in this diff.