Compare commits


2 Commits

Author SHA1 Message Date
8311bb86e8 Adjust derivative calculation for removed linalg.solve optimization 2025-11-07 10:43:56 +01:00
ac2b7f35d8 Avoid differing results in linalg.(tensor_)solve
Remove an optimization potentially using a transposed matrix as input
for `linalg_lu_factor_ex_out`.

Depending on whether the input memory layout is contiguous or not, this
may lead to slightly different results, which can cause larger
differences in subsequent steps and ultimately test failures in
e.g. `test_vmapvjp_linalg_tensorsolve_cpu_float32` & `test_vmapvjpvjp_linalg_tensorsolve_cpu_float32`.

The intended optimization no longer applies after 59bc76f, so this code
can be removed as well, resolving the accuracy issues observed in those tests.

Fixes #151440
2025-11-07 10:43:55 +01:00
671 changed files with 7042 additions and 16925 deletions
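
As a rough illustration of the layout sensitivity described in the second commit message, the sketch below (hypothetical, not part of this change) solves the same system once with a contiguous matrix and once with a non-contiguous view holding identical values. Depending on which internal code path the factorization takes, the two results may agree only up to a few ULPs, which later steps can amplify.

import torch

torch.manual_seed(0)
A = torch.randn(64, 64, dtype=torch.float32)
b = torch.randn(64, 4, dtype=torch.float32)

A_contig = A.clone()                  # contiguous layout
A_noncontig = A.t().contiguous().t()  # same values, transposed (non-contiguous) strides
assert not A_noncontig.is_contiguous()

x_contig = torch.linalg.solve(A_contig, b)
x_noncontig = torch.linalg.solve(A_noncontig, b)

# Mathematically identical, but not necessarily bitwise equal when the
# LU factorization dispatches differently for non-contiguous inputs.
print((x_contig - x_noncontig).abs().max())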

View File

@ -36,7 +36,11 @@ case ${DOCKER_TAG_PREFIX} in
;;
rocm*)
BASE_TARGET=rocm
PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201;gfx950;gfx1150;gfx1151"
PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
# add gfx950, gfx115x conditionally starting in ROCm 7.0
if [[ "$ROCM_VERSION" == *"7.0"* ]]; then
PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
fi
EXTRA_BUILD_ARGS="${EXTRA_BUILD_ARGS} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}"
;;
*)

View File

@ -207,9 +207,9 @@ case "$tag" in
NINJA_VERSION=1.9.0
TRITON=yes
;;
pytorch-linux-noble-xpu-n-py3 | pytorch-linux-noble-xpu-n-py3-inductor-benchmarks)
pytorch-linux-jammy-xpu-n-py3 | pytorch-linux-jammy-xpu-n-py3-inductor-benchmarks)
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=13
GCC_VERSION=11
VISION=yes
XPU_VERSION=2025.2
NINJA_VERSION=1.9.0
@ -260,12 +260,6 @@ case "$tag" in
HALIDE=yes
TRITON=yes
;;
pytorch-linux-jammy-cuda12.8-py3.12-pallas)
CUDA_VERSION=12.8.1
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11
PALLAS=yes
;;
pytorch-linux-jammy-py3.12-triton-cpu)
CUDA_VERSION=12.6
ANACONDA_PYTHON_VERSION=3.12
@ -387,7 +381,6 @@ docker build \
--build-arg "INDUCTOR_BENCHMARKS=${INDUCTOR_BENCHMARKS}" \
--build-arg "EXECUTORCH=${EXECUTORCH}" \
--build-arg "HALIDE=${HALIDE}" \
--build-arg "PALLAS=${PALLAS}" \
--build-arg "XPU_VERSION=${XPU_VERSION}" \
--build-arg "UNINSTALL_DILL=${UNINSTALL_DILL}" \
--build-arg "ACL=${ACL:-}" \

View File

@ -1 +0,0 @@
0.8.0

View File

@ -1,40 +0,0 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
# Get the pinned JAX version (same for all CUDA versions)
JAX_VERSION=$(get_pinned_commit /ci_commit_pins/jax)
function install_jax_12() {
echo "Installing JAX ${JAX_VERSION} with CUDA 12 support"
pip_install "jax[cuda12]==${JAX_VERSION}" -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
# Verify installation
python -c "import jax" # check for errors
echo "JAX ${JAX_VERSION} installation completed successfully for CUDA 12"
}
function install_jax_13() {
echo "Installing JAX ${JAX_VERSION} with CUDA 13 support"
pip_install "jax[cuda13]==${JAX_VERSION}" -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
# Verify installation
python -c "import jax" # check for errors
echo "JAX ${JAX_VERSION} installation completed successfully for CUDA 13"
}
# idiomatic parameter and option handling in sh
while test $# -gt 0
do
case "$1" in
12.4|12.6|12.6.*|12.8|12.8.*|12.9|12.9.*) install_jax_12;
;;
13.0|13.0.*) install_jax_13;
;;
*) echo "bad argument $1"; exit 1
;;
esac
shift
done

View File

@ -9,7 +9,7 @@ set -xe
function install_ubuntu() {
. /etc/os-release
if [[ ! " jammy noble " =~ " ${VERSION_CODENAME} " ]]; then
if [[ ! " jammy " =~ " ${VERSION_CODENAME} " ]]; then
echo "Ubuntu version ${VERSION_CODENAME} not supported"
exit
fi
@ -35,24 +35,25 @@ function install_ubuntu() {
# The xpu-smi packages
apt-get install -y flex bison xpu-smi
# Compute and Media Runtimes
if [[ " ${VERSION_CODENAME} " =~ " noble " ]]; then
if [[ "${XPU_DRIVER_TYPE,,}" == "lts" ]]; then
# Compute and Media Runtimes
apt-get install -y \
intel-opencl-icd libze-intel-gpu1 libze1 \
intel-media-va-driver-non-free libmfx-gen1 libvpl2 \
libegl-mesa0 libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
intel-opencl-icd intel-level-zero-gpu level-zero \
intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 \
libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo intel-ocloc
else # jammy
mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo
# Development Packages
apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev level-zero-dev
else # rolling driver
apt-get install -y \
intel-opencl-icd libze-intel-gpu1 libze1 \
intel-media-va-driver-non-free libmfx-gen1 libvpl2 \
libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
libglapi-mesa libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo intel-ocloc
apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev libze-dev
fi
# Development Packages
apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev libze-dev
# Install Intel Support Packages
apt-get install -y ${XPU_PACKAGES}
@ -65,7 +66,7 @@ function install_ubuntu() {
function install_rhel() {
. /etc/os-release
if [[ "${ID}" == "rhel" ]]; then
if [[ ! " 8.8 8.10 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then
if [[ ! " 8.8 8.9 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then
echo "RHEL version ${VERSION_ID} not supported"
exit
fi
@ -146,7 +147,7 @@ function install_sles() {
XPU_DRIVER_VERSION=""
if [[ "${XPU_DRIVER_TYPE,,}" == "lts" ]]; then
# Use GPU driver LTS releases
XPU_DRIVER_VERSION="/lts/2523"
XPU_DRIVER_VERSION="/lts/2350"
fi
# Default use Intel® oneAPI Deep Learning Essentials 2025.1

View File

@ -49,7 +49,11 @@ case ${DOCKER_TAG_PREFIX} in
fi
BASE_TARGET=rocm
GPU_IMAGE=rocm/dev-ubuntu-22.04:${GPU_ARCH_VERSION}-complete
PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201;gfx950;gfx1150;gfx1151"
PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
# add gfx950, gfx115x conditionally starting in ROCm 7.0
if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
fi
DOCKER_GPU_BUILD_ARG="--build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg ROCM_VERSION=${GPU_ARCH_VERSION}"
;;
*)

View File

@ -87,7 +87,11 @@ case ${image} in
MANY_LINUX_VERSION="2_28"
DEVTOOLSET_VERSION="11"
GPU_IMAGE=rocm/dev-almalinux-8:${GPU_ARCH_VERSION}-complete
PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201;gfx950;gfx1150;gfx1151"
PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
# add gfx950, gfx115x conditionally starting in ROCm 7.0
if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
fi
DOCKER_GPU_BUILD_ARG="--build-arg ROCM_VERSION=${GPU_ARCH_VERSION} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}"
;;
manylinux2_28-builder:xpu)

View File

@ -143,15 +143,6 @@ COPY ci_commit_pins/halide.txt halide.txt
RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi
RUN rm install_halide.sh common_utils.sh halide.txt
ARG PALLAS
ARG CUDA_VERSION
# Install JAX with CUDA support (for Pallas)
COPY ./common/install_jax.sh install_jax.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ./ci_commit_pins/jax.txt /ci_commit_pins/jax.txt
RUN if [ -n "${PALLAS}" ]; then bash ./install_jax.sh ${CUDA_VERSION}; fi
RUN rm -f install_jax.sh common_utils.sh /ci_commit_pins/jax.txt
ARG ONNX
# Install ONNX dependencies
COPY ./common/install_onnx.sh ./common/common_utils.sh ./

View File

@ -8,11 +8,9 @@ from abc import ABC, abstractmethod
try:
from collections.abc import Callable # Python 3.11+
from typing import Any, Required, TypedDict
from typing import Any, Callable, Required, TypedDict # Python 3.11+
except ImportError:
from collections.abc import Callable
from typing import Any, TypedDict
from typing import Any, Callable, TypedDict
from typing_extensions import Required # Fallback for Python <3.11

View File

@ -30,6 +30,7 @@ into a tarball, with the following structure:
More specifically, `build_magma.sh` copies over the relevant files from the `package_files` directory depending on the ROCm version.
Outputted binaries should be in the `output` folder.
## Pushing
Packages can be uploaded to an S3 bucket using:

View File

@ -168,16 +168,14 @@ if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
# shellcheck disable=SC1091
source /opt/intel/oneapi/compiler/latest/env/vars.sh
# shellcheck disable=SC1091
source /opt/intel/oneapi/umf/latest/env/vars.sh
# shellcheck disable=SC1091
source /opt/intel/oneapi/ccl/latest/env/vars.sh
# shellcheck disable=SC1091
source /opt/intel/oneapi/mpi/latest/env/vars.sh
# shellcheck disable=SC1091
source /opt/intel/oneapi/pti/latest/env/vars.sh
# Enable XCCL build
export USE_XCCL=1
export USE_MPI=0
# XPU kineto feature dependencies are not fully ready, disable kineto build as temp WA
export USE_KINETO=0
export TORCH_XPU_ARCH_LIST=pvc
fi

View File

@ -96,6 +96,7 @@ function pip_build_and_install() {
python3 -m pip wheel \
--no-build-isolation \
--no-deps \
--no-use-pep517 \
-w "${wheel_dir}" \
"${build_target}"
fi
@ -307,28 +308,6 @@ function install_torchao() {
pip_build_and_install "git+https://github.com/pytorch/ao.git@${commit}" dist/ao
}
function install_flash_attn_cute() {
echo "Installing FlashAttention CuTe from GitHub..."
# Grab latest main til we have a pinned commit
local flash_attn_commit
flash_attn_commit=$(git ls-remote https://github.com/Dao-AILab/flash-attention.git HEAD | cut -f1)
# Clone the repo to a temporary directory
rm -rf flash-attention-build
git clone --depth 1 --recursive https://github.com/Dao-AILab/flash-attention.git flash-attention-build
pushd flash-attention-build
git checkout "${flash_attn_commit}"
# Install only the 'cute' sub-directory
pip_install -e flash_attn/cute/
popd
# remove the local repo
rm -rf flash-attention-build
echo "FlashAttention CuTe installation complete."
}
function print_sccache_stats() {
echo 'PyTorch Build Statistics'
sccache --show-stats

View File

@ -100,337 +100,6 @@ def check_lib_statically_linked_libstdc_cxx_abi_symbols(lib: str) -> None:
)
def _compile_and_extract_symbols(
cpp_content: str, compile_flags: list[str], exclude_list: list[str] | None = None
) -> list[str]:
"""
Helper to compile a C++ file and extract all symbols.
Args:
cpp_content: C++ source code to compile
compile_flags: Compilation flags
exclude_list: List of symbol names to exclude. Defaults to ["main"].
Returns:
List of all symbols found in the object file (excluding those in exclude_list).
"""
import subprocess
import tempfile
if exclude_list is None:
exclude_list = ["main"]
with tempfile.TemporaryDirectory() as tmpdir:
tmppath = Path(tmpdir)
cpp_file = tmppath / "test.cpp"
obj_file = tmppath / "test.o"
cpp_file.write_text(cpp_content)
result = subprocess.run(
compile_flags + [str(cpp_file), "-o", str(obj_file)],
capture_output=True,
text=True,
timeout=60,
)
if result.returncode != 0:
raise RuntimeError(f"Compilation failed: {result.stderr}")
symbols = get_symbols(str(obj_file))
# Return all symbol names, excluding those in the exclude list
return [name for _addr, _stype, name in symbols if name not in exclude_list]
def check_stable_only_symbols(install_root: Path) -> None:
"""
Test TORCH_STABLE_ONLY and TORCH_TARGET_VERSION by compiling test code and comparing symbol counts.
This approach tests:
1. WITHOUT macros -> many torch symbols exposed
2. WITH TORCH_STABLE_ONLY -> zero torch symbols (all hidden)
3. WITH TORCH_TARGET_VERSION -> zero torch symbols (all hidden)
4. WITH both macros -> zero torch symbols (all hidden)
"""
include_dir = install_root / "include"
assert include_dir.exists(), f"Expected {include_dir} to be present"
test_cpp_content = """
// Main torch C++ API headers
#include <torch/torch.h>
#include <torch/all.h>
// ATen tensor library
#include <ATen/ATen.h>
// Core c10 headers (commonly used)
#include <c10/core/Device.h>
#include <c10/core/DeviceType.h>
#include <c10/core/ScalarType.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Optional.h>
int main() { return 0; }
"""
base_compile_flags = [
"g++",
"-std=c++17",
f"-I{include_dir}",
f"-I{include_dir}/torch/csrc/api/include",
"-c", # Compile only, don't link
]
# Compile WITHOUT any macros
symbols_without = _compile_and_extract_symbols(
cpp_content=test_cpp_content,
compile_flags=base_compile_flags,
)
# We expect constexpr symbols, inline functions used by other headers etc.
# to produce symbols
num_symbols_without = len(symbols_without)
print(f"Found {num_symbols_without} symbols without any macros defined")
assert num_symbols_without != 0, (
"Expected a non-zero number of symbols without any macros"
)
# Compile WITH TORCH_STABLE_ONLY (expect 0 symbols)
compile_flags_with_stable_only = base_compile_flags + ["-DTORCH_STABLE_ONLY"]
symbols_with_stable_only = _compile_and_extract_symbols(
cpp_content=test_cpp_content,
compile_flags=compile_flags_with_stable_only,
)
num_symbols_with_stable_only = len(symbols_with_stable_only)
assert num_symbols_with_stable_only == 0, (
f"Expected no symbols with TORCH_STABLE_ONLY macro, but found {num_symbols_with_stable_only}"
)
# Compile WITH TORCH_TARGET_VERSION (expect 0 symbols)
compile_flags_with_target_version = base_compile_flags + [
"-DTORCH_TARGET_VERSION=1"
]
symbols_with_target_version = _compile_and_extract_symbols(
cpp_content=test_cpp_content,
compile_flags=compile_flags_with_target_version,
)
num_symbols_with_target_version = len(symbols_with_target_version)
assert num_symbols_with_target_version == 0, (
f"Expected no symbols with TORCH_TARGET_VERSION macro, but found {num_symbols_with_target_version}"
)
# Compile WITH both macros (expect 0 symbols)
compile_flags_with_both = base_compile_flags + [
"-DTORCH_STABLE_ONLY",
"-DTORCH_TARGET_VERSION=1",
]
symbols_with_both = _compile_and_extract_symbols(
cpp_content=test_cpp_content,
compile_flags=compile_flags_with_both,
)
num_symbols_with_both = len(symbols_with_both)
assert num_symbols_with_both == 0, (
f"Expected no symbols with both macros, but found {num_symbols_with_both}"
)
def check_stable_api_symbols(install_root: Path) -> None:
"""
Test that stable API headers still expose symbols with TORCH_STABLE_ONLY.
The torch/csrc/stable/c/shim.h header is tested in check_stable_c_shim_symbols
"""
include_dir = install_root / "include"
assert include_dir.exists(), f"Expected {include_dir} to be present"
stable_dir = include_dir / "torch" / "csrc" / "stable"
assert stable_dir.exists(), f"Expected {stable_dir} to be present"
stable_headers = list(stable_dir.rglob("*.h"))
if not stable_headers:
raise RuntimeError("Could not find any stable headers")
includes = []
for header in stable_headers:
rel_path = header.relative_to(include_dir)
includes.append(f"#include <{rel_path.as_posix()}>")
includes_str = "\n".join(includes)
test_stable_content = f"""
{includes_str}
int main() {{ return 0; }}
"""
compile_flags = [
"g++",
"-std=c++17",
f"-I{include_dir}",
f"-I{include_dir}/torch/csrc/api/include",
"-c",
"-DTORCH_STABLE_ONLY",
]
symbols_stable = _compile_and_extract_symbols(
cpp_content=test_stable_content,
compile_flags=compile_flags,
)
num_symbols_stable = len(symbols_stable)
print(f"Found {num_symbols_stable} symbols in torch/csrc/stable")
assert num_symbols_stable > 0, (
f"Expected stable headers to expose symbols with TORCH_STABLE_ONLY, "
f"but found {num_symbols_stable} symbols"
)
def check_headeronly_symbols(install_root: Path) -> None:
"""
Test that header-only utility headers still expose symbols with TORCH_STABLE_ONLY.
"""
include_dir = install_root / "include"
assert include_dir.exists(), f"Expected {include_dir} to be present"
# Find all headers in torch/headeronly
headeronly_dir = include_dir / "torch" / "headeronly"
assert headeronly_dir.exists(), f"Expected {headeronly_dir} to be present"
headeronly_headers = list(headeronly_dir.rglob("*.h"))
if not headeronly_headers:
raise RuntimeError("Could not find any headeronly headers")
# Filter out platform-specific headers that may not compile everywhere
platform_specific_keywords = [
"cpu/vec",
]
filtered_headers = []
for header in headeronly_headers:
rel_path = header.relative_to(include_dir).as_posix()
if not any(
keyword in rel_path.lower() for keyword in platform_specific_keywords
):
filtered_headers.append(header)
includes = []
for header in filtered_headers:
rel_path = header.relative_to(include_dir)
includes.append(f"#include <{rel_path.as_posix()}>")
includes_str = "\n".join(includes)
test_headeronly_content = f"""
{includes_str}
int main() {{ return 0; }}
"""
compile_flags = [
"g++",
"-std=c++17",
f"-I{include_dir}",
f"-I{include_dir}/torch/csrc/api/include",
"-c",
"-DTORCH_STABLE_ONLY",
]
symbols_headeronly = _compile_and_extract_symbols(
cpp_content=test_headeronly_content,
compile_flags=compile_flags,
)
num_symbols_headeronly = len(symbols_headeronly)
print(f"Found {num_symbols_headeronly} symbols in torch/headeronly")
assert num_symbols_headeronly > 0, (
f"Expected headeronly headers to expose symbols with TORCH_STABLE_ONLY, "
f"but found {num_symbols_headeronly} symbols"
)
def check_aoti_shim_symbols(install_root: Path) -> None:
"""
Test that AOTI shim headers still expose symbols with TORCH_STABLE_ONLY.
"""
include_dir = install_root / "include"
assert include_dir.exists(), f"Expected {include_dir} to be present"
# There are no constexpr symbols etc., so we need to actually use functions
# so that some symbols are found.
test_shim_content = """
#include <torch/csrc/inductor/aoti_torch/c/shim.h>
int main() {
int32_t (*fp1)() = &aoti_torch_device_type_cpu;
int32_t (*fp2)() = &aoti_torch_dtype_float32;
(void)fp1; (void)fp2;
return 0;
}
"""
compile_flags = [
"g++",
"-std=c++17",
f"-I{include_dir}",
f"-I{include_dir}/torch/csrc/api/include",
"-c",
"-DTORCH_STABLE_ONLY",
]
symbols_shim = _compile_and_extract_symbols(
cpp_content=test_shim_content,
compile_flags=compile_flags,
)
num_symbols_shim = len(symbols_shim)
assert num_symbols_shim > 0, (
f"Expected shim headers to expose symbols with TORCH_STABLE_ONLY, "
f"but found {num_symbols_shim} symbols"
)
def check_stable_c_shim_symbols(install_root: Path) -> None:
"""
Test that stable C shim headers still expose symbols with TORCH_STABLE_ONLY.
"""
include_dir = install_root / "include"
assert include_dir.exists(), f"Expected {include_dir} to be present"
# Check if the stable C shim exists
stable_shim = include_dir / "torch" / "csrc" / "stable" / "c" / "shim.h"
if not stable_shim.exists():
raise RuntimeError("Could not find stable c shim")
# There are no constexpr symbols etc., so we need to actually use functions
# so that some symbols are found.
test_stable_shim_content = """
#include <torch/csrc/stable/c/shim.h>
int main() {
// Reference stable C API functions to create undefined symbols
AOTITorchError (*fp1)(const char*, uint32_t*, int32_t*) = &torch_parse_device_string;
AOTITorchError (*fp2)(uint32_t*) = &torch_get_num_threads;
(void)fp1; (void)fp2;
return 0;
}
"""
compile_flags = [
"g++",
"-std=c++17",
f"-I{include_dir}",
f"-I{include_dir}/torch/csrc/api/include",
"-c",
"-DTORCH_STABLE_ONLY",
]
symbols_stable_shim = _compile_and_extract_symbols(
cpp_content=test_stable_shim_content,
compile_flags=compile_flags,
)
num_symbols_stable_shim = len(symbols_stable_shim)
assert num_symbols_stable_shim > 0, (
f"Expected stable C shim headers to expose symbols with TORCH_STABLE_ONLY, "
f"but found {num_symbols_stable_shim} symbols"
)
def check_lib_symbols_for_abi_correctness(lib: str) -> None:
print(f"lib: {lib}")
cxx11_symbols = grep_symbols(lib, LIBTORCH_CXX11_PATTERNS)
@ -460,13 +129,6 @@ def main() -> None:
check_lib_symbols_for_abi_correctness(libtorch_cpu_path)
check_lib_statically_linked_libstdc_cxx_abi_symbols(libtorch_cpu_path)
# Check symbols when TORCH_STABLE_ONLY is defined
check_stable_only_symbols(install_root)
check_stable_api_symbols(install_root)
check_headeronly_symbols(install_root)
check_aoti_shim_symbols(install_root)
check_stable_c_shim_symbols(install_root)
if __name__ == "__main__":
main()

View File

@ -353,17 +353,6 @@ def test_linalg(device="cpu") -> None:
torch.linalg.svd(A)
def test_sdpa(device="cpu", dtype=torch.float16) -> None:
"""Regression test for https://github.com/pytorch/pytorch/issues/167602
Without nvrtc_builtins on CuDNN-9.13 on CUDA-13 fails with ` No valid execution plans built.`
"""
print(f"Testing SDPA on {device} using type {dtype}")
k, q, v = torch.rand(3, 1, 16, 77, 64, dtype=dtype, device=device).unbind(0)
attn = torch.rand(1, 1, 77, 77, dtype=dtype, device=device)
rc = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn)
assert rc.isnan().any().item() is False
def smoke_test_compile(device: str = "cpu") -> None:
supported_dtypes = [torch.float16, torch.float32, torch.float64]
@ -500,12 +489,10 @@ def main() -> None:
smoke_test_conv2d()
test_linalg()
test_numpy()
test_sdpa()
if is_cuda_system:
test_linalg("cuda")
test_cuda_gds_errors_captured()
test_sdpa("cuda")
if options.package == "all":
smoke_test_modules()

View File

@ -208,8 +208,6 @@ if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
source /opt/intel/oneapi/ccl/latest/env/vars.sh
# shellcheck disable=SC1091
source /opt/intel/oneapi/mpi/latest/env/vars.sh
# shellcheck disable=SC1091
source /opt/intel/oneapi/pti/latest/env/vars.sh
# Check XPU status before testing
timeout 30 xpu-smi discovery || true
fi
@ -344,18 +342,8 @@ test_python_smoke() {
}
test_python_smoke_b200() {
# Targeted smoke tests for B200 including FlashAttention CuTe coverage
install_flash_attn_cute
time python test/run_test.py \
--include \
test_matmul_cuda \
test_scaled_matmul_cuda \
inductor/test_fp8 \
nn/attention/test_fa4 \
nn/attention/test_open_registry \
inductor/test_flex_flash \
$PYTHON_TEST_EXTRA_OPTION \
--upload-artifacts-while-running
# Targeted smoke tests for B200 - staged approach to avoid too many failures
time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
assert_git_not_dirty
}
@ -836,11 +824,6 @@ test_inductor_halide() {
assert_git_not_dirty
}
test_inductor_pallas() {
python test/run_test.py --include inductor/test_pallas.py --verbose
assert_git_not_dirty
}
test_inductor_triton_cpu() {
python test/run_test.py --include inductor/test_triton_cpu_backend.py inductor/test_torchinductor_strided_blocks.py --verbose
assert_git_not_dirty
@ -1741,8 +1724,6 @@ elif [[ "${TEST_CONFIG}" == *inductor_distributed* ]]; then
test_inductor_distributed
elif [[ "${TEST_CONFIG}" == *inductor-halide* ]]; then
test_inductor_halide
elif [[ "${TEST_CONFIG}" == *inductor-pallas* ]]; then
test_inductor_pallas
elif [[ "${TEST_CONFIG}" == *inductor-triton-cpu* ]]; then
test_inductor_triton_cpu
elif [[ "${TEST_CONFIG}" == *inductor-micro-benchmark* ]]; then

View File

@ -63,7 +63,7 @@ self-hosted-runner:
- linux.rocm.gpu.gfx942.1
- linux.rocm.gpu.gfx942.2
- linux.rocm.gpu.gfx942.4
- linux.rocm.gfx942.docker-cache
- rocm-docker
# Org wise AWS `mac2.metal` runners (2020 Mac mini hardware powered by Apple silicon M1 processors)
- macos-m1-stable
- macos-m1-14

View File

@ -1 +1 @@
07b6cbde121417a70e4dc871adb6d27030e0ce3f
ad5816f0eee1c873df1b7d371c69f1f811a89387

View File

@ -1 +1 @@
acccf86477759b2d3500f1ae1be065f7b1e409ec
ca2212438fdd8ce29b66999ed70ed54b0f9372d1

View File

@ -1 +1 @@
e4d25697f9dc5eedaf8f0a5bf085c62c5455a53a
c8b09f5f77d6bf6fb7ed7a9aa83e5d8156b3a5e9

.github/labeler.yml
View File

@ -138,8 +138,7 @@
- test/test_matmul_cuda.py
- test/test_scaled_matmul_cuda.py
- test/inductor/test_fp8.py
- aten/src/ATen/native/cuda/*Blas.cpp
- aten/src/ATen/cuda/CUDA*Blas.*
- aten/src/ATen/native/cuda/Blas.cpp
- torch/**/*cublas*
- torch/_inductor/kernel/mm.py
- test/inductor/test_max_autotune.py
@ -149,8 +148,7 @@
- test/test_matmul_cuda.py
- test/test_scaled_matmul_cuda.py
- test/inductor/test_fp8.py
- aten/src/ATen/native/cuda/*Blas.cpp
- aten/src/ATen/cuda/CUDA*Blas.*
- aten/src/ATen/native/cuda/Blas.cpp
- torch/**/*cublas*
- torch/_inductor/kernel/mm.py
- test/inductor/test_max_autotune.py
@ -160,21 +158,7 @@
- test/test_matmul_cuda.py
- test/test_scaled_matmul_cuda.py
- test/inductor/test_fp8.py
- aten/src/ATen/native/cuda/*Blas.cpp
- aten/src/ATen/cuda/CUDA*Blas.*
- aten/src/ATen/native/cuda/Blas.cpp
- torch/_inductor/kernel/mm.py
- test/inductor/test_max_autotune.py
- third_party/fbgemm
"ciflow/mps":
- aten/src/ATen/mps/**
- aten/src/ATen/native/mps/**
- torch/_inductor/codegen/mps.py
- test/test_mps.py
- test/inductor/test_mps_basic.py
"ciflow/h100-symm-mem":
- torch/csrc/distributed/c10d/symm_mem/**
- torch/distributed/_symmetric_memory/**
- test/distributed/**/*mem*
- test/distributed/**/*mem*/**

View File

@ -10,4 +10,3 @@
pathFilter:
- 'torch/csrc/inductor/aoti_torch/c/*'
- 'torch/csrc/inductor/aoti_torch/generated/*'
- 'torch/csrc/stable/c/*'

View File

@ -1,11 +1,10 @@
# Delete old branches
import os
import re
from collections.abc import Callable
from datetime import datetime
from functools import lru_cache
from pathlib import Path
from typing import Any
from typing import Any, Callable
from github_utils import gh_fetch_json_dict, gh_graphql
from gitutils import GitRepo

View File

@ -8,11 +8,10 @@ import re
import subprocess
import sys
import warnings
from collections.abc import Callable
from enum import Enum
from functools import cache
from logging import info
from typing import Any, Optional
from typing import Any, Callable, Optional
from urllib.request import Request, urlopen
import yaml

View File

@ -11,8 +11,7 @@ import sys
import time
import urllib
import urllib.parse
from collections.abc import Callable
from typing import Any, Optional
from typing import Any, Callable, Optional
from urllib.request import Request, urlopen

View File

@ -3,9 +3,8 @@
import json
import os
import warnings
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any, cast, Optional, Union
from typing import Any, Callable, cast, Optional, Union
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import Request, urlopen

View File

@ -4,10 +4,10 @@ import os
import re
import tempfile
from collections import defaultdict
from collections.abc import Callable, Iterator
from collections.abc import Iterator
from datetime import datetime
from functools import wraps
from typing import Any, cast, Optional, TypeVar, Union
from typing import Any, Callable, cast, Optional, TypeVar, Union
T = TypeVar("T")

View File

@ -34,9 +34,6 @@ python3 torch/utils/data/datapipes/gen_pyi.py
# Also check generated pyi files
find torch -name '*.pyi' -exec git add --force -- "{}" +
# Print current environment
python3 -m pip freeze
RC=0
# Run lintrunner on all files
if ! lintrunner --force-color --tee-json=lint.json ${ADDITIONAL_LINTRUNNER_ARGS} 2> /dev/null; then

View File

@ -17,12 +17,12 @@ import re
import time
import urllib.parse
from collections import defaultdict
from collections.abc import Callable, Iterable
from collections.abc import Iterable
from dataclasses import dataclass
from functools import cache
from pathlib import Path
from re import Pattern
from typing import Any, cast, NamedTuple, Optional
from typing import Any, Callable, cast, NamedTuple, Optional
from warnings import warn
import yaml

View File

@ -67,10 +67,9 @@ jobs:
pytorch-linux-jammy-py3.10-gcc11,
pytorch-linux-jammy-py3-gcc11-inductor-benchmarks,
pytorch-linux-jammy-py3.12-halide,
pytorch-linux-jammy-cuda12.8-py3.12-pallas,
pytorch-linux-jammy-xpu-n-1-py3,
pytorch-linux-noble-xpu-n-py3,
pytorch-linux-noble-xpu-n-py3-inductor-benchmarks,
pytorch-linux-jammy-xpu-n-py3,
pytorch-linux-jammy-xpu-n-py3-inductor-benchmarks,
pytorch-linux-jammy-py3-clang18-asan,
pytorch-linux-jammy-py3-clang12-onnx,
pytorch-linux-jammy-linter,
@ -119,22 +118,6 @@ jobs:
with:
docker-image: ${{ steps.build-docker-image.outputs.docker-image }}
- name: Generate output
if: contains(matrix.docker-image-name, 'rocm')
id: generate_output
run: |
docker_image_name="${{ matrix.docker-image-name }}"
docker_image_tag="${{ steps.build-docker-image.outputs.docker-image }}"
echo "${docker_image_name}=${docker_image_tag}" >> docker-builds-output-${docker_image_name}.txt
- name: Upload artifacts
uses: actions/upload-artifact@v4.4.0
if: contains(matrix.docker-image-name, 'rocm')
with:
name: docker-builds-artifacts-${{ matrix.docker-image-name }}
retention-days: 14
path: ./docker-builds-output-${{ matrix.docker-image-name }}.txt
- uses: nick-fields/retry@7152eba30c6575329ac0576536151aca5a72780e # v3.0.0
name: Push to https://ghcr.io/
id: push-to-ghcr-io

View File

@ -0,0 +1,55 @@
name: docker-cache-mi300
on:
# run every 6 hours
schedule:
- cron: 0 0,6,12,18 * * *
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name }}
cancel-in-progress: true
permissions:
id-token: write
contents: read
jobs:
docker-cache:
if: github.repository_owner == 'pytorch'
runs-on: rocm-docker
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
no-sudo: true
- name: configure aws credentials
id: aws_creds
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
with:
role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
aws-region: us-east-1
role-duration-seconds: 18000
- name: Login to Amazon ECR
id: login-ecr
continue-on-error: false
uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
- name: Calculate docker image
id: calculate-docker-image
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
push: false
- name: Pull docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
- name: Tar and upload to S3 bucket
run: |
sudo docker save -o ~/docker-data/pytorch/pytorch_docker_image.tar ${{ steps.calculate-docker-image.outputs.docker-image }}
sudo rclone copy -P --s3-upload-concurrency 64 --s3-chunk-size 200M --s3-upload-cutoff 300M ~/docker-data/pytorch/pytorch_docker_image.tar oci:pytorchbucket0002/pytorch_docker_image --progress

View File

@ -1,105 +0,0 @@
name: docker-cache-rocm
on:
workflow_run:
workflows: [docker-builds]
branches: [main, release]
types:
- completed
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name }}
cancel-in-progress: true
permissions:
id-token: write
contents: read
actions: read
jobs:
download-docker-builds-artifacts:
if: github.repository_owner == 'pytorch'
name: download-docker-builds-artifacts
runs-on: ubuntu-latest
outputs:
pytorch-linux-jammy-rocm-n-py3: ${{ steps.process-artifacts.outputs.pytorch-linux-jammy-rocm-n-py3 }}
pytorch-linux-noble-rocm-n-py3: ${{ steps.process-artifacts.outputs.pytorch-linux-noble-rocm-n-py3 }}
pytorch-linux-jammy-rocm-n-py3-benchmarks: ${{ steps.process-artifacts.outputs.pytorch-linux-jammy-rocm-n-py3-benchmarks }}
steps:
- name: Download artifacts
uses: actions/download-artifact@v4.1.7
with:
run-id: ${{ github.event.workflow_run.id }}
path: ./docker-builds-artifacts
merge-multiple: true
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Process artifacts
id: process-artifacts
run: |
ls -R ./docker-builds-artifacts
cat ./docker-builds-artifacts/*txt >> "${GITHUB_OUTPUT}"
cat "${GITHUB_OUTPUT}"
docker-cache:
if: github.repository_owner == 'pytorch'
needs: download-docker-builds-artifacts
strategy:
fail-fast: false
matrix:
runner: [linux.rocm.gfx942.docker-cache]
docker-image: [
"${{ needs.download-docker-builds-artifacts.outputs.pytorch-linux-jammy-rocm-n-py3 }}",
"${{ needs.download-docker-builds-artifacts.outputs.pytorch-linux-noble-rocm-n-py3 }}",
"${{ needs.download-docker-builds-artifacts.outputs.pytorch-linux-jammy-rocm-n-py3-benchmarks }}"
]
runs-on: "${{ matrix.runner }}"
steps:
- name: debug
run: |
JSON_STRINGIFIED="${{ toJSON(needs.download-docker-builds-artifacts.outputs) }}"
echo "Outputs of download-docker-builds-artifacts job: ${JSON_STRINGIFIED}"
- name: configure aws credentials
id: aws_creds
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
with:
role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
aws-region: us-east-1
role-duration-seconds: 18000
- name: Login to Amazon ECR
id: login-ecr
continue-on-error: false
uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
- name: Generate ghrc.io tag
id: ghcr-io-tag
run: |
ecr_image="${{ matrix.docker-image }}"
ghcr_image="ghcr.io/pytorch/ci-image:${ecr_image##*:}"
echo "ghcr_image=${ghcr_image}" >> "$GITHUB_OUTPUT"
- name: Pull docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.ghcr-io-tag.outputs.ghcr_image }}
- name: Save as tarball
run: |
docker_image_tag=${{ matrix.docker-image }}
docker_image_tag="${docker_image_tag#*:}" # Remove everything before and including first ":"
docker_image_tag="${docker_image_tag%-*}" # Remove everything after and including last "-"
ref_name=${{ github.event.workflow_run.head_branch }}
if [[ $ref_name =~ "release/" ]]; then
ref_suffix="release"
elif [[ $ref_name == "main" ]]; then
ref_suffix="main"
else
echo "Unexpected branch in ref_name: ${ref_name}" && exit 1
fi
docker tag ${{ steps.ghcr-io-tag.outputs.ghcr_image }} ${{ matrix.docker-image }}
# mv is atomic operation, so we use intermediate tar.tmp file to prevent read-write contention
docker save -o ~/pytorch-data/docker/${docker_image_tag}.tar.tmp ${{ matrix.docker-image }}
mv ~/pytorch-data/docker/${docker_image_tag}.tar.tmp ~/pytorch-data/docker/${docker_image_tag}_${ref_suffix}.tar

View File

@ -37,6 +37,7 @@ jobs:
needs: get-label-type
with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runner: "linux.c7i.12xlarge"
build-environment: linux-jammy-cuda12.8-py3.10-gcc11-sm90-dist
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
cuda-arch-list: '9.0'

View File

@ -83,8 +83,8 @@ jobs:
needs: get-label-type
with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build-environment: linux-noble-xpu-n-py3.10
docker-image-name: ci-image:pytorch-linux-noble-xpu-n-py3-inductor-benchmarks
build-environment: linux-jammy-xpu-n-py3.10
docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-py3-inductor-benchmarks
runner: linux.c7i.12xlarge
test-matrix: |
{ include: [
@ -117,7 +117,7 @@ jobs:
uses: ./.github/workflows/_xpu-test.yml
needs: xpu-n-py3_10-inductor-benchmark-build
with:
build-environment: linux-noble-xpu-n-py3.10
build-environment: linux-jammy-xpu-n-py3.10
dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-false-cppwrapper-true-aotinductor-true-freezing_cudagraphs-false-cudagraphs_low_precision-false
docker-image: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.docker-image }}
test-matrix: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.test-matrix }}
@ -137,7 +137,7 @@ jobs:
uses: ./.github/workflows/_xpu-test.yml
needs: xpu-n-py3_10-inductor-benchmark-build
with:
build-environment: linux-noble-xpu-n-py3.10
build-environment: linux-jammy-xpu-n-py3.10
dashboard-tag: training-${{ inputs.training }}-inference-${{ inputs.inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-cudagraphs-${{ inputs.cudagraphs }}-cppwrapper-${{ inputs.cppwrapper }}-aotinductor-${{ inputs.aotinductor }}-maxautotune-${{ inputs.maxautotune }}-freezing_cudagraphs-${{ inputs.freezing_cudagraphs }}-cudagraphs_low_precision-${{ inputs.cudagraphs }}
docker-image: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.docker-image }}
test-matrix: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.test-matrix }}

View File

@ -1,4 +1,4 @@
name: inductor-rocm-mi200
name: inductor-rocm
on:
schedule:

View File

@ -81,32 +81,6 @@ jobs:
test-matrix: ${{ needs.inductor-halide-build.outputs.test-matrix }}
secrets: inherit
inductor-pallas-build:
name: inductor-pallas-build
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
build-environment: linux-jammy-cuda12.8-py3.12-gcc11
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-py3.12-pallas
cuda-arch-list: '8.9'
runner: linux.8xlarge.memory
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
test-matrix: |
{ include: [
{ config: "inductor-pallas", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.12xlarge.nvidia.gpu" },
]}
secrets: inherit
inductor-pallas-test:
name: inductor-pallas-test
uses: ./.github/workflows/_linux-test.yml
needs: inductor-pallas-build
with:
build-environment: linux-jammy-py3.12-gcc11
docker-image: ${{ needs.inductor-pallas-build.outputs.docker-image }}
test-matrix: ${{ needs.inductor-pallas-build.outputs.test-matrix }}
secrets: inherit
inductor-triton-cpu-build:
name: inductor-triton-cpu-build
uses: ./.github/workflows/_linux-build.yml

View File

@ -5,11 +5,9 @@ on:
- cron: 0 0 * * *
push:
tags:
# NOTE: Doc build pipelines should only get triggered on:
# Major or minor release candidates builds
- v[0-9]+.[0-9]+.0+-rc[0-9]+
# Final RC for major, minor and patch releases
- v[0-9]+.[0-9]+.[0-9]+
# NOTE: Doc build pipelines should only get triggered on release candidate builds
# Release candidate tags look like: v1.11.0-rc1
- v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- ciflow/nightly/*
workflow_dispatch:

View File

@ -342,16 +342,16 @@ jobs:
test-matrix: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc9-inductor-build.outputs.test-matrix }}
secrets: inherit
linux-noble-xpu-n-py3_10-build:
name: linux-noble-xpu-n-py3.10
linux-jammy-xpu-n-py3_10-build:
name: linux-jammy-xpu-n-py3.10
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
# This should sync with the build in xpu.yml but xpu uses a larger runner
# sync-tag: linux-xpu-n-build
runner_prefix: ${{ needs.get-label-type.outputs.label-type }}
build-environment: linux-noble-xpu-n-py3.10
docker-image-name: ci-image:pytorch-linux-noble-xpu-n-py3
build-environment: linux-jammy-xpu-n-py3.10
docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-py3
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 4, runner: "linux.idc.xpu" },

View File

@ -1,4 +1,4 @@
name: rocm-mi200
name: rocm
on:
push:

View File

@ -5,9 +5,7 @@
# Flow:
# 1. Builds PyTorch with CUDA 12.8+ and sm100 architecture for B200
# 2. Runs smoke tests on linux.dgx.b200 runner
# 3. Tests executed are defined in .ci/pytorch/test.sh -> test_python_smoke_b200() function
# - Includes matmul, scaled_matmul, FP8, and FlashAttention CuTe tests
# - FlashAttention CuTe DSL is installed as part of test execution
# 3. Tests executed are defined in .ci/pytorch/test.sh -> test_python_smoke() function
#
# Triggered by:
# - Pull requests modifying this workflow file

View File

@ -41,6 +41,7 @@ jobs:
needs: get-label-type
with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runner: linux.12xlarge.memory
build-environment: linux-jammy-cuda12.8-py3.10-gcc11-sm90
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
cuda-arch-list: '9.0'

View File

@ -1,83 +0,0 @@
name: trunk-rocm-mi300
on:
push:
branches:
- main
- release/*
workflow_dispatch:
schedule:
- cron: 29 8 * * * # about 1:29am PDT
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
cancel-in-progress: true
permissions:
id-token: write
contents: read
jobs:
llm-td:
if: github.repository_owner == 'pytorch'
name: before-test
uses: ./.github/workflows/llm_td_retrieval.yml
permissions:
id-token: write
contents: read
target-determination:
name: before-test
uses: ./.github/workflows/target_determination.yml
needs: llm-td
permissions:
id-token: write
contents: read
get-label-type:
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
curr_branch: ${{ github.head_ref || github.ref_name }}
curr_ref_type: ${{ github.ref_type }}
linux-jammy-rocm-py3_10-build:
name: linux-jammy-rocm-py3.10
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build-environment: linux-jammy-rocm-py3.10
docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
sync-tag: rocm-build
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 2, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 3, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 4, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 5, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 6, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "distributed", shard: 1, num_shards: 3, runner: "linux.rocm.gpu.gfx942.4.b" },
{ config: "distributed", shard: 2, num_shards: 3, runner: "linux.rocm.gpu.gfx942.4.b" },
{ config: "distributed", shard: 3, num_shards: 3, runner: "linux.rocm.gpu.gfx942.4.b" },
]}
secrets: inherit
linux-jammy-rocm-py3_10-test:
permissions:
id-token: write
contents: read
name: linux-jammy-rocm-py3.10
uses: ./.github/workflows/_rocm-test.yml
needs:
- linux-jammy-rocm-py3_10-build
- target-determination
with:
build-environment: linux-jammy-rocm-py3.10
docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }}
secrets: inherit

View File

@ -5,7 +5,6 @@ on:
workflows:
- pull
- trunk
- trunk-rocm-mi300
- periodic
- periodic-rocm-mi200
- periodic-rocm-mi300

View File

@ -47,15 +47,15 @@ jobs:
]}
secrets: inherit
linux-noble-xpu-n-py3_10-build:
name: linux-noble-xpu-n-py3.10
linux-jammy-xpu-n-py3_10-build:
name: linux-jammy-xpu-n-py3.10
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
sync-tag: linux-xpu-n-build
runner_prefix: ${{ needs.get-label-type.outputs.label-type }}
build-environment: linux-noble-xpu-n-py3.10
docker-image-name: ci-image:pytorch-linux-noble-xpu-n-py3
build-environment: linux-jammy-xpu-n-py3.10
docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-py3
runner: linux.c7i.12xlarge
test-matrix: |
{ include: [
@ -74,17 +74,17 @@ jobs:
]}
secrets: inherit
linux-noble-xpu-n-py3_10-test:
name: linux-noble-xpu-n-py3.10
linux-jammy-xpu-n-py3_10-test:
name: linux-jammy-xpu-n-py3.10
uses: ./.github/workflows/_xpu-test.yml
needs: linux-noble-xpu-n-py3_10-build
needs: linux-jammy-xpu-n-py3_10-build
permissions:
id-token: write
contents: read
with:
build-environment: linux-noble-xpu-n-py3.10
docker-image: ${{ needs.linux-noble-xpu-n-py3_10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-noble-xpu-n-py3_10-build.outputs.test-matrix }}
build-environment: linux-jammy-xpu-n-py3.10
docker-image: ${{ needs.linux-jammy-xpu-n-py3_10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-jammy-xpu-n-py3_10-build.outputs.test-matrix }}
secrets: inherit
windows-xpu-n-1-build:

View File

@ -186,8 +186,6 @@ include_patterns = [
'aten/src/ATen/native/nested/cuda/*.h',
'aten/src/ATen/native/nested/*.cpp',
'aten/src/ATen/native/nested/*.h',
'aten/src/ATen/xpu/**/*.h',
'aten/src/ATen/xpu/**/*.cpp',
'c10/**/*.cpp',
'c10/**/*.h',
'torch/*.h',
@ -1404,7 +1402,7 @@ init_command = [
'--dry-run={{DRYRUN}}',
'usort==1.0.8.post1',
'isort==6.0.1',
'ruff==0.14.4', # sync with RUFF
'ruff==0.13.1', # sync with RUFF
]
is_formatter = true
@ -1539,7 +1537,7 @@ init_command = [
'python3',
'tools/linter/adapters/pip_init.py',
'--dry-run={{DRYRUN}}',
'ruff==0.14.4', # sync with PYFMT
'ruff==0.13.1', # sync with PYFMT
]
is_formatter = true

View File

@ -736,44 +736,6 @@ if(NOT DEFINED USE_BLAS)
set(USE_BLAS ON)
endif()
# Prioritized Text Linker Optimization
if(USE_PRIORITIZED_TEXT_FOR_LD)
set(LINKER_SCRIPT_FILE_IN "${CMAKE_SOURCE_DIR}/cmake/prioritized_text.txt")
set(LINKER_SCRIPT_FILE_OUT "${CMAKE_SOURCE_DIR}/cmake/linker_script.ld")
execute_process(
COMMAND ${Python_EXECUTABLE}
${CMAKE_SOURCE_DIR}/tools/setup_helpers/generate_linker_script.py
--filein "${LINKER_SCRIPT_FILE_IN}"
--fout "${LINKER_SCRIPT_FILE_OUT}"
RESULT_VARIABLE _gen_result
OUTPUT_VARIABLE _gen_output
ERROR_VARIABLE _gen_error
)
if(NOT _gen_result EQUAL 0)
message(FATAL_ERROR
"Failed to generate linker script:\n${_gen_output}\n${_gen_error}")
endif()
append_cxx_flag_if_supported("-ffunction-sections" CMAKE_CXX_FLAGS)
append_cxx_flag_if_supported("-fdata-sections" CMAKE_CXX_FLAGS)
append_c_flag_if_supported("-ffunction-sections" CMAKE_C_FLAGS)
append_c_flag_if_supported("-fdata-sections" CMAKE_C_FLAGS)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -T${LINKER_SCRIPT_FILE_OUT}")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -T${LINKER_SCRIPT_FILE_OUT}")
else()
if(LINUX AND CPU_AARCH64)
message(WARNING [[
It is strongly recommend to enable linker script optimization for all AArch64 Linux builds.
To do so please export USE_PRIORITIZED_TEXT_FOR_LD=1
]])
endif()
endif()
# Build libtorch mobile library, which contains ATen/TH ops and native support
# for TorchScript model, but doesn't contain not-yet-unified caffe2 ops;
if(INTERN_BUILD_MOBILE)
@ -1440,6 +1402,9 @@ if(BUILD_JNI)
add_subdirectory(android/pytorch_android)
endif()
include(cmake/Summary.cmake)
caffe2_print_configuration_summary()
# Parse custom debug info
if(DEFINED USE_CUSTOM_DEBINFO)
string(REPLACE ";" " " SOURCE_FILES "${USE_CUSTOM_DEBINFO}")
@ -1479,5 +1444,56 @@ if(BUILD_BUNDLE_PTXAS AND USE_CUDA)
DESTINATION "${CMAKE_INSTALL_BINDIR}")
endif()
include(cmake/Summary.cmake)
caffe2_print_configuration_summary()
if(USE_PRIORITIZED_TEXT_FOR_LD)
add_compile_options(
$<$<COMPILE_LANGUAGE:C,CXX>:-ffunction-sections>
$<$<COMPILE_LANGUAGE:C,CXX>:-fdata-sections>
)
set(LINKER_SCRIPT_FILE_OUT "${CMAKE_SOURCE_DIR}/cmake/linker_script.ld")
set(LINKER_SCRIPT_FILE_IN "${CMAKE_SOURCE_DIR}/cmake/prioritized_text.txt")
add_custom_command(
OUTPUT "${LINKER_SCRIPT_FILE_OUT}"
COMMAND ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tools/setup_helpers/generate_linker_script.py --filein "${LINKER_SCRIPT_FILE_IN}" --fout "${LINKER_SCRIPT_FILE_OUT}"
DEPENDS ${CMAKE_SOURCE_DIR}/tools/setup_helpers/generate_linker_script.py "${LINKER_SCRIPT_FILE_IN}"
COMMENT "Generating prioritized text linker files"
VERBATIM
)
add_custom_target(generate_linker_script DEPENDS "${LINKER_SCRIPT_FILE_OUT}")
if(BUILD_PYTHON)
set(LINKER_OPT_TARGETS torch_python)
endif()
if(NOT BUILD_LIBTORCHLESS)
list(APPEND LINKER_OPT_TARGETS torch_cpu c10)
if(USE_CUDA)
list(APPEND LINKER_OPT_TARGETS torch_cuda c10_cuda)
endif()
if(USE_XPU)
list(APPEND LINKER_OPT_TARGETS torch_xpu c10_xpu)
endif()
if(USE_ROCM)
list(APPEND LINKER_OPT_TARGETS torch_hip c10_hip)
endif()
endif()
foreach(tgt IN LISTS LINKER_OPT_TARGETS)
if(TARGET ${tgt})
add_dependencies("${tgt}" generate_linker_script)
target_link_options_if_supported(${tgt} "-T,${LINKER_SCRIPT_FILE_OUT}")
set_property(TARGET ${tgt} APPEND PROPERTY LINK_DEPENDS "${LINKER_SCRIPT_FILE_OUT}")
else()
message(WARNING "Requested target '${tgt}' for linker script optimization was not found.")
endif()
endforeach()
else()
if(LINUX AND CPU_AARCH64)
message(WARNING [[
It is strongly recommend to enable linker script optimization for all AArch64 Linux builds.
To do so please export USE_PRIORITIZED_TEXT_FOR_LD=1
]])
endif()
endif()

View File

@ -210,12 +210,8 @@ torch/backends/cudnn/ @eqy @syed-ahmed @Aidyn-A
/test/inductor/test_flex_attention.py @drisspg
/test/inductor/test_flex_decoding.py @drisspg
# Low Precision & Grouped GEMMs
# Low Precision GEMMs
/aten/src/ATen/native/cuda/Blas.cpp @drisspg @slayton58
/aten/src/ATen/native/cuda/GroupedBlas.cpp @drisspg @slayton58
/aten/src/ATen/native/cuda/ScaledBlas.cpp @drisspg @slayton58
/aten/src/ATen/cuda/CUDABlas.cpp @drisspg @slayton58
/aten/src/ATen/cuda/CUDABlas.h @drisspg @slayton58
/aten/src/ATen/cuda/CUDAScaledBlas.cpp @drisspg @slayton58
/aten/src/ATen/cuda/CUDAScaledBlas.h @drisspg @slayton58
/test/test_scaled_matmul_cuda.py @drisspg @slayton58

View File

@ -37,7 +37,7 @@ Copyright (c) 2024 Tri Dao.
All rights reserved.
All contributions by Arm:
Copyright (c) 2021, 2023-2025 Arm Limited and/or its affiliates
Copyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors

View File

@ -18,8 +18,6 @@ Please report security issues using https://github.com/pytorch/pytorch/security/
All reports submitted through the security advisories mechanism would **either be made public or dismissed by the team within 90 days of the submission**. If advisory has been closed on the grounds that it is not a security issue, please do not hesitate to create an [new issue](https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml) as it is still likely a valid issue within the framework.
**Note on crashes and out of bounds access**: PyTorch is a computational framework that performs operations on behalf of the caller. Like many low-level libraries, PyTorch generally does not validate all inputs to every function—the responsibility for providing valid arguments lies with the calling code. While crashes and out of bounds memory access should be reported as bugs, they are generally not considered security vulnerabilities in PyTorch's threat model.
Please refer to the following page for our responsible disclosure policy, reward guidelines, and those things that should not be reported:
https://www.facebook.com/whitehat

View File

@ -226,8 +226,8 @@ template <
typename B = HostBlock<S>>
struct CachingHostAllocatorImpl {
virtual ~CachingHostAllocatorImpl() {
if (active_) {
active_ = false;
active_ = false;
if (pinned_use_background_threads()) {
getBackgroundThreadPool()->waitWorkComplete();
}
}
@ -260,7 +260,6 @@ struct CachingHostAllocatorImpl {
if (pinned_use_background_threads()) {
// Launch the background thread and process events in a loop.
static bool background_thread_flag [[maybe_unused]] = [this] {
active_ = true;
getBackgroundThreadPool()->run([&]() {
while (active_) {
process_events();
@ -684,9 +683,9 @@ struct CachingHostAllocatorImpl {
alignas(hardware_destructive_interference_size) std::mutex events_mutex_;
std::deque<std::pair<E, B*>> events_; // event queue paired with block
// Indicates whether the event-processing thread pool is active.
// Indicates whether the object is active.
// Set to false in the destructor to signal background threads to stop.
std::atomic<bool> active_{false};
std::atomic<bool> active_{true};
protected:
alignas(hardware_destructive_interference_size) HostStatsStaged stats_;
};
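
The flag handling in this hunk follows a common shutdown pattern: a background worker loops while an "active" flag is set, and teardown clears the flag and then waits for the worker. A rough Python analogy (illustrative only; the class and method names are hypothetical, and threading.Event stands in for the std::atomic<bool>):

import threading
import time

class EventProcessor:
    def __init__(self):
        self._active = threading.Event()
        self._thread = None

    def start(self):
        # Raise the flag only once the background worker actually launches.
        self._active.set()
        self._thread = threading.Thread(target=self._loop, daemon=True)
        self._thread.start()

    def _loop(self):
        while self._active.is_set():
            # The process_events() equivalent would run here.
            time.sleep(0.01)

    def close(self):
        # Destructor equivalent: signal the worker to stop, then wait for it.
        if self._thread is not None and self._active.is_set():
            self._active.clear()
            self._thread.join()

proc = EventProcessor()
proc.start()
proc.close()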

View File

@ -18,8 +18,6 @@
#include <unordered_set>
#include <utility>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-default")
namespace torch {
class TORCH_API CustomClassHolder : public c10::intrusive_ptr_target {};
namespace jit {
@ -1632,6 +1630,4 @@ struct TORCH_API WeakOrStrongTypePtr {
} // namespace c10
C10_DIAGNOSTIC_POP()
#include <ATen/core/ivalue_inl.h> // IWYU pragma: keep

View File

@ -29,8 +29,6 @@
#include <c10/util/intrusive_ptr.h>
#include <c10/util/irange.h>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-default")
namespace torch {
namespace jit {
struct Function;
@ -2569,5 +2567,3 @@ TypePtr IValue::type() const {
}
} // namespace c10
C10_DIAGNOSTIC_POP()

View File

@ -11,8 +11,6 @@
#include <sleef.h>
#endif
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-default")
// Sleef offers vectorized versions of some transcedentals
// such as sin, cos, tan etc..
// However for now opting for STL, since we are not building
@ -652,5 +650,3 @@ inline Vectorized<float> Vectorized<float>::erf() const {
} // namespace CPU_CAPABILITY
} // namespace at::vec
C10_DIAGNOSTIC_POP()

View File

@ -3,7 +3,6 @@
#include <cstdint>
#include <map>
#include <shared_mutex>
#include <cuda_runtime_api.h>
#include <cusparse.h>
@ -89,13 +88,8 @@ TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle();
TORCH_CUDA_CPP_API cublasLtHandle_t getCurrentCUDABlasLtHandle();
TORCH_CUDA_CPP_API void clearCublasWorkspaces();
struct WorkspaceMapWithMutex {
std::map<std::tuple<void*, void*>, at::DataPtr> map;
std::shared_mutex mutex;
};
TORCH_CUDA_CPP_API WorkspaceMapWithMutex& cublas_handle_stream_to_workspace();
TORCH_CUDA_CPP_API WorkspaceMapWithMutex& cublaslt_handle_stream_to_workspace();
TORCH_CUDA_CPP_API std::map<std::tuple<void *, void *>, at::DataPtr>& cublas_handle_stream_to_workspace();
TORCH_CUDA_CPP_API std::map<std::tuple<void *, void *>, at::DataPtr>& cublaslt_handle_stream_to_workspace();
TORCH_CUDA_CPP_API size_t getChosenWorkspaceSize();
TORCH_CUDA_CPP_API size_t getCUDABlasLtWorkspaceSize();
TORCH_CUDA_CPP_API void* getCUDABlasLtWorkspace();

View File

@ -99,7 +99,7 @@ void destroyCublasHandle(cublasHandle_t handle) {
// - Comments of @soumith copied from cuDNN handle pool implementation
#ifdef NO_CUDNN_DESTROY_HANDLE
#else
cublasDestroy(handle);
cublasDestroy(handle);
#endif
}
@ -107,27 +107,19 @@ using CuBlasPoolType = DeviceThreadHandlePool<cublasHandle_t, createCublasHandle
} // namespace
WorkspaceMapWithMutex& cublas_handle_stream_to_workspace() {
static auto& instance = *new WorkspaceMapWithMutex;
std::map<std::tuple<void *, void *>, at::DataPtr>& cublas_handle_stream_to_workspace() {
static auto& instance = *new std::map<std::tuple<void *, void *>, at::DataPtr>;
return instance;
}
WorkspaceMapWithMutex& cublaslt_handle_stream_to_workspace() {
static auto& instance = *new WorkspaceMapWithMutex;
std::map<std::tuple<void *, void *>, at::DataPtr>& cublaslt_handle_stream_to_workspace() {
static auto& instance = *new std::map<std::tuple<void *, void *>, at::DataPtr>;
return instance;
}
void clearCublasWorkspaces() {
{
auto& workspace = cublas_handle_stream_to_workspace();
std::unique_lock<std::shared_mutex> lock(workspace.mutex);
workspace.map.clear();
}
{
auto& workspace = cublaslt_handle_stream_to_workspace();
std::unique_lock<std::shared_mutex> lock(workspace.mutex);
workspace.map.clear();
}
cublas_handle_stream_to_workspace().clear();
cublaslt_handle_stream_to_workspace().clear();
}
size_t parseChosenWorkspaceSize() {
@ -249,10 +241,8 @@ void* getCUDABlasLtWorkspace() {
auto stream = c10::cuda::getCurrentCUDAStream();
cudaStream_t _stream = stream;
auto key = std::make_tuple(static_cast<void *>(handle), static_cast<void *>(_stream));
auto& workspace = at::cuda::cublas_handle_stream_to_workspace();
std::shared_lock<std::shared_mutex> lock(workspace.mutex);
auto workspace_it = workspace.map.find(key);
TORCH_INTERNAL_ASSERT(workspace_it != workspace.map.end());
auto workspace_it = at::cuda::cublas_handle_stream_to_workspace().find(key);
TORCH_INTERNAL_ASSERT(workspace_it != at::cuda::cublas_handle_stream_to_workspace().end());
return workspace_it->second.mutable_get();
}
#endif
@ -260,34 +250,11 @@ void* getCUDABlasLtWorkspace() {
auto stream = c10::cuda::getCurrentCUDAStream();
cudaStream_t _stream = stream;
auto key = std::make_tuple(static_cast<void *>(handle), static_cast<void *>(_stream));
auto& workspace = cublaslt_handle_stream_to_workspace();
// Fast path: check if workspace already exists
{
std::shared_lock<std::shared_mutex> lock(workspace.mutex);
auto workspace_it = workspace.map.find(key);
if (workspace_it != workspace.map.end()) {
return workspace_it->second.mutable_get();
}
}
// Slow path: allocate workspace outside the lock
auto new_workspace = getNewCUDABlasLtWorkspace();
// Insert with lock (double-check in case another thread inserted while we
// were allocating)
{
std::unique_lock<std::shared_mutex> lock(workspace.mutex);
auto workspace_it = workspace.map.find(key);
if (workspace_it == workspace.map.end()) {
workspace_it =
workspace.map.emplace(key, std::move(new_workspace)).first;
}
// else: another thread inserted it, our new_workspace will be automatically
// freed
return workspace_it->second.mutable_get();
auto workspace_it = cublaslt_handle_stream_to_workspace().find(key);
if (workspace_it == cublaslt_handle_stream_to_workspace().end()) {
workspace_it = cublaslt_handle_stream_to_workspace().insert(workspace_it, {key, getNewCUDABlasLtWorkspace()});
}
return workspace_it->second.mutable_get();
}
cublasHandle_t getCurrentCUDABlasHandle() {
@ -333,39 +300,11 @@ cublasHandle_t getCurrentCUDABlasHandle() {
// all the memory and cublas's cudaMallocAsync will return OOM
cudaStream_t _stream = stream;
auto key = std::make_tuple(static_cast<void *>(handle), static_cast<void *>(_stream));
auto& workspace = cublas_handle_stream_to_workspace();
size_t workspace_size = getChosenWorkspaceSize();
// Fast path: check if workspace already exists
{
std::shared_lock<std::shared_mutex> lock(workspace.mutex);
auto workspace_it = workspace.map.find(key);
if (workspace_it != workspace.map.end()) {
TORCH_CUDABLAS_CHECK(cublasSetWorkspace(
handle, workspace_it->second.get(), workspace_size));
return handle;
}
}
// Slow path: allocate workspace outside the lock
auto new_workspace = getNewWorkspace();
// Insert with lock (double-check in case another thread inserted while we
// were allocating)
{
std::unique_lock<std::shared_mutex> lock(workspace.mutex);
auto workspace_it = workspace.map.find(key);
if (workspace_it == workspace.map.end()) {
workspace_it =
workspace.map.emplace(key, std::move(new_workspace)).first;
}
// else: another thread inserted it, our new_workspace will be automatically
// freed
TORCH_CUDABLAS_CHECK(
cublasSetWorkspace(handle, workspace_it->second.get(), workspace_size));
auto workspace_it = cublas_handle_stream_to_workspace().find(key);
if (workspace_it == cublas_handle_stream_to_workspace().end()) {
workspace_it = cublas_handle_stream_to_workspace().insert(workspace_it, {key, getNewWorkspace()});
}
TORCH_CUDABLAS_CHECK(cublasSetWorkspace(handle, workspace_it->second.get(), getChosenWorkspaceSize()));
#if !defined(USE_ROCM)
// On CUDA >= 11, and architecture >= Ampere, cuBLAS can use TF32 to speedup
// FP32 data type calculations based on the value of the allow_tf32 flag.
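
For context on the cuBLAS/cuBLASLt hunks above: the version being removed guarded the per-stream workspace map with a std::shared_mutex, taking a shared lock on the read-mostly fast path and re-checking under an exclusive lock before inserting, so that concurrent getCurrentCUDABlasHandle() / getCUDABlasLtWorkspace() calls could not race with clearCublasWorkspaces(). A minimal, self-contained sketch of that double-checked pattern follows; the names (WorkspaceCache, get_or_create) are hypothetical stand-ins, not the actual ATen types.

// Sketch of the reader/writer double-checked pattern used by the removed code.
#include <map>
#include <memory>
#include <mutex>
#include <shared_mutex>

struct WorkspaceCache {
  std::shared_mutex mutex;
  std::map<void*, std::unique_ptr<int>> map;  // stand-in for stream -> DataPtr
};

int* get_or_create(WorkspaceCache& cache, void* key) {
  // Fast path: many readers may hold the shared lock concurrently.
  {
    std::shared_lock<std::shared_mutex> lock(cache.mutex);
    auto it = cache.map.find(key);
    if (it != cache.map.end()) {
      return it->second.get();
    }
  }
  // Slow path: allocate outside any lock, then double-check under the
  // exclusive lock in case another thread inserted first.
  auto fresh = std::make_unique<int>(0);
  {
    std::unique_lock<std::shared_mutex> lock(cache.mutex);
    auto it = cache.map.find(key);
    if (it == cache.map.end()) {
      it = cache.map.emplace(key, std::move(fresh)).first;
    }
    return it->second.get();
  }
}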

View File

@ -55,6 +55,14 @@ struct numeric_limits<int8_t> {
static inline __host__ __device__ int8_t upper_bound() { return INT8_MAX; }
};
template <>
struct numeric_limits<uint16_t> {
static inline __host__ __device__ uint16_t lowest() { return 0; }
static inline __host__ __device__ uint16_t max() { return UINT16_MAX; }
static inline __host__ __device__ uint16_t lower_bound() { return 0; }
static inline __host__ __device__ uint16_t upper_bound() { return UINT16_MAX; }
};
template <>
struct numeric_limits<int16_t> {
static inline __host__ __device__ int16_t lowest() { return INT16_MIN; }
@ -63,6 +71,14 @@ struct numeric_limits<int16_t> {
static inline __host__ __device__ int16_t upper_bound() { return INT16_MAX; }
};
template <>
struct numeric_limits<uint32_t> {
static inline __host__ __device__ uint32_t lowest() { return 0; }
static inline __host__ __device__ uint32_t max() { return UINT32_MAX; }
static inline __host__ __device__ uint32_t lower_bound() { return 0; }
static inline __host__ __device__ uint32_t upper_bound() { return UINT32_MAX; }
};
template <>
struct numeric_limits<int32_t> {
static inline __host__ __device__ int32_t lowest() { return INT32_MIN; }
@ -71,6 +87,21 @@ struct numeric_limits<int32_t> {
static inline __host__ __device__ int32_t upper_bound() { return INT32_MAX; }
};
template <>
struct numeric_limits<uint64_t> {
#ifdef _MSC_VER
static inline __host__ __device__ uint64_t lowest() { return 0; }
static inline __host__ __device__ uint64_t max() { return _UI64_MAX; }
static inline __host__ __device__ uint64_t lower_bound() { return 0; }
static inline __host__ __device__ uint64_t upper_bound() { return _UI64_MAX; }
#else
static inline __host__ __device__ uint64_t lowest() { return 0; }
static inline __host__ __device__ uint64_t max() { return UINT64_MAX; }
static inline __host__ __device__ uint64_t lower_bound() { return 0; }
static inline __host__ __device__ uint64_t upper_bound() { return UINT64_MAX; }
#endif
};
template <>
struct numeric_limits<int64_t> {
#ifdef _MSC_VER
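
The unsigned specializations added above let reduction kernels seed their accumulators for uint16/uint32/uint64 inputs: a min reduction starts from upper_bound() and a max reduction from lower_bound(). A rough host-side illustration of that usage, with a hypothetical limits trait standing in for the CUDA header above:

#include <cstdint>
#include <vector>

// Hypothetical host-side stand-in for the at::numeric_limits trait above.
template <typename T>
struct limits;

template <>
struct limits<std::uint32_t> {
  static constexpr std::uint32_t lower_bound() { return 0; }
  static constexpr std::uint32_t upper_bound() { return UINT32_MAX; }
};

template <typename T>
T min_all(const std::vector<T>& xs) {
  T acc = limits<T>::upper_bound();  // identity element for min
  for (const T& x : xs) {
    acc = x < acc ? x : acc;
  }
  return acc;
}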

View File

@ -382,14 +382,6 @@ fourOutputs solve_ex_batch_rule(
A_ = ensure_has_bdim(A_, A_bdim.has_value(), batch_size);
B_ = ensure_has_bdim(B_, B_bdim.has_value(), batch_size);
// NOTE [ solve_ex Batch Rule Contiguity ]
// A determines whether or not linalg_solve takes an optimized path. We need the check on A_ to match the one run on
// A as BatchedTensor since it might have been saved by autograd (specifically by the jvp) and the autograd behavior
// differs based on whether or not the optimized path was taken
const auto batched_A_was_contiguous = A_bdim.has_value() ? at::select(A, *A_bdim, 0).is_contiguous() : A.is_contiguous();
if (batched_A_was_contiguous && !A.is_complex()) {
A_ = A_.contiguous();
}
auto res = _linalg_solve_ex(A_, B_, left, check_errors);
return std::make_tuple(std::move(std::get<0>(res)), 0, std::move(std::get<1>(res)), 0, std::move(std::get<2>(res)), 0, std::move(std::get<3>(res)), 0);
}

View File

@ -157,8 +157,6 @@ constexpr DispatchKeySet kKeysToPropagateToWrapper({
DispatchKey::Negative,
DispatchKey::Conjugate,
DispatchKey::XLA,
DispatchKey::XPU,
DispatchKey::HPU,
DispatchKey::CUDA,
DispatchKey::CPU,
DispatchKey::PrivateUse1,

View File

@ -440,7 +440,7 @@ bool MPSHeapAllocatorImpl::release_cached_buffers() {
// we need to release the lock temporarily as synchronizing may cause deadlock with completion handlers.
m_mutex.unlock();
auto stream = getDefaultMPSStream();
dispatch_sync_with_rethrow(stream->queue(), ^() {
dispatch_sync(stream->queue(), ^() {
stream->synchronize(SyncType::COMMIT_AND_WAIT);
});
m_mutex.lock();

View File

@ -110,9 +110,6 @@ class TORCH_API MPSStream {
return _stream;
}
MTLBuffer_t getErrorBuffer();
void checkLastError();
private:
Stream _stream;
MTLCommandQueue_t _commandQueue = nil;
@ -124,8 +121,6 @@ class TORCH_API MPSStream {
dispatch_queue_t _serialQueue = nullptr;
// CommitAndContinue is enabled by default
bool _enableCommitAndContinue = true;
// Buffer that contains last raised error
MTLBuffer_t _errorBuffer = nil;
// use synchronize() to access any of these commit functions outside MPSStream
void commit();
@ -160,7 +155,4 @@ class TORCH_API MPSStreamImpl {
MPSStreamImpl();
};
#ifdef __OBJC__
void dispatch_sync_with_rethrow(dispatch_queue_t queue, void (^block)());
#endif
} // namespace at::mps

View File

@ -3,13 +3,13 @@
#include <ATen/mps/MPSAllocatorInterface.h>
#include <ATen/mps/MPSProfiler.h>
#include <ATen/mps/MPSStream.h>
#include <c10/metal/error.h>
@interface MPSGraphExecutionDescriptor ()
@property(readwrite, atomic) BOOL enableCommitAndContinue;
@end
namespace at::mps {
//-----------------------------------------------------------------
// MPSStream
//-----------------------------------------------------------------
@ -30,10 +30,6 @@ MPSStream::MPSStream(Stream stream) : _stream(stream) {
// Choose level which optimizes for GPU
_compilationDescriptor.optimizationLevel = MPSGraphOptimizationLevel0;
_executionDescriptor.compilationDescriptor = _compilationDescriptor;
_errorBuffer = [MPSDevice::getInstance()->device() newBufferWithLength:sizeof(c10::metal::ErrorMessages)
options:MTLResourceStorageModeShared];
std::memset([_errorBuffer contents], 0, 1024);
}
MPSStream::~MPSStream() {
@ -42,8 +38,6 @@ MPSStream::~MPSStream() {
[_executionDescriptor release];
[_compilationDescriptor release];
_executionDescriptor = nil;
[_errorBuffer release];
_errorBuffer = nil;
_compilationDescriptor = nil;
assert(_commandBuffer == nil);
@ -110,7 +104,6 @@ void MPSStream::commitAndWait() {
[_prevCommandBuffer waitUntilCompleted];
[_prevCommandBuffer release];
_prevCommandBuffer = nil;
checkLastError();
}
if (_commandBuffer) {
@ -118,7 +111,6 @@ void MPSStream::commitAndWait() {
[_commandBuffer waitUntilCompleted];
[_commandBuffer release];
_commandBuffer = nil;
checkLastError();
}
}
@ -161,7 +153,7 @@ void MPSStream::fill(id<MTLBuffer> buffer, uint8_t value, size_t length, size_t
if (length == 0) {
return;
}
dispatch_sync_with_rethrow(_serialQueue, ^() {
dispatch_sync(_serialQueue, ^() {
@autoreleasepool {
endKernelCoalescing();
id<MTLBlitCommandEncoder> blitEncoder = [commandBuffer() blitCommandEncoder];
@ -191,7 +183,7 @@ void MPSStream::copy(id<MTLBuffer> srcBuffer,
size_t dstOffset,
uint64_t profileId,
SyncType syncType) {
dispatch_sync_with_rethrow(_serialQueue, ^() {
dispatch_sync(_serialQueue, ^() {
@autoreleasepool {
endKernelCoalescing();
id<MTLBlitCommandEncoder> blitEncoder = [commandBuffer() blitCommandEncoder];
@ -244,7 +236,7 @@ void MPSStream::executeMPSGraph(MPSGraph* mpsGraph, NSDictionary* feeds, NSDicti
auto& profiler = getMPSProfiler();
const bool isGraphProfilingEnabled = profiler.isOperationProfilingEnabled();
dispatch_sync_with_rethrow(_serialQueue, ^() {
dispatch_sync(_serialQueue, ^() {
endKernelCoalescing();
if (isGraphProfilingEnabled) {
// this function call is only relevant for interval-based Signposts
@ -274,24 +266,6 @@ void MPSStream::executeMPSGraph(MPSGraph* mpsGraph, NSDictionary* feeds, NSDicti
});
}
id<MTLBuffer> MPSStream::getErrorBuffer() {
return _errorBuffer;
}
void MPSStream::checkLastError() {
auto msgs = reinterpret_cast<c10::metal::ErrorMessages*>([_errorBuffer contents]);
const auto& msg = msgs->msg[0];
if (!msgs) {
return;
}
unsigned int count = 0;
std::swap(count, msgs->count);
if (!count) {
return;
}
throw c10::AcceleratorError({msg.func, msg.file, msg.line}, 1, msg.message);
}
//-----------------------------------------------------------------
// MPSStreamImpl
//-----------------------------------------------------------------
@ -315,19 +289,4 @@ MPSStream* getDefaultMPSStream() {
return MPSStreamImpl::getInstance();
}
// Helper methods
void dispatch_sync_with_rethrow(dispatch_queue_t queue, void (^block)()) {
__block std::optional<std::exception_ptr> block_exception;
dispatch_sync(queue, ^() {
try {
block();
} catch (...) {
block_exception = std::current_exception();
}
});
if (block_exception) {
std::rethrow_exception(*block_exception);
}
}
} // namespace at::mps
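
The dispatch_sync_with_rethrow helper removed from this file (and re-declared under at::native::mps later in this diff) exists because a C++ exception thrown inside a Grand Central Dispatch block cannot propagate out of dispatch_sync directly; the helper captures it as a std::exception_ptr inside the block and rethrows it on the calling thread. A GCD-free sketch of the same pattern, with hypothetical names:

#include <exception>
#include <functional>
#include <optional>

// Stand-in for dispatch_sync: the real executor must not let C++ exceptions escape.
void run_nothrow(const std::function<void()>& fn) {
  fn();
}

void run_with_rethrow(const std::function<void()>& fn) {
  std::optional<std::exception_ptr> pending;
  run_nothrow([&] {
    try {
      fn();
    } catch (...) {
      pending = std::current_exception();  // capture inside the block
    }
  });
  if (pending) {
    std::rethrow_exception(*pending);  // surface it on the calling thread
  }
}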

View File

@ -1957,15 +1957,10 @@ TORCH_IMPL_FUNC(_linalg_solve_ex_out)(const Tensor& A,
const Tensor& LU,
const Tensor& pivots,
const Tensor& info) {
// Possible optimization: Compute the LU factorization of A^T if A is contiguous
// Then we solve A^T X = B with adjoint=True
// This saves a copy as A doesn't need to be copied into an F-contig matrix in lu_factor
// This optimization makes functorch's batching rule difficult. See NOTE [ solve_ex Batch Rule Contiguity ]
const bool use_A_T = A.is_contiguous() && !A.is_complex();
at::linalg_lu_factor_ex_out(const_cast<Tensor&>(LU),
const_cast<Tensor&>(pivots),
const_cast<Tensor&>(info),
use_A_T ? A.mT() : A);
A);
if (check_errors) {
at::_linalg_check_errors(info, "torch.linalg.solve_ex", A.dim() == 2);
}
@ -1974,7 +1969,7 @@ TORCH_IMPL_FUNC(_linalg_solve_ex_out)(const Tensor& A,
const bool vector_case = at::native::linalg_solve_is_vector_rhs(LU, B);
auto result_ = vector_case ? result.unsqueeze(-1) : result;
auto B_ = vector_case ? B.unsqueeze(-1) : B;
at::linalg_lu_solve_out(result_, LU, pivots, B_, left, /*adjoint*/use_A_T);
at::linalg_lu_solve_out(result_, LU, pivots, B_, left);
}
std::tuple<Tensor&, Tensor&> linalg_solve_ex_out(const Tensor& A,

View File

@ -1936,7 +1936,7 @@ static bool should_fold(const Tensor& tensor1, const Tensor& tensor2, bool has_o
// We order the tensors. t1 will be the larger tensor
// We can always transpose tensor2 as the dimensions are always >= 1 (precondition from matmul)
// and tensor1_larger iff tensor2.dim() > tensor1.dim()
// and tensor1_larger iff tensor2.dim() > tensor1.dim(9
const auto t1 = tensor1_larger ? MaybeOwned<Tensor>::borrowed(tensor1)
: MaybeOwned<Tensor>::owned(tensor2.mT());
const int64_t dim_t1 = t1->dim();
@ -1948,11 +1948,20 @@ static bool should_fold(const Tensor& tensor1, const Tensor& tensor2, bool has_o
return false;
}
// If we require a gradient, we should fold to minimize backward memory usage - even if this
// leads to a copy in the forward pass, because the result is needed in the backward pass.
// The only time we avoid this is under strict pre-allocated memory usage (has_out = True).
bool requires_grad = tensor1.requires_grad() || tensor2.requires_grad();
if (requires_grad && !has_out) {
// In this case we *do* incur in an extra copy to avoid creating an unnecessary large tensor in the backward
// Suppose we don't fold here. Let t1.shape = [b, m, n] t2.shape = [n, k] like in a transformer
// t2 will be expanded to a tensor of shape [b, n, k] and then we do t1.bmm(t2_expanded)
// The issue appears in the backward.
// The output gradient g of this operation would have shape [b, m, k]
// The backward wrt. t2 of bmm would be given by t1.mH @ g, which has shape [b, n, k]
// Then, the backward of expand is simply `sum(0)`. As such, we are instantiating a tensor
// of shape [b, n, k] unnecessarily, which may cause a large memory footprint, and in the
// worst case, an OOM
bool t2_requires_grad = tensor1_larger ? tensor2.requires_grad() : tensor1.requires_grad();
if (t2_requires_grad && !has_out) {
// We should be checking !at::GradMode::is_enabled(), but apparently
// this regresses performance in some cases:
// https://github.com/pytorch/pytorch/issues/118548#issuecomment-1916022394
return true;
}
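
To make the memory argument in the comment above concrete: with t1 of shape [b, m, n] and t2 of shape [n, k], folding computes a single (b*m, n) @ (n, k) matmul, while the unfolded bmm path expands t2 to [b, n, k] and its backward materializes a gradient of that same size before the sum(0). A back-of-the-envelope check with hypothetical sizes:

#include <cstdint>
#include <iostream>

int main() {
  // Hypothetical transformer-like sizes: t1 is [b, m, n], t2 is [n, k].
  const std::int64_t b = 32, m = 1024, n = 4096, k = 4096;
  // Unfolded path: backward w.r.t. t2 materializes a [b, n, k] tensor before sum(0).
  const std::int64_t unfolded_grad_elems = b * n * k;
  // Folded path: the gradient w.r.t. t2 is just [n, k].
  const std::int64_t folded_grad_elems = n * k;
  std::cout << "unfolded: " << unfolded_grad_elems << " elements, folded: "
            << folded_grad_elems << " elements ("
            << (unfolded_grad_elems / folded_grad_elems) << "x larger)\n";
  return 0;
}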

View File

@ -142,7 +142,6 @@ Tensor _pack_padded_sequence_backward_symint(const Tensor& grad, c10::SymIntArra
std::tuple<Tensor, Tensor> _pad_packed_sequence(const Tensor& data, const Tensor& _batch_sizes, bool batch_first, const Scalar& padding_value, int64_t total_length) {
auto batch_sizes_t = _batch_sizes.contiguous();
checkLongTensor(batch_sizes_t);
TORCH_CHECK(batch_sizes_t.numel() > 0, "batch_sizes can not be empty");
int64_t * batch_sizes = batch_sizes_t.data_ptr<int64_t>();
int64_t max_batch_size = batch_sizes[0];

View File

@ -23,7 +23,6 @@
#include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/_assert_async_native.h>
#include <ATen/ops/_assert_scalar_native.h>
#include <ATen/ops/_async_error_native.h>
#include <ATen/ops/_functional_assert_async_native.h>
#include <ATen/ops/_functional_assert_scalar_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor.h>
@ -480,14 +479,6 @@ Tensor isfinite(const Tensor& self) {
});
}
void _async_error(std::string_view msg) {
TORCH_CHECK(0, msg);
}
void _async_error_meta(std::string_view msg) {
// Do NOT error, it's an async error!
}
void _assert_async_cpu(const Tensor& self) {
TORCH_CHECK(
native::is_nonzero(self),

View File

@ -1,8 +1,6 @@
#pragma once
#include <c10/util/Exception.h>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-default")
namespace at::native {
// Used as an interface between the different BLAS-like libraries
@ -23,5 +21,3 @@ static inline char to_blas(TransposeType trans) {
}
} // namespace at::native
C10_DIAGNOSTIC_POP()

View File

@ -5,6 +5,7 @@
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/Parallel.h>
#include <ATen/TensorIterator.h>
#include <ATen/OpMathType.h>
@ -78,12 +79,12 @@ void min_all_kernel_impl(Tensor& result, const Tensor& input) {
reduce_all_impl<int64_t>(result, input, upper_bound<int64_t>(),
[=](int64_t a, int64_t b) -> int64_t { return min_impl(a, b); });
} else {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "min_all", [&] {
AT_DISPATCH_V2(input.scalar_type(), "min_all", AT_WRAP([&] {
using Vec = Vectorized<opmath_type<scalar_t>>;
reduce_all_impl_vec<scalar_t>(result, input, upper_bound<scalar_t>(),
[=] (scalar_t a , scalar_t b) -> scalar_t { return min_impl(a, b); },
[=](Vec a, Vec b) -> Vec { return minimum(a, b); });
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kHalf, kBFloat16);
}
}
@ -103,12 +104,12 @@ void max_all_kernel_impl(Tensor& result, const Tensor& input) {
reduce_all_impl<int64_t>(result, input, lower_bound<int64_t>(),
[=](int64_t a, int64_t b) -> int64_t { return max_impl(a, b); });
} else {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_all", [&] {
AT_DISPATCH_V2(input.scalar_type(), "max_all", AT_WRAP([&] {
using Vec = Vectorized<opmath_type<scalar_t>>;
reduce_all_impl_vec<scalar_t>(result, input, lower_bound<scalar_t>(),
[=] (scalar_t a , scalar_t b) -> scalar_t { return max_impl(a, b); },
[=](Vec a, Vec b) -> Vec { return maximum(a, b); });
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kHalf, kBFloat16);
}
}
@ -199,7 +200,7 @@ void aminmax_allreduce_kernel(
}
);
} else {
AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, input.scalar_type(), "aminmax_cpu", [&] {
AT_DISPATCH_V2(input.scalar_type(), "aminmax_cpu", AT_WRAP([&] {
using Vec = Vectorized<opmath_type<scalar_t>>;
using scalar_t_pair = std::pair<scalar_t, scalar_t>;
reduce_all_impl_vec_two_outputs<scalar_t>(
@ -214,7 +215,7 @@ void aminmax_allreduce_kernel(
[=](Vec a, Vec b) -> Vec { return minimum(a, b); },
[=](Vec a, Vec b) -> Vec { return maximum(a, b); }
);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf);
}
}
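
The dispatch hunks in this file exchange the AT_DISPATCH_ALL_TYPES_AND* and AT_DISPATCH_V2 forms, but both expand to the same mechanism: a switch on the runtime ScalarType where each case defines scalar_t as the matching C++ type and runs the same lambda body (AT_WRAP in the V2 form mainly protects commas in that body from macro-argument splitting). A hand-rolled sketch of the idea, with illustrative names rather than the ATen implementation:

#include <cstdint>
#include <stdexcept>

enum class ScalarType { Float, Long, UInt64 };

#define MY_DISPATCH_CASE(enum_val, ctype, BODY) \
  case ScalarType::enum_val: {                  \
    using scalar_t = ctype;                     \
    BODY();                                     \
    break;                                      \
  }

#define MY_DISPATCH(TYPE, BODY)                      \
  switch (TYPE) {                                    \
    MY_DISPATCH_CASE(Float, float, BODY)             \
    MY_DISPATCH_CASE(Long, std::int64_t, BODY)       \
    MY_DISPATCH_CASE(UInt64, std::uint64_t, BODY)    \
    default:                                         \
      throw std::runtime_error("unsupported dtype"); \
  }

void min_all_example(ScalarType dtype) {
  MY_DISPATCH(dtype, [&] {
    scalar_t acc = scalar_t(0);  // same body, different static scalar_t per case
    (void)acc;
  });
}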

View File

@ -3,6 +3,7 @@
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/OpMathType.h>
#include <ATen/cpu/vec/vec.h>
#include <ATen/cpu/vec/functional.h>
@ -347,34 +348,35 @@ struct MinValuesOps: public at::native::MinOps<scalar_t> {
};
void min_values_kernel_impl(TensorIterator& iter) {
if (iter.dtype() == kLong) {
// This case is special because Vectorized<int64_t> does not
// handle upper_bound<int64_t>().
// See: https://github.com/pytorch/pytorch/issues/43254
using scalar_t = int64_t;
binary_kernel_reduce(
iter,
MinValuesOps<scalar_t>{},
std::pair<scalar_t, int64_t>(upper_bound<scalar_t>(), -1));
// This case is special because Vectorized<int64_t> does not
// handle upper_bound<int64_t>().
// See: https://github.com/pytorch/pytorch/issues/43254
if (iter.dtype() == kLong || iter.dtype() == kUInt64) {
AT_DISPATCH_V2(iter.dtype(), "min_values_cpu", AT_WRAP([&iter] {
binary_kernel_reduce(
iter,
MinValuesOps<scalar_t>{},
std::pair<scalar_t, int64_t>(upper_bound<scalar_t>(), -1));
}), kLong, kUInt64);
return;
}
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "min_values_cpu", [&iter] {
AT_DISPATCH_V2(iter.dtype(), "min_values_cpu", AT_WRAP([&iter] {
binary_kernel_reduce_vec(
iter,
[](scalar_t a, scalar_t b) -> scalar_t { return min_impl(a, b); },
[](Vectorized<scalar_t> a, Vectorized<scalar_t> b) { return minimum(a, b); },
static_cast<double>(upper_bound<scalar_t>()));
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void max_values_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "max_values_cpu", [&iter] {
AT_DISPATCH_V2(iter.dtype(), "max_values_cpu", AT_WRAP([&iter] {
binary_kernel_reduce_vec(
iter,
[](scalar_t a, scalar_t b) -> scalar_t { return max_impl(a, b); },
[](Vectorized<scalar_t> a, Vectorized<scalar_t> b) { return maximum(a, b); },
lower_bound<scalar_t>());
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void argmax_kernel_impl(TensorIterator &iter) {

View File

@ -11,6 +11,7 @@
#include <vector>
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/Parallel.h>
#include <ATen/NumericUtils.h>
#include <ATen/TensorIterator.h>
@ -106,7 +107,7 @@ void min_kernel_impl(
bool keepdim) {
int64_t self_dim_size = ensure_nonempty_size(self, dim);
AT_DISPATCH_ALL_TYPES_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, self.scalar_type(), "min_cpu", [&] {
AT_DISPATCH_V2(self.scalar_type(), "min_cpu", AT_WRAP([&] {
compare_base_kernel<scalar_t>(result, indice, self, dim, keepdim, [&] (
scalar_t* result_data, int64_t* indice_data,
const scalar_t* self_data, auto self_dim_stride) {
@ -128,7 +129,7 @@ void min_kernel_impl(
*indice_data = index;
}
);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool);
}
void max_kernel_impl(
@ -139,7 +140,7 @@ void max_kernel_impl(
bool keepdim) {
int64_t self_dim_size = ensure_nonempty_size(self, dim);
AT_DISPATCH_ALL_TYPES_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, self.scalar_type(), "max_cpu", [&] {
AT_DISPATCH_V2(self.scalar_type(), "max_cpu", AT_WRAP([&] {
compare_base_kernel<scalar_t>(result, indice, self, dim, keepdim, [&] (
scalar_t* result_data, int64_t* indice_data,
const scalar_t* self_data, auto self_dim_stride) {
@ -161,7 +162,7 @@ void max_kernel_impl(
*indice_data = index;
}
);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool);
}
void aminmax_kernel(
@ -186,7 +187,7 @@ void aminmax_kernel(
return;
}
AT_DISPATCH_ALL_TYPES_AND3(ScalarType::Bool, ScalarType::BFloat16, ScalarType::Half, self.scalar_type(), "aminmax_cpu", [&] {
AT_DISPATCH_V2(self.scalar_type(), "aminmax_cpu", AT_WRAP([&] {
compare_base_kernel<scalar_t, scalar_t>(min_result, max_result, self, wrap_dim, keepdim, [&] (
scalar_t* min_result_data, scalar_t* max_result_data,
const scalar_t* self_data, auto self_dim_stride) {
@ -209,7 +210,7 @@ void aminmax_kernel(
*max_result_data = max_number;
}
);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), ScalarType::Bool, ScalarType::BFloat16, ScalarType::Half);
}
void where_kernel_impl(TensorIterator &iter) {

View File

@ -669,12 +669,9 @@ std::optional<c10::ScalarType> out_dtype) {
// _scaled_mm_allowed_device is used here within _grouped_mm_cuda which seems incorrect since scale is not used.
// the _grouped_mm_fallback should be safe for any ROCm GPU since it's just calling typical mm/bmm
bool use_fast_path = false;
// On non-CK systems (w/ ROCm), make sure use_fast_path is false
#if defined(USE_ROCM_CK_GEMM)
if (at::detail::getCUDAHooks().isGPUArch({"gfx942", "gfx950"})) {
use_fast_path = true;
}
#endif //USE_ROCM_CK_GEMM
#endif
const auto out_dtype_ = _resolve_grouped_mm_out_dtype(mat_a, mat_b, out_dtype);
Tensor out = create_grouped_gemm_output_tensor(mat_a, mat_b, offs, out_dtype_);
@ -683,11 +680,7 @@ std::optional<c10::ScalarType> out_dtype) {
#ifndef USE_ROCM
at::cuda::detail::bf16bf16_grouped_mm(mat_a, mat_b, offs, bias, out);
#else
#if defined(USE_ROCM_CK_GEMM)
at::hip::detail::group_gemm_ck(mat_a, mat_b, offs, bias, out);
#else
TORCH_WARN("ROCm: Group Gemm through CK not selected.");
#endif //USE_ROCM_CK_GEMM
#endif
} else {
_grouped_mm_fallback(mat_a, mat_b, offs, bias, out_dtype, out);

View File

@ -1,5 +1,6 @@
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/NumericUtils.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/ReduceAllOps.h>
@ -28,22 +29,22 @@ void _min_max_values_kernel_cuda_impl(TensorIterator& iter) {
}
void aminmax_allreduce_launch_kernel(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(
kBFloat16, kHalf, kBool, iter.input_dtype(), "aminmax_all_cuda", [&] {
AT_DISPATCH_V2(
iter.input_dtype(), "aminmax_all_cuda", AT_WRAP([&] {
_min_max_values_kernel_cuda_impl<scalar_t>(iter);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void aminmax_launch_kernel(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(
kBFloat16, kHalf, kBool, iter.input_dtype(), "aminmax_cuda", [&]() {
AT_DISPATCH_V2(
iter.input_dtype(), "aminmax_cuda", AT_WRAP([&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MinMaxOps<scalar_t, scalar_t, int32_t>{},
thrust::pair<scalar_t, scalar_t>(
at::numeric_limits<scalar_t>::upper_bound(),
at::numeric_limits<scalar_t>::lower_bound()));
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
} // namespace at::native

View File

@ -1,5 +1,6 @@
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/NumericUtils.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/ReduceAllOps.h>
@ -33,27 +34,27 @@ void max_values_kernel_cuda_impl(TensorIterator& iter) {
}
void max_values_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(
kBFloat16, kHalf, kBool, iter.dtype(), "max_values_cuda", [&]() {
AT_DISPATCH_V2(
iter.dtype(), "max_values_cuda", AT_WRAP([&]() {
max_values_kernel_cuda_impl<scalar_t>(iter);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void max_launch_kernel(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(
kBFloat16, kHalf, kBool, iter.input_dtype(), "max_cuda", [&]() {
AT_DISPATCH_V2(
iter.input_dtype(), "max_cuda", AT_WRAP([&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MaxOps<scalar_t>{},
thrust::pair<scalar_t, int64_t>(
at::numeric_limits<scalar_t>::lower_bound(), 0));
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void max_all_launch_kernel(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "max_all_cuda", [&] {
AT_DISPATCH_V2(iter.input_dtype(), "max_all_cuda", AT_WRAP([&] {
max_values_kernel_cuda_impl<scalar_t>(iter);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
REGISTER_DISPATCH(max_values_stub, &max_values_kernel_cuda)

View File

@ -12,6 +12,7 @@
#include <ATen/NumericUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/NumericUtils.h>
#include <ATen/cuda/NumericLimits.cuh>
@ -33,24 +34,24 @@ void min_values_kernel_cuda_impl(TensorIterator& iter) {
}
void min_values_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "min_values_cuda", [&]() {
AT_DISPATCH_V2(iter.dtype(), "min_values_cuda", AT_WRAP([&]() {
min_values_kernel_cuda_impl<scalar_t>(iter);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void min_launch_kernel(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "min_cuda", [&]() {
AT_DISPATCH_V2(iter.input_dtype(), "min_cuda", AT_WRAP([&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MinOps<scalar_t>{},
thrust::pair<scalar_t, int64_t>(at::numeric_limits<scalar_t>::upper_bound(), 0));
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void min_all_launch_kernel(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "min_all_cuda", [&] {
AT_DISPATCH_V2(iter.input_dtype(), "min_all_cuda", AT_WRAP([&] {
min_values_kernel_cuda_impl<scalar_t>(iter);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
REGISTER_DISPATCH(min_values_stub, &min_values_kernel_cuda)

View File

@ -133,7 +133,7 @@ at::Tensor quantized_convolution(
// supported in conv.
mask_weight = weight_zero_points.numel() > 1 ? 1 : 0;
if (groups > 1 && weight_zero_points.numel() > 1)
mask_weight = (1 << 0) | (1 << 1); // 2^0 (group) | 2^1 (output channel)
mask_weight = (2 ^ 0) | (2 ^ 1); // 2^0 (group) | 2^1 (output channel)
dnnl::primitive_attr pattr;
bool src_need_zp = (act_zero_point != 0);
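
A note on the mask_weight line above: in C++ the ^ operator is bitwise XOR, not exponentiation, so (2 ^ 0) | (2 ^ 1) only happens to equal (1 << 0) | (1 << 1); both evaluate to 3, i.e. bits 0 and 1 set. The shift form is the idiomatic spelling of the "2^0 | 2^1" intent from the comment. A tiny standalone check:

#include <cassert>

int main() {
  // '^' is XOR, not exponentiation: 2 ^ 0 == 2 and 2 ^ 1 == 3.
  assert((2 ^ 0) == 2);
  assert((2 ^ 1) == 3);
  // Both spellings of the mask happen to evaluate to 3 (bits 0 and 1 set),
  // but shifts are the idiomatic way to write "2^0 | 2^1".
  assert(((2 ^ 0) | (2 ^ 1)) == 3);
  assert(((1 << 0) | (1 << 1)) == 3);
  return 0;
}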

View File

@ -40,6 +40,8 @@ using namespace at::mps;
namespace at::native::mps {
void dispatch_sync_with_rethrow(dispatch_queue_t queue, void (^block)());
struct MPSScalar {
id<MTLBuffer> getMTLBuffer() const {
return __builtin_bit_cast(id<MTLBuffer>, buffer.get());

View File

@ -53,6 +53,21 @@
@end
namespace at::native::mps {
void dispatch_sync_with_rethrow(dispatch_queue_t queue, void (^block)()) {
__block std::optional<std::exception_ptr> block_exception;
dispatch_sync(queue, ^() {
try {
block();
} catch (...) {
block_exception = std::current_exception();
}
});
if (block_exception) {
std::rethrow_exception(*block_exception);
}
}
/**
* Computes distance from lowest to highest element offset in given tensor.
*/

View File

@ -1,5 +1,4 @@
#include <c10/metal/atomic.h>
#include <c10/metal/error.h>
#include <c10/metal/indexing.h>
#include <metal_stdlib>
@ -32,24 +31,10 @@ OffsetT index_apply_indices(
constant IndexAB* indices,
constant int64_t* sizes,
constant int64_t* strides,
uint num_indices,
thread bool& error,
device ErrorMessages* error_buf) {
uint num_indices) {
OffsetT rc = offs.x;
for (uint i = 0; i < num_indices; i++) {
auto idx = indices[i].indexArray[offs.y];
if (idx < -sizes[i] || idx >= sizes[i]) {
TORCH_REPORT_ERROR(
error_buf,
"index ",
idx,
" is out of bounds for dimension ",
i,
" with size ",
sizes[i]);
error = true;
break;
}
if (idx < 0) {
idx += sizes[i];
}
@ -70,7 +55,6 @@ kernel void index_select(
constant int64_t* index_sizes,
constant int64_t* index_strides,
constant uint4& ndim_nindices_numel,
device ErrorMessages* error_buffer,
uint thread_index [[thread_position_in_grid]]) {
const auto ndim = ndim_nindices_numel.x;
const auto num_indices = ndim_nindices_numel.y;
@ -81,19 +65,8 @@ kernel void index_select(
indices_strides,
ndim,
thread_index);
bool error = false;
auto input_offs = index_apply_indices<OffsetT>(
offs.yz,
indices,
index_sizes,
index_strides,
num_indices,
error,
error_buffer);
if (error) {
output[offs.x / sizeof(T)] = 0;
return;
}
offs.yz, indices, index_sizes, index_strides, num_indices);
output[offs.x / sizeof(T)] = input[input_offs / sizeof(T)];
}
@ -109,9 +82,7 @@ inline void index_put_impl(
constant int64_t* index_sizes,
constant int64_t* index_strides,
constant uint4& ndim_nindices_numel,
device ErrorMessages* error_buffer,
uint thread_index) {
bool error = false;
const auto ndim = ndim_nindices_numel.x;
const auto num_indices = ndim_nindices_numel.y;
const auto offs = index_get_offsets(
@ -122,16 +93,7 @@ inline void index_put_impl(
ndim,
thread_index);
auto output_offs = index_apply_indices<OffsetT>(
offs.xz,
indices,
index_sizes,
index_strides,
num_indices,
error,
error_buffer);
if (error) {
return;
}
offs.xz, indices, index_sizes, index_strides, num_indices);
output[output_offs / sizeof(T)] = input[offs.y / sizeof(T)];
}
@ -147,7 +109,6 @@ kernel void index_put(
constant int64_t* index_sizes,
constant int64_t* index_strides,
constant uint4& ndim_nindices_numel,
device ErrorMessages* error_buffer,
uint thread_index [[thread_position_in_grid]]) {
index_put_impl(
output,
@ -160,7 +121,6 @@ kernel void index_put(
index_sizes,
index_strides,
ndim_nindices_numel,
error_buffer,
thread_index);
}
@ -176,7 +136,6 @@ kernel void index_put_serial(
constant int64_t* index_sizes,
constant int64_t* index_strides,
constant uint4& ndim_nindices_numel,
device ErrorMessages* error_buffer,
uint thread_index [[thread_position_in_grid]]) {
(void)thread_index; // Suppress unused variable warning
for (uint idx = 0; idx < ndim_nindices_numel.z; ++idx) {
@ -191,7 +150,6 @@ kernel void index_put_serial(
index_sizes,
index_strides,
ndim_nindices_numel,
error_buffer,
idx);
}
}
@ -208,7 +166,6 @@ kernel void index_put_accumulate(
constant int64_t* index_sizes,
constant int64_t* index_strides,
constant uint4& ndim_nindices_numel,
device ErrorMessages* error_buffer,
uint thread_index [[thread_position_in_grid]]) {
const auto ndim = ndim_nindices_numel.x;
const auto num_indices = ndim_nindices_numel.y;
@ -219,18 +176,8 @@ kernel void index_put_accumulate(
indices_strides,
ndim,
thread_index);
bool error = false;
auto output_offs = index_apply_indices<OffsetT>(
offs.xz,
indices,
index_sizes,
index_strides,
num_indices,
error,
error_buffer);
if (error) {
return;
}
offs.xz, indices, index_sizes, index_strides, num_indices);
AtomicType<T>::atomic_add(
reinterpret_cast<device AtomicType_t<T>*>(output),
output_offs / sizeof(T),
@ -250,7 +197,6 @@ kernel void index_put_accumulate(
constant int64_t* index_sizes, \
constant int64_t* index_strides, \
constant uint4& ndim_nindices_numel, \
device ErrorMessages* error_buffer, \
uint thread_index [[thread_position_in_grid]])
#define REGISTER_INDEX_OP_ALL_DTYPES(OP_NAME) \

View File

@ -141,9 +141,6 @@ static Tensor& addmv_out_mps_impl(const Tensor& self,
};
MPSStream* stream = at::mps::getCurrentMPSStream();
if (result.numel() == 0) {
return result;
}
Tensor matMulVec = at::mm(mat, vec.unsqueeze(1)).squeeze(1);
@autoreleasepool {

View File

@ -220,7 +220,7 @@ Tensor _embedding_bag_dense_backward_mps(const Tensor& output_grad,
auto num_threads = (params.mode == EmbeddingBagMode::MAX) ? output_grad.numel() : num_indices * params.feature_size;
MPSStream* stream = getCurrentMPSStream();
dispatch_sync_with_rethrow(stream->queue(), ^() {
mps::dispatch_sync_with_rethrow(stream->queue(), ^() {
@autoreleasepool {
id<MTLComputeCommandEncoder> computeEncoder = stream->commandEncoder();
auto pipeline_state = lib.getPipelineStateForFunc(fmt::format("embedding_bag_backward_{}_{}",
@ -273,7 +273,7 @@ Tensor _embedding_bag_per_sample_weights_backward_mps(const Tensor& output_grad,
auto num_threads = num_indices * feature_size;
MPSStream* stream = getCurrentMPSStream();
dispatch_sync_with_rethrow(stream->queue(), ^() {
mps::dispatch_sync_with_rethrow(stream->queue(), ^() {
@autoreleasepool {
id<MTLComputeCommandEncoder> computeEncoder = stream->commandEncoder();
auto pipeline_state = lib.getPipelineStateForFunc(fmt::format("embedding_bag_per_sample_weights_backward_{}_{}",

View File

@ -179,8 +179,7 @@ static void dispatch_index_kernel(TensorIteratorBase& iter,
iter.strides(2),
index_size,
index_stride,
ndim_nindiees,
mpsStream->getErrorBuffer());
ndim_nindiees);
mtl_dispatch1DJob(computeEncoder, indexSelectPSO, serial ? 1 : iter.numel());
});
}
@ -300,7 +299,7 @@ static Tensor& nonzero_out_native_mps(const Tensor& self, Tensor& out_) {
MPSStream* stream = getCurrentMPSStream();
using CachedGraph = MPSUnaryCachedGraph;
dispatch_sync_with_rethrow(stream->queue(), ^() {
dispatch_sync(stream->queue(), ^() {
stream->synchronize(SyncType::COMMIT_AND_WAIT);
});
int64_t total_nonzero = at::count_nonzero(self).item<int64_t>();
@ -385,7 +384,7 @@ Tensor& nonzero_out_mps(const Tensor& self, Tensor& out_) {
MPSStream* stream = getCurrentMPSStream();
using CachedGraph = MPSUnaryCachedGraph;
dispatch_sync_with_rethrow(stream->queue(), ^() {
dispatch_sync(stream->queue(), ^() {
stream->synchronize(SyncType::COMMIT_AND_WAIT);
});
int64_t total_nonzero = at::count_nonzero(self).item<int64_t>();

View File

@ -923,7 +923,7 @@ std::tuple<Tensor, Tensor, Tensor> layer_norm_mps(const Tensor& input,
MPSStream* stream = getCurrentMPSStream();
TORCH_CHECK_NOT_IMPLEMENTED(input.scalar_type() != kLong, "Not implemented for long on MPS");
@autoreleasepool {
dispatch_sync_with_rethrow(stream->queue(), ^() {
mps::dispatch_sync_with_rethrow(stream->queue(), ^() {
// which kernel variant to use based on the normalized axis N size
const int N_READS = 4;
auto metalType = mps::scalarToMetalTypeString(input);

View File

@ -192,11 +192,6 @@
CompositeExplicitAutograd: _assert_tensor_metadata
Meta: _assert_tensor_metadata_meta_symint
- func: _async_error(str msg) -> ()
dispatch:
CompositeExplicitAutograd: _async_error
Meta: _async_error_meta
- func: _print(str s) -> ()
dispatch:
CompositeExplicitAutograd: _print
@ -2808,7 +2803,7 @@
- func: floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
dispatch:
CPU, CUDA, MPS, MTIA: floor_divide_out
CPU, CUDA, MPS: floor_divide_out
SparseCPU, SparseCUDA, SparseMPS: floor_divide_out_sparse_zerodim
- func: floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
@ -4297,7 +4292,6 @@
dispatch:
SparseCPU: sparse_sparse_matmul_cpu
SparseCUDA: sparse_sparse_matmul_cuda
SparseMPS: sparse_sparse_matmul_mps
autogen: _sparse_sparse_matmul.out
- func: mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
@ -4389,7 +4383,7 @@
variants: function, method
dispatch:
CompositeExplicitAutograd: mv
SparseCPU, SparseCUDA, SparseMPS: mv_sparse
SparseCPU, SparseCUDA: mv_sparse
- func: mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
dispatch:
@ -9838,7 +9832,7 @@
structured_delegate: erfinv.out
variants: method, function
dispatch:
SparseCPU, SparseCUDA, SparseMPS: erfinv_sparse
SparseCPU, SparseCUDA: erfinv_sparse
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: erfinv_sparse_csr
tags: pointwise
@ -9847,7 +9841,7 @@
structured_delegate: erfinv.out
variants: method
dispatch:
SparseCPU, SparseCUDA, SparseMPS: erfinv_sparse_
SparseCPU, SparseCUDA: erfinv_sparse_
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: erfinv_sparse_csr_
tags: pointwise
@ -9857,7 +9851,7 @@
structured_inherits: TensorIteratorBase
dispatch:
CPU, CUDA, MPS: erfinv_out
SparseCPU, SparseCUDA, SparseMPS: erfinv_sparse_out
SparseCPU, SparseCUDA: erfinv_sparse_out
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: erfinv_sparse_csr_out
tags: pointwise

View File

@ -47,7 +47,6 @@
#include <c10/macros/Macros.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/distance.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/gather.h>

View File

@ -10,10 +10,6 @@
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_coalesce_native.h>
#include <ATen/ops/repeat_interleave_native.h>
#include <ATen/ops/cumsum.h>
#include <ATen/ops/_sparse_sparse_matmul_native.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe_native.h>
#include <ATen/ops/cat.h>
#include <ATen/ops/add_native.h>
@ -892,114 +888,5 @@ static void sparse_mask_intersection_out_mps_kernel(
/*coalesce_mask=*/false);
}
Tensor sparse_sparse_matmul_mps(const Tensor& mat1_, const Tensor& mat2_) {
TORCH_CHECK(mat1_.is_sparse() && mat2_.is_sparse(),
"sparse_sparse_matmul_mps: both inputs must be sparse COO tensors");
TORCH_CHECK(mat1_.is_mps() && mat2_.is_mps(),
"sparse_sparse_matmul_mps: both inputs must be on MPS device");
TORCH_CHECK(mat1_.dim() == 2 && mat2_.dim() == 2,
"sparse_sparse_matmul_mps: both inputs must be 2D matrices");
TORCH_CHECK(mat1_.dense_dim() == 0 && mat2_.dense_dim() == 0,
"sparse_sparse_matmul_mps: only scalar values supported (dense_dim == 0)");
TORCH_CHECK(mat1_.size(1) == mat2_.size(0),
"mat1 and mat2 shapes cannot be multiplied (", mat1_.size(0), "x", mat1_.size(1), " and ", mat2_.size(0), "x", mat2_.size(1), ")");
TORCH_CHECK(mat1_.scalar_type() == mat2_.scalar_type(),
"sparse_sparse_matmul_mps: mat1 dtype ", mat1_.scalar_type(),
" does not match mat2 dtype ", mat2_.scalar_type());
const auto device = mat1_.device();
auto A = mat1_.coalesce();
auto B = mat2_.coalesce();
const auto I = A.size(0);
const auto K = A.size(1);
const auto N = B.size(1);
const auto nnzA = A._nnz();
const auto nnzB = B._nnz();
// Early empty result, return an empty, coalesced tensor
if (I == 0 || N == 0 || K == 0 || nnzA == 0 || nnzB == 0) {
auto empty_idx = at::empty({2, 0}, at::device(device).dtype(at::kLong));
auto empty_val = at::empty({0}, at::device(device).dtype(mat1_.scalar_type()));
auto out = _sparse_coo_tensor_unsafe(empty_idx, empty_val, {I, N}, mat1_.options());
out._coalesced_(true);
return out;
}
const auto computeDtype = at::result_type(mat1_, mat2_);
auto A_idx = A._indices().contiguous();
auto A_val = A._values().to(computeDtype).contiguous();
auto A_i = A_idx.select(0, 0).contiguous();
auto A_k = A_idx.select(0, 1).contiguous();
auto B_idx = B._indices().contiguous();
auto B_val = B._values().to(computeDtype).contiguous();
auto B_k = B_idx.select(0, 0).contiguous();
auto B_j = B_idx.select(0, 1).contiguous();
// csr-style row pointers for B by k (the shared dimension)
Tensor row_ptr_B;
{
auto batch_ptr = at::tensor({0LL, nnzB}, at::device(device).dtype(at::kLong));
row_ptr_B = at::empty({K + 1}, at::device(device).dtype(at::kLong));
build_row_ptr_per_batch_mps(B_k, batch_ptr, /*B=*/1, /*I=*/K, row_ptr_B);
}
auto row_ptr_B_lo = row_ptr_B.narrow(0, 0, K);
auto row_ptr_B_hi = row_ptr_B.narrow(0, 1, K);
auto deg_B = row_ptr_B_hi.sub(row_ptr_B_lo);
auto counts = deg_B.index_select(0, A_k);
const int64_t P = counts.sum().item<int64_t>();
if (P == 0) {
auto empty_idx = at::empty({2, 0}, at::device(device).dtype(at::kLong));
auto empty_val = at::empty({0}, at::device(device).dtype(mat1_.scalar_type()));
auto out = _sparse_coo_tensor_unsafe(empty_idx, empty_val, {I, N}, mat1_.options());
out._coalesced_(true);
return out;
}
auto group_ids = repeat_interleave_mps(counts);
// exclusive cumsum of counts
auto offsets = cumsum(counts, /*dim=*/0).sub(counts);
auto offsets_gather = offsets.index_select(0, group_ids);
auto within = at::arange(P, at::device(device).dtype(at::kLong)).sub(offsets_gather);
// Map each output element to its source B row and position
auto k_per_out = A_k.index_select(0, group_ids);
auto start_in_B = row_ptr_B.index_select(0, k_per_out);
auto seg_index = start_in_B.add(within);
// Assemble candidate coo pairs and values
auto i_out = A_i.index_select(0, group_ids).contiguous();
auto j_out = B_j.index_select(0, seg_index).contiguous();
auto vA_out = A_val.index_select(0, group_ids).contiguous();
auto vB_out = B_val.index_select(0, seg_index).contiguous();
auto v_out = vA_out.mul(vB_out);
// build (2, P) indices
auto out_indices = at::empty({2, P}, at::device(device).dtype(at::kLong)).contiguous();
out_indices.select(0, 0).copy_(i_out);
out_indices.select(0, 1).copy_(j_out);
auto result = _sparse_coo_tensor_unsafe(
out_indices, v_out, {I, N}, mat1_.options().dtype(computeDtype));
result = result.coalesce();
if (result.scalar_type() != mat1_.scalar_type()) {
auto cast_vals = result._values().to(mat1_.scalar_type());
auto out = _sparse_coo_tensor_unsafe(result._indices(), cast_vals, {I, N}, mat1_.options());
out._coalesced_(true);
return out;
}
return result;
}
REGISTER_MPS_DISPATCH(sparse_mask_intersection_out_stub, &sparse_mask_intersection_out_mps_kernel);
} // namespace at::native

View File

@ -61,7 +61,6 @@ list(APPEND ATen_CUDA_TEST_SRCS
${CMAKE_CURRENT_SOURCE_DIR}/cuda_complex_math_test.cu
${CMAKE_CURRENT_SOURCE_DIR}/cuda_complex_test.cu
${CMAKE_CURRENT_SOURCE_DIR}/cuda_cub_test.cu
${CMAKE_CURRENT_SOURCE_DIR}/cuda_cublas_handle_pool_test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/cuda_device_test.cpp
${CMAKE_CURRENT_SOURCE_DIR}/cuda_distributions_test.cu
${CMAKE_CURRENT_SOURCE_DIR}/cuda_dlconvertor_test.cpp

View File

@ -1,77 +0,0 @@
#include <gtest/gtest.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <c10/cuda/CUDAGuard.h>
#include <atomic>
#include <thread>
#include <vector>
// Test concurrent access to getCurrentCUDABlasHandle and getCUDABlasLtWorkspace
// to verify that the data race fix is working correctly
TEST(CUDABlasHandlePoolTest, ConcurrentGetAndClearWorkspaces) {
if (!at::cuda::is_available()) {
return;
}
constexpr int num_accessor_threads = 15;
constexpr int num_clear_threads = 5;
constexpr int iterations_per_thread = 50;
std::atomic<bool> stop{false};
std::atomic<int> error_count{0};
std::vector<std::thread> threads;
threads.reserve(num_accessor_threads + num_clear_threads);
// Launch accessor threads
for (int i = 0; i < num_accessor_threads; ++i) {
threads.emplace_back([&stop, &error_count]() {
try {
at::cuda::CUDAGuard device_guard(0);
while (!stop.load(std::memory_order_relaxed)) {
const auto handle = at::cuda::getCurrentCUDABlasHandle();
const auto workspace = at::cuda::getCUDABlasLtWorkspace();
if (handle == nullptr || workspace == nullptr) {
error_count++;
}
}
} catch (const std::exception& e) {
error_count++;
}
});
}
// Launch threads that clear workspaces
for (int i = 0; i < num_clear_threads; ++i) {
threads.emplace_back([&error_count]() {
try {
for (int j = 0; j < iterations_per_thread; ++j) {
at::cuda::clearCublasWorkspaces();
std::this_thread::yield();
}
} catch (const std::exception& e) {
error_count++;
}
});
}
// Let them run for a bit
std::this_thread::sleep_for(std::chrono::milliseconds(100));
stop.store(true, std::memory_order_relaxed);
for (auto& thread : threads) {
thread.join();
}
EXPECT_EQ(error_count.load(), 0);
}
int main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
c10::cuda::CUDACachingAllocator::init(1);
return RUN_ALL_TESTS();
}

View File

@ -1,3 +1,191 @@
#pragma once
#include <ATen/xpu/XPUContext.h>
#include <c10/xpu/XPUEvent.h>
#include <optional>
namespace at::xpu {
/*
* XPUEvent is a movable, non-copyable wrapper around a SYCL event. An XPUEvent is
* constructed lazily when it is first recorded. It has a device, acquired from the
* first recording stream, and any later stream that records the event must be on
* that same device.
*
* Currently, XPUEvent does NOT support exporting an event to another process via
* inter-process communication (IPC), so sharing event handles between different
* processes is not available. This could impact applications that rely on
* cross-process synchronization and communication.
*/
struct TORCH_XPU_API XPUEvent {
// Constructors
XPUEvent(bool enable_timing = false) noexcept
: enable_timing_{enable_timing} {}
~XPUEvent() {
if (isCreated()) {
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_deletion(
at::kXPU, reinterpret_cast<uintptr_t>(event_.get()));
}
}
}
XPUEvent(const XPUEvent&) = delete;
XPUEvent& operator=(const XPUEvent&) = delete;
XPUEvent(XPUEvent&& other) = default;
XPUEvent& operator=(XPUEvent&& other) = default;
operator sycl::event&() const {
return event();
}
std::optional<at::Device> device() const {
if (isCreated()) {
return at::Device(at::kXPU, device_index_);
} else {
return std::nullopt;
}
}
inline bool isCreated() const {
return (event_.get() != nullptr);
}
DeviceIndex device_index() const {
return device_index_;
}
sycl::event& event() const {
return *event_;
}
bool query() const {
using namespace sycl::info;
if (!isCreated()) {
return true;
}
return event().get_info<event::command_execution_status>() ==
event_command_status::complete;
}
void record() {
record(getCurrentXPUStream());
}
void recordOnce(const XPUStream& stream) {
if (!isCreated()) {
record(stream);
}
}
void record(const XPUStream& stream) {
if (!isCreated()) {
device_index_ = stream.device_index();
assignEvent(stream.queue());
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_creation(
at::kXPU, reinterpret_cast<uintptr_t>(event_.get()));
}
} else {
TORCH_CHECK(
device_index_ == stream.device_index(),
"Event device ",
device_index_,
" does not match recording stream's device ",
stream.device_index(),
".");
reassignEvent(stream.queue());
}
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_record(
at::kXPU,
reinterpret_cast<uintptr_t>(event_.get()),
reinterpret_cast<uintptr_t>(&stream.queue()));
}
}
void block(const XPUStream& stream) {
if (isCreated()) {
std::vector<sycl::event> event_list{event()};
// Make this stream wait until event_ is completed.
stream.queue().ext_oneapi_submit_barrier(event_list);
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_wait(
at::kXPU,
reinterpret_cast<uintptr_t>(event_.get()),
reinterpret_cast<uintptr_t>(&stream.queue()));
}
}
}
double elapsed_time(const XPUEvent& other) const {
TORCH_CHECK(
isCreated() && other.isCreated(),
"Both events must be recorded before calculating elapsed time.");
TORCH_CHECK(
query() && other.query(),
"Both events must be completed before calculating elapsed time.");
TORCH_CHECK(
enable_timing_ && other.enable_timing_,
"Both events must be created with argument 'enable_timing=True'.");
#if SYCL_COMPILER_VERSION < 20250000
TORCH_CHECK_NOT_IMPLEMENTED(
false,
"elapsed_time of XPUEvent requires PyTorch to be built with SYCL compiler version 2025.0.0 or newer.");
#endif
using namespace sycl::info::event_profiling;
// Block until both of the recorded events are completed.
uint64_t end_time_ns = other.event().get_profiling_info<command_end>();
uint64_t start_time_ns = event().get_profiling_info<command_end>();
// Return the elapsed time in milliseconds.
return 1e-6 *
(static_cast<double>(end_time_ns) - static_cast<double>(start_time_ns));
}
void synchronize() const {
if (isCreated()) {
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_synchronization(
at::kXPU, reinterpret_cast<uintptr_t>(event_.get()));
}
event().wait_and_throw();
}
}
private:
void assignEvent(sycl::queue& queue) {
#if SYCL_COMPILER_VERSION >= 20250000
if (enable_timing_) {
event_ = std::make_unique<sycl::event>(
sycl::ext::oneapi::experimental::submit_profiling_tag(queue));
} else {
event_ = std::make_unique<sycl::event>(queue.ext_oneapi_submit_barrier());
}
#else
event_ = std::make_unique<sycl::event>(queue.ext_oneapi_submit_barrier());
#endif
}
void reassignEvent(sycl::queue& queue) {
event_.reset();
assignEvent(queue);
}
bool enable_timing_ = false;
DeviceIndex device_index_ = -1;
// Only need to track the last event, as events in an in-order queue are
// executed sequentially.
std::unique_ptr<sycl::event> event_;
};
} // namespace at::xpu
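
A hedged usage sketch for the XPUEvent header added above: record a pair of timing-enabled events on the current stream, synchronize, and read the elapsed time. This assumes a SYCL/XPU build and that at::xpu::getCurrentXPUStream() resolves as it does inside the header; it is illustrative, not tested code.

#include <ATen/xpu/XPUEvent.h>

// Illustrative only: assumes an XPU-enabled build of PyTorch.
void time_region_on_current_stream() {
  auto stream = at::xpu::getCurrentXPUStream();
  at::xpu::XPUEvent start(/*enable_timing=*/true);
  at::xpu::XPUEvent stop(/*enable_timing=*/true);

  start.record(stream);
  // ... enqueue work on `stream` here ...
  stop.record(stream);

  stop.synchronize();                     // both events are done once `stop` finishes
  double ms = start.elapsed_time(stop);   // time from `start` to `stop`, in ms
  (void)ms;
}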

View File

@ -50,7 +50,6 @@ def check_accuracy(actual_csv, expected_csv, expected_filename):
"mobilenet_v2",
"pytorch_CycleGAN_and_pix2pix",
"pytorch_stargan",
"repvgg_a2",
"resnet152",
"resnet18",
"resnet50",

View File

@ -10,7 +10,7 @@ beit_base_patch16_224,pass,7
convnextv2_nano.fcmae_ft_in22k_in1k,fail_accuracy,7
convnextv2_nano.fcmae_ft_in22k_in1k,pass,7
@ -66,7 +66,7 @@ visformer_small,pass,7
vit_base_patch14_dinov2.lvd142m,fail_accuracy,7
vit_base_patch14_dinov2.lvd142m,pass,7

View File

@ -50,7 +50,7 @@ nfnet_l0,pass,7
repvgg_a2,pass,7
repvgg_a2,fail_accuracy,7

View File

@ -952,7 +952,7 @@ def latency_experiment_summary(suite_name, args, model, timings, **kwargs):
first_fields.append(kwargs["tag"])
headers = first_headers + ["speedup", "abs_latency"]
row = first_fields + [float(speedup), median[1] * 1000]
msg = f"{median[0] * 1000} ms, {median[1] * 1000} ms, {speedup:.3f}x"
msg = f"{speedup:.3f}x"
if args.baseline:
headers.extend(
[
@ -1010,7 +1010,7 @@ def latency_experiment_summary(suite_name, args, model, timings, **kwargs):
# Hypothetically you can use this from other places, but it's currently
# inaccessible, and when this assert fails you need to update the
# event_name here to account for the other cases you are using this
assert any([args.quantization, args.optimus])
assert args.quantization is not None
output_signpost(
dict(zip(headers, row)),
args,
@ -2288,9 +2288,11 @@ class BenchmarkRunner:
)
):
is_same = False
except Exception:
except Exception as e:
# Sometimes torch.allclose may throw RuntimeError
is_same = False
exception_string = str(e)
accuracy_status = f"fail_exception: {exception_string}"
return record_status(accuracy_status, dynamo_start_stats=start_stats)
if not is_same:
accuracy_status = "eager_two_runs_differ"
@ -2407,9 +2409,11 @@ class BenchmarkRunner:
force_max_multiplier=force_max_multiplier,
):
is_same = False
except Exception:
except Exception as e:
# Sometimes torch.allclose may throw RuntimeError
is_same = False
exception_string = str(e)
accuracy_status = f"fail_exception: {exception_string}"
return record_status(accuracy_status, dynamo_start_stats=start_stats)
if not is_same:
if self.args.skip_accuracy_check:
@ -2583,9 +2587,6 @@ class BenchmarkRunner:
**experiment_kwargs,
)
# reset dynamo
torch._dynamo.reset()
if self.args.export_aot_inductor:
optimized_model_iter_fn = optimize_ctx
else:
@ -2949,7 +2950,7 @@ class BenchmarkRunner:
status = self.check_tolerance(name, model, example_inputs, optimize_ctx)
print(status)
elif self.args.performance:
if self.args.backend in ["torchao", "optimus"]:
if self.args.backend == "torchao":
status = self.run_performance_test_non_alternate(
name, model, example_inputs, optimize_ctx, experiment, tag
)
@ -3525,12 +3526,6 @@ def parse_args(args=None):
action="store_true",
help="Measure speedup with TorchInductor",
)
group.add_argument(
"--optimus",
choices=["vertical_opt", "horizontal_opt", "all"],
default=None,
help="Measure speedup of Optimus with TorchInductor baseline",
)
group.add_argument(
"--quantization",
choices=[
@ -3788,9 +3783,6 @@ def run(runner, args, original_dir=None):
if args.inductor:
assert args.backend is None
args.backend = "inductor"
if args.optimus:
assert args.backend is None
args.backend = "optimus"
if args.quantization:
assert args.backend is None
args.backend = "torchao"
@ -4075,22 +4067,10 @@ def run(runner, args, original_dir=None):
runner.model_iter_fn = model_iter_fn_and_mark_step
optimize_ctx = torchao_optimize_ctx(args.quantization)
elif args.backend == "optimus":
from .optimus import get_baseline_ctx, get_optimus_optimize_ctx
baseline_ctx = get_baseline_ctx(
nopython=args.nopython, inductor_compile_mode=args.inductor_compile_mode
)
runner.model_iter_fn = baseline_ctx(runner.model_iter_fn)
optimize_ctx = get_optimus_optimize_ctx(
args.optimus, args.nopython, args.inductor_compile_mode
)
else:
optimize_ctx = torch._dynamo.optimize(args.backend, nopython=args.nopython)
experiment = (
speedup_experiment
if args.backend not in ["torchao", "optimus"]
else latency_experiment
speedup_experiment if args.backend != "torchao" else latency_experiment
)
if args.accuracy:
output_filename = f"accuracy_{args.backend}.csv"
@ -4111,12 +4091,7 @@ def run(runner, args, original_dir=None):
if args.only in runner.disable_cudagraph_models:
args.disable_cudagraphs = True
if (
args.inductor
or args.backend == "inductor"
or args.export_aot_inductor
or args.backend == "optimus"
):
if args.inductor or args.backend == "inductor" or args.export_aot_inductor:
inductor_config.triton.cudagraphs = not args.disable_cudagraphs
inductor_config.triton.persistent_reductions = (
not args.disable_persistent_reductions

View File

@ -1,62 +0,0 @@
import functools
import torch
def get_baseline_ctx(nopython, inductor_compile_mode):
return functools.partial(
torch.compile,
backend="inductor",
fullgraph=nopython,
mode=inductor_compile_mode,
)
def get_optimus_optimize_ctx(config, nopython, inductor_compile_mode):
if config == "vertical_opt":
optimus_inductor_config = {
"pre_grad_fusion_options": {
"normalization_pass": {},
"merge_splits_pass": {},
"split_cat_pass": {},
"unbind_stack_pass": {},
"unbind_cat_to_view_pass": {},
}
}
elif config == "horizontal_opt":
optimus_inductor_config = {
"pre_grad_fusion_options": {
"normalization_pass": {},
"batch_linear": {},
"batch_layernorm": {},
},
}
elif config == "all":
optimus_inductor_config = {
"pre_grad_fusion_options": {
"normalization_pass": {},
"batch_linear": {},
"batch_layernorm": {},
"merge_splits_pass": {},
"split_cat_pass": {},
"unbind_stack_pass": {},
"unbind_cat_to_view_pass": {},
},
}
else:
raise RuntimeError(f"Unknown optimus config: {config}")
def _inner(fn):
if "pre_grad_fusion_options" in optimus_inductor_config:
torch._inductor.config.pre_grad_fusion_options = optimus_inductor_config[
"pre_grad_fusion_options"
]
if "post_grad_fusion_options" in optimus_inductor_config:
torch._inductor.config.post_grad_fusion_options = optimus_inductor_config[
"post_grad_fusion_options"
]
return torch.compile(
fn, backend="inductor", fullgraph=nopython, mode=inductor_compile_mode
)
return _inner

View File

@ -2,7 +2,6 @@ import csv
import os
import re
import sys
from pathlib import Path
# This script takes the logs produced by the benchmark scripts (e.g.,
@ -16,7 +15,8 @@ from pathlib import Path
# This script is not very well written, feel free to rewrite it as necessary
assert len(sys.argv) == 2
full_log = Path(sys.argv[1]).read_text()
full_log = open(sys.argv[1]).read()
# If the log contains a gist URL, extract it so we can include it in the CSV
gist_url = ""

View File

@ -484,106 +484,24 @@ PyTorch,sum,sum_R256_V512_dim0_contiguousTrue_cpu,short,False,50.954394,0.000000
PyTorch,sum,sum_R256_V512_dim0_contiguousFalse_cpu,short,False,57.957757,0.000000
PyTorch,sum,sum_R256_V512_dim1_contiguousTrue_cpu,short,False,53.592068,0.000000
PyTorch,sum,sum_R256_V512_dim1_contiguousFalse_cpu,short,False,51.339726,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.bool,short,False,0.927,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.uint8,short,False,6.261,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int8,short,False,6.351,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int16,short,False,6.177,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int32,short,False,6.333,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int64,short,False,6.588,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float16,short,False,8.117,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.bfloat16,short,False,9.358,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float32,short,False,7.844,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float64,short,False,8.097,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.bool,short,False,6.159,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.uint8,short,False,0.926,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int8,short,False,6.192,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int16,short,False,6.276,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int32,short,False,6.461,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int64,short,False,6.524,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float16,short,False,8.136,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.bfloat16,short,False,6.854,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float32,short,False,6.446,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float64,short,False,6.829,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.bool,short,False,6.088,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.uint8,short,False,6.059,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int8,short,False,0.922,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int16,short,False,6.263,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int32,short,False,6.330,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int64,short,False,6.688,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float16,short,False,8.176,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.bfloat16,short,False,6.959,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float32,short,False,6.430,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float64,short,False,6.818,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.bool,short,False,6.350,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.uint8,short,False,6.221,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int8,short,False,6.193,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int16,short,False,0.922,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int32,short,False,6.263,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int64,short,False,6.525,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float16,short,False,7.960,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.bfloat16,short,False,6.801,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float32,short,False,6.594,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float64,short,False,7.089,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.bool,short,False,6.498,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.uint8,short,False,6.358,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int8,short,False,6.390,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int16,short,False,6.415,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int32,short,False,0.925,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int64,short,False,6.657,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float16,short,False,7.954,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.bfloat16,short,False,6.930,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float32,short,False,6.737,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float64,short,False,6.948,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.bool,short,False,6.757,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.uint8,short,False,6.402,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int8,short,False,6.550,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int16,short,False,6.518,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int32,short,False,6.766,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int64,short,False,0.929,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float16,short,False,8.557,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.bfloat16,short,False,9.045,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float32,short,False,7.672,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float64,short,False,7.276,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.bool,short,False,6.414,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.uint8,short,False,7.736,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int8,short,False,7.889,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int16,short,False,8.170,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int32,short,False,7.783,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int64,short,False,7.743,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float16,short,False,0.927,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.bfloat16,short,False,7.018,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float32,short,False,8.428,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float64,short,False,6.767,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.bool,short,False,6.479,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.uint8,short,False,7.827,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int8,short,False,6.450,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int16,short,False,6.320,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int32,short,False,6.385,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int64,short,False,8.119,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float16,short,False,8.063,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.bfloat16,short,False,0.925,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float32,short,False,8.629,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float64,short,False,6.638,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.bool,short,False,6.425,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.uint8,short,False,7.803,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int8,short,False,6.502,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int16,short,False,6.429,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int32,short,False,6.549,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int64,short,False,7.749,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float16,short,False,7.301,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.bfloat16,short,False,7.682,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float32,short,False,0.930,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float64,short,False,6.738,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.bool,short,False,6.798,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.uint8,short,False,6.506,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int8,short,False,6.494,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int16,short,False,6.668,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int32,short,False,6.696,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int64,short,False,7.115,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float16,short,False,7.910,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.bfloat16,short,False,7.410,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float32,short,False,6.868,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float64,short,False,0.924,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M8_N16_cpu,short,False,7.040985,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M8_N64_cpu,short,False,7.168604,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M8_N128_cpu,short,False,7.434442,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M16_N16_cpu,short,False,7.078318,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M16_N64_cpu,short,False,7.426670,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M16_N128_cpu,short,False,7.679027,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M32_N16_cpu,short,False,7.281365,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M32_N64_cpu,short,False,7.682783,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M32_N128_cpu,short,False,8.381938,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M8_N16_cpu,short,False,7.039854,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M8_N64_cpu,short,False,7.399855,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M8_N128_cpu,short,False,7.715193,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M16_N16_cpu,short,False,7.255140,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M16_N64_cpu,short,False,7.753522,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M16_N128_cpu,short,False,8.364281,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M32_N16_cpu,short,False,7.476377,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M32_N64_cpu,short,False,8.458564,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M32_N128_cpu,short,False,9.391939,0.000000
PyTorch,addcmul,addcmul_M1_N2_cpu_dtypetorch.float32,short,False,4.461410,0.000000
PyTorch,addcmul,addcmul_M1_N2_cpu_dtypetorch.bfloat16,short,False,4.560082,0.000000
PyTorch,addcmul,addcmul_M32_N64_cpu_dtypetorch.float32,short,False,5.141248,0.000000

View File

@ -4,84 +4,74 @@ import torch
tensor_conversion_short_configs = op_bench.cross_product_configs(
M=[32],
N=[128],
M=(
8,
16,
32,
),
N=(
16,
64,
128,
),
device=["cpu", "cuda"],
dtype_one=[
torch.bool,
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.half,
torch.bfloat16,
torch.float,
torch.double,
],
dtype_two=[
torch.bool,
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.half,
torch.bfloat16,
torch.float,
torch.double,
],
tags=["short"],
)
tensor_conversion_long_configs = op_bench.cross_product_configs(
M=[1024],
N=[1024],
M=(
64,
128,
256,
512,
),
N=(
256,
512,
1024,
2048,
),
device=["cpu", "cuda"],
dtype_one=[
torch.bool,
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.half,
torch.bfloat16,
torch.float,
torch.double,
],
dtype_two=[
torch.bool,
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.half,
torch.bfloat16,
torch.float,
torch.double,
],
tags=["long"],
)
class TensorConversionBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype_one, dtype_two, device):
class FloatToHalfTensorConversionBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, device):
self.inputs = {
"input": torch.rand(
M, N, device=device, requires_grad=False, dtype=torch.float
).to(dtype=dtype_one)
)
}
self.dtype_one = dtype_one
self.dtype_two = dtype_two
def forward(self, input):
return input.to(dtype=self.dtype_two)
return input.to(torch.half)
op_bench.generate_pt_test(tensor_conversion_short_configs, TensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_long_configs, TensorConversionBenchmark)
class HalfToFloatTensorConversionBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, device):
self.inputs = {
"input": torch.rand(
M, N, device=device, requires_grad=False, dtype=torch.half
)
}
def forward(self, input):
return input.to(torch.float)
op_bench.generate_pt_test(
tensor_conversion_short_configs, FloatToHalfTensorConversionBenchmark
)
op_bench.generate_pt_test(
tensor_conversion_long_configs, FloatToHalfTensorConversionBenchmark
)
op_bench.generate_pt_test(
tensor_conversion_short_configs, HalfToFloatTensorConversionBenchmark
)
op_bench.generate_pt_test(
tensor_conversion_long_configs, HalfToFloatTensorConversionBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
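If another conversion direction were ever needed, it would follow the same op_bench pattern as the two benchmarks above; a hypothetical bfloat16 variant (not part of this change) as a sketch:

    class FloatToBFloat16TensorConversionBenchmark(op_bench.TorchBenchmarkBase):
        def init(self, M, N, device):
            self.inputs = {
                "input": torch.rand(
                    M, N, device=device, requires_grad=False, dtype=torch.float
                )
            }

        def forward(self, input):
            return input.to(torch.bfloat16)

    op_bench.generate_pt_test(
        tensor_conversion_short_configs, FloatToBFloat16TensorConversionBenchmark
    )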

View File

@ -349,106 +349,24 @@ PyTorch,sum,sum_R256_V512_dim0_contiguousTrue_cpu,short,FALSE,12.5841
PyTorch,sum,sum_R256_V512_dim0_contiguousFALSE_cpu,short,FALSE,20.8765
PyTorch,sum,sum_R256_V512_dim1_contiguousTrue_cpu,short,FALSE,15.4414
PyTorch,sum,sum_R256_V512_dim1_contiguousFALSE_cpu,short,FALSE,15.3287
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.bool,short,False,0.797
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.uint8,short,False,6.071
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int8,short,False,6.031
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int16,short,False,6.243
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int32,short,False,7.231
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int64,short,False,7.791
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float16,short,False,12.661
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.bfloat16,short,False,11.225
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float32,short,False,9.772
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float64,short,False,9.872
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.bool,short,False,6.033
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.uint8,short,False,0.781
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int8,short,False,6.060
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int16,short,False,6.180
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int32,short,False,7.258
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int64,short,False,7.758
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float16,short,False,10.504
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.bfloat16,short,False,6.749
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float32,short,False,7.679
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float64,short,False,7.797
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.bool,short,False,6.019
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.uint8,short,False,6.079
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int8,short,False,0.785
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int16,short,False,6.188
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int32,short,False,7.288
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int64,short,False,7.770
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float16,short,False,10.466
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.bfloat16,short,False,6.676
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float32,short,False,7.736
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float64,short,False,7.780
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.bool,short,False,6.130
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.uint8,short,False,6.221
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int8,short,False,6.101
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int16,short,False,0.791
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int32,short,False,6.254
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int64,short,False,7.733
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float16,short,False,10.562
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.bfloat16,short,False,6.704
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float32,short,False,7.819
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float64,short,False,8.276
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.bool,short,False,6.361
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.uint8,short,False,6.364
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int8,short,False,6.309
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int16,short,False,6.362
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int32,short,False,0.791
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int64,short,False,7.746
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float16,short,False,9.462
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.bfloat16,short,False,6.678
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float32,short,False,7.827
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float64,short,False,8.200
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.bool,short,False,6.925
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.uint8,short,False,6.947
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int8,short,False,6.962
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int16,short,False,6.906
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int32,short,False,7.664
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int64,short,False,0.782
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float16,short,False,10.528
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.bfloat16,short,False,10.123
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float32,short,False,9.234
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float64,short,False,8.694
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.bool,short,False,12.653
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.uint8,short,False,9.348
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int8,short,False,8.774
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int16,short,False,9.063
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int32,short,False,10.012
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int64,short,False,13.641
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float16,short,False,0.788
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.bfloat16,short,False,13.757
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float32,short,False,7.170
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float64,short,False,12.511
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.bool,short,False,6.516
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.uint8,short,False,8.539
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int8,short,False,6.483
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int16,short,False,6.468
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int32,short,False,7.752
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int64,short,False,9.868
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float16,short,False,10.556
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.bfloat16,short,False,0.792
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float32,short,False,7.577
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float64,short,False,8.267
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.bool,short,False,6.819
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.uint8,short,False,7.715
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int8,short,False,6.754
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int16,short,False,6.825
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int32,short,False,7.790
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int64,short,False,9.219
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float16,short,False,5.977
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.bfloat16,short,False,7.069
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float32,short,False,0.794
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float64,short,False,8.301
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.bool,short,False,7.401
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.uint8,short,False,7.843
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int8,short,False,7.117
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int16,short,False,7.170
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int32,short,False,8.000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int64,short,False,9.284
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float16,short,False,7.179
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.bfloat16,short,False,7.645
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float32,short,False,7.988
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float64,short,False,0.792
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M8_N16_cpu,short,FALSE,5.0499
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M8_N64_cpu,short,FALSE,5.3229
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M8_N128_cpu,short,FALSE,5.4418
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M16_N16_cpu,short,FALSE,5.0868
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M16_N64_cpu,short,FALSE,5.4495
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M16_N128_cpu,short,FALSE,5.5578
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M32_N16_cpu,short,FALSE,5.2631
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M32_N64_cpu,short,FALSE,5.5646
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M32_N128_cpu,short,FALSE,5.7898
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M8_N16_cpu,short,FALSE,5.0228
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M8_N64_cpu,short,FALSE,5.3692
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M8_N128_cpu,short,FALSE,5.4006
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M16_N16_cpu,short,FALSE,5.1107
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M16_N64_cpu,short,FALSE,5.4119
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M16_N128_cpu,short,FALSE,5.5583
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M32_N16_cpu,short,FALSE,5.3818
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M32_N64_cpu,short,FALSE,5.5742
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M32_N128_cpu,short,FALSE,6.8414
PyTorch,relu,"relu_dims(3,4,5)_contigFALSE_inplaceFALSE_dtypetorch.quint8",short,FALSE,9.4657
PyTorch,relu,"relu_dims(3,4,5)_contigFALSE_inplaceFALSE_dtypetorch.qint8",short,FALSE,9.4625
PyTorch,relu,"relu_dims(3,4,5)_contigFALSE_inplaceFALSE_dtypetorch.qint32",short,FALSE,9.4165

PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.uint8 short False 6.947
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int8 short False 6.962
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int16 short False 6.906
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int32 short False 7.664
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int64 short False 0.782
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float16 short False 10.528
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.bfloat16 short False 10.123
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float32 short False 9.234
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float64 short False 8.694
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.bool short False 12.653
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.uint8 short False 9.348
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int8 short False 8.774
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int16 short False 9.063
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int32 short False 10.012
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int64 short False 13.641
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float16 short False 0.788
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.bfloat16 short False 13.757
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float32 short False 7.170
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float64 short False 12.511
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.bool short False 6.516
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.uint8 short False 8.539
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int8 short False 6.483
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int16 short False 6.468
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int32 short False 7.752
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int64 short False 9.868
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float16 short False 10.556
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.bfloat16 short False 0.792
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float32 short False 7.577
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float64 short False 8.267
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.bool short False 6.819
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.uint8 short False 7.715
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int8 short False 6.754
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int16 short False 6.825
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int32 short False 7.790
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int64 short False 9.219
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float16 short False 5.977
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.bfloat16 short False 7.069
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float32 short False 0.794
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float64 short False 8.301
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.bool short False 7.401
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.uint8 short False 7.843
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int8 short False 7.117
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int16 short False 7.170
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int32 short False 8.000
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int64 short False 9.284
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float16 short False 7.179
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.bfloat16 short False 7.645
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float32 short False 7.988
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float64 short False 0.792
370 PyTorch relu relu_dims(3,4,5)_contigFALSE_inplaceFALSE_dtypetorch.quint8 short FALSE 9.4657
371 PyTorch relu relu_dims(3,4,5)_contigFALSE_inplaceFALSE_dtypetorch.qint8 short FALSE 9.4625
372 PyTorch relu relu_dims(3,4,5)_contigFALSE_inplaceFALSE_dtypetorch.qint32 short FALSE 9.4165
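For orientation, each TensorConversionBenchmark_M32_N128_cpu_dtype_one..._dtype_two... row above records the eager-mode time to convert a (32, 128) CPU tensor from dtype_one to dtype_two. Below is a minimal hand-rolled sketch of such a measurement; it uses plain time.perf_counter rather than the operator_benchmark harness that produced these numbers, so the absolute values will not match the table.

import time
import torch

def bench_conversion(dtype_one=torch.uint8, dtype_two=torch.float16,
                     m=32, n=128, iters=1000):
    # Build the input once; only the .to() conversion is timed.
    x = torch.ones(m, n, dtype=dtype_one)
    start = time.perf_counter()
    for _ in range(iters):
        x.to(dtype_two)
    # Microseconds per conversion call.
    return (time.perf_counter() - start) / iters * 1e6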

View File

@ -52,18 +52,19 @@ def test_sparse_coo_and_csr(m, n, k, nnz, test_count):
start.record()
coo.matmul(mat)
stop.record()
times.append(start.elapsed_time(stop))
coo_mean_time = sum(times) / len(times)
coo_mean_time = sum(times) / len(times)
times = []
for _ in range(test_count):
start.record()
csr.matmul(mat)
stop.record()
times.append(start.elapsed_time(stop))
times = []
for _ in range(test_count):
start.record()
csr.matmul(mat)
stop.record()
times.append(start.elapsed_time(stop))
csr_mean_time = sum(times) / len(times)
csr_mean_time = sum(times) / len(times)
return coo_mean_time, csr_mean_time
@ -83,13 +84,10 @@ if __name__ == "__main__":
if args.outfile == "stdout":
outfile = sys.stdout
need_close = False
elif args.outfile == "stderr":
outfile = sys.stderr
need_close = False
else:
outfile = open(args.outfile, "a")
need_close = True
test_count = args.test_count
m = args.m
@ -150,5 +148,3 @@ if __name__ == "__main__":
time,
file=outfile,
)
if need_close:
outfile.close()
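The repeated lines in the first hunk above appear to be the removed and re-added versions of the same COO/CSR timing loops (leading whitespace is not preserved in this view), so the measurement pattern itself looks unchanged: time a matmul with CUDA events and average over test_count runs. A standalone sketch of that pattern, assuming a CUDA device and pre-built tensors; the torch.cuda.synchronize() call is added here for safety and is not part of the hunk.

import torch

def mean_matmul_time_ms(lhs, rhs, test_count):
    start = torch.cuda.Event(enable_timing=True)
    stop = torch.cuda.Event(enable_timing=True)
    times = []
    for _ in range(test_count):
        start.record()
        lhs.matmul(rhs)
        stop.record()
        # elapsed_time() requires both events to have completed.
        torch.cuda.synchronize()
        times.append(start.elapsed_time(stop))  # milliseconds
    return sum(times) / len(times)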

View File

@ -82,13 +82,10 @@ if __name__ == "__main__":
if args.outfile == "stdout":
outfile = sys.stdout
need_close = False
elif args.outfile == "stderr":
outfile = sys.stderr
need_close = False
else:
outfile = open(args.outfile, "a")
need_close = True
test_count = args.test_count
m = args.m
@ -135,5 +132,3 @@ if __name__ == "__main__":
time_csr,
file=outfile,
)
if need_close:
outfile.close()

View File

@ -179,13 +179,10 @@ if __name__ == "__main__":
if args.outfile == "stdout":
outfile = sys.stdout
need_close = False
elif args.outfile == "stderr":
outfile = sys.stderr
need_close = False
else:
outfile = open(args.outfile, "a")
need_close = True
ops = args.ops.split(",")
@ -437,5 +434,3 @@ if __name__ == "__main__":
if op not in {"bsr_scatter_mm6", "bsr_dense_mm_with_meta"}:
# Break on operations that do not consume parameters
break
if need_close:
outfile.close()
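The hunks in these three benchmark scripts appear to drop the "stderr" output option and the manual need_close / outfile.close() bookkeeping. One way to keep the "stdout or append-to-file" behaviour without a close flag is a small context-manager helper; this is only an illustrative sketch under that assumption, not necessarily the replacement the PR actually uses.

import contextlib
import sys

def open_outfile(path):
    # nullcontext leaves sys.stdout open when the with-block exits;
    # a real file handle is closed automatically.
    if path == "stdout":
        return contextlib.nullcontext(sys.stdout)
    return open(path, "a")

# usage:
# with open_outfile(args.outfile) as outfile:
#     print(m, n, k, nnz, time, file=outfile)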

View File

@ -1,8 +1,6 @@
#pragma once
#include <c10/core/SafePyObject.h>
#include <c10/macros/Export.h>
#include <optional>
namespace c10 {
@ -17,8 +15,7 @@ struct C10_API AutogradState {
bool inference_mode,
bool fw_grad_mode,
bool multithreading_enabled)
: graph_exec_group_(std::nullopt),
grad_mode_(grad_mode),
: grad_mode_(grad_mode),
inference_mode_(inference_mode),
fw_grad_mode_(fw_grad_mode),
multithreading_enabled_(multithreading_enabled),
@ -44,10 +41,6 @@ struct C10_API AutogradState {
view_replay_enabled_ = view_replay_enabled;
}
void set_graph_exec_group(std::optional<SafePyObject> group) {
graph_exec_group_ = std::move(group);
}
bool get_grad_mode() const {
return grad_mode_;
}
@ -68,12 +61,7 @@ struct C10_API AutogradState {
return view_replay_enabled_;
}
const std::optional<SafePyObject>& get_graph_exec_group() const {
return graph_exec_group_;
}
private:
std::optional<SafePyObject> graph_exec_group_;
bool grad_mode_ : 1;
bool inference_mode_ : 1;
bool fw_grad_mode_ : 1;

View File

@ -27,7 +27,6 @@
#include <torch/headeronly/core/ScalarType.h>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-enum")
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-default")
namespace c10 {
@ -206,12 +205,6 @@ inline bool isSignedType(ScalarType t) {
break;
// Do not add default here, but rather define behavior of every new entry
// here. `-Wswitch-enum` would raise a warning in those cases.
// TODO: get PyTorch to adopt exhaustive switches by default with a way to
// opt specific switches to being non-exhaustive.
// Exhaustive:
// `-Wswitch-enum`, `-Wswitch-default`, `-Wno-covered-switch-default`
// Non-Exhaustive:
// `-Wno-switch-enum`, `-Wswitch-default`, `-Wcovered-switch-default`
}
TORCH_CHECK(false, "Unknown ScalarType ", t);
#undef CASE_ISSIGNED

Some files were not shown because too many files have changed in this diff Show More