Compare commits


2 Commits

SHA1 Message Date
27e0a198be fix hipify docstring 2025-11-10 07:56:31 -08:00
256b61734f [BE] documenting more functions 2025-11-10 07:52:33 -08:00
344 changed files with 3016 additions and 9249 deletions

View File

@ -15,7 +15,7 @@ if [ -z "${image}" ]; then
fi
function extract_version_from_image_name() {
eval export $2=$(echo "${image}" | perl -n -e"/$1(\d+(\.\d+)?(\.\d+)?t?)/ && print \$1")
eval export $2=$(echo "${image}" | perl -n -e"/$1(\d+(\.\d+)?(\.\d+)?)/ && print \$1")
if [ "x${!2}" = x ]; then
echo "variable '$2' not correctly parsed from image='$image'"
exit 1
@ -319,11 +319,6 @@ case "$tag" in
echo "image '$image' did not match an existing build configuration"
if [[ "$image" == *py* ]]; then
extract_version_from_image_name py ANACONDA_PYTHON_VERSION
if [[ "$ANACONDA_PYTHON_VERSION" == *t ]]
then
ANACONDA_PYTHON_VERSION=${ANACONDA_PYTHON_VERSION%?}
PYTHON_FREETHREADED=1
fi
fi
if [[ "$image" == *cuda* ]]; then
extract_version_from_image_name cuda CUDA_VERSION
@ -375,7 +370,6 @@ docker build \
--build-arg "GLIBC_VERSION=${GLIBC_VERSION}" \
--build-arg "CLANG_VERSION=${CLANG_VERSION}" \
--build-arg "ANACONDA_PYTHON_VERSION=${ANACONDA_PYTHON_VERSION}" \
--build-arg "PYTHON_FREETHREADED=${PYTHON_FREETHREADED}" \
--build-arg "PYTHON_VERSION=${PYTHON_VERSION}" \
--build-arg "GCC_VERSION=${GCC_VERSION}" \
--build-arg "CUDA_VERSION=${CUDA_VERSION}" \

View File

@ -49,25 +49,18 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
export SYSROOT_DEP="sysroot_linux-64=2.17"
fi
if [[ $PYTHON_FREETHREADED == "1" ]]
then
PYTHON_DEP="python-freethreading=${ANACONDA_PYTHON_VERSION}"
else
PYTHON_DEP="python=${ANACONDA_PYTHON_VERSION}"
fi
# Install correct Python version
# Also ensure sysroot is using a modern GLIBC to match system compilers
if [ "$ANACONDA_PYTHON_VERSION" = "3.14" ]; then
as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y\
${PYTHON_DEP} \
python="3.14.0" \
${SYSROOT_DEP} \
-c conda-forge
else
# Install correct Python version
# Also ensure sysroot is using a modern GLIBC to match system compilers
as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y\
${PYTHON_DEP} \
python="$ANACONDA_PYTHON_VERSION" \
${SYSROOT_DEP}
fi
# libstdcxx from conda default channels are too old, we need GLIBCXX_3.4.30
@ -101,14 +94,6 @@ fi
conda_install_through_forge libstdcxx-ng=14
fi
if [[ "$ANACONDA_PYTHON_VERSION" == "3.13" ]] && [[ "$PYTHON_FREETHREADED" == "1" ]]; then
# needed for the 3.13t build to build lxml from source
conda_install_through_forge libxslt libxml2-devel
# pygithub depends on pynacl, which depends on cffi, which doesn't support 3.13t
sed '/^PyGithub/d' -i /opt/conda/requirements-ci.txt
fi
# Install some other packages, including those needed for Python test reporting
pip_install -r /opt/conda/requirements-ci.txt

View File

@ -255,9 +255,8 @@ scikit-image==0.22.0
#test that import:
scipy==1.10.1 ; python_version <= "3.11"
scipy==1.14.1 ; python_version > "3.11" and python_version < "3.13"
# TODO breaks some dynamo_wrapped tests in test/nn/test_convolution.py on 3.13
scipy==1.16.2 ; python_version >= "3.13"
scipy==1.14.1 ; python_version > "3.11" and python_version < "3.14"
scipy==1.16.2 ; python_version >= "3.14"
# Pin SciPy because of failing distribution tests (see #60347)
#Description: scientific python
#Pinned versions: 1.10.1
@ -329,8 +328,8 @@ pywavelets==1.7.0 ; python_version >= "3.12"
#Pinned versions: 1.4.1
#test that import:
lxml==5.3.0 ; python_version < "3.13"
lxml==6.0.2 ; python_version >= "3.13"
lxml==5.3.0 ; python_version < "3.14"
lxml==6.0.2 ; python_version >= "3.14"
#Description: This is a requirement of unittest-xml-reporting
PyGithub==2.3.0
@ -370,7 +369,7 @@ pwlf==2.2.1
pyyaml==6.0.3
pyzstd
setuptools==78.1.1
packaging==24.0
packaging==23.1
six
scons==4.5.2 ; platform_machine == "aarch64"

View File

@ -28,10 +28,8 @@ RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh
# Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION
ARG PYTHON_FREETHREADED
ARG DOCS
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
ENV PYTHON_FREETHREADED=$PYTHON_FREETHREADED
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
ENV DOCS=$DOCS
COPY requirements-ci.txt requirements-docs.txt /opt/conda/

View File

@ -30,6 +30,7 @@ into a tarball, with the following structure:
More specifically, `build_magma.sh` copies over the relevant files from the `package_files` directory depending on the ROCm version.
The resulting binaries should be in the `output` folder.
## Pushing
Packages can be uploaded to an S3 bucket using:

View File

@ -19,7 +19,7 @@ git config --global --add safe.directory /var/lib/jenkins/workspace
if [[ "$BUILD_ENVIRONMENT" == *onnx* ]]; then
# TODO: This can be removed later once vision is also part of the Docker image
pip install -q --no-build-isolation "git+https://github.com/pytorch/vision.git@$(cat .github/ci_commit_pins/vision.txt)"
pip install -q --no-use-pep517 "git+https://github.com/pytorch/vision.git@$(cat .github/ci_commit_pins/vision.txt)"
# JIT C++ extensions require ninja, so put it into PATH.
export PATH="/var/lib/jenkins/.local/bin:$PATH"
# NB: ONNX test is fast (~15m) so it's ok to retry it a few more times to avoid any flaky issue, we

View File

@ -96,6 +96,7 @@ function pip_build_and_install() {
python3 -m pip wheel \
--no-build-isolation \
--no-deps \
--no-use-pep517 \
-w "${wheel_dir}" \
"${build_target}"
fi

View File

@ -63,7 +63,7 @@ self-hosted-runner:
- linux.rocm.gpu.gfx942.1
- linux.rocm.gpu.gfx942.2
- linux.rocm.gpu.gfx942.4
- linux.rocm.gfx942.docker-cache
- rocm-docker
# Org wise AWS `mac2.metal` runners (2020 Mac mini hardware powered by Apple silicon M1 processors)
- macos-m1-stable
- macos-m1-14

View File

@ -1 +1 @@
07b6cbde121417a70e4dc871adb6d27030e0ce3f
ad5816f0eee1c873df1b7d371c69f1f811a89387

View File

@ -1 +1 @@
acccf86477759b2d3500f1ae1be065f7b1e409ec
ccb801b88af136454798b945175c4c87e636ac33

.github/labeler.yml (vendored, 13 changes)
View File

@ -165,16 +165,3 @@
- torch/_inductor/kernel/mm.py
- test/inductor/test_max_autotune.py
- third_party/fbgemm
"ciflow/mps":
- aten/src/ATen/mps/**
- aten/src/ATen/native/mps/**
- torch/_inductor/codegen/mps.py
- test/test_mps.py
- test/inductor/test_mps_basic.py
"ciflow/h100-symm-mem":
- torch/csrc/distributed/c10d/symm_mem/**
- torch/distributed/_symmetric_memory/**
- test/distributed/**/*mem*
- test/distributed/**/*mem*/**

View File

@ -34,9 +34,6 @@ python3 torch/utils/data/datapipes/gen_pyi.py
# Also check generated pyi files
find torch -name '*.pyi' -exec git add --force -- "{}" +
# Print current environment
python3 -m pip freeze
RC=0
# Run lintrunner on all files
if ! lintrunner --force-color --tee-json=lint.json ${ADDITIONAL_LINTRUNNER_ARGS} 2> /dev/null; then

View File

@ -59,9 +59,7 @@ jobs:
pytorch-linux-jammy-py3.11-clang12,
pytorch-linux-jammy-py3.12-clang12,
pytorch-linux-jammy-py3.13-clang12,
pytorch-linux-jammy-py3.13t-clang12,
pytorch-linux-jammy-py3.14-clang12,
pytorch-linux-jammy-py3.14t-clang12,
pytorch-linux-jammy-rocm-n-py3,
pytorch-linux-noble-rocm-n-py3,
pytorch-linux-jammy-rocm-n-py3-benchmarks,
@ -121,22 +119,6 @@ jobs:
with:
docker-image: ${{ steps.build-docker-image.outputs.docker-image }}
- name: Generate output
if: contains(matrix.docker-image-name, 'rocm')
id: generate_output
run: |
docker_image_name="${{ matrix.docker-image-name }}"
docker_image_tag="${{ steps.build-docker-image.outputs.docker-image }}"
echo "${docker_image_name}=${docker_image_tag}" >> docker-builds-output-${docker_image_name}.txt
- name: Upload artifacts
uses: actions/upload-artifact@v4.4.0
if: contains(matrix.docker-image-name, 'rocm')
with:
name: docker-builds-artifacts-${{ matrix.docker-image-name }}
retention-days: 14
path: ./docker-builds-output-${{ matrix.docker-image-name }}.txt
- uses: nick-fields/retry@7152eba30c6575329ac0576536151aca5a72780e # v3.0.0
name: Push to https://ghcr.io/
id: push-to-ghcr-io

View File

@ -0,0 +1,55 @@
name: docker-cache-mi300
on:
# run every 6 hours
schedule:
- cron: 0 0,6,12,18 * * *
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name }}
cancel-in-progress: true
permissions:
id-token: write
contents: read
jobs:
docker-cache:
if: github.repository_owner == 'pytorch'
runs-on: rocm-docker
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
no-sudo: true
- name: configure aws credentials
id: aws_creds
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
with:
role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
aws-region: us-east-1
role-duration-seconds: 18000
- name: Login to Amazon ECR
id: login-ecr
continue-on-error: false
uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
- name: Calculate docker image
id: calculate-docker-image
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
push: false
- name: Pull docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
- name: Tar and upload to S3 bucket
run: |
sudo docker save -o ~/docker-data/pytorch/pytorch_docker_image.tar ${{ steps.calculate-docker-image.outputs.docker-image }}
sudo rclone copy -P --s3-upload-concurrency 64 --s3-chunk-size 200M --s3-upload-cutoff 300M ~/docker-data/pytorch/pytorch_docker_image.tar oci:pytorchbucket0002/pytorch_docker_image --progress

View File

@ -1,105 +0,0 @@
name: docker-cache-rocm
on:
workflow_run:
workflows: [docker-builds]
branches: [main, release]
types:
- completed
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name }}
cancel-in-progress: true
permissions:
id-token: write
contents: read
actions: read
jobs:
download-docker-builds-artifacts:
if: github.repository_owner == 'pytorch'
name: download-docker-builds-artifacts
runs-on: ubuntu-latest
outputs:
pytorch-linux-jammy-rocm-n-py3: ${{ steps.process-artifacts.outputs.pytorch-linux-jammy-rocm-n-py3 }}
pytorch-linux-noble-rocm-n-py3: ${{ steps.process-artifacts.outputs.pytorch-linux-noble-rocm-n-py3 }}
pytorch-linux-jammy-rocm-n-py3-benchmarks: ${{ steps.process-artifacts.outputs.pytorch-linux-jammy-rocm-n-py3-benchmarks }}
steps:
- name: Download artifacts
uses: actions/download-artifact@v4.1.7
with:
run-id: ${{ github.event.workflow_run.id }}
path: ./docker-builds-artifacts
merge-multiple: true
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Process artifacts
id: process-artifacts
run: |
ls -R ./docker-builds-artifacts
cat ./docker-builds-artifacts/*txt >> "${GITHUB_OUTPUT}"
cat "${GITHUB_OUTPUT}"
docker-cache:
if: github.repository_owner == 'pytorch'
needs: download-docker-builds-artifacts
strategy:
fail-fast: false
matrix:
runner: [linux.rocm.gfx942.docker-cache]
docker-image: [
"${{ needs.download-docker-builds-artifacts.outputs.pytorch-linux-jammy-rocm-n-py3 }}",
"${{ needs.download-docker-builds-artifacts.outputs.pytorch-linux-noble-rocm-n-py3 }}",
"${{ needs.download-docker-builds-artifacts.outputs.pytorch-linux-jammy-rocm-n-py3-benchmarks }}"
]
runs-on: "${{ matrix.runner }}"
steps:
- name: debug
run: |
JSON_STRINGIFIED="${{ toJSON(needs.download-docker-builds-artifacts.outputs) }}"
echo "Outputs of download-docker-builds-artifacts job: ${JSON_STRINGIFIED}"
- name: configure aws credentials
id: aws_creds
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
with:
role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
aws-region: us-east-1
role-duration-seconds: 18000
- name: Login to Amazon ECR
id: login-ecr
continue-on-error: false
uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
- name: Generate ghrc.io tag
id: ghcr-io-tag
run: |
ecr_image="${{ matrix.docker-image }}"
ghcr_image="ghcr.io/pytorch/ci-image:${ecr_image##*:}"
echo "ghcr_image=${ghcr_image}" >> "$GITHUB_OUTPUT"
- name: Pull docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.ghcr-io-tag.outputs.ghcr_image }}
- name: Save as tarball
run: |
docker_image_tag=${{ matrix.docker-image }}
docker_image_tag="${docker_image_tag#*:}" # Remove everything before and including first ":"
docker_image_tag="${docker_image_tag%-*}" # Remove everything after and including last "-"
ref_name=${{ github.event.workflow_run.head_branch }}
if [[ $ref_name =~ "release/" ]]; then
ref_suffix="release"
elif [[ $ref_name == "main" ]]; then
ref_suffix="main"
else
echo "Unexpected branch in ref_name: ${ref_name}" && exit 1
fi
docker tag ${{ steps.ghcr-io-tag.outputs.ghcr_image }} ${{ matrix.docker-image }}
# mv is atomic operation, so we use intermediate tar.tmp file to prevent read-write contention
docker save -o ~/pytorch-data/docker/${docker_image_tag}.tar.tmp ${{ matrix.docker-image }}
mv ~/pytorch-data/docker/${docker_image_tag}.tar.tmp ~/pytorch-data/docker/${docker_image_tag}_${ref_suffix}.tar

View File

@ -37,6 +37,7 @@ jobs:
needs: get-label-type
with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runner: "linux.c7i.12xlarge"
build-environment: linux-jammy-cuda12.8-py3.10-gcc11-sm90-dist
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
cuda-arch-list: '9.0'

View File

@ -1,4 +1,4 @@
name: inductor-rocm-mi200
name: inductor-rocm
on:
schedule:

View File

@ -5,11 +5,9 @@ on:
- cron: 0 0 * * *
push:
tags:
# NOTE: Doc build pipelines should only get triggered on:
# Major or minor release candidate builds
- v[0-9]+.[0-9]+.0+-rc[0-9]+
# Final RC for major, minor and patch releases
- v[0-9]+.[0-9]+.[0-9]+
# NOTE: Doc build pipelines should only get triggered on release candidate builds
# Release candidate tags look like: v1.11.0-rc1
- v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- ciflow/nightly/*
workflow_dispatch:

View File

@ -85,62 +85,6 @@ jobs:
test-matrix: ${{ needs.linux-jammy-py3_10-gcc11-build.outputs.test-matrix }}
secrets: inherit
linux-jammy-py3_13t-clang12-build:
name: linux-jammy-py3.13t-clang12
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build-environment: linux-jammy-py3.13t-clang12
docker-image-name: ci-image:pytorch-linux-jammy-py3.13t-clang12
test-matrix: |
{ include: [
{ config: "dynamo_wrapped", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
{ config: "dynamo_wrapped", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
{ config: "dynamo_wrapped", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
]}
secrets: inherit
linux-jammy-py3_13t-clang12-test:
name: linux-jammy-py3.13t-clang12
uses: ./.github/workflows/_linux-test.yml
needs:
- linux-jammy-py3_13t-clang12-build
- target-determination
with:
build-environment: linux-jammy-py3.13t-clang12
docker-image: ${{ needs.linux-jammy-py3_13t-clang12-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-jammy-py3_13t-clang12-build.outputs.test-matrix }}
secrets: inherit
linux-jammy-py3_14t-clang12-build:
name: linux-jammy-py3.14t-clang12
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build-environment: linux-jammy-py3.14t-clang12
docker-image-name: ci-image:pytorch-linux-jammy-py3.14t-clang12
test-matrix: |
{ include: [
{ config: "dynamo_wrapped", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
{ config: "dynamo_wrapped", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
{ config: "dynamo_wrapped", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
]}
secrets: inherit
linux-jammy-py3_14t-clang12-test:
name: linux-jammy-py3.14t-clang12
uses: ./.github/workflows/_linux-test.yml
needs:
- linux-jammy-py3_14t-clang12-build
- target-determination
with:
build-environment: linux-jammy-py3.14t-clang12
docker-image: ${{ needs.linux-jammy-py3_14t-clang12-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-jammy-py3_14t-clang12-build.outputs.test-matrix }}
secrets: inherit
linux-docs:
name: linux-docs
uses: ./.github/workflows/_docs.yml

View File

@ -1,4 +1,4 @@
name: rocm-mi200
name: rocm
on:
push:

View File

@ -41,6 +41,7 @@ jobs:
needs: get-label-type
with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runner: linux.12xlarge.memory
build-environment: linux-jammy-cuda12.8-py3.10-gcc11-sm90
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
cuda-arch-list: '9.0'

View File

@ -1,83 +0,0 @@
name: trunk-rocm-mi300
on:
push:
branches:
- main
- release/*
workflow_dispatch:
schedule:
- cron: 29 8 * * * # about 1:29am PDT
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
cancel-in-progress: true
permissions:
id-token: write
contents: read
jobs:
llm-td:
if: github.repository_owner == 'pytorch'
name: before-test
uses: ./.github/workflows/llm_td_retrieval.yml
permissions:
id-token: write
contents: read
target-determination:
name: before-test
uses: ./.github/workflows/target_determination.yml
needs: llm-td
permissions:
id-token: write
contents: read
get-label-type:
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
curr_branch: ${{ github.head_ref || github.ref_name }}
curr_ref_type: ${{ github.ref_type }}
linux-jammy-rocm-py3_10-build:
name: linux-jammy-rocm-py3.10
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build-environment: linux-jammy-rocm-py3.10
docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
sync-tag: rocm-build
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 2, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 3, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 4, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 5, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "default", shard: 6, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1.b" },
{ config: "distributed", shard: 1, num_shards: 3, runner: "linux.rocm.gpu.gfx942.4.b" },
{ config: "distributed", shard: 2, num_shards: 3, runner: "linux.rocm.gpu.gfx942.4.b" },
{ config: "distributed", shard: 3, num_shards: 3, runner: "linux.rocm.gpu.gfx942.4.b" },
]}
secrets: inherit
linux-jammy-rocm-py3_10-test:
permissions:
id-token: write
contents: read
name: linux-jammy-rocm-py3.10
uses: ./.github/workflows/_rocm-test.yml
needs:
- linux-jammy-rocm-py3_10-build
- target-determination
with:
build-environment: linux-jammy-rocm-py3.10
docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }}
secrets: inherit

View File

@ -5,7 +5,6 @@ on:
workflows:
- pull
- trunk
- trunk-rocm-mi300
- periodic
- periodic-rocm-mi200
- periodic-rocm-mi300

View File

@ -186,8 +186,6 @@ include_patterns = [
'aten/src/ATen/native/nested/cuda/*.h',
'aten/src/ATen/native/nested/*.cpp',
'aten/src/ATen/native/nested/*.h',
'aten/src/ATen/xpu/**/*.h',
'aten/src/ATen/xpu/**/*.cpp',
'c10/**/*.cpp',
'c10/**/*.h',
'torch/*.h',

View File

@ -736,44 +736,6 @@ if(NOT DEFINED USE_BLAS)
set(USE_BLAS ON)
endif()
# Prioritized Text Linker Optimization
if(USE_PRIORITIZED_TEXT_FOR_LD)
set(LINKER_SCRIPT_FILE_IN "${CMAKE_SOURCE_DIR}/cmake/prioritized_text.txt")
set(LINKER_SCRIPT_FILE_OUT "${CMAKE_SOURCE_DIR}/cmake/linker_script.ld")
execute_process(
COMMAND ${Python_EXECUTABLE}
${CMAKE_SOURCE_DIR}/tools/setup_helpers/generate_linker_script.py
--filein "${LINKER_SCRIPT_FILE_IN}"
--fout "${LINKER_SCRIPT_FILE_OUT}"
RESULT_VARIABLE _gen_result
OUTPUT_VARIABLE _gen_output
ERROR_VARIABLE _gen_error
)
if(NOT _gen_result EQUAL 0)
message(FATAL_ERROR
"Failed to generate linker script:\n${_gen_output}\n${_gen_error}")
endif()
append_cxx_flag_if_supported("-ffunction-sections" CMAKE_CXX_FLAGS)
append_cxx_flag_if_supported("-fdata-sections" CMAKE_CXX_FLAGS)
append_c_flag_if_supported("-ffunction-sections" CMAKE_C_FLAGS)
append_c_flag_if_supported("-fdata-sections" CMAKE_C_FLAGS)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -T${LINKER_SCRIPT_FILE_OUT}")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -T${LINKER_SCRIPT_FILE_OUT}")
else()
if(LINUX AND CPU_AARCH64)
message(WARNING [[
It is strongly recommended to enable linker script optimization for all AArch64 Linux builds.
To do so please export USE_PRIORITIZED_TEXT_FOR_LD=1
]])
endif()
endif()
# Build libtorch mobile library, which contains ATen/TH ops and native support
# for TorchScript model, but doesn't contain not-yet-unified caffe2 ops;
if(INTERN_BUILD_MOBILE)
@ -1440,6 +1402,9 @@ if(BUILD_JNI)
add_subdirectory(android/pytorch_android)
endif()
include(cmake/Summary.cmake)
caffe2_print_configuration_summary()
# Parse custom debug info
if(DEFINED USE_CUSTOM_DEBINFO)
string(REPLACE ";" " " SOURCE_FILES "${USE_CUSTOM_DEBINFO}")
@ -1479,5 +1444,56 @@ if(BUILD_BUNDLE_PTXAS AND USE_CUDA)
DESTINATION "${CMAKE_INSTALL_BINDIR}")
endif()
include(cmake/Summary.cmake)
caffe2_print_configuration_summary()
if(USE_PRIORITIZED_TEXT_FOR_LD)
add_compile_options(
$<$<COMPILE_LANGUAGE:C,CXX>:-ffunction-sections>
$<$<COMPILE_LANGUAGE:C,CXX>:-fdata-sections>
)
set(LINKER_SCRIPT_FILE_OUT "${CMAKE_SOURCE_DIR}/cmake/linker_script.ld")
set(LINKER_SCRIPT_FILE_IN "${CMAKE_SOURCE_DIR}/cmake/prioritized_text.txt")
add_custom_command(
OUTPUT "${LINKER_SCRIPT_FILE_OUT}"
COMMAND ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tools/setup_helpers/generate_linker_script.py --filein "${LINKER_SCRIPT_FILE_IN}" --fout "${LINKER_SCRIPT_FILE_OUT}"
DEPENDS ${CMAKE_SOURCE_DIR}/tools/setup_helpers/generate_linker_script.py "${LINKER_SCRIPT_FILE_IN}"
COMMENT "Generating prioritized text linker files"
VERBATIM
)
add_custom_target(generate_linker_script DEPENDS "${LINKER_SCRIPT_FILE_OUT}")
if(BUILD_PYTHON)
set(LINKER_OPT_TARGETS torch_python)
endif()
if(NOT BUILD_LIBTORCHLESS)
list(APPEND LINKER_OPT_TARGETS torch_cpu c10)
if(USE_CUDA)
list(APPEND LINKER_OPT_TARGETS torch_cuda c10_cuda)
endif()
if(USE_XPU)
list(APPEND LINKER_OPT_TARGETS torch_xpu c10_xpu)
endif()
if(USE_ROCM)
list(APPEND LINKER_OPT_TARGETS torch_hip c10_hip)
endif()
endif()
foreach(tgt IN LISTS LINKER_OPT_TARGETS)
if(TARGET ${tgt})
add_dependencies("${tgt}" generate_linker_script)
target_link_options_if_supported(${tgt} "-T,${LINKER_SCRIPT_FILE_OUT}")
set_property(TARGET ${tgt} APPEND PROPERTY LINK_DEPENDS "${LINKER_SCRIPT_FILE_OUT}")
else()
message(WARNING "Requested target '${tgt}' for linker script optimization was not found.")
endif()
endforeach()
else()
if(LINUX AND CPU_AARCH64)
message(WARNING [[
It is strongly recommended to enable linker script optimization for all AArch64 Linux builds.
To do so please export USE_PRIORITIZED_TEXT_FOR_LD=1
]])
endif()
endif()

View File

@ -37,7 +37,7 @@ Copyright (c) 2024 Tri Dao.
All rights reserved.
All contributions by Arm:
Copyright (c) 2021, 2023-2025 Arm Limited and/or its affiliates
Copyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors

View File

@ -18,8 +18,6 @@ Please report security issues using https://github.com/pytorch/pytorch/security/
All reports submitted through the security advisories mechanism will **either be made public or dismissed by the team within 90 days of the submission**. If an advisory has been closed on the grounds that it is not a security issue, please do not hesitate to create a [new issue](https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml), as it is still likely a valid issue within the framework.
**Note on crashes and out of bounds access**: PyTorch is a computational framework that performs operations on behalf of the caller. Like many low-level libraries, PyTorch generally does not validate all inputs to every function—the responsibility for providing valid arguments lies with the calling code. While crashes and out of bounds memory access should be reported as bugs, they are generally not considered security vulnerabilities in PyTorch's threat model.
Please refer to the following page for our responsible disclosure policy, reward guidelines, and those things that should not be reported:
https://www.facebook.com/whitehat

View File

@ -55,6 +55,14 @@ struct numeric_limits<int8_t> {
static inline __host__ __device__ int8_t upper_bound() { return INT8_MAX; }
};
template <>
struct numeric_limits<uint16_t> {
static inline __host__ __device__ uint16_t lowest() { return 0; }
static inline __host__ __device__ uint16_t max() { return UINT16_MAX; }
static inline __host__ __device__ uint16_t lower_bound() { return 0; }
static inline __host__ __device__ uint16_t upper_bound() { return UINT16_MAX; }
};
template <>
struct numeric_limits<int16_t> {
static inline __host__ __device__ int16_t lowest() { return INT16_MIN; }
@ -63,6 +71,14 @@ struct numeric_limits<int16_t> {
static inline __host__ __device__ int16_t upper_bound() { return INT16_MAX; }
};
template <>
struct numeric_limits<uint32_t> {
static inline __host__ __device__ uint32_t lowest() { return 0; }
static inline __host__ __device__ uint32_t max() { return UINT32_MAX; }
static inline __host__ __device__ uint32_t lower_bound() { return 0; }
static inline __host__ __device__ uint32_t upper_bound() { return UINT32_MAX; }
};
template <>
struct numeric_limits<int32_t> {
static inline __host__ __device__ int32_t lowest() { return INT32_MIN; }
@ -71,6 +87,21 @@ struct numeric_limits<int32_t> {
static inline __host__ __device__ int32_t upper_bound() { return INT32_MAX; }
};
template <>
struct numeric_limits<uint64_t> {
#ifdef _MSC_VER
static inline __host__ __device__ uint64_t lowest() { return 0; }
static inline __host__ __device__ uint64_t max() { return _UI64_MAX; }
static inline __host__ __device__ uint64_t lower_bound() { return 0; }
static inline __host__ __device__ uint64_t upper_bound() { return _UI64_MAX; }
#else
static inline __host__ __device__ uint64_t lowest() { return 0; }
static inline __host__ __device__ uint64_t max() { return UINT64_MAX; }
static inline __host__ __device__ uint64_t lower_bound() { return 0; }
static inline __host__ __device__ uint64_t upper_bound() { return UINT64_MAX; }
#endif
};
template <>
struct numeric_limits<int64_t> {
#ifdef _MSC_VER

View File

@ -440,7 +440,7 @@ bool MPSHeapAllocatorImpl::release_cached_buffers() {
// we need to release the lock temporarily as synchronizing may cause deadlock with completion handlers.
m_mutex.unlock();
auto stream = getDefaultMPSStream();
dispatch_sync_with_rethrow(stream->queue(), ^() {
dispatch_sync(stream->queue(), ^() {
stream->synchronize(SyncType::COMMIT_AND_WAIT);
});
m_mutex.lock();

View File

@ -110,9 +110,6 @@ class TORCH_API MPSStream {
return _stream;
}
MTLBuffer_t getErrorBuffer();
void checkLastError();
private:
Stream _stream;
MTLCommandQueue_t _commandQueue = nil;
@ -124,8 +121,6 @@ class TORCH_API MPSStream {
dispatch_queue_t _serialQueue = nullptr;
// CommitAndContinue is enabled by default
bool _enableCommitAndContinue = true;
// Buffer that contains last raised error
MTLBuffer_t _errorBuffer = nil;
// use synchronize() to access any of these commit functions outside MPSStream
void commit();
@ -160,7 +155,4 @@ class TORCH_API MPSStreamImpl {
MPSStreamImpl();
};
#ifdef __OBJC__
void dispatch_sync_with_rethrow(dispatch_queue_t queue, void (^block)());
#endif
} // namespace at::mps

View File

@ -3,13 +3,13 @@
#include <ATen/mps/MPSAllocatorInterface.h>
#include <ATen/mps/MPSProfiler.h>
#include <ATen/mps/MPSStream.h>
#include <c10/metal/error.h>
@interface MPSGraphExecutionDescriptor ()
@property(readwrite, atomic) BOOL enableCommitAndContinue;
@end
namespace at::mps {
//-----------------------------------------------------------------
// MPSStream
//-----------------------------------------------------------------
@ -30,10 +30,6 @@ MPSStream::MPSStream(Stream stream) : _stream(stream) {
// Choose level which optimizes for GPU
_compilationDescriptor.optimizationLevel = MPSGraphOptimizationLevel0;
_executionDescriptor.compilationDescriptor = _compilationDescriptor;
_errorBuffer = [MPSDevice::getInstance()->device() newBufferWithLength:sizeof(c10::metal::ErrorMessages)
options:MTLResourceStorageModeShared];
std::memset([_errorBuffer contents], 0, 1024);
}
MPSStream::~MPSStream() {
@ -42,8 +38,6 @@ MPSStream::~MPSStream() {
[_executionDescriptor release];
[_compilationDescriptor release];
_executionDescriptor = nil;
[_errorBuffer release];
_errorBuffer = nil;
_compilationDescriptor = nil;
assert(_commandBuffer == nil);
@ -110,7 +104,6 @@ void MPSStream::commitAndWait() {
[_prevCommandBuffer waitUntilCompleted];
[_prevCommandBuffer release];
_prevCommandBuffer = nil;
checkLastError();
}
if (_commandBuffer) {
@ -118,7 +111,6 @@ void MPSStream::commitAndWait() {
[_commandBuffer waitUntilCompleted];
[_commandBuffer release];
_commandBuffer = nil;
checkLastError();
}
}
@ -161,7 +153,7 @@ void MPSStream::fill(id<MTLBuffer> buffer, uint8_t value, size_t length, size_t
if (length == 0) {
return;
}
dispatch_sync_with_rethrow(_serialQueue, ^() {
dispatch_sync(_serialQueue, ^() {
@autoreleasepool {
endKernelCoalescing();
id<MTLBlitCommandEncoder> blitEncoder = [commandBuffer() blitCommandEncoder];
@ -191,7 +183,7 @@ void MPSStream::copy(id<MTLBuffer> srcBuffer,
size_t dstOffset,
uint64_t profileId,
SyncType syncType) {
dispatch_sync_with_rethrow(_serialQueue, ^() {
dispatch_sync(_serialQueue, ^() {
@autoreleasepool {
endKernelCoalescing();
id<MTLBlitCommandEncoder> blitEncoder = [commandBuffer() blitCommandEncoder];
@ -244,7 +236,7 @@ void MPSStream::executeMPSGraph(MPSGraph* mpsGraph, NSDictionary* feeds, NSDicti
auto& profiler = getMPSProfiler();
const bool isGraphProfilingEnabled = profiler.isOperationProfilingEnabled();
dispatch_sync_with_rethrow(_serialQueue, ^() {
dispatch_sync(_serialQueue, ^() {
endKernelCoalescing();
if (isGraphProfilingEnabled) {
// this function call is only relevant for interval-based Signposts
@ -274,24 +266,6 @@ void MPSStream::executeMPSGraph(MPSGraph* mpsGraph, NSDictionary* feeds, NSDicti
});
}
id<MTLBuffer> MPSStream::getErrorBuffer() {
return _errorBuffer;
}
void MPSStream::checkLastError() {
auto msgs = reinterpret_cast<c10::metal::ErrorMessages*>([_errorBuffer contents]);
const auto& msg = msgs->msg[0];
if (!msgs) {
return;
}
unsigned int count = 0;
std::swap(count, msgs->count);
if (!count) {
return;
}
throw c10::AcceleratorError({msg.func, msg.file, msg.line}, 1, msg.message);
}
//-----------------------------------------------------------------
// MPSStreamImpl
//-----------------------------------------------------------------
@ -315,19 +289,4 @@ MPSStream* getDefaultMPSStream() {
return MPSStreamImpl::getInstance();
}
// Helper methods
void dispatch_sync_with_rethrow(dispatch_queue_t queue, void (^block)()) {
__block std::optional<std::exception_ptr> block_exception;
dispatch_sync(queue, ^() {
try {
block();
} catch (...) {
block_exception = std::current_exception();
}
});
if (block_exception) {
std::rethrow_exception(*block_exception);
}
}
} // namespace at::mps

View File

@ -142,7 +142,6 @@ Tensor _pack_padded_sequence_backward_symint(const Tensor& grad, c10::SymIntArra
std::tuple<Tensor, Tensor> _pad_packed_sequence(const Tensor& data, const Tensor& _batch_sizes, bool batch_first, const Scalar& padding_value, int64_t total_length) {
auto batch_sizes_t = _batch_sizes.contiguous();
checkLongTensor(batch_sizes_t);
TORCH_CHECK(batch_sizes_t.numel() > 0, "batch_sizes can not be empty");
int64_t * batch_sizes = batch_sizes_t.data_ptr<int64_t>();
int64_t max_batch_size = batch_sizes[0];

View File

@ -23,7 +23,6 @@
#include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/_assert_async_native.h>
#include <ATen/ops/_assert_scalar_native.h>
#include <ATen/ops/_async_error_native.h>
#include <ATen/ops/_functional_assert_async_native.h>
#include <ATen/ops/_functional_assert_scalar_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor.h>
@ -480,14 +479,6 @@ Tensor isfinite(const Tensor& self) {
});
}
void _async_error(std::string_view msg) {
TORCH_CHECK(0, msg);
}
void _async_error_meta(std::string_view msg) {
// Do NOT error, it's an async error!
}
void _assert_async_cpu(const Tensor& self) {
TORCH_CHECK(
native::is_nonzero(self),

View File

@ -5,6 +5,7 @@
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/Parallel.h>
#include <ATen/TensorIterator.h>
#include <ATen/OpMathType.h>
@ -78,12 +79,12 @@ void min_all_kernel_impl(Tensor& result, const Tensor& input) {
reduce_all_impl<int64_t>(result, input, upper_bound<int64_t>(),
[=](int64_t a, int64_t b) -> int64_t { return min_impl(a, b); });
} else {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "min_all", [&] {
AT_DISPATCH_V2(input.scalar_type(), "min_all", AT_WRAP([&] {
using Vec = Vectorized<opmath_type<scalar_t>>;
reduce_all_impl_vec<scalar_t>(result, input, upper_bound<scalar_t>(),
[=] (scalar_t a , scalar_t b) -> scalar_t { return min_impl(a, b); },
[=](Vec a, Vec b) -> Vec { return minimum(a, b); });
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kHalf, kBFloat16);
}
}
@ -103,12 +104,12 @@ void max_all_kernel_impl(Tensor& result, const Tensor& input) {
reduce_all_impl<int64_t>(result, input, lower_bound<int64_t>(),
[=](int64_t a, int64_t b) -> int64_t { return max_impl(a, b); });
} else {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_all", [&] {
AT_DISPATCH_V2(input.scalar_type(), "max_all", AT_WRAP([&] {
using Vec = Vectorized<opmath_type<scalar_t>>;
reduce_all_impl_vec<scalar_t>(result, input, lower_bound<scalar_t>(),
[=] (scalar_t a , scalar_t b) -> scalar_t { return max_impl(a, b); },
[=](Vec a, Vec b) -> Vec { return maximum(a, b); });
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kHalf, kBFloat16);
}
}
@ -199,7 +200,7 @@ void aminmax_allreduce_kernel(
}
);
} else {
AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, input.scalar_type(), "aminmax_cpu", [&] {
AT_DISPATCH_V2(input.scalar_type(), "aminmax_cpu", AT_WRAP([&] {
using Vec = Vectorized<opmath_type<scalar_t>>;
using scalar_t_pair = std::pair<scalar_t, scalar_t>;
reduce_all_impl_vec_two_outputs<scalar_t>(
@ -214,7 +215,7 @@ void aminmax_allreduce_kernel(
[=](Vec a, Vec b) -> Vec { return minimum(a, b); },
[=](Vec a, Vec b) -> Vec { return maximum(a, b); }
);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf);
}
}

View File

@ -3,6 +3,7 @@
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/OpMathType.h>
#include <ATen/cpu/vec/vec.h>
#include <ATen/cpu/vec/functional.h>
@ -347,34 +348,35 @@ struct MinValuesOps: public at::native::MinOps<scalar_t> {
};
void min_values_kernel_impl(TensorIterator& iter) {
if (iter.dtype() == kLong) {
// This case is special because Vectorized<int64_t> does not
// handle upper_bound<int64_t>().
// See: https://github.com/pytorch/pytorch/issues/43254
using scalar_t = int64_t;
binary_kernel_reduce(
iter,
MinValuesOps<scalar_t>{},
std::pair<scalar_t, int64_t>(upper_bound<scalar_t>(), -1));
// This case is special because Vectorized<int64_t> does not
// handle upper_bound<int64_t>().
// See: https://github.com/pytorch/pytorch/issues/43254
if (iter.dtype() == kLong || iter.dtype() == kUInt64) {
AT_DISPATCH_V2(iter.dtype(), "min_values_cpu", AT_WRAP([&iter] {
binary_kernel_reduce(
iter,
MinValuesOps<scalar_t>{},
std::pair<scalar_t, int64_t>(upper_bound<scalar_t>(), -1));
}), kLong, kUInt64);
return;
}
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "min_values_cpu", [&iter] {
AT_DISPATCH_V2(iter.dtype(), "min_values_cpu", AT_WRAP([&iter] {
binary_kernel_reduce_vec(
iter,
[](scalar_t a, scalar_t b) -> scalar_t { return min_impl(a, b); },
[](Vectorized<scalar_t> a, Vectorized<scalar_t> b) { return minimum(a, b); },
static_cast<double>(upper_bound<scalar_t>()));
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void max_values_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "max_values_cpu", [&iter] {
AT_DISPATCH_V2(iter.dtype(), "max_values_cpu", AT_WRAP([&iter] {
binary_kernel_reduce_vec(
iter,
[](scalar_t a, scalar_t b) -> scalar_t { return max_impl(a, b); },
[](Vectorized<scalar_t> a, Vectorized<scalar_t> b) { return maximum(a, b); },
lower_bound<scalar_t>());
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void argmax_kernel_impl(TensorIterator &iter) {

View File

@ -11,6 +11,7 @@
#include <vector>
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/Parallel.h>
#include <ATen/NumericUtils.h>
#include <ATen/TensorIterator.h>
@ -106,7 +107,7 @@ void min_kernel_impl(
bool keepdim) {
int64_t self_dim_size = ensure_nonempty_size(self, dim);
AT_DISPATCH_ALL_TYPES_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, self.scalar_type(), "min_cpu", [&] {
AT_DISPATCH_V2(self.scalar_type(), "min_cpu", AT_WRAP([&] {
compare_base_kernel<scalar_t>(result, indice, self, dim, keepdim, [&] (
scalar_t* result_data, int64_t* indice_data,
const scalar_t* self_data, auto self_dim_stride) {
@ -128,7 +129,7 @@ void min_kernel_impl(
*indice_data = index;
}
);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool);
}
void max_kernel_impl(
@ -139,7 +140,7 @@ void max_kernel_impl(
bool keepdim) {
int64_t self_dim_size = ensure_nonempty_size(self, dim);
AT_DISPATCH_ALL_TYPES_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, self.scalar_type(), "max_cpu", [&] {
AT_DISPATCH_V2(self.scalar_type(), "max_cpu", AT_WRAP([&] {
compare_base_kernel<scalar_t>(result, indice, self, dim, keepdim, [&] (
scalar_t* result_data, int64_t* indice_data,
const scalar_t* self_data, auto self_dim_stride) {
@ -161,7 +162,7 @@ void max_kernel_impl(
*indice_data = index;
}
);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool);
}
void aminmax_kernel(
@ -186,7 +187,7 @@ void aminmax_kernel(
return;
}
AT_DISPATCH_ALL_TYPES_AND3(ScalarType::Bool, ScalarType::BFloat16, ScalarType::Half, self.scalar_type(), "aminmax_cpu", [&] {
AT_DISPATCH_V2(self.scalar_type(), "aminmax_cpu", AT_WRAP([&] {
compare_base_kernel<scalar_t, scalar_t>(min_result, max_result, self, wrap_dim, keepdim, [&] (
scalar_t* min_result_data, scalar_t* max_result_data,
const scalar_t* self_data, auto self_dim_stride) {
@ -209,7 +210,7 @@ void aminmax_kernel(
*max_result_data = max_number;
}
);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), ScalarType::Bool, ScalarType::BFloat16, ScalarType::Half);
}
void where_kernel_impl(TensorIterator &iter) {

View File

@ -669,12 +669,9 @@ std::optional<c10::ScalarType> out_dtype) {
// _scaled_mm_allowed_device is used here within _grouped_mm_cuda which seems incorrect since scale is not used.
// the _grouped_mm_fallback should be safe for any ROCm GPU since it's just calling typical mm/bmm
bool use_fast_path = false;
// On non-CK systems (with ROCm), make sure use_fast_path is false
#if defined(USE_ROCM_CK_GEMM)
if (at::detail::getCUDAHooks().isGPUArch({"gfx942", "gfx950"})) {
use_fast_path = true;
}
#endif //USE_ROCM_CK_GEMM
#endif
const auto out_dtype_ = _resolve_grouped_mm_out_dtype(mat_a, mat_b, out_dtype);
Tensor out = create_grouped_gemm_output_tensor(mat_a, mat_b, offs, out_dtype_);
@ -683,11 +680,7 @@ std::optional<c10::ScalarType> out_dtype) {
#ifndef USE_ROCM
at::cuda::detail::bf16bf16_grouped_mm(mat_a, mat_b, offs, bias, out);
#else
#if defined(USE_ROCM_CK_GEMM)
at::hip::detail::group_gemm_ck(mat_a, mat_b, offs, bias, out);
#else
TORCH_WARN("ROCm: Group Gemm through CK not selected.");
#endif //USE_ROCM_CK_GEMM
#endif
} else {
_grouped_mm_fallback(mat_a, mat_b, offs, bias, out_dtype, out);

View File

@ -1,5 +1,6 @@
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/NumericUtils.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/ReduceAllOps.h>
@ -28,22 +29,22 @@ void _min_max_values_kernel_cuda_impl(TensorIterator& iter) {
}
void aminmax_allreduce_launch_kernel(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(
kBFloat16, kHalf, kBool, iter.input_dtype(), "aminmax_all_cuda", [&] {
AT_DISPATCH_V2(
iter.input_dtype(), "aminmax_all_cuda", AT_WRAP([&] {
_min_max_values_kernel_cuda_impl<scalar_t>(iter);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void aminmax_launch_kernel(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(
kBFloat16, kHalf, kBool, iter.input_dtype(), "aminmax_cuda", [&]() {
AT_DISPATCH_V2(
iter.input_dtype(), "aminmax_cuda", AT_WRAP([&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MinMaxOps<scalar_t, scalar_t, int32_t>{},
thrust::pair<scalar_t, scalar_t>(
at::numeric_limits<scalar_t>::upper_bound(),
at::numeric_limits<scalar_t>::lower_bound()));
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
} // namespace at::native

View File

@ -1,5 +1,6 @@
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/NumericUtils.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/ReduceAllOps.h>
@ -33,27 +34,27 @@ void max_values_kernel_cuda_impl(TensorIterator& iter) {
}
void max_values_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(
kBFloat16, kHalf, kBool, iter.dtype(), "max_values_cuda", [&]() {
AT_DISPATCH_V2(
iter.dtype(), "max_values_cuda", AT_WRAP([&]() {
max_values_kernel_cuda_impl<scalar_t>(iter);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void max_launch_kernel(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(
kBFloat16, kHalf, kBool, iter.input_dtype(), "max_cuda", [&]() {
AT_DISPATCH_V2(
iter.input_dtype(), "max_cuda", AT_WRAP([&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MaxOps<scalar_t>{},
thrust::pair<scalar_t, int64_t>(
at::numeric_limits<scalar_t>::lower_bound(), 0));
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void max_all_launch_kernel(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "max_all_cuda", [&] {
AT_DISPATCH_V2(iter.input_dtype(), "max_all_cuda", AT_WRAP([&] {
max_values_kernel_cuda_impl<scalar_t>(iter);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
REGISTER_DISPATCH(max_values_stub, &max_values_kernel_cuda)

View File

@ -12,6 +12,7 @@
#include <ATen/NumericUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/NumericUtils.h>
#include <ATen/cuda/NumericLimits.cuh>
@ -33,24 +34,24 @@ void min_values_kernel_cuda_impl(TensorIterator& iter) {
}
void min_values_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "min_values_cuda", [&]() {
AT_DISPATCH_V2(iter.dtype(), "min_values_cuda", AT_WRAP([&]() {
min_values_kernel_cuda_impl<scalar_t>(iter);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void min_launch_kernel(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "min_cuda", [&]() {
AT_DISPATCH_V2(iter.input_dtype(), "min_cuda", AT_WRAP([&]() {
gpu_reduce_kernel<scalar_t, scalar_t>(
iter,
MinOps<scalar_t>{},
thrust::pair<scalar_t, int64_t>(at::numeric_limits<scalar_t>::upper_bound(), 0));
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
void min_all_launch_kernel(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), "min_all_cuda", [&] {
AT_DISPATCH_V2(iter.input_dtype(), "min_all_cuda", AT_WRAP([&] {
min_values_kernel_cuda_impl<scalar_t>(iter);
});
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf, kBool);
}
REGISTER_DISPATCH(min_values_stub, &min_values_kernel_cuda)

View File

@ -40,6 +40,8 @@ using namespace at::mps;
namespace at::native::mps {
void dispatch_sync_with_rethrow(dispatch_queue_t queue, void (^block)());
struct MPSScalar {
id<MTLBuffer> getMTLBuffer() const {
return __builtin_bit_cast(id<MTLBuffer>, buffer.get());

View File

@ -53,6 +53,21 @@
@end
namespace at::native::mps {
void dispatch_sync_with_rethrow(dispatch_queue_t queue, void (^block)()) {
__block std::optional<std::exception_ptr> block_exception;
dispatch_sync(queue, ^() {
try {
block();
} catch (...) {
block_exception = std::current_exception();
}
});
if (block_exception) {
std::rethrow_exception(*block_exception);
}
}
/**
* Computes the distance from the lowest to the highest element offset in a given tensor.
*/

View File

@ -1,5 +1,4 @@
#include <c10/metal/atomic.h>
#include <c10/metal/error.h>
#include <c10/metal/indexing.h>
#include <metal_stdlib>
@ -32,24 +31,10 @@ OffsetT index_apply_indices(
constant IndexAB* indices,
constant int64_t* sizes,
constant int64_t* strides,
uint num_indices,
thread bool& error,
device ErrorMessages* error_buf) {
uint num_indices) {
OffsetT rc = offs.x;
for (uint i = 0; i < num_indices; i++) {
auto idx = indices[i].indexArray[offs.y];
if (idx < -sizes[i] || idx >= sizes[i]) {
TORCH_REPORT_ERROR(
error_buf,
"index ",
idx,
" is out of bounds for dimension ",
i,
" with size ",
sizes[i]);
error = true;
break;
}
if (idx < 0) {
idx += sizes[i];
}
@ -70,7 +55,6 @@ kernel void index_select(
constant int64_t* index_sizes,
constant int64_t* index_strides,
constant uint4& ndim_nindices_numel,
device ErrorMessages* error_buffer,
uint thread_index [[thread_position_in_grid]]) {
const auto ndim = ndim_nindices_numel.x;
const auto num_indices = ndim_nindices_numel.y;
@ -81,19 +65,8 @@ kernel void index_select(
indices_strides,
ndim,
thread_index);
bool error = false;
auto input_offs = index_apply_indices<OffsetT>(
offs.yz,
indices,
index_sizes,
index_strides,
num_indices,
error,
error_buffer);
if (error) {
output[offs.x / sizeof(T)] = 0;
return;
}
offs.yz, indices, index_sizes, index_strides, num_indices);
output[offs.x / sizeof(T)] = input[input_offs / sizeof(T)];
}
@ -109,9 +82,7 @@ inline void index_put_impl(
constant int64_t* index_sizes,
constant int64_t* index_strides,
constant uint4& ndim_nindices_numel,
device ErrorMessages* error_buffer,
uint thread_index) {
bool error = false;
const auto ndim = ndim_nindices_numel.x;
const auto num_indices = ndim_nindices_numel.y;
const auto offs = index_get_offsets(
@ -122,16 +93,7 @@ inline void index_put_impl(
ndim,
thread_index);
auto output_offs = index_apply_indices<OffsetT>(
offs.xz,
indices,
index_sizes,
index_strides,
num_indices,
error,
error_buffer);
if (error) {
return;
}
offs.xz, indices, index_sizes, index_strides, num_indices);
output[output_offs / sizeof(T)] = input[offs.y / sizeof(T)];
}
@ -147,7 +109,6 @@ kernel void index_put(
constant int64_t* index_sizes,
constant int64_t* index_strides,
constant uint4& ndim_nindices_numel,
device ErrorMessages* error_buffer,
uint thread_index [[thread_position_in_grid]]) {
index_put_impl(
output,
@ -160,7 +121,6 @@ kernel void index_put(
index_sizes,
index_strides,
ndim_nindices_numel,
error_buffer,
thread_index);
}
@ -176,7 +136,6 @@ kernel void index_put_serial(
constant int64_t* index_sizes,
constant int64_t* index_strides,
constant uint4& ndim_nindices_numel,
device ErrorMessages* error_buffer,
uint thread_index [[thread_position_in_grid]]) {
(void)thread_index; // Suppress unused variable warning
for (uint idx = 0; idx < ndim_nindices_numel.z; ++idx) {
@ -191,7 +150,6 @@ kernel void index_put_serial(
index_sizes,
index_strides,
ndim_nindices_numel,
error_buffer,
idx);
}
}
@ -208,7 +166,6 @@ kernel void index_put_accumulate(
constant int64_t* index_sizes,
constant int64_t* index_strides,
constant uint4& ndim_nindices_numel,
device ErrorMessages* error_buffer,
uint thread_index [[thread_position_in_grid]]) {
const auto ndim = ndim_nindices_numel.x;
const auto num_indices = ndim_nindices_numel.y;
@ -219,18 +176,8 @@ kernel void index_put_accumulate(
indices_strides,
ndim,
thread_index);
bool error = false;
auto output_offs = index_apply_indices<OffsetT>(
offs.xz,
indices,
index_sizes,
index_strides,
num_indices,
error,
error_buffer);
if (error) {
return;
}
offs.xz, indices, index_sizes, index_strides, num_indices);
AtomicType<T>::atomic_add(
reinterpret_cast<device AtomicType_t<T>*>(output),
output_offs / sizeof(T),
@ -250,7 +197,6 @@ kernel void index_put_accumulate(
constant int64_t* index_sizes, \
constant int64_t* index_strides, \
constant uint4& ndim_nindices_numel, \
device ErrorMessages* error_buffer, \
uint thread_index [[thread_position_in_grid]])
#define REGISTER_INDEX_OP_ALL_DTYPES(OP_NAME) \

View File

@ -220,7 +220,7 @@ Tensor _embedding_bag_dense_backward_mps(const Tensor& output_grad,
auto num_threads = (params.mode == EmbeddingBagMode::MAX) ? output_grad.numel() : num_indices * params.feature_size;
MPSStream* stream = getCurrentMPSStream();
dispatch_sync_with_rethrow(stream->queue(), ^() {
mps::dispatch_sync_with_rethrow(stream->queue(), ^() {
@autoreleasepool {
id<MTLComputeCommandEncoder> computeEncoder = stream->commandEncoder();
auto pipeline_state = lib.getPipelineStateForFunc(fmt::format("embedding_bag_backward_{}_{}",
@ -273,7 +273,7 @@ Tensor _embedding_bag_per_sample_weights_backward_mps(const Tensor& output_grad,
auto num_threads = num_indices * feature_size;
MPSStream* stream = getCurrentMPSStream();
dispatch_sync_with_rethrow(stream->queue(), ^() {
mps::dispatch_sync_with_rethrow(stream->queue(), ^() {
@autoreleasepool {
id<MTLComputeCommandEncoder> computeEncoder = stream->commandEncoder();
auto pipeline_state = lib.getPipelineStateForFunc(fmt::format("embedding_bag_per_sample_weights_backward_{}_{}",

View File

@ -179,8 +179,7 @@ static void dispatch_index_kernel(TensorIteratorBase& iter,
iter.strides(2),
index_size,
index_stride,
ndim_nindiees,
mpsStream->getErrorBuffer());
ndim_nindiees);
mtl_dispatch1DJob(computeEncoder, indexSelectPSO, serial ? 1 : iter.numel());
});
}
@ -300,7 +299,7 @@ static Tensor& nonzero_out_native_mps(const Tensor& self, Tensor& out_) {
MPSStream* stream = getCurrentMPSStream();
using CachedGraph = MPSUnaryCachedGraph;
dispatch_sync_with_rethrow(stream->queue(), ^() {
dispatch_sync(stream->queue(), ^() {
stream->synchronize(SyncType::COMMIT_AND_WAIT);
});
int64_t total_nonzero = at::count_nonzero(self).item<int64_t>();
@ -385,7 +384,7 @@ Tensor& nonzero_out_mps(const Tensor& self, Tensor& out_) {
MPSStream* stream = getCurrentMPSStream();
using CachedGraph = MPSUnaryCachedGraph;
dispatch_sync_with_rethrow(stream->queue(), ^() {
dispatch_sync(stream->queue(), ^() {
stream->synchronize(SyncType::COMMIT_AND_WAIT);
});
int64_t total_nonzero = at::count_nonzero(self).item<int64_t>();

View File

@ -923,7 +923,7 @@ std::tuple<Tensor, Tensor, Tensor> layer_norm_mps(const Tensor& input,
MPSStream* stream = getCurrentMPSStream();
TORCH_CHECK_NOT_IMPLEMENTED(input.scalar_type() != kLong, "Not implemented for long on MPS");
@autoreleasepool {
dispatch_sync_with_rethrow(stream->queue(), ^() {
mps::dispatch_sync_with_rethrow(stream->queue(), ^() {
// which kernel variant to use based on the normalized axis N size
const int N_READS = 4;
auto metalType = mps::scalarToMetalTypeString(input);

View File

@ -192,11 +192,6 @@
CompositeExplicitAutograd: _assert_tensor_metadata
Meta: _assert_tensor_metadata_meta_symint
- func: _async_error(str msg) -> ()
dispatch:
CompositeExplicitAutograd: _async_error
Meta: _async_error_meta
- func: _print(str s) -> ()
dispatch:
CompositeExplicitAutograd: _print

View File

@ -47,7 +47,6 @@
#include <c10/macros/Macros.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/distance.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/gather.h>

View File

@ -1,3 +1,191 @@
#pragma once
#include <ATen/xpu/XPUContext.h>
#include <c10/xpu/XPUEvent.h>
#include <optional>
namespace at::xpu {
/*
* XPUEvent is a movable, non-copyable wrapper around a SYCL event. An XPUEvent
* is constructed lazily when it is first recorded. It has a device, which is
* acquired from the first recording stream; later streams that record the
* event must be on the same device.
*
* Currently, XPUEvent does NOT support exporting an event to another process
* via inter-process communication (IPC), so event handles cannot be shared
* across processes. This can affect applications that rely on cross-process
* synchronization and communication.
*/
struct TORCH_XPU_API XPUEvent {
// Constructors
XPUEvent(bool enable_timing = false) noexcept
: enable_timing_{enable_timing} {}
~XPUEvent() {
if (isCreated()) {
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_deletion(
at::kXPU, reinterpret_cast<uintptr_t>(event_.get()));
}
}
}
XPUEvent(const XPUEvent&) = delete;
XPUEvent& operator=(const XPUEvent&) = delete;
XPUEvent(XPUEvent&& other) = default;
XPUEvent& operator=(XPUEvent&& other) = default;
operator sycl::event&() const {
return event();
}
std::optional<at::Device> device() const {
if (isCreated()) {
return at::Device(at::kXPU, device_index_);
} else {
return std::nullopt;
}
}
inline bool isCreated() const {
return (event_.get() != nullptr);
}
DeviceIndex device_index() const {
return device_index_;
}
sycl::event& event() const {
return *event_;
}
bool query() const {
using namespace sycl::info;
if (!isCreated()) {
return true;
}
return event().get_info<event::command_execution_status>() ==
event_command_status::complete;
}
void record() {
record(getCurrentXPUStream());
}
void recordOnce(const XPUStream& stream) {
if (!isCreated()) {
record(stream);
}
}
void record(const XPUStream& stream) {
if (!isCreated()) {
device_index_ = stream.device_index();
assignEvent(stream.queue());
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_creation(
at::kXPU, reinterpret_cast<uintptr_t>(event_.get()));
}
} else {
TORCH_CHECK(
device_index_ == stream.device_index(),
"Event device ",
device_index_,
" does not match recording stream's device ",
stream.device_index(),
".");
reassignEvent(stream.queue());
}
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_record(
at::kXPU,
reinterpret_cast<uintptr_t>(event_.get()),
reinterpret_cast<uintptr_t>(&stream.queue()));
}
}
void block(const XPUStream& stream) {
if (isCreated()) {
std::vector<sycl::event> event_list{event()};
// Make this stream wait until event_ is completed.
stream.queue().ext_oneapi_submit_barrier(event_list);
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_wait(
at::kXPU,
reinterpret_cast<uintptr_t>(event_.get()),
reinterpret_cast<uintptr_t>(&stream.queue()));
}
}
}
double elapsed_time(const XPUEvent& other) const {
TORCH_CHECK(
isCreated() && other.isCreated(),
"Both events must be recorded before calculating elapsed time.");
TORCH_CHECK(
query() && other.query(),
"Both events must be completed before calculating elapsed time.");
TORCH_CHECK(
enable_timing_ && other.enable_timing_,
"Both events must be created with argument 'enable_timing=True'.");
#if SYCL_COMPILER_VERSION < 20250000
TORCH_CHECK_NOT_IMPLEMENTED(
false,
"elapsed_time of XPUEvent requires PyTorch to be built with SYCL compiler version 2025.0.0 or newer.");
#endif
using namespace sycl::info::event_profiling;
// Block until both of the recorded events are completed.
uint64_t end_time_ns = other.event().get_profiling_info<command_end>();
uint64_t start_time_ns = event().get_profiling_info<command_end>();
// Return the elapsed time in milliseconds.
return 1e-6 *
(static_cast<double>(end_time_ns) - static_cast<double>(start_time_ns));
}
void synchronize() const {
if (isCreated()) {
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_synchronization(
at::kXPU, reinterpret_cast<uintptr_t>(event_.get()));
}
event().wait_and_throw();
}
}
private:
void assignEvent(sycl::queue& queue) {
#if SYCL_COMPILER_VERSION >= 20250000
if (enable_timing_) {
event_ = std::make_unique<sycl::event>(
sycl::ext::oneapi::experimental::submit_profiling_tag(queue));
} else {
event_ = std::make_unique<sycl::event>(queue.ext_oneapi_submit_barrier());
}
#else
event_ = std::make_unique<sycl::event>(queue.ext_oneapi_submit_barrier());
#endif
}
void reassignEvent(sycl::queue& queue) {
event_.reset();
assignEvent(queue);
}
bool enable_timing_ = false;
DeviceIndex device_index_ = -1;
// Only need to track the last event, as events in an in-order queue are
// executed sequentially.
std::unique_ptr<sycl::event> event_;
};
} // namespace at::xpu
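
For reference, the record/synchronize/elapsed_time contract documented above can be
exercised from Python. A minimal sketch, assuming the torch.xpu bindings that mirror
torch.cuda (torch.xpu.Event on an XPU-enabled build); the snippet is illustrative and
not part of this diff:

import torch

# enable_timing=True is required for elapsed_time(), matching the TORCH_CHECK
# in XPUEvent::elapsed_time above.
start = torch.xpu.Event(enable_timing=True)
end = torch.xpu.Event(enable_timing=True)

x = torch.randn(4096, 4096, device="xpu")
start.record()   # records on the current XPU stream
y = x @ x        # work submitted to the same in-order queue
end.record()

end.synchronize()  # block the host until the recorded event completes
print(f"matmul took {start.elapsed_time(end):.3f} ms")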

View File

@ -50,7 +50,7 @@ nfnet_l0,pass,7
repvgg_a2,pass,7
repvgg_a2,fail_accuracy,7

View File

@ -952,7 +952,7 @@ def latency_experiment_summary(suite_name, args, model, timings, **kwargs):
first_fields.append(kwargs["tag"])
headers = first_headers + ["speedup", "abs_latency"]
row = first_fields + [float(speedup), median[1] * 1000]
msg = f"{median[0] * 1000} ms, {median[1] * 1000} ms, {speedup:.3f}x"
msg = f"{speedup:.3f}x"
if args.baseline:
headers.extend(
[
@ -1010,7 +1010,7 @@ def latency_experiment_summary(suite_name, args, model, timings, **kwargs):
# Hypothetically you can use this from other places, but it's currently
# inaccessible, and when this assert fails you need to update the
# event_name here to account for the other cases you are using this
assert any([args.quantization, args.optimus])
assert args.quantization is not None
output_signpost(
dict(zip(headers, row)),
args,
@ -2288,9 +2288,11 @@ class BenchmarkRunner:
)
):
is_same = False
except Exception:
except Exception as e:
# Sometimes torch.allclose may throw RuntimeError
is_same = False
exception_string = str(e)
accuracy_status = f"fail_exception: {exception_string}"
return record_status(accuracy_status, dynamo_start_stats=start_stats)
if not is_same:
accuracy_status = "eager_two_runs_differ"
@ -2407,9 +2409,11 @@ class BenchmarkRunner:
force_max_multiplier=force_max_multiplier,
):
is_same = False
except Exception:
except Exception as e:
# Sometimes torch.allclose may throw RuntimeError
is_same = False
exception_string = str(e)
accuracy_status = f"fail_exception: {exception_string}"
return record_status(accuracy_status, dynamo_start_stats=start_stats)
if not is_same:
if self.args.skip_accuracy_check:
@ -2583,9 +2587,6 @@ class BenchmarkRunner:
**experiment_kwargs,
)
# reset dynamo
torch._dynamo.reset()
if self.args.export_aot_inductor:
optimized_model_iter_fn = optimize_ctx
else:
@ -2949,7 +2950,7 @@ class BenchmarkRunner:
status = self.check_tolerance(name, model, example_inputs, optimize_ctx)
print(status)
elif self.args.performance:
if self.args.backend in ["torchao", "optimus"]:
if self.args.backend == "torchao":
status = self.run_performance_test_non_alternate(
name, model, example_inputs, optimize_ctx, experiment, tag
)
@ -3525,12 +3526,6 @@ def parse_args(args=None):
action="store_true",
help="Measure speedup with TorchInductor",
)
group.add_argument(
"--optimus",
choices=["vertical_opt", "horizontal_opt", "all"],
default=None,
help="Measure speedup of Optimus with TorchInductor baseline",
)
group.add_argument(
"--quantization",
choices=[
@ -3788,9 +3783,6 @@ def run(runner, args, original_dir=None):
if args.inductor:
assert args.backend is None
args.backend = "inductor"
if args.optimus:
assert args.backend is None
args.backend = "optimus"
if args.quantization:
assert args.backend is None
args.backend = "torchao"
@ -4075,22 +4067,10 @@ def run(runner, args, original_dir=None):
runner.model_iter_fn = model_iter_fn_and_mark_step
optimize_ctx = torchao_optimize_ctx(args.quantization)
elif args.backend == "optimus":
from .optimus import get_baseline_ctx, get_optimus_optimize_ctx
baseline_ctx = get_baseline_ctx(
nopython=args.nopython, inductor_compile_mode=args.inductor_compile_mode
)
runner.model_iter_fn = baseline_ctx(runner.model_iter_fn)
optimize_ctx = get_optimus_optimize_ctx(
args.optimus, args.nopython, args.inductor_compile_mode
)
else:
optimize_ctx = torch._dynamo.optimize(args.backend, nopython=args.nopython)
experiment = (
speedup_experiment
if args.backend not in ["torchao", "optimus"]
else latency_experiment
speedup_experiment if args.backend != "torchao" else latency_experiment
)
if args.accuracy:
output_filename = f"accuracy_{args.backend}.csv"
@ -4111,12 +4091,7 @@ def run(runner, args, original_dir=None):
if args.only in runner.disable_cudagraph_models:
args.disable_cudagraphs = True
if (
args.inductor
or args.backend == "inductor"
or args.export_aot_inductor
or args.backend == "optimus"
):
if args.inductor or args.backend == "inductor" or args.export_aot_inductor:
inductor_config.triton.cudagraphs = not args.disable_cudagraphs
inductor_config.triton.persistent_reductions = (
not args.disable_persistent_reductions
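
The accuracy-check changes above fold exceptions raised during comparison into an
explicit "fail_exception: ..." status instead of only marking the two runs as
different. A minimal sketch of that pattern (the compare helper and tolerances are
illustrative, not taken from the benchmark code):

import torch

def compare(ref, res, tol=1e-3):
    # torch.allclose can itself raise (e.g. RuntimeError on mismatched shapes
    # or dtypes), so the comparison is wrapped and the exception text becomes
    # part of the recorded accuracy status.
    try:
        is_same = torch.allclose(ref, res, atol=tol, rtol=tol)
    except Exception as e:
        return f"fail_exception: {e}"
    return "pass" if is_same else "eager_two_runs_differ"

print(compare(torch.ones(2), torch.ones(2)))  # pass
print(compare(torch.ones(2), torch.ones(3)))  # fail_exception: The size of tensor a ...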

View File

@ -1,62 +0,0 @@
import functools
import torch
def get_baseline_ctx(nopython, inductor_compile_mode):
return functools.partial(
torch.compile,
backend="inductor",
fullgraph=nopython,
mode=inductor_compile_mode,
)
def get_optimus_optimize_ctx(config, nopython, inductor_compile_mode):
if config == "vertical_opt":
optimus_inductor_config = {
"pre_grad_fusion_options": {
"normalization_pass": {},
"merge_splits_pass": {},
"split_cat_pass": {},
"unbind_stack_pass": {},
"unbind_cat_to_view_pass": {},
}
}
elif config == "horizontal_opt":
optimus_inductor_config = {
"pre_grad_fusion_options": {
"normalization_pass": {},
"batch_linear": {},
"batch_layernorm": {},
},
}
elif config == "all":
optimus_inductor_config = {
"pre_grad_fusion_options": {
"normalization_pass": {},
"batch_linear": {},
"batch_layernorm": {},
"merge_splits_pass": {},
"split_cat_pass": {},
"unbind_stack_pass": {},
"unbind_cat_to_view_pass": {},
},
}
else:
raise RuntimeError(f"Unknown optimus config: {config}")
def _inner(fn):
if "pre_grad_fusion_options" in optimus_inductor_config:
torch._inductor.config.pre_grad_fusion_options = optimus_inductor_config[
"pre_grad_fusion_options"
]
if "post_grad_fusion_options" in optimus_inductor_config:
torch._inductor.config.post_grad_fusion_options = optimus_inductor_config[
"post_grad_fusion_options"
]
return torch.compile(
fn, backend="inductor", fullgraph=nopython, mode=inductor_compile_mode
)
return _inner

View File

@ -2,7 +2,6 @@ import csv
import os
import re
import sys
from pathlib import Path
# This script takes the logs produced by the benchmark scripts (e.g.,
@ -16,7 +15,8 @@ from pathlib import Path
# This script is not very well written, feel free to rewrite it as necessary
assert len(sys.argv) == 2
full_log = Path(sys.argv[1]).read_text()
full_log = open(sys.argv[1]).read()
# If the log contains a gist URL, extract it so we can include it in the CSV
gist_url = ""

View File

@ -484,106 +484,24 @@ PyTorch,sum,sum_R256_V512_dim0_contiguousTrue_cpu,short,False,50.954394,0.000000
PyTorch,sum,sum_R256_V512_dim0_contiguousFalse_cpu,short,False,57.957757,0.000000
PyTorch,sum,sum_R256_V512_dim1_contiguousTrue_cpu,short,False,53.592068,0.000000
PyTorch,sum,sum_R256_V512_dim1_contiguousFalse_cpu,short,False,51.339726,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.bool,short,False,0.927,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.uint8,short,False,6.261,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int8,short,False,6.351,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int16,short,False,6.177,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int32,short,False,6.333,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int64,short,False,6.588,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float16,short,False,8.117,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.bfloat16,short,False,9.358,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float32,short,False,7.844,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float64,short,False,8.097,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.bool,short,False,6.159,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.uint8,short,False,0.926,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int8,short,False,6.192,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int16,short,False,6.276,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int32,short,False,6.461,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int64,short,False,6.524,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float16,short,False,8.136,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.bfloat16,short,False,6.854,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float32,short,False,6.446,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float64,short,False,6.829,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.bool,short,False,6.088,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.uint8,short,False,6.059,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int8,short,False,0.922,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int16,short,False,6.263,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int32,short,False,6.330,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int64,short,False,6.688,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float16,short,False,8.176,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.bfloat16,short,False,6.959,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float32,short,False,6.430,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float64,short,False,6.818,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.bool,short,False,6.350,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.uint8,short,False,6.221,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int8,short,False,6.193,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int16,short,False,0.922,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int32,short,False,6.263,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int64,short,False,6.525,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float16,short,False,7.960,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.bfloat16,short,False,6.801,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float32,short,False,6.594,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float64,short,False,7.089,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.bool,short,False,6.498,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.uint8,short,False,6.358,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int8,short,False,6.390,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int16,short,False,6.415,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int32,short,False,0.925,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int64,short,False,6.657,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float16,short,False,7.954,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.bfloat16,short,False,6.930,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float32,short,False,6.737,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float64,short,False,6.948,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.bool,short,False,6.757,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.uint8,short,False,6.402,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int8,short,False,6.550,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int16,short,False,6.518,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int32,short,False,6.766,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int64,short,False,0.929,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float16,short,False,8.557,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.bfloat16,short,False,9.045,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float32,short,False,7.672,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float64,short,False,7.276,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.bool,short,False,6.414,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.uint8,short,False,7.736,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int8,short,False,7.889,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int16,short,False,8.170,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int32,short,False,7.783,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int64,short,False,7.743,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float16,short,False,0.927,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.bfloat16,short,False,7.018,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float32,short,False,8.428,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float64,short,False,6.767,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.bool,short,False,6.479,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.uint8,short,False,7.827,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int8,short,False,6.450,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int16,short,False,6.320,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int32,short,False,6.385,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int64,short,False,8.119,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float16,short,False,8.063,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.bfloat16,short,False,0.925,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float32,short,False,8.629,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float64,short,False,6.638,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.bool,short,False,6.425,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.uint8,short,False,7.803,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int8,short,False,6.502,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int16,short,False,6.429,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int32,short,False,6.549,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int64,short,False,7.749,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float16,short,False,7.301,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.bfloat16,short,False,7.682,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float32,short,False,0.930,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float64,short,False,6.738,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.bool,short,False,6.798,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.uint8,short,False,6.506,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int8,short,False,6.494,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int16,short,False,6.668,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int32,short,False,6.696,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int64,short,False,7.115,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float16,short,False,7.910,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.bfloat16,short,False,7.410,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float32,short,False,6.868,0.000000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float64,short,False,0.924,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M8_N16_cpu,short,False,7.040985,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M8_N64_cpu,short,False,7.168604,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M8_N128_cpu,short,False,7.434442,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M16_N16_cpu,short,False,7.078318,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M16_N64_cpu,short,False,7.426670,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M16_N128_cpu,short,False,7.679027,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M32_N16_cpu,short,False,7.281365,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M32_N64_cpu,short,False,7.682783,0.000000
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M32_N128_cpu,short,False,8.381938,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M8_N16_cpu,short,False,7.039854,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M8_N64_cpu,short,False,7.399855,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M8_N128_cpu,short,False,7.715193,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M16_N16_cpu,short,False,7.255140,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M16_N64_cpu,short,False,7.753522,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M16_N128_cpu,short,False,8.364281,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M32_N16_cpu,short,False,7.476377,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M32_N64_cpu,short,False,8.458564,0.000000
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M32_N128_cpu,short,False,9.391939,0.000000
PyTorch,addcmul,addcmul_M1_N2_cpu_dtypetorch.float32,short,False,4.461410,0.000000
PyTorch,addcmul,addcmul_M1_N2_cpu_dtypetorch.bfloat16,short,False,4.560082,0.000000
PyTorch,addcmul,addcmul_M32_N64_cpu_dtypetorch.float32,short,False,5.141248,0.000000

View File

@ -4,84 +4,74 @@ import torch
tensor_conversion_short_configs = op_bench.cross_product_configs(
M=[32],
N=[128],
M=(
8,
16,
32,
),
N=(
16,
64,
128,
),
device=["cpu", "cuda"],
dtype_one=[
torch.bool,
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.half,
torch.bfloat16,
torch.float,
torch.double,
],
dtype_two=[
torch.bool,
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.half,
torch.bfloat16,
torch.float,
torch.double,
],
tags=["short"],
)
tensor_conversion_long_configs = op_bench.cross_product_configs(
M=[1024],
N=[1024],
M=(
64,
128,
256,
512,
),
N=(
256,
512,
1024,
2048,
),
device=["cpu", "cuda"],
dtype_one=[
torch.bool,
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.half,
torch.bfloat16,
torch.float,
torch.double,
],
dtype_two=[
torch.bool,
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.half,
torch.bfloat16,
torch.float,
torch.double,
],
tags=["long"],
)
class TensorConversionBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype_one, dtype_two, device):
class FloatToHalfTensorConversionBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, device):
self.inputs = {
"input": torch.rand(
M, N, device=device, requires_grad=False, dtype=torch.float
).to(dtype=dtype_one)
)
}
self.dtype_one = dtype_one
self.dtype_two = dtype_two
def forward(self, input):
return input.to(dtype=self.dtype_two)
return input.to(torch.half)
op_bench.generate_pt_test(tensor_conversion_short_configs, TensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_long_configs, TensorConversionBenchmark)
class HalfToFloatTensorConversionBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, device):
self.inputs = {
"input": torch.rand(
M, N, device=device, requires_grad=False, dtype=torch.half
)
}
def forward(self, input):
return input.to(torch.float)
op_bench.generate_pt_test(
tensor_conversion_short_configs, FloatToHalfTensorConversionBenchmark
)
op_bench.generate_pt_test(
tensor_conversion_long_configs, FloatToHalfTensorConversionBenchmark
)
op_bench.generate_pt_test(
tensor_conversion_short_configs, HalfToFloatTensorConversionBenchmark
)
op_bench.generate_pt_test(
tensor_conversion_long_configs, HalfToFloatTensorConversionBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
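
The new "short" configs above can be spot-checked outside the op_bench harness with
torch.utils.benchmark; this snippet only illustrates what
FloatToHalfTensorConversionBenchmark measures and is not part of this change:

import torch
from torch.utils import benchmark

# Time a float32 -> float16 conversion for the largest "short" shape (M=32, N=128).
x = torch.rand(32, 128, dtype=torch.float32)
t = benchmark.Timer(
    stmt="x.to(torch.half)",
    globals={"x": x, "torch": torch},
)
print(t.blocked_autorange())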

View File

@ -349,106 +349,24 @@ PyTorch,sum,sum_R256_V512_dim0_contiguousTrue_cpu,short,FALSE,12.5841
PyTorch,sum,sum_R256_V512_dim0_contiguousFALSE_cpu,short,FALSE,20.8765
PyTorch,sum,sum_R256_V512_dim1_contiguousTrue_cpu,short,FALSE,15.4414
PyTorch,sum,sum_R256_V512_dim1_contiguousFALSE_cpu,short,FALSE,15.3287
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.bool,short,False,0.797
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.uint8,short,False,6.071
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int8,short,False,6.031
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int16,short,False,6.243
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int32,short,False,7.231
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int64,short,False,7.791
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float16,short,False,12.661
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.bfloat16,short,False,11.225
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float32,short,False,9.772
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float64,short,False,9.872
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.bool,short,False,6.033
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.uint8,short,False,0.781
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int8,short,False,6.060
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int16,short,False,6.180
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int32,short,False,7.258
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int64,short,False,7.758
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float16,short,False,10.504
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.bfloat16,short,False,6.749
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float32,short,False,7.679
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float64,short,False,7.797
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.bool,short,False,6.019
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.uint8,short,False,6.079
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int8,short,False,0.785
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int16,short,False,6.188
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int32,short,False,7.288
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int64,short,False,7.770
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float16,short,False,10.466
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.bfloat16,short,False,6.676
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float32,short,False,7.736
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float64,short,False,7.780
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.bool,short,False,6.130
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.uint8,short,False,6.221
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int8,short,False,6.101
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int16,short,False,0.791
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int32,short,False,6.254
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int64,short,False,7.733
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float16,short,False,10.562
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.bfloat16,short,False,6.704
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float32,short,False,7.819
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float64,short,False,8.276
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.bool,short,False,6.361
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.uint8,short,False,6.364
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int8,short,False,6.309
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int16,short,False,6.362
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int32,short,False,0.791
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int64,short,False,7.746
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float16,short,False,9.462
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.bfloat16,short,False,6.678
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float32,short,False,7.827
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float64,short,False,8.200
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.bool,short,False,6.925
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.uint8,short,False,6.947
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int8,short,False,6.962
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int16,short,False,6.906
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int32,short,False,7.664
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int64,short,False,0.782
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float16,short,False,10.528
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.bfloat16,short,False,10.123
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float32,short,False,9.234
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float64,short,False,8.694
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.bool,short,False,12.653
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.uint8,short,False,9.348
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int8,short,False,8.774
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int16,short,False,9.063
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int32,short,False,10.012
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int64,short,False,13.641
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float16,short,False,0.788
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.bfloat16,short,False,13.757
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float32,short,False,7.170
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float64,short,False,12.511
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.bool,short,False,6.516
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.uint8,short,False,8.539
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int8,short,False,6.483
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int16,short,False,6.468
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int32,short,False,7.752
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int64,short,False,9.868
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float16,short,False,10.556
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.bfloat16,short,False,0.792
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float32,short,False,7.577
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float64,short,False,8.267
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.bool,short,False,6.819
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.uint8,short,False,7.715
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int8,short,False,6.754
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int16,short,False,6.825
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int32,short,False,7.790
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int64,short,False,9.219
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float16,short,False,5.977
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.bfloat16,short,False,7.069
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float32,short,False,0.794
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float64,short,False,8.301
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.bool,short,False,7.401
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.uint8,short,False,7.843
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int8,short,False,7.117
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int16,short,False,7.170
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int32,short,False,8.000
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int64,short,False,9.284
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float16,short,False,7.179
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.bfloat16,short,False,7.645
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float32,short,False,7.988
PyTorch,TensorConversionBenchmark,TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float64,short,False,0.792
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M8_N16_cpu,short,FALSE,5.0499
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M8_N64_cpu,short,FALSE,5.3229
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M8_N128_cpu,short,FALSE,5.4418
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M16_N16_cpu,short,FALSE,5.0868
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M16_N64_cpu,short,FALSE,5.4495
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M16_N128_cpu,short,FALSE,5.5578
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M32_N16_cpu,short,FALSE,5.2631
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M32_N64_cpu,short,FALSE,5.5646
PyTorch,FloatToHalfTensorConversionBenchmark,FloatToHalfTensorConversionBenchmark_M32_N128_cpu,short,FALSE,5.7898
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M8_N16_cpu,short,FALSE,5.0228
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M8_N64_cpu,short,FALSE,5.3692
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M8_N128_cpu,short,FALSE,5.4006
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M16_N16_cpu,short,FALSE,5.1107
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M16_N64_cpu,short,FALSE,5.4119
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M16_N128_cpu,short,FALSE,5.5583
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M32_N16_cpu,short,FALSE,5.3818
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M32_N64_cpu,short,FALSE,5.5742
PyTorch,HalfToFloatTensorConversionBenchmark,HalfToFloatTensorConversionBenchmark_M32_N128_cpu,short,FALSE,6.8414
PyTorch,relu,"relu_dims(3,4,5)_contigFALSE_inplaceFALSE_dtypetorch.quint8",short,FALSE,9.4657
PyTorch,relu,"relu_dims(3,4,5)_contigFALSE_inplaceFALSE_dtypetorch.qint8",short,FALSE,9.4625
PyTorch,relu,"relu_dims(3,4,5)_contigFALSE_inplaceFALSE_dtypetorch.qint32",short,FALSE,9.4165

Benchmarking Framework Benchmarking Module Name Case Name tag run_backward Execution Time
PyTorch sum sum_R256_V512_dim0_contiguousFALSE_cpu short FALSE 20.8765
PyTorch sum sum_R256_V512_dim1_contiguousTrue_cpu short FALSE 15.4414
PyTorch sum sum_R256_V512_dim1_contiguousFALSE_cpu short FALSE 15.3287
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.bool short False 0.797
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.uint8 short False 6.071
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int8 short False 6.031
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int16 short False 6.243
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int32 short False 7.231
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.int64 short False 7.791
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float16 short False 12.661
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.bfloat16 short False 11.225
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float32 short False 9.772
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bool_dtype_twotorch.float64 short False 9.872
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.bool short False 6.033
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.uint8 short False 0.781
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int8 short False 6.060
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int16 short False 6.180
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int32 short False 7.258
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.int64 short False 7.758
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float16 short False 10.504
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.bfloat16 short False 6.749
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float32 short False 7.679
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.uint8_dtype_twotorch.float64 short False 7.797
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.bool short False 6.019
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.uint8 short False 6.079
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int8 short False 0.785
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int16 short False 6.188
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int32 short False 7.288
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.int64 short False 7.770
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float16 short False 10.466
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.bfloat16 short False 6.676
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float32 short False 7.736
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int8_dtype_twotorch.float64 short False 7.780
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.bool short False 6.130
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.uint8 short False 6.221
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int8 short False 6.101
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int16 short False 0.791
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int32 short False 6.254
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.int64 short False 7.733
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float16 short False 10.562
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.bfloat16 short False 6.704
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float32 short False 7.819
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int16_dtype_twotorch.float64 short False 8.276
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.bool short False 6.361
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.uint8 short False 6.364
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int8 short False 6.309
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int16 short False 6.362
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int32 short False 0.791
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.int64 short False 7.746
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float16 short False 9.462
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.bfloat16 short False 6.678
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float32 short False 7.827
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int32_dtype_twotorch.float64 short False 8.200
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.bool short False 6.925
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.uint8 short False 6.947
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int8 short False 6.962
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int16 short False 6.906
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int32 short False 7.664
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.int64 short False 0.782
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float16 short False 10.528
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.bfloat16 short False 10.123
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float32 short False 9.234
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.int64_dtype_twotorch.float64 short False 8.694
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.bool short False 12.653
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.uint8 short False 9.348
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int8 short False 8.774
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int16 short False 9.063
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int32 short False 10.012
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.int64 short False 13.641
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float16 short False 0.788
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.bfloat16 short False 13.757
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float32 short False 7.170
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float16_dtype_twotorch.float64 short False 12.511
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.bool short False 6.516
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.uint8 short False 8.539
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int8 short False 6.483
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int16 short False 6.468
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int32 short False 7.752
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.int64 short False 9.868
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float16 short False 10.556
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.bfloat16 short False 0.792
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float32 short False 7.577
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.bfloat16_dtype_twotorch.float64 short False 8.267
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.bool short False 6.819
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.uint8 short False 7.715
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int8 short False 6.754
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int16 short False 6.825
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int32 short False 7.790
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.int64 short False 9.219
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float16 short False 5.977
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.bfloat16 short False 7.069
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float32 short False 0.794
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float32_dtype_twotorch.float64 short False 8.301
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.bool short False 7.401
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.uint8 short False 7.843
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int8 short False 7.117
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int16 short False 7.170
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int32 short False 8.000
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.int64 short False 9.284
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float16 short False 7.179
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.bfloat16 short False 7.645
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float32 short False 7.988
PyTorch TensorConversionBenchmark TensorConversionBenchmark_M32_N128_cpu_dtype_onetorch.float64_dtype_twotorch.float64 short False 0.792
PyTorch FloatToHalfTensorConversionBenchmark FloatToHalfTensorConversionBenchmark_M8_N16_cpu short FALSE 5.0499
PyTorch FloatToHalfTensorConversionBenchmark FloatToHalfTensorConversionBenchmark_M8_N64_cpu short FALSE 5.3229
PyTorch FloatToHalfTensorConversionBenchmark FloatToHalfTensorConversionBenchmark_M8_N128_cpu short FALSE 5.4418
PyTorch FloatToHalfTensorConversionBenchmark FloatToHalfTensorConversionBenchmark_M16_N16_cpu short FALSE 5.0868
PyTorch FloatToHalfTensorConversionBenchmark FloatToHalfTensorConversionBenchmark_M16_N64_cpu short FALSE 5.4495
PyTorch FloatToHalfTensorConversionBenchmark FloatToHalfTensorConversionBenchmark_M16_N128_cpu short FALSE 5.5578
PyTorch FloatToHalfTensorConversionBenchmark FloatToHalfTensorConversionBenchmark_M32_N16_cpu short FALSE 5.2631
PyTorch FloatToHalfTensorConversionBenchmark FloatToHalfTensorConversionBenchmark_M32_N64_cpu short FALSE 5.5646
PyTorch FloatToHalfTensorConversionBenchmark FloatToHalfTensorConversionBenchmark_M32_N128_cpu short FALSE 5.7898
PyTorch HalfToFloatTensorConversionBenchmark HalfToFloatTensorConversionBenchmark_M8_N16_cpu short FALSE 5.0228
PyTorch HalfToFloatTensorConversionBenchmark HalfToFloatTensorConversionBenchmark_M8_N64_cpu short FALSE 5.3692
PyTorch HalfToFloatTensorConversionBenchmark HalfToFloatTensorConversionBenchmark_M8_N128_cpu short FALSE 5.4006
PyTorch HalfToFloatTensorConversionBenchmark HalfToFloatTensorConversionBenchmark_M16_N16_cpu short FALSE 5.1107
PyTorch HalfToFloatTensorConversionBenchmark HalfToFloatTensorConversionBenchmark_M16_N64_cpu short FALSE 5.4119
PyTorch HalfToFloatTensorConversionBenchmark HalfToFloatTensorConversionBenchmark_M16_N128_cpu short FALSE 5.5583
PyTorch HalfToFloatTensorConversionBenchmark HalfToFloatTensorConversionBenchmark_M32_N16_cpu short FALSE 5.3818
PyTorch HalfToFloatTensorConversionBenchmark HalfToFloatTensorConversionBenchmark_M32_N64_cpu short FALSE 5.5742
PyTorch HalfToFloatTensorConversionBenchmark HalfToFloatTensorConversionBenchmark_M32_N128_cpu short FALSE 6.8414
PyTorch relu relu_dims(3,4,5)_contigFALSE_inplaceFALSE_dtypetorch.quint8 short FALSE 9.4657
PyTorch relu relu_dims(3,4,5)_contigFALSE_inplaceFALSE_dtypetorch.qint8 short FALSE 9.4625
PyTorch relu relu_dims(3,4,5)_contigFALSE_inplaceFALSE_dtypetorch.qint32 short FALSE 9.4165

View File

@ -83,13 +83,10 @@ if __name__ == "__main__":
if args.outfile == "stdout":
outfile = sys.stdout
need_close = False
elif args.outfile == "stderr":
outfile = sys.stderr
need_close = False
else:
outfile = open(args.outfile, "a")
need_close = True
test_count = args.test_count
m = args.m
@ -150,5 +147,3 @@ if __name__ == "__main__":
time,
file=outfile,
)
if need_close:
outfile.close()

View File

@ -82,13 +82,10 @@ if __name__ == "__main__":
if args.outfile == "stdout":
outfile = sys.stdout
need_close = False
elif args.outfile == "stderr":
outfile = sys.stderr
need_close = False
else:
outfile = open(args.outfile, "a")
need_close = True
test_count = args.test_count
m = args.m
@ -135,5 +132,3 @@ if __name__ == "__main__":
time_csr,
file=outfile,
)
if need_close:
outfile.close()

View File

@ -179,13 +179,10 @@ if __name__ == "__main__":
if args.outfile == "stdout":
outfile = sys.stdout
need_close = False
elif args.outfile == "stderr":
outfile = sys.stderr
need_close = False
else:
outfile = open(args.outfile, "a")
need_close = True
ops = args.ops.split(",")
@ -437,5 +434,3 @@ if __name__ == "__main__":
if op not in {"bsr_scatter_mm6", "bsr_dense_mm_with_meta"}:
# Break on operations that do not consume parameters
break
if need_close:
outfile.close()

View File

@ -1,111 +0,0 @@
#pragma once
#include <c10/metal/common.h>
namespace c10 {
namespace metal {
C10_METAL_CONSTEXPR unsigned error_message_count = 30;
struct ErrorMessage {
char file[128];
char func[128];
char message[250];
unsigned int line;
};
struct ErrorMessages {
#ifdef __METAL__
::metal::atomic<unsigned int> count;
#else
unsigned int count;
#endif
ErrorMessage msg[error_message_count];
};
#ifdef __METAL__
namespace detail {
static uint strncpy(device char* dst, constant const char* src, unsigned len) {
uint i = 0;
while (src[i] != 0 && i < len - 1) {
dst[i] = src[i];
i++;
}
dst[i] = 0;
return i;
}
inline uint print_arg(
device char* ptr,
unsigned len,
constant const char* arg) {
return strncpy(ptr, arg, len);
}
// Returns the length of the number when written as a base-10 string
static inline uint base10_length(long num) {
uint rc = 1;
if (num < 0) {
num = -num;
rc += 1;
}
while (num > 9) {
num /= 10;
rc++;
}
return rc;
}
// Converts signed integer to string
inline uint print_arg(device char* ptr, unsigned len, long arg) {
const auto arg_len = base10_length(arg);
if (arg_len >= len)
return 0;
if (arg < 0) {
ptr[0] = '-';
arg = -arg;
}
uint idx = 1;
do {
ptr[arg_len - idx] = '0' + (arg % 10);
arg /= 10;
idx++;
} while (arg > 0);
ptr[arg_len] = 0;
return arg_len;
}
template <typename T>
inline void print_args(device char* ptr, unsigned len, T arg) {
print_arg(ptr, len, arg);
}
template <typename T, typename... Args>
inline void print_args(device char* ptr, unsigned len, T arg, Args... args) {
const auto rc = print_arg(ptr, len, arg);
print_args(ptr + rc, len - rc, args...);
}
} // namespace detail
template <typename... Args>
static void report_error(
device ErrorMessages* msgs,
constant const char* file,
int line,
constant const char* func,
Args... args) {
const auto idx =
atomic_fetch_add_explicit(&msgs->count, 1, ::metal::memory_order_relaxed);
if (idx >= error_message_count) {
return;
}
device auto* msg = &msgs->msg[idx];
detail::strncpy(msg->file, file, 128);
detail::strncpy(msg->func, func, 128);
detail::print_args(msg->message, 250, args...);
msg->line = line;
}
#define TORCH_REPORT_ERROR(buf, ...) \
::c10::metal::report_error(buf, __FILE__, __LINE__, __func__, __VA_ARGS__)
#endif
} // namespace metal
} // namespace c10
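
The header above (deleted by this diff) defines a fixed-size buffer of ErrorMessage slots that Metal kernels fill through TORCH_REPORT_ERROR. For context, here is a minimal host-side sketch of how such a buffer could be decoded once a kernel has run; the struct mirror, the dump_kernel_errors name, and the printing layout are illustrative assumptions, not code from this PR.

// Host-side sketch (assumption, not part of the diff): decode the error buffer
// that Metal kernels fill via TORCH_REPORT_ERROR. The layout mirrors the
// (removed) c10/metal/error.h definitions above.
#include <algorithm>
#include <cstdio>

constexpr unsigned error_message_count = 30;

struct ErrorMessage {
  char file[128];
  char func[128];
  char message[250];
  unsigned int line;
};

struct ErrorMessages {
  unsigned int count;  // atomically incremented on the GPU side
  ErrorMessage msg[error_message_count];
};

// `buf` would typically come from the contents pointer of the shared Metal
// buffer after the kernel finished.
void dump_kernel_errors(const ErrorMessages* buf) {
  const unsigned n = std::min(buf->count, error_message_count);
  for (unsigned i = 0; i < n; ++i) {
    const ErrorMessage& m = buf->msg[i];
    std::printf("%s:%u (%s): %s\n", m.file, m.line, m.func, m.message);
  }
  if (buf->count > error_message_count) {
    std::printf("... %u additional errors dropped\n",
                buf->count - error_message_count);
  }
}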

View File

@ -1,8 +1,9 @@
#include <c10/test/util/Macros.h>
#include <c10/util/Metaprogramming.h>
#include <gtest/gtest.h>
#include <torch/headeronly/util/Metaprogramming.h>
#include <cstdlib>
using namespace torch::headeronly::guts;
using namespace c10::guts;
// NOLINTBEGIN(modernize*, cppcoreguidelines-special-member-functions)
namespace {
@ -64,15 +65,6 @@ static_assert(
typename make_function_traits_t<void, typelist::typelist<int, float>>::
func_type>::value,
"");
struct Functor final {
std::string operator()(int64_t a, float b) const;
};
static_assert(
std::is_same<
std::string(int64_t, float),
typename infer_function_traits_t<Functor>::func_type>::value,
"");
} // namespace test_function_traits
struct MovableOnly {

View File

@ -1,8 +1,8 @@
#include <c10/util/TypeList.h>
#include <gtest/gtest.h>
#include <torch/headeronly/util/TypeList.h>
#include <memory>
using namespace torch::headeronly::guts::typelist;
using namespace c10::guts::typelist;
// NOLINTBEGIN(modernize-unary-static-assert)
namespace test_size {
class MyClass {};

View File

@ -1,7 +1,7 @@
#include <c10/util/TypeTraits.h>
#include <gtest/gtest.h>
#include <torch/headeronly/util/TypeTraits.h>
using namespace torch::headeronly::guts;
using namespace c10::guts;
// NOLINTBEGIN(modernize-unary-static-assert)
namespace {

View File

@ -0,0 +1 @@
#include <c10/util/Metaprogramming.h>

View File

@ -1 +1,224 @@
#include <torch/headeronly/util/Metaprogramming.h>
#pragma once
#include <c10/util/TypeList.h>
#include <type_traits>
namespace c10::guts {
/**
* Access information about result type or arguments from a function type.
* Example:
* using A = function_traits<int (float, double)>::return_type // A == int
* using A = function_traits<int (float, double)>::parameter_types::tuple_type
* // A == tuple<float, double>
*/
template <class Func>
struct function_traits {
static_assert(
!std::is_same_v<Func, Func>,
"In function_traits<Func>, Func must be a plain function type.");
};
template <class Result, class... Args>
struct function_traits<Result(Args...)> {
using func_type = Result(Args...);
using return_type = Result;
using parameter_types = typelist::typelist<Args...>;
static constexpr auto number_of_parameters = sizeof...(Args);
};
/**
* infer_function_traits: creates a `function_traits` type for a simple
* function (pointer) or functor (lambda/struct). Currently does not support
* class methods.
*/
template <typename Functor>
struct infer_function_traits {
using type = function_traits<
c10::guts::detail::strip_class_t<decltype(&Functor::operator())>>;
};
template <typename Result, typename... Args>
struct infer_function_traits<Result (*)(Args...)> {
using type = function_traits<Result(Args...)>;
};
template <typename Result, typename... Args>
struct infer_function_traits<Result(Args...)> {
using type = function_traits<Result(Args...)>;
};
template <typename T>
using infer_function_traits_t = typename infer_function_traits<T>::type;
/**
* make_function_traits: creates a `function_traits` type given a Return type
* and a typelist of Argument types
*
* Example:
* bool f(int, int);
*
* infer_function_traits_t<f> == make_function_traits_t<bool,
* typelist::typelist<int, int>>
*/
template <typename Result, typename ArgList>
struct make_function_traits {
static_assert(
false_t<ArgList>::value,
"In guts::make_function_traits<Result, TypeList>, the ArgList argument must be typelist<...>.");
};
template <typename Result, typename... Args>
struct make_function_traits<Result, typelist::typelist<Args...>> {
using type = function_traits<Result(Args...)>;
};
template <typename Result, typename ArgList>
using make_function_traits_t =
typename make_function_traits<Result, ArgList>::type;
/**
* make_offset_index_sequence<Start, N>
* Like make_index_sequence<N>, but starting from Start instead of 0.
*
* Example:
* make_offset_index_sequence<10, 3> == std::index_sequence<10, 11, 12>
*/
template <size_t Start, size_t N, size_t... Is>
struct make_offset_index_sequence_impl
: make_offset_index_sequence_impl<Start, N - 1, Start + N - 1, Is...> {
static_assert(
static_cast<int>(Start) >= 0,
"make_offset_index_sequence: Start < 0");
static_assert(static_cast<int>(N) >= 0, "make_offset_index_sequence: N < 0");
};
template <size_t Start, size_t... Is>
struct make_offset_index_sequence_impl<Start, 0, Is...> {
typedef std::index_sequence<Is...> type;
};
template <size_t Start, size_t N>
using make_offset_index_sequence =
typename make_offset_index_sequence_impl<Start, N>::type;
/**
* Use tuple_elements to extract a position-indexed subset of elements
* from the argument tuple into a result tuple.
*
* Example:
* std::tuple<int, const char*, double> t = std::make_tuple(0, "HEY", 2.0);
* std::tuple<int, double> result = tuple_elements(t, std::index_sequence<0,
* 2>());
*/
template <class Tuple, size_t... Is>
constexpr auto tuple_elements(Tuple t, std::index_sequence<Is...> /*unused*/) {
return std::tuple<std::tuple_element_t<Is, Tuple>...>(std::get<Is>(t)...);
}
/**
* Use tuple_take to extract the first or last n elements from the argument
* tuple into a result tuple.
*
* Example:
* std::tuple<int, const char*, double> t = std::make_tuple(0, "HEY", 2.0);
* std::tuple<int, const char*> first_two = tuple_take<decltype(t), 2>(t);
* std::tuple<const char*, double> last_two = tuple_take<decltype(t), -2>(t);
*/
template <class Tuple, int N, class Enable = void>
struct TupleTake {};
template <class Tuple, int N>
struct TupleTake<Tuple, N, std::enable_if_t<N >= 0, void>> {
static auto call(Tuple t) {
constexpr size_t size = std::tuple_size<Tuple>();
static_assert(N <= size, "tuple_take: N > size");
return tuple_elements(t, std::make_index_sequence<N>{});
}
};
template <class Tuple, int N>
struct TupleTake < Tuple,
N, std::enable_if_t<N<0, void>> {
static auto call(Tuple t) {
constexpr size_t size = std::tuple_size<Tuple>();
static_assert(-N <= size, "tuple_take: -N > size");
return tuple_elements(t, make_offset_index_sequence<size + N, -N>{});
}
};
template <class Tuple, int N>
auto tuple_take(Tuple t) {
return TupleTake<Tuple, N>::call(t);
}
/**
* Use tuple_slice to extract a contiguous subtuple from the argument.
*
* Example:
* std::tuple<int, const char*, double, bool> t = std::make_tuple(0,
* "HEY", 2.0, false); std::tuple<int, const char*> middle_two =
* tuple_slice<decltype(t), 1, 2>(t);
*/
template <class Tuple, size_t Start, size_t N>
constexpr auto tuple_slice(Tuple t) {
constexpr size_t size = std::tuple_size<Tuple>();
static_assert(Start + N <= size, "tuple_slice: Start + N > size");
return tuple_elements(t, make_offset_index_sequence<Start, N>{});
}
/**
* Use tuple_map to run a mapping function over a tuple to get a new tuple.
*
* Example 1:
* auto result = tuple_map(std::tuple<int32_t, int32_t, int32_t>(3, 4, 5), []
* (int32_t a) -> int16_t {return a+1;});
* // result == std::tuple<int16_t, int16_t, int16_t>(4, 5, 6)
*
* Example 2:
* struct Mapper {
* std::string operator()(int32_t a) const {
* return std::to_string(a);
* }
* int64_t operator()(const std::string& a) const {
* return atoi(a.c_str());
* }
* };
* auto result = tuple_map(std::tuple<int32_t, std::string>(3, "4"),
* Mapper());
* // result == std::tuple<std::string, int64_t>("3", 4)
*
* Example 3:
* struct A final {
* int32_t func() {
* return 5;
* }
* };
* struct B final {
* std::string func() {
* return "5";
* }
* };
* auto result = tuple_map(std::make_tuple(A(), B()), [] (auto a) { return
* a.func(); });
* // result == std::tuple<int32_t, std::string>(5, "5");
*/
namespace detail {
template <class Mapper, class... Args, size_t... Indices>
auto tuple_map(
// NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
std::tuple<Args...>&& tuple,
const Mapper& mapper,
std::index_sequence<Indices...> /*unused*/) {
return std::tuple<decltype(mapper(std::forward<Args>(std::get<Indices>(
tuple))))...>(mapper(std::forward<Args>(std::get<Indices>(tuple)))...);
}
} // namespace detail
template <class Mapper, class... Args>
auto tuple_map(std::tuple<Args...>&& tuple, const Mapper& mapper) {
return detail::tuple_map(
std::move(tuple), mapper, std::index_sequence_for<Args...>());
}
} // namespace c10::guts
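
Since this file now carries the full c10::guts metaprogramming utilities rather than a one-line wrapper, a short illustrative sketch of how they compose may help when reviewing. It uses only entities defined in the file above (function_traits, infer_function_traits_t, make_function_traits_t, tuple_take, tuple_slice, tuple_map); the snippet itself is not part of the diff.

// Illustrative sketch, not part of the PR.
#include <c10/util/Metaprogramming.h>
#include <tuple>
#include <type_traits>

using namespace c10::guts;

// function_traits and friends are pure compile-time queries, so
// static_asserts are enough to exercise them.
double scale(int, float);

static_assert(std::is_same_v<
    double,
    function_traits<double(int, float)>::return_type>);
static_assert(std::is_same_v<
    double(int, float),
    infer_function_traits_t<decltype(&scale)>::func_type>);
static_assert(std::is_same_v<
    function_traits<bool(int, int)>,
    make_function_traits_t<bool, typelist::typelist<int, int>>>);

int main() {
  // tuple_take / tuple_slice / tuple_map operate on ordinary std::tuples.
  std::tuple<int, const char*, double> t{0, "HEY", 2.0};
  auto first_two = tuple_take<decltype(t), 2>(t);    // std::tuple<int, const char*>
  auto middle = tuple_slice<decltype(t), 1, 2>(t);   // std::tuple<const char*, double>
  auto mapped = tuple_map(std::make_tuple(1, 2, 3),
                          [](int x) { return x + 1; });  // (2, 3, 4)
  (void)first_two;
  (void)middle;
  (void)mapped;
  return 0;
}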

View File

@ -1 +1,515 @@
#include <torch/headeronly/util/TypeList.h>
#pragma once
#include <c10/util/TypeTraits.h>
#include <algorithm>
#include <cstddef>
#include <tuple>
#include <type_traits>
#include <utility>
namespace c10::guts {
template <class... T>
struct false_t : std::false_type {};
template <template <class> class... T>
struct false_higher_t : std::false_type {};
namespace typelist {
/**
* Type holding a list of types for compile time type computations
*/
template <class... Items>
struct typelist final {
public:
typelist() = delete; // not for instantiation
};
/**
* Returns the number of types in a typelist
* Example:
* 3 == size<typelist<int, int, double>>::value
*/
template <class TypeList>
struct size final {
static_assert(
false_t<TypeList>::value,
"In typelist::size<T>, T must be typelist<...>.");
};
template <class... Types>
struct size<typelist<Types...>> final {
static constexpr size_t value = sizeof...(Types);
};
/**
* Transforms a list of types into a tuple holding these types.
* Example:
* std::tuple<int, string> == to_tuple_t<typelist<int, string>>
*/
template <class TypeList>
struct to_tuple final {
static_assert(
false_t<TypeList>::value,
"In typelist::to_tuple<T>, T must be typelist<...>.");
};
template <class... Types>
struct to_tuple<typelist<Types...>> final {
using type = std::tuple<Types...>;
};
template <class TypeList>
using to_tuple_t = typename to_tuple<TypeList>::type;
/**
* Creates a typelist containing the types of a given tuple.
* Example:
* typelist<int, string> == from_tuple_t<std::tuple<int, string>>
*/
template <class Tuple>
struct from_tuple final {
static_assert(
false_t<Tuple>::value,
"In typelist::from_tuple<T>, T must be std::tuple<...>.");
};
template <class... Types>
struct from_tuple<std::tuple<Types...>> final {
using type = typelist<Types...>;
};
template <class Tuple>
using from_tuple_t = typename from_tuple<Tuple>::type;
/**
* Concatenates multiple type lists.
* Example:
* typelist<int, string, int> == concat_t<typelist<int, string>,
* typelist<int>>
*/
template <class... TypeLists>
struct concat final {
static_assert(
false_t<TypeLists...>::value,
"In typelist::concat<T1, ...>, the T arguments each must be typelist<...>.");
};
template <class... Head1Types, class... Head2Types, class... TailLists>
struct concat<typelist<Head1Types...>, typelist<Head2Types...>, TailLists...>
final {
using type =
typename concat<typelist<Head1Types..., Head2Types...>, TailLists...>::
type;
};
template <class... HeadTypes>
struct concat<typelist<HeadTypes...>> final {
using type = typelist<HeadTypes...>;
};
template <>
struct concat<> final {
using type = typelist<>;
};
template <class... TypeLists>
using concat_t = typename concat<TypeLists...>::type;
/**
* Filters the types in a type list by a type trait.
* Examples:
* typelist<int&, const string&&> == filter_t<std::is_reference,
* typelist<void, string, int&, bool, const string&&, int>>
*/
template <template <class> class Condition, class TypeList>
struct filter final {
static_assert(
false_t<TypeList>::value,
"In typelist::filter<Condition, TypeList>, the TypeList argument must be typelist<...>.");
};
template <template <class> class Condition, class Head, class... Tail>
struct filter<Condition, typelist<Head, Tail...>> final {
static_assert(
is_type_condition<Condition>::value,
"In typelist::filter<Condition, TypeList>, the Condition argument must be a condition type trait, i.e. have a static constexpr bool ::value member.");
using type = std::conditional_t<
Condition<Head>::value,
concat_t<
typelist<Head>,
typename filter<Condition, typelist<Tail...>>::type>,
typename filter<Condition, typelist<Tail...>>::type>;
};
template <template <class> class Condition>
struct filter<Condition, typelist<>> final {
static_assert(
is_type_condition<Condition>::value,
"In typelist::filter<Condition, TypeList>, the Condition argument must be a condition type trait, i.e. have a static constexpr bool ::value member.");
using type = typelist<>;
};
template <template <class> class Condition, class TypeList>
using filter_t = typename filter<Condition, TypeList>::type;
/**
* Counts how many types in the list fulfill a type trait
* Examples:
* 2 == count_if<std::is_reference, typelist<void, string, int&, bool, const
* string&&, int>>
*/
template <template <class> class Condition, class TypeList>
struct count_if final {
static_assert(
is_type_condition<Condition>::value,
"In typelist::count_if<Condition, TypeList>, the Condition argument must be a condition type trait, i.e. have a static constexpr bool ::value member.");
static_assert(
is_instantiation_of<typelist, TypeList>::value,
"In typelist::count_if<Condition, TypeList>, the TypeList argument must be typelist<...>.");
// TODO Direct implementation might be faster
static constexpr size_t value = size<filter_t<Condition, TypeList>>::value;
};
/**
* Checks if a typelist contains a certain type.
* Examples:
* contains<typelist<int, string>, string> == true_type
* contains<typelist<int, string>, double> == false_type
*/
namespace detail {
template <class TypeList, class Type, class Enable = void>
struct contains {};
template <class Type>
struct contains<typelist<>, Type, void> : std::false_type {};
template <class Type, class Head, class... Tail>
struct contains<
typelist<Head, Tail...>,
Type,
std::enable_if_t<std::is_same_v<Head, Type>>> : std::true_type {};
template <class Type, class Head, class... Tail>
struct contains<
typelist<Head, Tail...>,
Type,
std::enable_if_t<!std::is_same_v<Head, Type>>>
: contains<typelist<Tail...>, Type> {};
} // namespace detail
template <class TypeList, class Type>
using contains = typename detail::contains<TypeList, Type>::type;
/**
* Returns true iff the type trait is true for all types in the type list
* Examples:
* true == all<std::is_reference, typelist<int&, const float&&, const
* MyClass&>>::value false == all<std::is_reference, typelist<int&, const
* float&&, MyClass>>::value
*/
template <template <class> class Condition, class TypeList>
struct all {
static_assert(
false_t<TypeList>::value,
"In typelist::all<Condition, TypeList>, the TypeList argument must be typelist<...>.");
};
template <template <class> class Condition, class... Types>
struct all<Condition, typelist<Types...>>
: std::conjunction<Condition<Types>...> {
static_assert(
is_type_condition<Condition>::value,
"In typelist::all<Condition, TypeList>, the Condition argument must be a condition type trait, i.e. have a static constexpr bool ::value member.");
};
/**
* Returns true iff the type trait is true for any type in the type list
* Examples:
* true == true_for_any_type<std::is_reference, typelist<int, const
* float&&, const MyClass>>::value false ==
* true_for_any_type<std::is_reference, typelist<int, const float,
* MyClass>>::value
*/
template <template <class> class Condition, class TypeList>
struct true_for_any_type final {
static_assert(
false_t<TypeList>::value,
"In typelist::true_for_any_type<Condition, TypeList>, the TypeList argument must be typelist<...>.");
};
template <template <class> class Condition, class... Types>
struct true_for_any_type<Condition, typelist<Types...>> final
: std::disjunction<Condition<Types>...> {
static_assert(
is_type_condition<Condition>::value,
"In typelist::true_for_any_type<Condition, TypeList>, the Condition argument must be a condition type trait, i.e. have a static constexpr bool ::value member.");
};
/**
* Maps types of a type list using a type trait
* Example:
* typelist<int&, double&, string&> == map_t<std::add_lvalue_reference_t,
* typelist<int, double, string>>
*/
template <template <class> class Mapper, class TypeList>
struct map final {
static_assert(
false_t<TypeList>::value,
"In typelist::map<Mapper, TypeList>, the TypeList argument must be typelist<...>.");
};
template <template <class> class Mapper, class... Types>
struct map<Mapper, typelist<Types...>> final {
using type = typelist<Mapper<Types>...>;
};
template <template <class> class Mapper, class TypeList>
using map_t = typename map<Mapper, TypeList>::type;
/**
* Returns the first element of a type list.
* Example:
* int == head_t<typelist<int, string>>
*/
template <class TypeList>
struct head final {
static_assert(
false_t<TypeList>::value,
"In typelist::head<T>, the T argument must be typelist<...>.");
};
template <class Head, class... Tail>
struct head<typelist<Head, Tail...>> final {
using type = Head;
};
template <class TypeList>
using head_t = typename head<TypeList>::type;
/**
* Returns the first element of a type list, or the specified default if the
* type list is empty. Example: int == head_t<bool, typelist<int, string>>
* bool == head_t<bool, typelist<>>
*/
template <class Default, class TypeList>
struct head_with_default final {
using type = Default;
};
template <class Default, class Head, class... Tail>
struct head_with_default<Default, typelist<Head, Tail...>> final {
using type = Head;
};
template <class Default, class TypeList>
using head_with_default_t = typename head_with_default<Default, TypeList>::type;
/**
* Returns the N-th element of a type list.
* Example:
* int == element_t<1, typelist<float, int, char>>
*/
/// Base template.
template <size_t Index, class TypeList>
struct element final {
static_assert(
false_t<TypeList>::value,
"In typelist::element<T>, the T argument must be typelist<...>.");
};
/// Successful case, we have reached the zero index and can "return" the head
/// type.
template <class Head, class... Tail>
struct element<0, typelist<Head, Tail...>> {
using type = Head;
};
/// Error case, we have an index but ran out of types! It will only be selected
/// if `Ts...` is actually empty!
template <size_t Index, class... Ts>
struct element<Index, typelist<Ts...>> {
static_assert(
Index < sizeof...(Ts),
"Index is out of bounds in typelist::element");
};
/// Shave off types until we hit the <0, Head, Tail...> or <Index> case.
template <size_t Index, class Head, class... Tail>
struct element<Index, typelist<Head, Tail...>>
: element<Index - 1, typelist<Tail...>> {};
/// Convenience alias.
template <size_t Index, class TypeList>
using element_t = typename element<Index, TypeList>::type;
/**
* Returns the last element of a type list.
* Example:
* int == last_t<typelist<int, string>>
*/
template <class TypeList>
struct last final {
static_assert(
false_t<TypeList>::value,
"In typelist::last<T>, the T argument must be typelist<...>.");
};
template <class Head, class... Tail>
struct last<typelist<Head, Tail...>> final {
using type = typename last<typelist<Tail...>>::type;
};
template <class Head>
struct last<typelist<Head>> final {
using type = Head;
};
template <class TypeList>
using last_t = typename last<TypeList>::type;
static_assert(std::is_same_v<int, last_t<typelist<double, float, int>>>);
/**
* Take/drop a number of arguments from a typelist.
* Example:
* typelist<int, string> == take_t<typelist<int, string, bool>, 2>
* typelist<bool> == drop_t<typelist<int, string, bool>, 2>
*/
namespace detail {
template <class TypeList, size_t offset, class IndexSequence>
struct take_elements final {};
template <class TypeList, size_t offset, size_t... Indices>
struct take_elements<TypeList, offset, std::index_sequence<Indices...>> final {
using type = typelist<typename element<offset + Indices, TypeList>::type...>;
};
} // namespace detail
template <class TypeList, size_t num>
struct take final {
static_assert(
is_instantiation_of<typelist, TypeList>::value,
"In typelist::take<T, num>, the T argument must be typelist<...>.");
static_assert(
num <= size<TypeList>::value,
"Tried to typelist::take more elements than there are in the list");
using type = typename detail::
take_elements<TypeList, 0, std::make_index_sequence<num>>::type;
};
template <class TypeList, size_t num>
using take_t = typename take<TypeList, num>::type;
template <class TypeList, size_t num>
struct drop final {
static_assert(
is_instantiation_of<typelist, TypeList>::value,
"In typelist::drop<T, num>, the T argument must be typelist<...>.");
static_assert(
num <= size<TypeList>::value,
"Tried to typelist::drop more elements than there are in the list");
using type = typename detail::take_elements<
TypeList,
num,
std::make_index_sequence<size<TypeList>::value - num>>::type;
};
template <class TypeList, size_t num>
using drop_t = typename drop<TypeList, num>::type;
/**
* Like drop, but returns an empty list rather than an assertion error if `num`
* is larger than the size of the TypeList.
* Example:
* typelist<> == drop_if_nonempty_t<typelist<string, bool>, 2>
* typelist<> == drop_if_nonempty_t<typelist<int, string, bool>, 3>
*/
template <class TypeList, size_t num>
struct drop_if_nonempty final {
static_assert(
is_instantiation_of<typelist, TypeList>::value,
"In typelist::drop<T, num>, the T argument must be typelist<...>.");
using type = typename detail::take_elements<
TypeList,
std::min(num, size<TypeList>::value),
std::make_index_sequence<
size<TypeList>::value - std::min(num, size<TypeList>::value)>>::type;
};
template <class TypeList, size_t num>
using drop_if_nonempty_t = typename drop_if_nonempty<TypeList, num>::type;
/**
* Reverses a typelist.
* Example:
* typelist<int, string> == reverse_t<typelist<string, int>>
*/
template <class TypeList>
struct reverse final {
static_assert(
false_t<TypeList>::value,
"In typelist::reverse<T>, the T argument must be typelist<...>.");
};
template <class Head, class... Tail>
struct reverse<typelist<Head, Tail...>> final {
using type =
concat_t<typename reverse<typelist<Tail...>>::type, typelist<Head>>;
};
template <>
struct reverse<typelist<>> final {
using type = typelist<>;
};
template <class TypeList>
using reverse_t = typename reverse<TypeList>::type;
/**
* Find the index of the first type in a typelist fulfilling a type trait
* condition. Example:
*
* 2 == find_if<typelist<char, int, char&, int&>, std::is_reference>::value
*/
template <class TypeList, template <class> class Condition, class Enable = void>
struct find_if final {
static_assert(
false_t<TypeList>::value,
"In typelist::find_if<TypeList, Condition>, the TypeList argument must be typelist<...>.");
};
template <template <class> class Condition>
struct find_if<typelist<>, Condition, void> final {
static_assert(
false_higher_t<Condition>::value,
"In typelist::find_if<Type/List, Condition>, didn't find any type fulfilling the Condition.");
};
template <class Head, class... Tail, template <class> class Condition>
struct find_if<
typelist<Head, Tail...>,
Condition,
std::enable_if_t<Condition<Head>::value>>
final {
static constexpr size_t value = 0;
};
template <class Head, class... Tail, template <class> class Condition>
struct find_if<
typelist<Head, Tail...>,
Condition,
std::enable_if_t<!Condition<Head>::value>>
final {
static constexpr size_t value =
1 + find_if<typelist<Tail...>, Condition>::value;
};
/**
* Maps a list of types into a list of values.
* Examples:
* // Example 1
* auto sizes =
* map_types_to_values<typelist<int64_t, bool, uint32_t>>(
* [] (auto t) { return sizeof(decltype(t)::type); }
* );
* // sizes == std::tuple<size_t, size_t, size_t>{8, 1, 4}
*
* // Example 2
* auto shared_ptrs =
* map_types_to_values<typelist<int, double>>(
* [] (auto t) { return make_shared<typename decltype(t)::type>(); }
* );
* // shared_ptrs == std::tuple<shared_ptr<int>, shared_ptr<double>>()
*/
namespace detail {
template <class T>
struct type_ final {
using type = T;
};
template <class TypeList>
struct map_types_to_values final {
static_assert(
false_t<TypeList>::value,
"In typelist::map_types_to_values<T>, the T argument must be typelist<...>.");
};
template <class... Types>
struct map_types_to_values<typelist<Types...>> final {
template <class Func>
static auto call(Func&& func) {
return std::tuple{std::forward<Func>(func)(type_<Types>())...};
}
};
} // namespace detail
template <class TypeList, class Func>
auto map_types_to_values(Func&& func) {
return detail::map_types_to_values<TypeList>::call(std::forward<Func>(func));
}
} // namespace typelist
} // namespace c10::guts
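
As with the previous file, a brief sketch of the typelist utilities restored above; it exercises only names defined in this header (size, to_tuple_t, filter_t, contains, element_t, drop_t, map_types_to_values) and is illustrative, not part of the diff.

// Illustrative sketch, not part of the PR.
#include <c10/util/TypeList.h>
#include <cstddef>
#include <cstdint>
#include <tuple>
#include <type_traits>

using namespace c10::guts::typelist;

// A small typelist to exercise the compile-time queries defined above.
using L = typelist<int, float&, double>;

static_assert(size<L>::value == 3);
static_assert(std::is_same_v<std::tuple<int, float&, double>, to_tuple_t<L>>);
static_assert(std::is_same_v<typelist<float&>, filter_t<std::is_reference, L>>);
static_assert(contains<L, double>::value);
static_assert(std::is_same_v<float&, element_t<1, L>>);
static_assert(std::is_same_v<typelist<double>, drop_t<L, 2>>);

int main() {
  // map_types_to_values turns a list of types into a tuple of runtime values.
  auto sizes = map_types_to_values<typelist<int64_t, bool>>(
      [](auto t) { return sizeof(typename decltype(t)::type); });
  static_assert(
      std::is_same_v<std::tuple<std::size_t, std::size_t>, decltype(sizes)>);
  (void)sizes;
  return 0;
}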

View File

@ -1 +1,151 @@
#include <torch/headeronly/util/TypeTraits.h>
#pragma once
#include <functional>
#include <type_traits>
namespace c10::guts {
/**
* is_equality_comparable<T> is true_type iff the equality operator is defined
* for T.
*/
template <class T, class Enable = void>
struct is_equality_comparable : std::false_type {};
template <class T>
struct is_equality_comparable<
T,
std::void_t<decltype(std::declval<T&>() == std::declval<T&>())>>
: std::true_type {};
template <class T>
using is_equality_comparable_t = typename is_equality_comparable<T>::type;
/**
* is_hashable<T> is true_type iff std::hash is defined for T
*/
template <class T, class Enable = void>
struct is_hashable : std::false_type {};
template <class T>
struct is_hashable<T, std::void_t<decltype(std::hash<T>()(std::declval<T&>()))>>
: std::true_type {};
template <class T>
using is_hashable_t = typename is_hashable<T>::type;
/**
* is_function_type<T> is true_type iff T is a plain function type (i.e.
* "Result(Args...)")
*/
template <class T>
struct is_function_type : std::false_type {};
template <class Result, class... Args>
struct is_function_type<Result(Args...)> : std::true_type {};
template <class T>
using is_function_type_t = typename is_function_type<T>::type;
/**
* is_instantiation_of<T, I> is true_type iff I is a template instantiation of T
* (e.g. vector<int> is an instantiation of vector) Example:
* is_instantiation_of_t<vector, vector<int>> // true
* is_instantiation_of_t<pair, pair<int, string>> // true
* is_instantiation_of_t<vector, pair<int, string>> // false
*/
template <template <class...> class Template, class T>
struct is_instantiation_of : std::false_type {};
template <template <class...> class Template, class... Args>
struct is_instantiation_of<Template, Template<Args...>> : std::true_type {};
template <template <class...> class Template, class T>
using is_instantiation_of_t = typename is_instantiation_of<Template, T>::type;
namespace detail {
/**
* strip_class: helper to remove the class type from pointers to `operator()`.
*/
template <typename T>
struct strip_class {};
template <typename Class, typename Result, typename... Args>
struct strip_class<Result (Class::*)(Args...)> {
using type = Result(Args...);
};
template <typename Class, typename Result, typename... Args>
struct strip_class<Result (Class::*)(Args...) const> {
using type = Result(Args...);
};
template <typename T>
using strip_class_t = typename strip_class<T>::type;
} // namespace detail
/**
* Evaluates to true_type, iff the given class is a Functor
* (i.e. has a call operator with some set of arguments)
*/
template <class Functor, class Enable = void>
struct is_functor : std::false_type {};
template <class Functor>
struct is_functor<
Functor,
std::enable_if_t<is_function_type<
detail::strip_class_t<decltype(&Functor::operator())>>::value>>
: std::true_type {};
/**
* lambda_is_stateless<T> is true iff the lambda type T is stateless
* (i.e. does not have a closure).
* Example:
* auto stateless_lambda = [] (int a) {return a;};
* lambda_is_stateless<decltype(stateless_lambda)> // true
* auto stateful_lambda = [&] (int a) {return a;};
* lambda_is_stateless<decltype(stateful_lambda)> // false
*/
namespace detail {
template <class LambdaType, class FuncType>
struct is_stateless_lambda__ final {
static_assert(
!std::is_same_v<LambdaType, LambdaType>,
"Base case shouldn't be hit");
};
// implementation idea: According to the C++ standard, stateless lambdas are
// convertible to function pointers
template <class LambdaType, class C, class Result, class... Args>
struct is_stateless_lambda__<LambdaType, Result (C::*)(Args...) const>
: std::is_convertible<LambdaType, Result (*)(Args...)> {};
template <class LambdaType, class C, class Result, class... Args>
struct is_stateless_lambda__<LambdaType, Result (C::*)(Args...)>
: std::is_convertible<LambdaType, Result (*)(Args...)> {};
// case where LambdaType is not even a functor
template <class LambdaType, class Enable = void>
struct is_stateless_lambda_ final : std::false_type {};
// case where LambdaType is a functor
template <class LambdaType>
struct is_stateless_lambda_<
LambdaType,
std::enable_if_t<is_functor<LambdaType>::value>>
: is_stateless_lambda__<LambdaType, decltype(&LambdaType::operator())> {};
} // namespace detail
template <class T>
using is_stateless_lambda = detail::is_stateless_lambda_<std::decay_t<T>>;
/**
* is_type_condition<C> is true_type iff C<...> is a type trait representing a
* condition (i.e. has a constexpr static bool ::value member) Example:
* is_type_condition<std::is_reference> // true
*/
template <template <class> class C, class Enable = void>
struct is_type_condition : std::false_type {};
template <template <class> class C>
struct is_type_condition<
C,
std::enable_if_t<
std::is_same_v<bool, std::remove_cv_t<decltype(C<int>::value)>>>>
: std::true_type {};
/**
* is_fundamental<T> is true_type iff the lambda type T is a fundamental type
* (that is, arithmetic type, void, or nullptr_t). Example: is_fundamental<int>
* // true We define it here to resolve a MSVC bug. See
* https://github.com/pytorch/pytorch/issues/30932 for details.
*/
template <class T>
struct is_fundamental : std::is_fundamental<T> {};
} // namespace c10::guts
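
And a corresponding sketch for the type traits restored above (is_equality_comparable, is_hashable, is_function_type, is_instantiation_of, is_functor, is_stateless_lambda); again illustrative only, not part of the diff.

// Illustrative sketch, not part of the PR.
#include <c10/util/TypeTraits.h>
#include <string>
#include <vector>

using namespace c10::guts;

// A plain functor with a non-template call operator.
struct Callable {
  int operator()(float) const { return 0; }
};

static_assert(is_equality_comparable<int>::value);
static_assert(is_hashable<std::string>::value);
static_assert(is_function_type<int(float)>::value);
static_assert(!is_function_type<int (*)(float)>::value);
static_assert(is_instantiation_of<std::vector, std::vector<int>>::value);
static_assert(is_functor<Callable>::value);

int main() {
  // Stateless lambdas are convertible to function pointers; stateful ones are not.
  auto stateless = [](int a) { return a; };
  int captured = 1;
  auto stateful = [captured](int a) { return a + captured; };
  static_assert(is_stateless_lambda<decltype(stateless)>::value);
  static_assert(!is_stateless_lambda<decltype(stateful)>::value);
  (void)stateless;
  (void)stateful;
  return 0;
}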

View File

@ -24,7 +24,6 @@ set(C10_XPU_HEADERS
XPUCachingAllocator.h
XPUDeviceProp.h
XPUException.h
XPUEvent.h
XPUFunctions.h
XPUMacros.h
XPUStream.h

View File

@ -1,178 +0,0 @@
#pragma once
#include <c10/xpu/XPUStream.h>
namespace c10::xpu {
/*
* XPUEvent is a movable, non-copyable wrapper around a SYCL event. The event is
* constructed lazily when it is first recorded; its device is acquired from the
* first recording stream, and any later stream that records the event must be
* on the same device.
*
* Currently, XPUEvent does NOT support exporting an event to another process
* via inter-process communication (IPC), i.e. event handles cannot be shared
* between processes. This could impact applications that rely on cross-process
* synchronization and communication.
*/
struct XPUEvent {
// Constructors
XPUEvent(bool enable_timing = false) noexcept
: enable_timing_{enable_timing} {}
~XPUEvent() {
if (isCreated()) {
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_deletion(
c10::kXPU, reinterpret_cast<uintptr_t>(event_.get()));
}
}
}
C10_DISABLE_COPY_AND_ASSIGN(XPUEvent);
XPUEvent(XPUEvent&& other) = default;
XPUEvent& operator=(XPUEvent&& other) = default;
operator sycl::event&() const {
return event();
}
std::optional<c10::Device> device() const {
if (isCreated()) {
return c10::Device(c10::kXPU, device_index_);
} else {
return std::nullopt;
}
}
inline bool isCreated() const {
return (event_.get() != nullptr);
}
DeviceIndex device_index() const {
return device_index_;
}
sycl::event& event() const {
return *event_;
}
bool query() const {
using namespace sycl::info;
if (!isCreated()) {
return true;
}
return event().get_info<event::command_execution_status>() ==
event_command_status::complete;
}
void record() {
record(getCurrentXPUStream());
}
void recordOnce(const XPUStream& stream) {
if (!isCreated()) {
record(stream);
}
}
void record(const XPUStream& stream) {
if (!isCreated()) {
device_index_ = stream.device_index();
assignEvent(stream.queue());
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_creation(
c10::kXPU, reinterpret_cast<uintptr_t>(event_.get()));
}
} else {
TORCH_CHECK(
device_index_ == stream.device_index(),
"Event device ",
device_index_,
" does not match recording stream's device ",
stream.device_index(),
".");
reassignEvent(stream.queue());
}
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_record(
c10::kXPU,
reinterpret_cast<uintptr_t>(event_.get()),
reinterpret_cast<uintptr_t>(&stream.queue()));
}
}
void block(const XPUStream& stream) {
if (isCreated()) {
std::vector<sycl::event> event_list{event()};
// Make this stream wait until event_ is completed.
stream.queue().ext_oneapi_submit_barrier(event_list);
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_wait(
c10::kXPU,
reinterpret_cast<uintptr_t>(event_.get()),
reinterpret_cast<uintptr_t>(&stream.queue()));
}
}
}
double elapsed_time(const XPUEvent& other) const {
TORCH_CHECK(
isCreated() && other.isCreated(),
"Both events must be recorded before calculating elapsed time.");
TORCH_CHECK(
query() && other.query(),
"Both events must be completed before calculating elapsed time.");
TORCH_CHECK(
enable_timing_ && other.enable_timing_,
"Both events must be created with argument 'enable_timing=True'.");
using namespace sycl::info::event_profiling;
// Block until both of the recorded events are completed.
uint64_t end_time_ns = other.event().get_profiling_info<command_end>();
uint64_t start_time_ns = event().get_profiling_info<command_end>();
// Return the elapsed time in milliseconds.
return 1e-6 *
(static_cast<double>(end_time_ns) - static_cast<double>(start_time_ns));
}
void synchronize() const {
if (isCreated()) {
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_synchronization(
c10::kXPU, reinterpret_cast<uintptr_t>(event_.get()));
}
event().wait_and_throw();
}
}
private:
void assignEvent(sycl::queue& queue) {
if (enable_timing_) {
event_ = std::make_unique<sycl::event>(
sycl::ext::oneapi::experimental::submit_profiling_tag(queue));
} else {
event_ = std::make_unique<sycl::event>(queue.ext_oneapi_submit_barrier());
}
}
void reassignEvent(sycl::queue& queue) {
event_.reset();
assignEvent(queue);
}
bool enable_timing_ = false;
c10::DeviceIndex device_index_ = -1;
// Only need to track the last event, as events in an in-order queue are
// executed sequentially.
std::unique_ptr<sycl::event> event_;
};
} // namespace c10::xpu
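
For reviewers unfamiliar with the class being removed above, a rough sketch of how XPUEvent was typically used to time work on the current XPU stream follows. It relies only on members shown in the deleted header; the function name and the timing pattern are illustrative assumptions, and the include will no longer resolve once this diff lands.

// Illustrative sketch, not part of the PR; assumes a SYCL/XPU build.
#include <c10/xpu/XPUStream.h>
// XPUEvent.h is removed by this diff; the API below is as it appeared in the
// deleted header.
#include <c10/xpu/XPUEvent.h>

double time_region_ms() {
  using namespace c10::xpu;
  XPUStream stream = getCurrentXPUStream();

  // Events must be created with enable_timing=true to support elapsed_time().
  XPUEvent start(/*enable_timing=*/true);
  XPUEvent end(/*enable_timing=*/true);

  start.record(stream);
  // ... enqueue XPU work on `stream` here ...
  end.record(stream);

  // block() would make another stream wait for the event; synchronize() waits
  // on the host until the recorded event has completed.
  end.synchronize();
  return start.elapsed_time(end);  // milliseconds
}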

View File

@ -478,7 +478,6 @@ function(torch_update_find_cuda_flags)
endfunction()
include(CheckCXXCompilerFlag)
include(CheckCCompilerFlag)
include(CheckLinkerFlag)
##############################################################################
@ -502,24 +501,6 @@ function(append_cxx_flag_if_supported flag outputvar)
endif()
endfunction()
function(append_c_flag_if_supported flag outputvar)
string(TOUPPER "HAS${flag}" _FLAG_NAME)
string(REGEX REPLACE "[=-]" "_" _FLAG_NAME "${_FLAG_NAME}")
# GCC silences unknown -Wno-XXX flags, so test the corresponding -WXXX.
if(CMAKE_C_COMPILER_ID STREQUAL "GNU")
string(REGEX REPLACE "^Wno-" "W" new_flag "${flag}")
else()
set(new_flag "${flag}")
endif()
check_c_compiler_flag("${new_flag}" ${_FLAG_NAME})
if(${_FLAG_NAME})
string(APPEND ${outputvar} " ${flag}")
set(${outputvar} "${${outputvar}}" PARENT_SCOPE)
endif()
endfunction()
function(target_compile_options_if_supported target flag)
set(_compile_options "")
append_cxx_flag_if_supported("${flag}" _compile_options)

View File

@ -1308,319 +1308,8 @@ coverage_ignore_functions = [
# torch.onnx.symbolic_opset7
"max",
"min",
# torch.onnx.symbolic_opset8
"addmm",
"bmm",
"empty",
"empty_like",
"flatten",
"full",
"full_like",
"gt",
"lt",
"matmul",
"mm",
"ones",
"ones_like",
"prelu",
"repeat",
"zeros",
"zeros_like",
# torch.onnx.symbolic_opset9
"abs",
"acos",
"adaptive_avg_pool1d",
"adaptive_avg_pool2d",
"adaptive_avg_pool3d",
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"add",
"addcmul",
"addmm",
"alias",
"amax",
"amin",
"aminmax",
"arange",
"argmax",
"argmin",
"as_strided",
"as_tensor",
"asin",
"atan",
"atan2",
"avg_pool1d",
"avg_pool2d",
"avg_pool3d",
"baddbmm",
"batch_norm",
"bernoulli",
"bitwise_not",
"bitwise_or",
"bmm",
"broadcast_tensors",
"broadcast_to",
"bucketize",
"cat",
"cdist",
"ceil",
"clamp",
"clamp_max",
"clamp_min",
"clone",
"constant_pad_nd",
"contiguous",
"conv1d",
"conv2d",
"conv3d",
"conv_tbc",
"conv_transpose1d",
"conv_transpose2d",
"conv_transpose3d",
"convert_element_type",
"convolution",
"cos",
"cosine_similarity",
"cross",
"cumsum",
"detach",
"dim",
"div",
"dot",
"dropout",
"elu",
"embedding",
"embedding_bag",
"empty",
"empty_like",
"eq",
"erf",
"exp",
"expand",
"expand_as",
"eye",
"fill",
"flatten",
"floor",
"floor_divide",
"floordiv",
"frobenius_norm",
"full",
"full_like",
"gather",
"ge",
"gelu",
"get_pool_ceil_padding",
"glu",
"group_norm",
"gru",
"gt",
"hann_window",
"hardshrink",
"hardsigmoid",
"hardswish",
"hardtanh",
"index",
"index_add",
"index_copy",
"index_fill",
"index_put",
"index_select",
"instance_norm",
"is_floating_point",
"is_pinned",
"isnan",
"item",
"kl_div",
"layer_norm",
"le",
"leaky_relu",
"lerp",
"lift",
"linalg_cross",
"linalg_matrix_norm",
"linalg_norm",
"linalg_vector_norm",
"linear",
"linspace",
"log",
"log10",
"log1p",
"log2",
"log_sigmoid",
"log_softmax",
"logical_and",
"logical_not",
"logical_or",
"logical_xor",
"logit",
"logsumexp",
"lstm",
"lstm_cell",
"lt",
"masked_fill",
"masked_fill_",
"matmul",
"max",
"max_pool1d",
"max_pool1d_with_indices",
"max_pool2d",
"max_pool2d_with_indices",
"max_pool3d",
"max_pool3d_with_indices",
"maximum",
"meshgrid",
"min",
"minimum",
"mish",
"mm",
"movedim",
"mse_loss",
"mul",
"multinomial",
"mv",
"narrow",
"native_layer_norm",
"ne",
"neg",
"new_empty",
"new_full",
"new_ones",
"new_zeros",
"nonzero",
"nonzero_numpy",
"noop_complex_operators",
"norm",
"numel",
"numpy_T",
"one_hot",
"ones",
"ones_like",
"onnx_placeholder",
"overload_by_arg_count",
"pad",
"pairwise_distance",
"permute",
"pixel_shuffle",
"pixel_unshuffle",
"pow",
"prelu",
"prim_constant",
"prim_constant_chunk",
"prim_constant_split",
"prim_data",
"prim_device",
"prim_dtype",
"prim_if",
"prim_layout",
"prim_list_construct",
"prim_list_unpack",
"prim_loop",
"prim_max",
"prim_min",
"prim_shape",
"prim_tolist",
"prim_tuple_construct",
"prim_type",
"prim_unchecked_cast",
"prim_uninitialized",
"rand",
"rand_like",
"randint",
"randint_like",
"randn",
"randn_like",
"reciprocal",
"reflection_pad",
"relu",
"relu6",
"remainder",
"repeat",
"repeat_interleave",
"replication_pad",
"reshape",
"reshape_as",
"rnn_relu",
"rnn_tanh",
"roll",
"rrelu",
"rsqrt",
"rsub",
"scalar_tensor",
"scatter",
"scatter_add",
"select",
"selu",
"sigmoid",
"sign",
"silu",
"sin",
"size",
"slice",
"softmax",
"softplus",
"softshrink",
"sort",
"split",
"split_with_sizes",
"sqrt",
"square",
"squeeze",
"stack",
"std",
"std_mean",
"sub",
"t",
"take",
"tan",
"tanh",
"tanhshrink",
"tensor",
"threshold",
"to",
"topk",
"transpose",
"true_divide",
"type_as",
"unbind",
"unfold",
"unsafe_chunk",
"unsafe_split",
"unsafe_split_with_sizes",
"unsqueeze",
"unsupported_complex_operators",
"unused",
"upsample_bilinear2d",
"upsample_linear1d",
"upsample_nearest1d",
"upsample_nearest2d",
"upsample_nearest3d",
"upsample_trilinear3d",
"var",
"var_mean",
"view",
"view_as",
"where",
"wrap_logical_op_with_cast_to",
"wrap_logical_op_with_negation",
"zero",
"zeros",
"zeros_like",
# torch.onnx.utils
"disable_apex_o2_state_dict_hook",
"export",
"export_to_pretty_string",
"exporter_context",
"is_in_onnx_export",
"model_signature",
"register_custom_op_symbolic",
"select_model_mode_for_export",
"setup_onnx_logging",
"unconvertible_ops",
"unpack_quantized_tensor",
"warn_on_static_input_change",
# torch.onnx.verification
"check_export_model_diff",
"verify",
"verify_aten_graph",
@ -1711,32 +1400,6 @@ coverage_ignore_functions = [
"noop_context_fn",
"set_checkpoint_early_stop",
"set_device_states",
# torch.utils.collect_env
"check_release_file",
"get_cachingallocator_config",
"get_clang_version",
"get_cmake_version",
"get_conda_packages",
"get_cpu_info",
"get_cuda_module_loading_config",
"get_cudnn_version",
"get_env_info",
"get_gcc_version",
"get_gpu_info",
"get_libc_version",
"get_lsb_version",
"get_mac_version",
"get_nvidia_driver_version",
"get_nvidia_smi",
"get_os",
"get_pip_packages",
"get_platform",
"get_pretty_env_info",
"get_python_platform",
"get_running_cuda_version",
"get_windows_version",
"is_xnnpack_available",
"pretty_str",
# torch.utils.cpp_backtrace
"get_cpp_backtrace",
# torch.utils.cpp_extension
@ -1800,52 +1463,6 @@ coverage_ignore_functions = [
"apply_shuffle_seed",
"apply_shuffle_settings",
"get_all_graph_pipes",
# torch.utils.flop_counter
"addmm_flop",
"baddbmm_flop",
"bmm_flop",
"conv_backward_flop",
"conv_flop",
"conv_flop_count",
"convert_num_with_suffix",
"get_shape",
"get_suffix_str",
"mm_flop",
"normalize_tuple",
"register_flop_formula",
"sdpa_backward_flop",
"sdpa_backward_flop_count",
"sdpa_flop",
"sdpa_flop_count",
"shape_wrapper",
"transpose_shape",
# torch.utils.hipify.hipify_python
"add_dim3",
"compute_stats",
"extract_arguments",
"file_add_header",
"file_specific_replacement",
"find_bracket_group",
"find_closure_group",
"find_parentheses_group",
"fix_static_global_kernels",
"get_hip_file_path",
"hip_header_magic",
"hipify",
"is_caffe2_gpu_file",
"is_cusparse_file",
"is_out_of_place",
"is_pytorch_file",
"is_special_file",
"match_extensions",
"matched_files_iter",
"openf",
"preprocess_file_and_save_result",
"preprocessor",
"processKernelLaunches",
"replace_extern_shared",
"replace_math_functions",
"str2bool",
# torch.utils.hooks
"unserializable_hook",
"warn_if_has_hooks",

View File

@ -1,21 +0,0 @@
# torch.mtia.mtia_graph
The MTIA backend is implemented out of tree; only the interfaces are defined here.
```{eval-rst}
.. automodule:: torch.mtia.mtia_graph
```
```{eval-rst}
.. currentmodule:: torch.mtia.mtia_graph
```
```{eval-rst}
.. autoclass:: MTIAGraph
:members:
```
```{eval-rst}
.. autoclass:: graph
:members:
```

View File

@ -14,10 +14,6 @@ Utils
sdpa_kernel
SDPBackend
register_flash_attention_impl
activate_flash_attention_impl
list_flash_attention_impls
current_flash_attention_impl
Submodules
----------

View File

@ -29,7 +29,6 @@ mps
xpu
mtia
mtia.memory
mtia.mtia_graph
meta
torch.backends <backends>
torch.export <export>

View File

@ -19,6 +19,91 @@
swap_tensors
```
# torch.utils.collect_env
```{eval-rst}
.. automodule:: torch.utils.collect_env
```
```{eval-rst}
.. currentmodule:: torch.utils.collect_env
```
```{eval-rst}
.. autosummary::
:toctree: generated
:nosignatures:
check_release_file
is_xnnpack_available
pretty_str
```
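Only a handful of helpers are listed above; the usual entry point for this module is the full environment report. A minimal sketch, printing roughly the same information as `python -m torch.utils.collect_env`:

```python
# Minimal sketch; emits the environment report used in bug reports.
from torch.utils.collect_env import get_pretty_env_info

print(get_pretty_env_info())
```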
# torch.utils.flop_counter
```{eval-rst}
.. automodule:: torch.utils.flop_counter
```
```{eval-rst}
.. currentmodule:: torch.utils.flop_counter
```
```{eval-rst}
.. autosummary::
:toctree: generated
:nosignatures:
baddbmm_flop
bmm_flop
conv_backward_flop
conv_flop
conv_flop_count
register_flop_formula
sdpa_backward_flop
sdpa_backward_flop_count
sdpa_flop
sdpa_flop_count
shape_wrapper
```
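The per-operator formulas above are registered with `FlopCounterMode`, which is the usual way to consume them. A minimal sketch:

```python
import torch
from torch.utils.flop_counter import FlopCounterMode

# Count FLOPs for a single linear forward pass using the formulas above.
model = torch.nn.Linear(64, 64)
x = torch.randn(8, 64)

with FlopCounterMode(display=False) as counter:
    model(x)

print(counter.get_total_flops())
```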
# torch.utils.hipify.hipify_python
```{eval-rst}
.. automodule:: torch.utils.hipify.hipify_python
```
```{eval-rst}
.. currentmodule:: torch.utils.hipify.hipify_python
```
```{eval-rst}
.. autosummary::
:toctree: generated
:nosignatures:
compute_stats
extract_arguments
file_add_header
file_specific_replacement
find_bracket_group
find_closure_group
find_parentheses_group
fix_static_global_kernels
hip_header_magic
hipify
is_caffe2_gpu_file
is_cusparse_file
is_out_of_place
is_pytorch_file
is_special_file
openf
preprocess_file_and_save_result
preprocessor
processKernelLaunches
replace_extern_shared
replace_math_functions
str2bool
```
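A rough sketch of driving `hipify()` programmatically, in the spirit of how `torch.utils.cpp_extension` invokes it when building extensions for ROCm; the paths are placeholders and the keyword set is an assumption based on that usage:

```python
from torch.utils.hipify import hipify_python

# Hypothetical extension layout; hipify() translates the matched CUDA
# sources into HIP equivalents under output_directory.
hipify_python.hipify(
    project_directory="/path/to/my_extension",
    output_directory="/path/to/my_extension",
    includes=["csrc/*"],
    is_pytorch_extension=True,
)
```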
<!-- This module needs to be documented. Adding here in the meantime
for tracking purposes -->
```{eval-rst}
@ -43,7 +128,6 @@ for tracking purposes -->
.. py:module:: torch.utils.benchmark.utils.valgrind_wrapper.timer_interface
.. py:module:: torch.utils.bundled_inputs
.. py:module:: torch.utils.checkpoint
.. py:module:: torch.utils.collect_env
.. py:module:: torch.utils.cpp_backtrace
.. py:module:: torch.utils.cpp_extension
.. py:module:: torch.utils.data.backward_compatibility
@ -80,10 +164,8 @@ for tracking purposes -->
.. py:module:: torch.utils.data.sampler
.. py:module:: torch.utils.dlpack
.. py:module:: torch.utils.file_baton
.. py:module:: torch.utils.flop_counter
.. py:module:: torch.utils.hipify.constants
.. py:module:: torch.utils.hipify.cuda_to_hip_mappings
.. py:module:: torch.utils.hipify.hipify_python
.. py:module:: torch.utils.hipify.version
.. py:module:: torch.utils.hooks
.. py:module:: torch.utils.jit.log_extract

View File

@ -260,7 +260,6 @@ select = [
"TRY401", # verbose-log-message
"UP",
"YTT",
"S101",
]
[tool.ruff.lint.pyupgrade]
@ -340,39 +339,6 @@ keep-runtime-typing = true
"tools/linter/**" = [
"LOG015" # please fix
]
"benchmarks/**" = [
"S101"
]
"test/**" = [
"S101"
]
"torchgen/**" = [
"S101"
]
"torch/**" = [
"S101"
]
"tools/**" = [
"S101"
]
"setup.py" = [
"S101"
]
"functorch/**" = [
"S101"
]
"docs/**" = [
"S101"
]
"android/**" = [
"S101"
]
".github/**" = [
"S101"
]
".ci/**" = [
"S101"
]
[tool.codespell]
ignore-words = "tools/linter/dictionary.txt"

View File

@ -10,7 +10,7 @@ tp2_dir="$top_dir/third_party"
pip install ninja
# Install onnx
pip install -e "$tp2_dir/onnx"
pip install --no-use-pep517 -e "$tp2_dir/onnx"
# Install caffe2 and pytorch
pip install -r "$top_dir/caffe2/requirements.txt"

View File

@ -17,11 +17,8 @@ set(AOTI_ABI_CHECK_TEST_SRCS
${AOTI_ABI_CHECK_TEST_ROOT}/test_headeronlyarrayref.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_macros.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_math.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_metaprogramming.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_rand.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_scalartype.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_typelist.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_typetraits.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_vec.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_vec_half.cpp
)

View File

@ -1,6 +1,5 @@
#include <torch/csrc/inductor/aoti_torch/c/shim.h>
#include <torch/csrc/stable/accelerator.h>
#include <torch/csrc/stable/device.h>
#include <torch/csrc/stable/library.h>
#include <torch/csrc/stable/tensor.h>
#include <torch/csrc/stable/ops.h>
@ -529,149 +528,6 @@ STABLE_TORCH_LIBRARY_IMPL(libtorch_agnostic, CompositeExplicitAutograd, m) {
m.impl("make_tensor_clones_and_call_foreach", &boxed_make_tensor_clones_and_call_foreach);
}
// Test functions for torch::stable::Tensor device method
torch::stable::Device test_tensor_device(torch::stable::Tensor tensor) {
return tensor.device();
}
void boxed_test_tensor_device(
StableIValue* stack,
uint64_t num_args,
uint64_t num_outputs) {
torch::stable::Device res = test_tensor_device(
torch::stable::detail::to<torch::stable::Tensor>(stack[0]));
stack[0] = torch::stable::detail::from(res);
}
// Test functions for torch::stable::Device
torch::stable::Device test_device_constructor(
bool is_cuda,
torch::stable::DeviceIndex index,
bool use_str) {
using torch::stable::Device;
using torch::stable::DeviceType;
if (use_str) {
std::string device_str;
if (is_cuda) {
device_str = "cuda:" + std::to_string(index);
} else {
device_str = "cpu";
}
return Device(device_str);
} else {
if (is_cuda) {
return Device(DeviceType::CUDA, index);
} else {
return Device(DeviceType::CPU);
}
}
}
void boxed_test_device_constructor(
StableIValue* stack,
uint64_t num_args,
uint64_t num_outputs) {
torch::stable::Device res = test_device_constructor(
torch::stable::detail::to<bool>(stack[0]),
torch::stable::detail::to<torch::stable::DeviceIndex>(stack[1]),
torch::stable::detail::to<bool>(stack[2]));
stack[0] = torch::stable::detail::from(res);
}
bool test_device_equality(torch::stable::Device d1, torch::stable::Device d2) {
return d1 == d2;
}
void boxed_test_device_equality(
StableIValue* stack,
uint64_t num_args,
uint64_t num_outputs) {
bool res = test_device_equality(
torch::stable::detail::to<torch::stable::Device>(stack[0]),
torch::stable::detail::to<torch::stable::Device>(stack[1]));
stack[0] = torch::stable::detail::from(res);
}
torch::stable::Device test_device_set_index(
torch::stable::Device device,
torch::stable::DeviceIndex index) {
device.set_index(index);
return device;
}
void boxed_test_device_set_index(
StableIValue* stack,
uint64_t num_args,
uint64_t num_outputs) {
torch::stable::Device res = test_device_set_index(
torch::stable::detail::to<torch::stable::Device>(stack[0]),
torch::stable::detail::to<torch::stable::DeviceIndex>(stack[1]));
stack[0] = torch::stable::detail::from(res);
}
torch::stable::DeviceIndex test_device_index(torch::stable::Device device) {
return device.index();
}
void boxed_test_device_index(
StableIValue* stack,
uint64_t num_args,
uint64_t num_outputs) {
torch::stable::DeviceIndex res = test_device_index(
torch::stable::detail::to<torch::stable::Device>(stack[0]));
stack[0] = torch::stable::detail::from(res);
}
bool test_device_is_cuda(torch::stable::Device device) {
return device.is_cuda();
}
void boxed_test_device_is_cuda(
StableIValue* stack,
uint64_t num_args,
uint64_t num_outputs) {
bool res = test_device_is_cuda(
torch::stable::detail::to<torch::stable::Device>(stack[0]));
stack[0] = torch::stable::detail::from(res);
}
bool test_device_is_cpu(torch::stable::Device device) {
return device.is_cpu();
}
void boxed_test_device_is_cpu(
StableIValue* stack,
uint64_t num_args,
uint64_t num_outputs) {
bool res = test_device_is_cpu(
torch::stable::detail::to<torch::stable::Device>(stack[0]));
stack[0] = torch::stable::detail::from(res);
}
STABLE_TORCH_LIBRARY_FRAGMENT(libtorch_agnostic, m) {
m.def("test_tensor_device(Tensor t) -> Device");
m.def(
"test_device_constructor(bool is_cuda, DeviceIndex index, bool use_str) -> Device");
m.def("test_device_equality(Device d1, Device d2) -> bool");
m.def("test_device_set_index(Device device, DeviceIndex index) -> Device");
m.def("test_device_index(Device device) -> DeviceIndex");
m.def("test_device_is_cuda(Device device) -> bool");
m.def("test_device_is_cpu(Device device) -> bool");
}
STABLE_TORCH_LIBRARY_IMPL(libtorch_agnostic, CompositeExplicitAutograd, m) {
m.impl("test_tensor_device", &boxed_test_tensor_device);
m.impl("test_device_constructor", &boxed_test_device_constructor);
m.impl("test_device_equality", &boxed_test_device_equality);
m.impl("test_device_set_index", &boxed_test_device_set_index);
m.impl("test_device_index", &boxed_test_device_index);
m.impl("test_device_is_cuda", &boxed_test_device_is_cuda);
m.impl("test_device_is_cpu", &boxed_test_device_is_cpu);
}
// Test functions for torch::stable::accelerator APIs
#ifdef LAE_USE_CUDA
@ -761,66 +617,3 @@ STABLE_TORCH_LIBRARY_IMPL(libtorch_agnostic, CompositeExplicitAutograd, m) {
}
#endif // LAE_USE_CUDA
Tensor test_parallel_for(int64_t size, int64_t grain_size) {
AtenTensorHandle tensor_handle;
int64_t stride = 1;
aoti_torch_empty_strided(
1,
&size,
&stride,
aoti_torch_dtype_int64(),
aoti_torch_device_type_cpu(),
0,
&tensor_handle);
Tensor tensor(tensor_handle);
int64_t* data_ptr = reinterpret_cast<int64_t*>(tensor.data_ptr());
torch::stable::zero_(tensor);
// Use parallel_for to fill each element with its index
// If using a parallel path, the thread id is encoded in the upper 32 bits
torch::stable::parallel_for(
0, size, grain_size, [data_ptr](int64_t begin, int64_t end) {
for (auto i = begin; i < end; i++) {
STD_TORCH_CHECK(i <= UINT32_MAX);
uint32_t thread_id;
torch_get_thread_idx(&thread_id);
data_ptr[i] = i | (static_cast<int64_t>(thread_id) << 32);
}
});
return tensor;
}
void boxed_test_parallel_for(
StableIValue* stack,
uint64_t num_args,
uint64_t num_outputs) {
Tensor res = test_parallel_for(to<int64_t>(stack[0]), to<int64_t>(stack[1]));
stack[0] = from(res);
}
uint32_t test_get_num_threads() {
return torch::stable::get_num_threads();
}
void boxed_test_get_num_threads(
StableIValue* stack,
uint64_t num_args,
uint64_t num_outputs) {
uint32_t res = test_get_num_threads();
stack[0] = from(res);
}
STABLE_TORCH_LIBRARY_FRAGMENT(libtorch_agnostic, m) {
m.def("test_parallel_for(int size, int grain_size) -> Tensor");
m.def("test_get_num_threads() -> int");
}
STABLE_TORCH_LIBRARY_IMPL(libtorch_agnostic, CompositeExplicitAutograd, m) {
m.impl("test_parallel_for", &boxed_test_parallel_for);
m.impl("test_get_num_threads", &boxed_test_get_num_threads);
}

View File

@ -215,18 +215,6 @@ def test_default_constructor(defined) -> bool:
return torch.ops.libtorch_agnostic.test_default_constructor.default(defined)
def test_tensor_device(t):
"""
Tests Tensor device() method.
Args:
t: Tensor - tensor to get device from
Returns: Device - device of the tensor
"""
return torch.ops.libtorch_agnostic.test_tensor_device.default(t)
def my_pad(t) -> Tensor:
"""
Pads the input tensor with hardcoded padding parameters.
@ -387,103 +375,3 @@ def make_tensor_clones_and_call_foreach(t1, t2) -> list[Tensor]:
return torch.ops.libtorch_agnostic.make_tensor_clones_and_call_foreach.default(
t1, t2
)
def test_device_constructor(is_cuda, index, use_str):
"""
Tests creating a Device from DeviceType and index, or from a string.
Args:
is_cuda: bool - if True, creates CUDA device; if False, creates CPU device
index: int - device index
use_str: bool - if True, constructs from string; if False, constructs from DeviceType
Returns: Device - A device with the specified type and index
"""
return torch.ops.libtorch_agnostic.test_device_constructor.default(
is_cuda, index, use_str
)
def test_device_equality(d1, d2) -> bool:
"""
Tests Device equality operator.
Args:
d1: Device - first device
d2: Device - second device
Returns: bool - True if devices are equal
"""
return torch.ops.libtorch_agnostic.test_device_equality.default(d1, d2)
def test_device_set_index(device, index):
"""
Tests Device set_index() method.
Args:
device: Device - device to modify
index: int - new device index
Returns: Device - device with updated index
"""
return torch.ops.libtorch_agnostic.test_device_set_index.default(device, index)
def test_device_index(device) -> int:
"""
Tests Device index() method.
Args:
device: Device - device to query
Returns: int - device index
"""
return torch.ops.libtorch_agnostic.test_device_index.default(device)
def test_device_is_cuda(device) -> bool:
"""
Tests Device is_cuda() method.
Args:
device: Device - device to check
Returns: bool - True if device is CUDA
"""
return torch.ops.libtorch_agnostic.test_device_is_cuda.default(device)
def test_device_is_cpu(device) -> bool:
"""
Tests Device is_cpu() method.
Args:
device: Device - device to check
Returns: bool - True if device is CPU
"""
return torch.ops.libtorch_agnostic.test_device_is_cpu.default(device)
def test_parallel_for(size, grain_size) -> Tensor:
"""
Tests the parallel_for functionality by using it to fill a tensor with indices.
Args:
size: int - size of the tensor to create
grain_size: int - grain size for parallel_for
Returns: Tensor - a 1D int64 tensor where each element contains its index
    (if multiple threads are used, the thread id will be encoded in the upper 32 bits)
"""
return torch.ops.libtorch_agnostic.test_parallel_for.default(size, grain_size)
def test_get_num_threads() -> int:
"""
Tests the get_num_threads functionality by returning the number of threads
for the parallel backend.
Returns: int - the number of threads for the parallel backend
"""
return torch.ops.libtorch_agnostic.test_get_num_threads.default()

View File

@ -418,113 +418,6 @@ if not IS_WINDOWS:
self.assertEqual(result[0], t1 * t1)
self.assertEqual(result[1], t2 * t2)
@onlyCUDA
def test_device(self, device):
import libtorch_agnostic
cuda_device = libtorch_agnostic.ops.test_device_constructor(
is_cuda=True, index=1, use_str=False
)
self.assertEqual(cuda_device, torch.device("cuda:1"))
cuda_device = libtorch_agnostic.ops.test_device_constructor(
is_cuda=True, index=1, use_str=True
)
self.assertEqual(cuda_device, torch.device("cuda:1"))
self.assertEqual(libtorch_agnostic.ops.test_device_index(cuda_device), 1)
self.assertTrue(
libtorch_agnostic.ops.test_device_equality(
cuda_device, torch.device("cuda:1")
)
)
self.assertFalse(
libtorch_agnostic.ops.test_device_equality(
cuda_device, torch.device("cuda:0")
)
)
self.assertFalse(libtorch_agnostic.ops.test_device_is_cpu(cuda_device))
self.assertTrue(libtorch_agnostic.ops.test_device_is_cuda(cuda_device))
cuda_0_device = libtorch_agnostic.ops.test_device_set_index(cuda_device, 0)
self.assertEqual(cuda_0_device, torch.device("cuda:0"))
cpu_device = libtorch_agnostic.ops.test_device_constructor(False, 0, False)
self.assertEqual(cpu_device, torch.device("cpu"))
self.assertTrue(
libtorch_agnostic.ops.test_device_equality(
cpu_device, torch.device("cpu")
)
)
self.assertTrue(libtorch_agnostic.ops.test_device_is_cpu(cpu_device))
self.assertFalse(libtorch_agnostic.ops.test_device_is_cuda(cpu_device))
self.assertFalse(
libtorch_agnostic.ops.test_device_equality(cpu_device, cuda_device)
)
with self.assertRaisesRegex(
RuntimeError, "Device index 129 is out of range for int8_t"
):
libtorch_agnostic.ops.test_device_constructor(
is_cuda=True, index=129, use_str=False
)
with self.assertRaisesRegex(
RuntimeError, "Device index 129 is out of range for int8_t"
):
libtorch_agnostic.ops.test_device_set_index(cuda_device, 129)
@onlyCUDA
@deviceCountAtLeast(2)
def test_tensor_device(self, device):
import libtorch_agnostic
t = torch.randn(2, 3)
self.assertEqual(libtorch_agnostic.ops.test_tensor_device(t), t.device)
t_cuda = torch.randn(2, 3, device="cuda")
self.assertEqual(
libtorch_agnostic.ops.test_tensor_device(t_cuda), t_cuda.device
)
t_cuda_1 = torch.randn(2, 3, device="cuda:1")
self.assertEqual(
libtorch_agnostic.ops.test_tensor_device(t_cuda_1), t_cuda_1.device
)
@onlyCPU
# TODO: Debug this:
# Dynamo failed to run FX node with fake tensors:
# call_function libtorch_agnostic.test_parallel_for.default(*(100, 10), **{}):
# got RuntimeError('libtorch_agnostic::test_parallel_for() expected at most
# 2 argument(s) but received 3 argument(s).
# Declaration: libtorch_agnostic::test_parallel_for(int size, int grain_size) -> Tensor')
@xfailIfTorchDynamo
def test_parallel_for(self, device):
import libtorch_agnostic
num_threads = torch.get_num_threads()
size = 100
grain_size = 10
expected_num_threads_used = min(
(size + grain_size - 1) // grain_size, num_threads
)
result = libtorch_agnostic.ops.test_parallel_for(size, grain_size)
result_thread_ids = torch.unique(torch.bitwise_right_shift(result, 32))
result_values = torch.bitwise_and(result, 0xFFFFFFFF)
expected = torch.arange(size, dtype=torch.int64)
self.assertEqual(result_values, expected)
self.assertEqual(result_thread_ids, torch.arange(expected_num_threads_used))
@onlyCPU
def test_get_num_threads(self, device):
import libtorch_agnostic
num_threads = libtorch_agnostic.ops.test_get_num_threads()
expected_num_threads = torch.get_num_threads()
self.assertEqual(num_threads, expected_num_threads)
instantiate_device_type_tests(TestLibtorchAgnostic, globals(), except_for=None)
if __name__ == "__main__":

View File

@ -140,11 +140,6 @@ static void initDeviceStreamState(DeviceIndex device_index) {
static void initOpenRegStreamsOnce() {
c10::call_once(init_flag, initGlobalStreamState);
for (const auto i : c10::irange(num_devices)) {
c10::call_once(
device_flags[i], initDeviceStreamState, static_cast<DeviceIndex>(i));
}
if (current_streams) {
return;
}
@ -207,6 +202,8 @@ OpenRegStream getStreamFromPool(const int priority, DeviceIndex device_index) {
if (device_index == -1) {
device_index = current_device();
}
c10::call_once(
device_flags[device_index], initDeviceStreamState, device_index);
auto pri_idx =
std::clamp(priority, 0, max_compile_time_stream_priorities - 1);
const auto idx = get_idx(priority_counters[device_index][pri_idx]);

View File

@ -180,47 +180,6 @@ class TestTrackerFullyShard1DTrainingCore(FSDPTest):
del model
del optim
def _test_tracker_multihandler_hook(self):
"""Should run without KeyError."""
class TestModule(nn.Module):
def __init__(self, dim: int):
super().__init__()
self.norm1 = nn.RMSNorm(dim)
self.output1 = nn.Linear(dim, dim)
self.norm2 = nn.RMSNorm(dim)
self.output2 = nn.Linear(dim, dim)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.norm1(x)
x = self.output1(x)
x = self.norm2(x)
x = self.output2(x)
return x
gc.collect()
torch.manual_seed(42)
dev = torch.device(torch.accelerator.current_device_index())
with torch.device(dev):
model = TestModule(128)
mesh = init_device_mesh(dev.type, (self.world_size,))
fully_shard([model.norm1, model.output1], mesh=mesh)
fully_shard([model.norm2, model.output2], mesh=mesh)
fully_shard(model, mesh=mesh)
fmt = FSDPMemTracker(model)
with fmt:
inp = torch.randn(16, 128, device=dev)
y = model(inp)
loss = y.sum()
loss.backward()
del inp
del model
class TestTrackerFullyShard1DTrainingCompose(FSDPTest):
@property

View File

@ -1,44 +0,0 @@
# Owner(s): ["oncall: r2p"]
# This is a helper script for
# test_run.py::ElasticLaunchTest::test_virtual_local_rank. It prints out the
# generated inductor output for a simple function.
import os
from unittest.mock import patch
import torch
import torch.distributed as dist
from torch._inductor import codecache
@torch.compile
def myfn(x: torch.Tensor) -> torch.Tensor:
return x + x
dist.init_process_group(backend="nccl")
local_rank = int(os.environ.get("LOCAL_RANK", "0"))
torch.cuda.set_device(local_rank)
def print_output_code(original_fn):
def wrapper(msg, *args, **kwargs):
# Check if this is the "Output code:" message
if args and "Output code:" in msg:
print(args[0])
return wrapper
x = torch.rand(2, 2, device="cuda")
with patch.object(
codecache.output_code_log,
"debug",
side_effect=print_output_code(codecache.output_code_log.debug),
):
y = myfn(x)
dist.destroy_process_group()

View File

@ -16,7 +16,7 @@ import sys
import tempfile
import uuid
from contextlib import closing, redirect_stderr, redirect_stdout
from unittest import mock, skipIf
from unittest import mock
from unittest.mock import MagicMock, Mock, patch
import torch.distributed.run as launch
@ -28,7 +28,6 @@ from torch.distributed.elastic.utils.distributed import get_free_port
from torch.testing._internal.common_utils import (
run_tests,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
@ -678,96 +677,6 @@ class ElasticLaunchTest(TestCase):
for i in range(nproc_per_node):
self.assertTrue(f"[rank{i}]: creating " in captured_out.getvalue())
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
@skipIf(not TEST_CUDA, "requires CUDA")
def test_virtual_local_rank(self):
"""
Test that virtual-local-rank ensures consistent device IDs across ranks.
Without it, ranks may compile to different devices, leading to different code.
"""
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 2
# Helper function to run and capture output
def run_test(use_virtual_local_rank):
args = [
f"--nnodes={nnodes}",
f"--nproc-per-node={nproc_per_node}",
f"--rdzv-id={run_id}",
"--monitor-interval=1",
"--start-method=spawn",
"--redirect=3",
"--tee=3",
]
if use_virtual_local_rank:
args.append("--virtual-local-rank")
args.append(path("script_deviceid.py"))
captured_out = io.StringIO()
captured_err = io.StringIO()
with redirect_stdout(captured_out), redirect_stderr(captured_err):
launch.main(args)
return captured_out.getvalue()
def split_ranks(output):
default0 = []
default1 = []
for line in output.splitlines():
if "cuda:" not in line:
continue
if line.startswith("[default0]:"):
default0.append(line[11:])
elif line.startswith("[default1]:"):
default1.append(line[11:])
return default0, default1
# First, run WITHOUT virtual-local-rank - outputs should differ
output = run_test(use_virtual_local_rank=False)
rank0, rank1 = split_ranks(output)
# Verify we actually captured compiled code from both ranks
self.assertGreater(
len(rank0), 0, "Expected to capture compiled code from rank 0"
)
self.assertGreater(
len(rank1), 0, "Expected to capture compiled code from rank 1"
)
# Without virtual-local-rank, the ranks should have DIFFERENT compiled code
# because they see different device IDs (cuda:0 vs cuda:1)
self.assertNotEqual(
rank0,
rank1,
"Expected different compiled code without --virtual-local-rank",
)
# Now run WITH virtual-local-rank - outputs should be identical
output = run_test(use_virtual_local_rank=True)
rank0, rank1 = split_ranks(output)
# Verify we actually captured compiled code from both ranks
self.assertGreater(
len(rank0),
0,
"Expected to capture compiled code from rank 0 with --virtual-local-rank",
)
self.assertGreater(
len(rank1),
0,
"Expected to capture compiled code from rank 1 with --virtual-local-rank",
)
# With virtual-local-rank, both ranks should have IDENTICAL compiled code
# because they both see cuda:0 during compilation
self.assertEqual(
rank0, rank1, "Expected identical compiled code with --virtual-local-rank"
)
if __name__ == "__main__":
run_tests()

View File

@ -1,7 +1,6 @@
# Owner(s): ["oncall: distributed"]
import contextlib
import unittest
import torch
import torch.distributed as dist
@ -24,15 +23,8 @@ from torch.testing._internal.common_utils import (
TestCase,
)
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU
from torch.utils._debug_mode import (
_OpCall,
_RedistributeCall,
_TritonKernelCall,
DebugMode,
)
from torch.utils._debug_mode import _OpCall, _RedistributeCall, DebugMode
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._triton import has_triton_package
@requires_cuda
@ -442,110 +434,6 @@ class TestDTensorDebugMode(TestCase):
][-1]
self.assertTrue("self.l2(self.l1(x))" in sum_op.fwd_stack_trace)
@unittest.skipIf(not HAS_GPU, "requires GPU")
@unittest.skipIf(not has_triton_package(), "requires triton")
def test_triton_kernel_logs(self):
import triton
from torch.testing._internal.triton_utils import add_kernel_autotuned
def call_triton(x, y):
output = torch.zeros_like(x)
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),) # noqa: E731
add_kernel_autotuned[grid](x, y, output, n_elements)
return output
x = torch.randn(128, device=GPU_TYPE)
y = torch.randn(128, device=GPU_TYPE)
with DebugMode() as debug_mode:
torch.compile(call_triton)(x, y)
triton_calls = [
op for op in debug_mode.operators if isinstance(op, _TritonKernelCall)
]
self.assertGreater(len(triton_calls), 0)
self.assertIn("[triton]", triton_calls[0].render([]))
def test_check_hash_mismatches(self):
x = torch.randn(64, 64, device=GPU_TYPE)
x_different = torch.randn(64, 64, device=GPU_TYPE)
# Identical runs should have no mismatches
with DebugMode() as dm1, DebugMode.log_tensor_hashes():
x.sin().sum()
with DebugMode() as dm2, DebugMode.log_tensor_hashes():
x.sin().sum()
mismatches = DebugMode.check_hash_mismatches(dm1.logs, dm2.logs)
self.assertEqual(len(mismatches), 0)
# Different inputs should produce hash mismatches
with DebugMode() as dm3, DebugMode.log_tensor_hashes():
x_different.sin().sum()
# Check that mismatches are detected
mismatches = DebugMode.check_hash_mismatches(dm1.logs, dm3.logs)
self.assertEqual(len(mismatches), 2)
self.assertEqual(
[call["call"] for call in mismatches], ["aten::sin", "aten::sum"]
)
@unittest.skipIf(not HAS_GPU, "requires GPU")
@unittest.skipIf(not has_triton_package(), "requires triton")
def test_check_triton_hash_mismatches(self):
import triton
from torch.testing._internal.triton_utils import add_kernel_autotuned
def call_triton(x, y):
output = torch.zeros_like(x)
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),) # noqa: E731
add_kernel_autotuned[grid](x, y, output, n_elements)
return output
a = torch.randn(128, device=GPU_TYPE)
b = torch.randn(128, device=GPU_TYPE)
c = torch.randn(128, device=GPU_TYPE)
# Run with hash logging to verify triton kernels can be hashed
with DebugMode() as dm_t1, DebugMode.log_tensor_hashes(hash_inputs=True):
torch.compile(call_triton)(a, b)
# Different inputs should have different hashes in triton kernels
with DebugMode() as dm_t2, DebugMode.log_tensor_hashes(hash_inputs=True):
torch.compile(call_triton)(a, c)
# Compare triton kernel hashes
mismatches = DebugMode.check_hash_mismatches(
dm_t1.logs, dm_t2.logs, compare_inputs=True
)
triton_mismatches = [m for m in mismatches if m["call_type"] == "triton kernel"]
self.assertGreater(len(triton_mismatches), 0)
# check both input & output hash mismatches are detected
self.assertGreater(len([m for m in triton_mismatches if m["is_input_hash"]]), 0)
self.assertGreater(
len([m for m in triton_mismatches if not m["is_input_hash"]]), 0
)
def test_check_structure_mismatches(self):
x = torch.randn(32, 32, device=self.device_type)
with DebugMode() as dm1, DebugMode.log_tensor_hashes():
x.sin()
with DebugMode() as dm2, DebugMode.log_tensor_hashes():
x.cos()
with DebugMode() as dm3, DebugMode.log_tensor_hashes():
x.sin().cos()
with self.assertRaisesRegex(ValueError, "Operators don't match"):
DebugMode.check_hash_mismatches(dm1.logs, dm2.logs)
with self.assertRaisesRegex(ValueError, "Log lengths don't match"):
DebugMode.check_hash_mismatches(dm1.logs, dm3.logs)
def test_pretty_print_dtensor_make_fx(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))

View File

@ -535,7 +535,7 @@ class DTensorExportTest(TestCase):
self.assertEqual(fn(z), gm(z)[0])
def test_dtensor_data_dependent_index_and_slice(self):
def test_dtensor_data_dependent_index(self):
device_mesh = init_device_mesh(self.device_type, mesh_shape=(self.world_size,))
class Foo(torch.nn.Module):
@ -548,35 +548,6 @@ class DTensorExportTest(TestCase):
y_dt = distribute_tensor(y, device_mesh, placements=[Replicate()])
_dynamo_graph_capture_for_export(Foo())(x_dt, y_dt)
class Bar(torch.nn.Module):
def forward(self, x):
val = torch.clamp(x.max(), min=1).item()
torch._check(val >= 1)
return x[:val]
x = torch.randint(1000, (4, 64, 16))
x_dt = distribute_tensor(x, device_mesh, placements=[Replicate()])
gm = _dynamo_graph_capture_for_export(Bar())(x_dt)
self.assertExpectedInline(
"""\
graph():
%l_flat_args_0_ : [num_users=2] = placeholder[target=arg_0]
%max_1 : [num_users=1] = call_method[target=max](args = (%l_flat_args_0_,), kwargs = {})
%clamp : [num_users=1] = call_function[target=torch.clamp](args = (%max_1,), kwargs = {min: 1})
%item : [num_users=2] = call_method[target=item](args = (%clamp,), kwargs = {})
%ge_1 : [num_users=1] = call_function[target=operator.ge](args = (%item, 1), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge_1, Runtime assertion failed for expression u0 >= 1 on node 'ge_1'), kwargs = {})
%res : [num_users=2] = call_function[target=operator.getitem](args = (%l_flat_args_0_, slice(None, item, None)), kwargs = {})
%getattr_1 : [num_users=1] = call_function[target=builtins.getattr](args = (%res, _local_tensor), kwargs = {})
%sym_size_int : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%getattr_1, 0), kwargs = {})
%ge_2 : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int, 0), kwargs = {})
%_assert_scalar_default_1 : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge_2, Runtime assertion failed for expression u2 >= 0 on node 'ge_2'), kwargs = {})
%le : [num_users=1] = call_function[target=operator.le](args = (%sym_size_int, 4), kwargs = {})
%_assert_scalar_default_2 : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%le, Runtime assertion failed for expression u2 <= 4 on node 'le'), kwargs = {})
return (res,)""", # noqa: B950
str(gm.graph).strip(),
)
instantiate_parametrized_tests(DTensorExportTest)

View File

@ -999,25 +999,13 @@ class TestExplicitRedistribute(LocalTensorTestBase):
dx = distribute_tensor(x, device_mesh, [Shard(0)])
dA = distribute_tensor(A, device_mesh, [Replicate()])
with ExplicitRedistributionContext(strict=True):
with ExplicitRedistributionContext():
dY = torch.matmul(dx, dA_repl)
loss = dY.sum()
# we now see the error during backwards
with self.assertRaisesRegex(RuntimeError, "Implicit redistribution"):
loss.backward(retain_graph=True)
with ExplicitRedistributionContext(strict=False):
# but since it's a 'free' redistribute, we can still do it under non-strict mode
loss.backward(retain_graph=True)
with ExplicitRedistributionContext(enable=False):
# and we can disable
loss.backward(retain_graph=True)
# and re-enable
with self.assertRaisesRegex(RuntimeError, "Implicit redistribution"):
loss.backward(retain_graph=True)
loss.backward()
if __name__ == "__main__":

View File

@ -1062,307 +1062,6 @@ class TestComputeCommReorderingBucketing(TestComputeCommReorderingMultiProc):
self.assertTrue(same(out, correct))
def get_toy_model(device_type: str):
"""
Helper to construct a small multi-layer ToyModel
"""
class ToyBlock(torch.nn.Module):
def __init__(self):
super().__init__()
self.wq = torch.nn.Linear(4, 4)
self.wk = torch.nn.Linear(4, 4)
self.proj = torch.nn.Linear(4, 4)
def forward(self, x):
attn = self.wq(x) + self.wk(x)
return self.proj(torch.nn.functional.relu(attn))
class ToyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layers = torch.nn.ModuleList([ToyBlock() for _ in range(2)])
self.norm = torch.nn.LayerNorm(4)
def forward(self, x):
for blk in self.layers:
x = blk(x)
return self.norm(x)
model = ToyModel().to(device_type)
return model
def apply_manual_reordering_and_get_graph(graph, module_bucket_plans, out_li) -> None:
gm = graph.owning_module
from torch._inductor.fx_passes.overlap_manual_scheduling import (
ManualOverlapScheduler,
)
for node in list(gm.graph.nodes):
if (
node.name == "all_gather_into_tensor"
or node.name == "all_gather_into_tensor_1"
or node.name == "wait_tensor"
or node.name == "wait_tensor_1"
):
node.meta["nn_module_stack"] = {"test": ["module_1", ""]}
if (
node.name == "all_gather_into_tensor_2"
or node.name == "all_gather_into_tensor_3"
or node.name == "wait_tensor_2"
or node.name == "wait_tensor_3"
):
node.meta["nn_module_stack"] = {"test": ["module_2", ""]}
overlapped_gm = ManualOverlapScheduler(
gm, module_bucket_plans, insert_overlap_deps=False
).run()
overlapped_gm.graph.lint()
out_li.append(overlapped_gm.graph)
def run_and_get_manual_aten_graph(fn, module_bucket_plans, *inputs):
li = []
apply = functools.partial(
apply_manual_reordering_and_get_graph,
module_bucket_plans=module_bucket_plans,
out_li=li,
)
with torch._inductor.config.patch(post_grad_custom_post_pass=apply):
out = fn(*inputs)
return out, li[0]
class TestManualOverlapBucketing(TestComputeCommReorderingMultiProc):
"""
Tests for manual overlap scheduling and subgraph utilities.
"""
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_make_graph_view_and_get_subgraph_by_path(self):
from torch._inductor.fx_passes.graph_view import (
get_subgraph_by_path,
make_graph_view,
)
model = get_toy_model(device_type)
gm = torch.fx.symbolic_trace(model)
graph_view = make_graph_view(gm.graph)
# Fetch subgraph for first transformer layer
sub_nodes = get_subgraph_by_path(graph_view, "layers.0.wq")
self.assertEqual([n.name for n in sub_nodes], ["layers_0_wq"])
# Fetch multiple paths at once
multi_nodes = get_subgraph_by_path(graph_view, ["layers.0.wq", "layers.0.proj"])
self.assertEqual(
[n.name for n in multi_nodes], ["layers_0_wq", "layers_0_proj"]
)
# Fetch non existing paths
non_exist_nodes = get_subgraph_by_path(graph_view, "nonexistent.module.path")
self.assertEqual(non_exist_nodes, [])
# Fetch mixed of existing and non existing paths
mixed_nodes = get_subgraph_by_path(
graph_view, ["layers.0.wq", "nonexistent.module.path"]
)
self.assertEqual([n.name for n in mixed_nodes], ["layers_0_wq"])
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_manual_reordering_bucketing_pass_separate_buckets(
self,
):
def func(a, b, c, d, *, ranks):
# All 4 all-gathers are independent - COULD be bucketed together
ag1 = _functional_collectives.all_gather_tensor(a, 0, ranks)
ag2 = _functional_collectives.all_gather_tensor(b, 0, ranks)
ag3 = _functional_collectives.all_gather_tensor(c[:4], 0, ranks)
ag4 = _functional_collectives.all_gather_tensor(d[:4], 0, ranks)
# First compute - can hide ag1 and ag2
e = a * 5 # Use a to avoid fusion
mm1 = torch.matmul(e, e.T)
# Force ag1/ag2 to complete before mm2 (but ag3/ag4 can still be deferred)
# Use first 8x8 elements to match mm1's shape
intermediate = ag1[:8, :8] + ag2[:8, :8]
# Second compute - depends on ag1/ag2 through intermediate, can hide ag3/ag4
mm2 = torch.matmul(mm1 + intermediate, c[:8])
# Use all results
result = (
ag1.sum() * 1.1
+ ag2.sum() * 1.2
+ ag3.sum() * 1.3
+ ag4.sum() * 1.4
+ mm1.sum()
+ mm2.sum()
)
return result
with _dynamo_dist_per_rank_init(
self.rank,
self.world_size,
self.backend(device_type),
fake_pg=not at_least_x_gpu(2),
):
a = torch.ones(8, 8, dtype=torch.float, device=device_type)
b = torch.ones(8, 8, dtype=torch.float, device=device_type) * 2
c = torch.ones(8, 8, dtype=torch.float, device=device_type) * 3
d = torch.ones(8, 8, dtype=torch.float, device=device_type) * 4
ranks = list(range(self.world_size))
func_c = functools.partial(func, ranks=ranks)
compiled = torch.compile(func_c)
out, aten_graph = run_and_get_manual_aten_graph(
compiled, ["module_1", "module_2"], a, b, c, d
)
(
FileCheck()
.check("_pre_bucket_all_gather")
.check("all_gather_into_tensor_out")
.check("_pre_bucket_all_gather_1")
.check("all_gather_into_tensor_out_1")
.check("wait_tensor_4")
.check("wait_tensor_5")
.run(str(aten_graph))
)
correct = func(a, b, c, d, ranks=ranks)
self.assertTrue(same(out, correct))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_bucketing_reordering_pass_no_bucket(
self,
):
def func(a, b, c, d, *, ranks):
# All 4 all-gathers are independent - COULD be bucketed together
ag1 = _functional_collectives.all_gather_tensor(a, 0, ranks)
ag2 = _functional_collectives.all_gather_tensor(b, 0, ranks)
ag3 = _functional_collectives.all_gather_tensor(c[:4], 0, ranks)
ag4 = _functional_collectives.all_gather_tensor(d[:4], 0, ranks)
# First compute - can hide ag1 and ag2
e = a * 5 # Use a to avoid fusion
mm1 = torch.matmul(e, e.T)
# Force ag1/ag2 to complete before mm2 (but ag3/ag4 can still be deferred)
# Use first 8x8 elements to match mm1's shape
intermediate = ag1[:8, :8] + ag2[:8, :8]
# Second compute - depends on ag1/ag2 through intermediate, can hide ag3/ag4
mm2 = torch.matmul(mm1 + intermediate, c[:8])
# Use all results
result = (
ag1.sum() * 1.1
+ ag2.sum() * 1.2
+ ag3.sum() * 1.3
+ ag4.sum() * 1.4
+ mm1.sum()
+ mm2.sum()
)
return result
with _dynamo_dist_per_rank_init(
self.rank,
self.world_size,
self.backend(device_type),
fake_pg=not at_least_x_gpu(2),
):
a = torch.ones(8, 8, dtype=torch.float, device=device_type)
b = torch.ones(8, 8, dtype=torch.float, device=device_type) * 2
c = torch.ones(8, 8, dtype=torch.float, device=device_type) * 3
d = torch.ones(8, 8, dtype=torch.float, device=device_type) * 4
ranks = list(range(self.world_size))
func_c = functools.partial(func, ranks=ranks)
compiled = torch.compile(func_c)
out, aten_graph = run_and_get_manual_aten_graph(compiled, [], a, b, c, d)
(
FileCheck()
.check("all_gather_into_tensor")
.check("all_gather_into_tensor_1")
.check("all_gather_into_tensor_2")
.check("all_gather_into_tensor_3")
.check("wait_tensor")
.check("wait_tensor_1")
.check("wait_tensor_2")
.check("wait_tensor_3")
.run(str(aten_graph))
)
correct = func(a, b, c, d, ranks=ranks)
self.assertTrue(same(out, correct))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_bucketing_reordering_pass_single_bucket(
self,
):
def func(a, b, c, d, *, ranks):
# All 4 all-gathers are independent - COULD be bucketed together
ag1 = _functional_collectives.all_gather_tensor(a, 0, ranks)
ag2 = _functional_collectives.all_gather_tensor(b, 0, ranks)
ag3 = _functional_collectives.all_gather_tensor(c[:4], 0, ranks)
ag4 = _functional_collectives.all_gather_tensor(d[:4], 0, ranks)
# First compute - can hide ag1 and ag2
e = a * 5 # Use a to avoid fusion
mm1 = torch.matmul(e, e.T)
# Force ag1/ag2 to complete before mm2 (but ag3/ag4 can still be deferred)
# Use first 8x8 elements to match mm1's shape
intermediate = ag1[:8, :8] + ag2[:8, :8]
# Second compute - depends on ag1/ag2 through intermediate, can hide ag3/ag4
mm2 = torch.matmul(mm1 + intermediate, c[:8])
# Use all results
result = (
ag1.sum() * 1.1
+ ag2.sum() * 1.2
+ ag3.sum() * 1.3
+ ag4.sum() * 1.4
+ mm1.sum()
+ mm2.sum()
)
return result
with _dynamo_dist_per_rank_init(
self.rank,
self.world_size,
self.backend(device_type),
fake_pg=not at_least_x_gpu(2),
):
a = torch.ones(8, 8, dtype=torch.float, device=device_type)
b = torch.ones(8, 8, dtype=torch.float, device=device_type) * 2
c = torch.ones(8, 8, dtype=torch.float, device=device_type) * 3
d = torch.ones(8, 8, dtype=torch.float, device=device_type) * 4
ranks = list(range(self.world_size))
func_c = functools.partial(func, ranks=ranks)
compiled = torch.compile(func_c)
out, aten_graph = run_and_get_manual_aten_graph(
compiled, [["module_1", "module_2"]], a, b, c, d
)
(
FileCheck()
.check("_pre_bucket_all_gather")
.check("all_gather_into_tensor_out")
.check("wait_tensor_4")
.run(str(aten_graph))
)
correct = func(a, b, c, d, ranks=ranks)
self.assertTrue(same(out, correct))
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests

View File

@ -1341,11 +1341,13 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert counter.op_count == 3 # It generates 2 getattr to unpack the array
assert same(out, correct)
# This doesn't work in all cases, and now we properly loudly error.
# See: https://github.com/pytorch/pytorch/issues/151240
# When differentiable funcols are implemented can revert.
@unittest.expectedFailure
def test_backwards(self):
"""
It's probably not that common to need backwards support for collectives.
However, I wanted to at least see if it was possible to support it as a design goal.
"""
def func(inp):
ar = _functional_collectives.all_reduce(inp, "sum", "0")
return ar

View File

@ -950,7 +950,7 @@ SeqNr|OrigAten|SrcFn|FwdSrcFn
2|aten.threshold_backward.default||relu
1|aten.native_batch_norm_backward.default||batch_norm
0|aten.convolution_backward.default||conv2d
11|aten.add.Tensor||
11|aten.add.Tensor||l1_loss
"""
),
)

View File

@ -2363,34 +2363,6 @@ class FunctionTests(torch._dynamo.test_case.TestCase):
self.assertTrue(same(output, expected))
assert cnt.frame_count == 1
@unittest.skipIf(sys.version_info < (3, 13), "math.fma introduced in python 3.13")
def test_math_fma(self):
def fma_func(a, b, c):
return math.fma(a, b, c)
# Test with scalar constants (constant folding path)
cnt = torch._dynamo.testing.CompileCounter()
cfma_scalars = torch._dynamo.optimize_assert(cnt)(fma_func)
assert cnt.frame_count == 0
expected = fma_func(2.0, 3.0, 4.0)
output = cfma_scalars(2.0, 3.0, 4.0)
self.assertEqual(output, expected)
assert cnt.frame_count == 0
# Test with tensors (Inductor path)
cnt2 = torch._dynamo.testing.CompileCounter()
cfma_tensors = torch._dynamo.optimize_assert(cnt2)(fma_func)
assert cnt2.frame_count == 0
x = torch.tensor(2.0)
y = torch.tensor(3.0)
z = torch.tensor(4.0)
expected_tensors = x * y + z
output_tensors = cfma_tensors(x, y, z)
torch.testing.assert_close(output_tensors, expected_tensors)
assert cnt2.frame_count == 1
@make_test
def test_numpy_meshgrid(x, y):
r1, r2 = np.meshgrid(x.numpy(), y.numpy())

View File

@ -5788,20 +5788,6 @@ utils_device.CURRENT_DEVICE == None""".split("\n"):
self.assertTrue(torch.allclose(dynamo_output, output))
def test_repr(self):
class Config:
def __repr__(self):
return "Config()"
def forward(x, config):
return x * len(repr(config))
config = Config()
x = torch.randn(2, 2)
compiled = torch.compile(forward, fullgraph=True)
compiled(x, config)
def test_nn_functional_reduction(self):
def fn(loss, reduction):
reduction_enum = F._Reduction.get_enum(reduction)

View File

@ -335,59 +335,6 @@ class <lambda>(torch.nn.Module):
""",
)
@requires_cuda
@requires_multigpu()
def test_new_event_api(self) -> None:
from torch._dynamo.graph_bytecode_inputs import get_external_object_by_index
from torch._dynamo.variables.streams import new_event
def event_generation_backend(gm, *args, **kwargs): # type: ignore[no-untyped-def]
e0_ind = new_event()
with torch.Stream(device="cuda:1"):
get_external_object_by_index(e0_ind).record()
e1_ind = new_event()
self.assertNotEqual(e0_ind, e1_ind)
self.assertNotEqual(
get_external_object_by_index(e0_ind),
get_external_object_by_index(e1_ind),
)
with gm.graph.inserting_after(next(iter(gm.graph.nodes))):
gm.graph.call_function(
get_external_object_by_index, args=(1,), kwargs={}
)
return gm
@torch.compile(backend=event_generation_backend)
def fn(x):
return x + 1
fn(torch.ones(2, 2, device="cuda:0"))
@requires_cuda
def test_new_stream_api(self) -> None:
from torch._dynamo.graph_bytecode_inputs import get_external_object_by_index
from torch._dynamo.variables.streams import new_stream
def stream_generation_backend(gm, *args, **kwargs): # type: ignore[no-untyped-def]
s0_ind = new_stream()
s1_ind = new_stream()
self.assertNotEqual(s0_ind, s1_ind)
self.assertNotEqual(
get_external_object_by_index(s0_ind),
get_external_object_by_index(s1_ind),
)
with gm.graph.inserting_after(next(iter(gm.graph.nodes))):
gm.graph.call_function(
get_external_object_by_index, args=(1,), kwargs={}
)
return gm
@torch.compile(backend=stream_generation_backend)
def fn(x):
return x + 1
fn(torch.ones(2, 2, device="cuda:0"))
@requires_cuda
def test_stream_with_mutation(self):
def fn(x, y):
@ -576,23 +523,6 @@ class <lambda>(torch.nn.Module):
torch.accelerator.set_stream(original_stream)
reset_user_object_tracking()
@requires_cuda
def test_run_opcheck_wait_record_stream(self):
from torch._dynamo.variables.streams import wait_stream
from torch.library import opcheck
s0 = torch.Stream()
s1 = torch.Stream()
s2 = torch.Stream()
store_user_object_weakrefs(s0, s1, s2)
sample_inputs = [
(0, 1),
(2, 0),
]
for args in sample_inputs:
opcheck(wait_stream, args)
@requires_cuda
def test_inductor_lowering(self):
with patch("torch._inductor.config.implicit_fallbacks", False):

Some files were not shown because too many files have changed in this diff.