Mirror of https://github.com/pytorch/pytorch.git

Compare commits: ffast_math...mlazos/mod (1 commit)

Commit: 3494fa6450
@@ -373,13 +373,6 @@ case "$image" in
CONDA_CMAKE=yes
EXECUTORCH=yes
;;
pytorch-linux-jammy-py3.12-halide)
CUDA_VERSION=12.4
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11
CONDA_CMAKE=yes
HALIDE=yes
;;
pytorch-linux-focal-linter)
# TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627.
# We will need to update mypy version eventually, but that's for another day. The task
@@ -497,7 +490,6 @@ docker build \
--build-arg "DOCS=${DOCS}" \
--build-arg "INDUCTOR_BENCHMARKS=${INDUCTOR_BENCHMARKS}" \
--build-arg "EXECUTORCH=${EXECUTORCH}" \
--build-arg "HALIDE=${HALIDE}" \
--build-arg "XPU_VERSION=${XPU_VERSION}" \
--build-arg "ACL=${ACL:-}" \
--build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \
@@ -1 +1 @@
172574a6be5910a4609e4ed1bef2b6b8475ddb3d
d4b3e5cc607e97afdba79dc90f8ef968142f347c
@@ -1 +0,0 @@
340136fec6d3ebc73e7a19eba1663e9b0ba8ab2d
@@ -1 +1 @@
aac14a3b93f11d781d1d5ebc5400b15ae8df5185
b8c64f64c18d8cac598b3adb355c21e7439c21de
@@ -37,9 +37,6 @@ install_conda_dependencies() {
install_pip_dependencies() {
pushd executorch/.ci/docker
# Install PyTorch CPU build beforehand to avoid installing the much bigger CUDA
# binaries later, ExecuTorch only needs CPU
pip_install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
# Install all Python dependencies
pip_install -r requirements-ci.txt
popd
@@ -47,14 +44,13 @@ install_pip_dependencies() {
setup_executorch() {
pushd executorch
# Setup swiftshader and Vulkan SDK which are required to build the Vulkan delegate
as_jenkins bash .ci/scripts/setup-vulkan-linux-deps.sh
source .ci/scripts/utils.sh

export PYTHON_EXECUTABLE=python
export EXECUTORCH_BUILD_PYBIND=ON
export CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"
install_flatc_from_source
pip_install .

as_jenkins .ci/scripts/setup-linux.sh cmake
# Make sure that all the newly generate files are owned by Jenkins
chown -R jenkins .
popd
}
@@ -1,46 +0,0 @@
#!/bin/bash
set -ex

source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

COMMIT=$(get_pinned_commit halide)
test -n "$COMMIT"

# activate conda to populate CONDA_PREFIX
test -n "$ANACONDA_PYTHON_VERSION"
eval "$(conda shell.bash hook)"
conda activate py_$ANACONDA_PYTHON_VERSION

if [ -n "${UBUNTU_VERSION}" ];then
apt update
apt-get install -y lld liblld-15-dev libpng-dev libjpeg-dev libgl-dev \
libopenblas-dev libeigen3-dev libatlas-base-dev libzstd-dev
fi

conda_install numpy scipy imageio cmake ninja

git clone --depth 1 --branch release/16.x --recursive https://github.com/llvm/llvm-project.git
cmake -DCMAKE_BUILD_TYPE=Release \
-DLLVM_ENABLE_PROJECTS="clang" \
-DLLVM_TARGETS_TO_BUILD="X86;NVPTX" \
-DLLVM_ENABLE_TERMINFO=OFF -DLLVM_ENABLE_ASSERTIONS=ON \
-DLLVM_ENABLE_EH=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_BUILD_32_BITS=OFF \
-S llvm-project/llvm -B llvm-build -G Ninja
cmake --build llvm-build
cmake --install llvm-build --prefix llvm-install
export LLVM_ROOT=`pwd`/llvm-install
export LLVM_CONFIG=$LLVM_ROOT/bin/llvm-config

git clone https://github.com/halide/Halide.git
pushd Halide
git checkout ${COMMIT} && git submodule update --init --recursive
pip_install -r requirements.txt
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -S . -B build
cmake --build build
test -e ${CONDA_PREFIX}/lib/python3 || ln -s python${ANACONDA_PYTHON_VERSION} ${CONDA_PREFIX}/lib/python3
cmake --install build --prefix ${CONDA_PREFIX}
chown -R jenkins ${CONDA_PREFIX}
popd
rm -rf Halide llvm-build llvm-project llvm-install

python -c "import halide" # check for errors
@@ -33,9 +33,7 @@ pip_install coloredlogs packaging
pip_install onnxruntime==1.18
pip_install onnx==1.16.0
# pip_install "onnxscript@git+https://github.com/microsoft/onnxscript@3e869ef8ccf19b5ebd21c10d3e9c267c9a9fa729" --no-deps
pip_install onnxscript==0.1.0.dev20240613 --no-deps
# required by onnxscript
pip_install ml_dtypes
pip_install onnxscript==0.1.0.dev20240523 --no-deps

# Cache the transformers model to be used later by ONNX tests. We need to run the transformers
# package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/
@@ -85,10 +85,10 @@ librosa>=0.6.2 ; python_version < "3.11"
#Pinned versions:
#test that import:

mypy==1.10.0
mypy==1.9.0
# Pin MyPy version because new errors are likely to appear with each release
#Description: linter
#Pinned versions: 1.10.0
#Pinned versions: 1.9.0
#test that import: test_typing.py, test_type_hints.py

networkx==2.8.8
@@ -103,14 +103,6 @@ COPY triton_version.txt triton_version.txt
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
RUN rm install_triton.sh common_utils.sh triton.txt triton_version.txt

ARG HALIDE
# Build and install halide
COPY ./common/install_halide.sh install_halide.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/halide.txt halide.txt
RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi
RUN rm install_halide.sh common_utils.sh halide.txt

# Install ccache/sccache (do this last, so we get priority in PATH)
COPY ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH

@@ -155,14 +155,6 @@ COPY ci_commit_pins/executorch.txt executorch.txt
RUN if [ -n "${EXECUTORCH}" ]; then bash ./install_executorch.sh; fi
RUN rm install_executorch.sh common_utils.sh executorch.txt

ARG HALIDE
# Build and install halide
COPY ./common/install_halide.sh install_halide.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/halide.txt halide.txt
RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi
RUN rm install_halide.sh common_utils.sh halide.txt

ARG ONNX
# Install ONNX dependencies
COPY ./common/install_onnx.sh ./common/common_utils.sh ./
@@ -284,26 +284,12 @@ else
# Which should be backward compatible with Numpy-1.X
python -mpip install --pre numpy==2.0.0rc1
fi

WERROR=1 python setup.py clean

if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
BUILD_LIBTORCH_WHL=1 BUILD_PYTHON_ONLY=0 python setup.py bdist_wheel
BUILD_LIBTORCH_WHL=0 BUILD_PYTHON_ONLY=1 python setup.py bdist_wheel --cmake
else
WERROR=1 python setup.py bdist_wheel
fi
WERROR=1 python setup.py bdist_wheel
else
python setup.py clean
if [[ "$BUILD_ENVIRONMENT" == *xla* ]]; then
source .ci/pytorch/install_cache_xla.sh
fi
if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
echo "USE_SPLIT_BUILD cannot be used with xla or rocm"
exit 1
else
python setup.py bdist_wheel
fi
python setup.py bdist_wheel
fi
pip_install_whl "$(echo dist/*.whl)"
@@ -342,10 +328,9 @@ else
CUSTOM_OP_TEST="$PWD/test/custom_operator"
python --version
SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"

mkdir -p "$CUSTOM_OP_BUILD"
pushd "$CUSTOM_OP_BUILD"
cmake "$CUSTOM_OP_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch;$SITE_PACKAGES" -DPython_EXECUTABLE="$(which python)" \
cmake "$CUSTOM_OP_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPython_EXECUTABLE="$(which python)" \
-DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM"
make VERBOSE=1
popd
@@ -358,7 +343,7 @@ else
SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
mkdir -p "$JIT_HOOK_BUILD"
pushd "$JIT_HOOK_BUILD"
cmake "$JIT_HOOK_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch;$SITE_PACKAGES" -DPython_EXECUTABLE="$(which python)" \
cmake "$JIT_HOOK_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPython_EXECUTABLE="$(which python)" \
-DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM"
make VERBOSE=1
popd
@@ -370,7 +355,7 @@ else
python --version
mkdir -p "$CUSTOM_BACKEND_BUILD"
pushd "$CUSTOM_BACKEND_BUILD"
cmake "$CUSTOM_BACKEND_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch;$SITE_PACKAGES" -DPython_EXECUTABLE="$(which python)" \
cmake "$CUSTOM_BACKEND_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPython_EXECUTABLE="$(which python)" \
-DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM"
make VERBOSE=1
popd
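Each of these test builds points CMake at the torch package inside site-packages, since the installed wheel carries torch's CMake package files under that directory; a minimal standalone sketch of the same lookup, assuming the wheel is already installed (the build directory name here is illustrative, not from the diff):

    # Locate the installed torch package; its CMake config lives under
    # <site-packages>/torch, so that path works as CMAKE_PREFIX_PATH.
    SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"

    mkdir -p custom-op-build-example
    cmake -S test/custom_operator -B custom-op-build-example \
      -DCMAKE_PREFIX_PATH="${SITE_PACKAGES}/torch" \
      -DPython_EXECUTABLE="$(which python)"
    cmake --build custom-op-build-example -- VERBOSE=1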
@@ -56,29 +56,9 @@ function assert_git_not_dirty() {
function pip_install_whl() {
# This is used to install PyTorch and other build artifacts wheel locally
# without using any network connection

# Convert the input arguments into an array
local args=("$@")

# Check if the first argument contains multiple paths separated by spaces
if [[ "${args[0]}" == *" "* ]]; then
# Split the string by spaces into an array
IFS=' ' read -r -a paths <<< "${args[0]}"
# Loop through each path and install individually
for path in "${paths[@]}"; do
echo "Installing $path"
python3 -mpip install --no-index --no-deps "$path"
done
else
# Loop through each argument and install individually
for path in "${args[@]}"; do
echo "Installing $path"
python3 -mpip install --no-index --no-deps "$path"
done
fi
python3 -mpip install --no-index --no-deps "$@"
}


function pip_install() {
# retry 3 times
# old versions of pip don't have the "--progress-bar" flag
@@ -208,6 +188,28 @@ function clone_pytorch_xla() {
fi
}

function checkout_install_torchdeploy() {
local commit
commit=$(get_pinned_commit multipy)
pushd ..
git clone --recurse-submodules https://github.com/pytorch/multipy.git
pushd multipy
git checkout "${commit}"
python multipy/runtime/example/generate_examples.py
BUILD_CUDA_TESTS=1 pip install -e .
popd
popd
}

function test_torch_deploy(){
pushd ..
pushd multipy
./multipy/runtime/build/test_deploy
./multipy/runtime/build/test_deploy_gpu
popd
popd
}

function checkout_install_torchbench() {
local commit
commit=$(get_pinned_commit torchbench)
@@ -289,9 +289,6 @@ test_python_shard() {

# Bare --include flag is not supported and quoting for lint ends up with flag not being interpreted correctly
# shellcheck disable=SC2086

# modify LD_LIBRARY_PATH to ensure it has the conda env.
# This set of tests has been shown to be buggy without it for the split-build
time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests $INCLUDE_CLAUSE --shard "$1" "$NUM_TEST_SHARDS" --verbose $PYTHON_TEST_EXTRA_OPTION

assert_git_not_dirty
@@ -350,31 +347,17 @@ test_inductor_distributed() {
assert_git_not_dirty
}

test_inductor_shard() {
if [[ -z "$NUM_TEST_SHARDS" ]]; then
echo "NUM_TEST_SHARDS must be defined to run a Python test shard"
exit 1
fi

test_inductor() {
python tools/dynamo/verify_dynamo.py
python test/run_test.py --inductor \
--include test_modules test_ops test_ops_gradients test_torch \
--shard "$1" "$NUM_TEST_SHARDS" \
--verbose

python test/run_test.py --inductor --include test_modules test_ops test_ops_gradients test_torch --verbose
# Do not add --inductor for the following inductor unit tests, otherwise we will fail because of nested dynamo state
python test/run_test.py \
--include inductor/test_torchinductor inductor/test_torchinductor_opinfo inductor/test_aot_inductor \
--shard "$1" "$NUM_TEST_SHARDS" \
--verbose
}
python test/run_test.py --include inductor/test_torchinductor inductor/test_torchinductor_opinfo inductor/test_aot_inductor --verbose

test_inductor_aoti() {
# docker build uses bdist_wheel which does not work with test_aot_inductor
# TODO: need a faster way to build
if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then
BUILD_AOT_INDUCTOR_TEST=1 python setup.py develop
CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference
BUILD_AOT_INDUCTOR_TEST=1 python setup.py develop
CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference
fi
}

@@ -567,11 +550,6 @@ test_inductor_micro_benchmark() {
python benchmarks/gpt_fast/benchmark.py --output "${TEST_REPORTS_DIR}/gpt_fast_benchmark.csv"
}

test_inductor_halide() {
python test/run_test.py --include inductor/test_halide.py --verbose
assert_git_not_dirty
}

test_dynamo_benchmark() {
# Usage: test_dynamo_benchmark huggingface 0
TEST_REPORTS_DIR=$(pwd)/test/test-reports
@@ -1191,21 +1169,15 @@ test_executorch() {

pushd /executorch

export PYTHON_EXECUTABLE=python
export EXECUTORCH_BUILD_PYBIND=ON
export CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"

# NB: We need to rebuild ExecuTorch runner here because it depends on PyTorch
# from the PR
# NB: We need to build ExecuTorch runner here and not inside the Docker image
# because it depends on PyTorch
# shellcheck disable=SC1091
source .ci/scripts/setup-linux.sh cmake

echo "Run ExecuTorch unit tests"
pytest -v -n auto
# shellcheck disable=SC1091
LLVM_PROFDATA=llvm-profdata-12 LLVM_COV=llvm-cov-12 bash test/run_oss_cpp_tests.sh
source .ci/scripts/utils.sh
build_executorch_runner "cmake"

echo "Run ExecuTorch regression tests for some models"
# NB: This is a sample model, more can be added here
export PYTHON_EXECUTABLE=python
# TODO(huydhn): Add more coverage here using ExecuTorch's gather models script
# shellcheck disable=SC1091
source .ci/scripts/test.sh mv3 cmake xnnpack-quantization-delegation ''
@@ -1265,10 +1237,11 @@ elif [[ "$TEST_CONFIG" == distributed ]]; then
if [[ "${SHARD_NUMBER}" == 1 ]]; then
test_rpc
fi
elif [[ "$TEST_CONFIG" == deploy ]]; then
checkout_install_torchdeploy
test_torch_deploy
elif [[ "${TEST_CONFIG}" == *inductor_distributed* ]]; then
test_inductor_distributed
elif [[ "${TEST_CONFIG}" == *inductor-halide* ]]; then
test_inductor_halide
elif [[ "${TEST_CONFIG}" == *inductor-micro-benchmark* ]]; then
test_inductor_micro_benchmark
elif [[ "${TEST_CONFIG}" == *huggingface* ]]; then
@@ -1313,14 +1286,10 @@ elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then
elif [[ "${TEST_CONFIG}" == *inductor_cpp_wrapper_abi_compatible* ]]; then
install_torchvision
test_inductor_cpp_wrapper_abi_compatible
elif [[ "${TEST_CONFIG}" == *inductor* && "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
elif [[ "${TEST_CONFIG}" == *inductor* && "${SHARD_NUMBER}" == 1 ]]; then
install_torchvision
test_inductor_shard 1
test_inductor_aoti
test_inductor
test_inductor_distributed
elif [[ "${TEST_CONFIG}" == *inductor* && "${SHARD_NUMBER}" -gt 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
install_torchvision
test_inductor_shard "${SHARD_NUMBER}"
elif [[ "${TEST_CONFIG}" == *dynamo* && "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
install_torchvision
test_dynamo_shard 1
@@ -33,9 +33,9 @@ if [[ -z "$DOCKER_IMAGE" ]]; then
if [[ "$PACKAGE_TYPE" == conda ]]; then
export DOCKER_IMAGE="pytorch/conda-cuda"
elif [[ "$DESIRED_CUDA" == cpu ]]; then
export DOCKER_IMAGE="pytorch/manylinux:cpu"
export DOCKER_IMAGE="pytorch/manylinux-cpu"
else
export DOCKER_IMAGE="pytorch/manylinux-builder:${DESIRED_CUDA:2}"
export DOCKER_IMAGE="pytorch/manylinux-cuda${DESIRED_CUDA:2}"
fi
fi
@@ -75,9 +75,9 @@ export PYTORCH_BUILD_NUMBER=1
TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_version.txt)

# Here PYTORCH_EXTRA_INSTALL_REQUIREMENTS is already set for the all the wheel builds hence append TRITON_CONSTRAINT
TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64' and python_version < '3.13'"
if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" ]]; then
# Only linux Python < 3.13 are supported wheels for triton
TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64' and python_version < '3.13'"
TRITON_REQUIREMENT="triton==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
if [[ -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*dev.* ]]; then
TRITON_SHORTHASH=$(cut -c1-10 $PYTORCH_ROOT/.ci/docker/ci_commit_pins/triton.txt)
@@ -87,11 +87,11 @@ if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:
fi

# Set triton via PYTORCH_EXTRA_INSTALL_REQUIREMENTS for triton rocm package
if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*rocm.* && $(uname) == "Linux" ]]; then
TRITON_REQUIREMENT="pytorch-triton-rocm==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*rocm.* && $(uname) == "Linux" && "$DESIRED_PYTHON" != "3.12" ]]; then
TRITON_REQUIREMENT="pytorch-triton-rocm==${TRITON_VERSION}"
if [[ -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*dev.* ]]; then
TRITON_SHORTHASH=$(cut -c1-10 $PYTORCH_ROOT/.ci/docker/ci_commit_pins/triton-rocm.txt)
TRITON_REQUIREMENT="pytorch-triton-rocm==${TRITON_VERSION}+${TRITON_SHORTHASH}; ${TRITON_CONSTRAINT}"
TRITON_REQUIREMENT="pytorch-triton-rocm==${TRITON_VERSION}+${TRITON_SHORTHASH}"
fi
if [[ -z "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" ]]; then
export PYTORCH_EXTRA_INSTALL_REQUIREMENTS="${TRITON_REQUIREMENT}"
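The TRITON_CONSTRAINT here is a standard environment marker appended to the requirement string, so the same PYTORCH_EXTRA_INSTALL_REQUIREMENTS value can be shipped on every platform while pip only pulls triton where the marker holds; a small sketch of how such a marker behaves, with an illustrative version number not taken from the diff:

    TRITON_VERSION=3.0.0  # illustrative; CI reads the real value from triton_version.txt
    TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64' and python_version < '3.13'"
    TRITON_REQUIREMENT="triton==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"

    # pip evaluates the marker against the current interpreter and platform and
    # silently skips the requirement when it is false (for example on macOS or Python 3.13).
    python3 -m pip install "${TRITON_REQUIREMENT}"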
.github/actions/diskspace-cleanup/action.yml (vendored, 6 lines changed)
@@ -14,14 +14,12 @@ runs:
- name: Cleans up diskspace
shell: bash
run: |
set -ex
diskspace_cutoff=${{ inputs.diskspace-cutoff }}
docker_root_dir=$(docker info -f '{{.DockerRootDir}}')
diskspace=$(df -H --output=pcent ${docker_root_dir} | sed -n 2p | sed 's/%//' | sed 's/ //')
diskspace=$(df -H / --output=pcent | sed -n 2p | sed 's/%//' | sed 's/ //')
msg="Please file an issue on pytorch/pytorch reporting the faulty runner. Include a link to the runner logs so the runner can be identified"
if [[ "$diskspace" -ge "$diskspace_cutoff" ]] ; then
docker system prune -af
diskspace_new=$(df -H --output=pcent ${docker_root_dir} | sed -n 2p | sed 's/%//' | sed 's/ //')
diskspace_new=$(df -H / --output=pcent | sed -n 2p | sed 's/%//' | sed 's/ //')
if [[ "$diskspace_new" -gt "$diskspace_cutoff" ]] ; then
echo "Error: Available diskspace is less than $diskspace_cutoff percent. Not enough diskspace."
echo "$msg"
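The cleanup step derives the used-space percentage by parsing df output rather than querying Docker; a minimal standalone sketch of that parsing and the prune decision, where the 90 cutoff is an assumed example value (the action takes it from its diskspace-cutoff input):

    #!/bin/bash
    set -e

    diskspace_cutoff=90  # assumed example threshold, in percent

    # --output=pcent prints only the Use% column; the second line holds the value,
    # and the sed calls strip the % sign and padding spaces.
    diskspace=$(df -H / --output=pcent | sed -n 2p | sed 's/%//' | sed 's/ //')

    if [[ "$diskspace" -ge "$diskspace_cutoff" ]]; then
      echo "Disk usage ${diskspace}% is at or above ${diskspace_cutoff}%, pruning Docker data"
      docker system prune -af
    fi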
.github/actions/linux-build/action.yml (vendored, 21 lines changed)
@@ -52,13 +52,6 @@ inputs:
description: Hugging Face Hub token
required: false
default: ""
use_split_build:
description: |
[Experimental] Build a libtorch only wheel and build pytorch such that
are built from the libtorch wheel.
required: false
type: boolean
default: false
outputs:
docker-image:
value: ${{ steps.calculate-docker-image.outputs.docker-image }}
@@ -151,7 +144,6 @@ runs:
DEBUG: ${{ inputs.build-with-debug == 'true' && '1' || '0' }}
OUR_GITHUB_JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
HUGGING_FACE_HUB_TOKEN: ${{ inputs.HUGGING_FACE_HUB_TOKEN }}
USE_SPLIT_BUILD: ${{ inputs.use_split_build }}
shell: bash
run: |
# detached container should get cleaned up by teardown_ec2_linux
@@ -171,7 +163,6 @@ runs:
-e PR_LABELS \
-e OUR_GITHUB_JOB_ID \
-e HUGGING_FACE_HUB_TOKEN \
-e USE_SPLIT_BUILD \
--env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
--security-opt seccomp=unconfined \
--cap-add=SYS_PTRACE \
@@ -192,7 +183,7 @@ runs:

- name: Store PyTorch Build Artifacts on S3
uses: seemethere/upload-artifact-s3@v5
if: inputs.build-generates-artifacts == 'true' && steps.build.outcome != 'skipped' && inputs.use_split_build != 'true'
if: inputs.build-generates-artifacts == 'true' && steps.build.outcome != 'skipped'
with:
name: ${{ inputs.build-environment }}
retention-days: 14
@@ -200,16 +191,6 @@ runs:
path: artifacts.zip
s3-bucket: ${{ inputs.s3-bucket }}

- name: Store PyTorch Build Artifacts on S3 for split build
uses: seemethere/upload-artifact-s3@v5
if: inputs.build-generates-artifacts == 'true' && steps.build.outcome != 'skipped' && inputs.use_split_build == 'true'
with:
name: ${{ inputs.build-environment }}-experimental-split-build
retention-days: 14
if-no-files-found: error
path: artifacts.zip
s3-bucket: ${{ inputs.s3-bucket }}

- name: Upload sccache stats
if: steps.build.outcome != 'skipped'
uses: seemethere/upload-artifact-s3@v5
.github/ci_commit_pins/torchbench.txt (vendored, 2 lines changed)
@@ -1 +1 @@
23512dbebd44a11eb84afbf53c3c071dd105297e
d6015d42d9a1834bc7595c4bd6852562fb80b30b
.github/merge_rules.yaml (vendored, 3 lines changed)
@@ -27,9 +27,11 @@
- third_party/onnx
- caffe2/python/onnx/**
approved_by:
- BowenBao
- justinchuby
- liqunfu
- shubhambhokare1
- thiagocrepaldi
- titaiwangms
- wschin
- xadupre
@@ -242,7 +244,6 @@
- torch/csrc/xpu/**
- torch/xpu/**
- test/xpu/**
- test/test_xpu.py
- third_party/xpu.txt
- .ci/docker/ci_commit_pins/triton-xpu.txt
approved_by:
.github/pytorch-probot.yml (vendored, 1 line changed)
@@ -26,4 +26,3 @@ retryable_workflows:
- windows-binary
labeler_config: labeler.yml
label_to_label_config: label_to_label.yml
mergebot: True
.github/scripts/cherry_pick.py (vendored, 114 lines changed)
@@ -3,11 +3,11 @@
import json
import os
import re
from typing import Any, cast, Dict, List, Optional
from typing import Any, Optional

from urllib.error import HTTPError

from github_utils import gh_fetch_url, gh_post_pr_comment, gh_query_issues_by_labels
from github_utils import gh_fetch_url, gh_post_pr_comment

from gitutils import get_git_remote_name, get_git_repo_dir, GitRepo
from trymerge import get_pr_commit_sha, GitHubPR
@@ -19,7 +19,6 @@ REQUIRES_ISSUE = {
"critical",
"fixnewfeature",
}
RELEASE_BRANCH_REGEX = re.compile(r"release/(?P<version>.+)")


def parse_args() -> Any:
@@ -59,33 +58,6 @@ def get_merge_commit_sha(repo: GitRepo, pr: GitHubPR) -> Optional[str]:
return commit_sha if pr.is_closed() else None


def get_release_version(onto_branch: str) -> Optional[str]:
"""
Return the release version if the target branch is a release branch
"""
m = re.match(RELEASE_BRANCH_REGEX, onto_branch)
return m.group("version") if m else ""


def get_tracker_issues(
org: str, project: str, onto_branch: str
) -> List[Dict[str, Any]]:
"""
Find the tracker issue from the repo. The tracker issue needs to have the title
like [VERSION] Release Tracker following the convention on PyTorch
"""
version = get_release_version(onto_branch)
if not version:
return []

tracker_issues = gh_query_issues_by_labels(org, project, labels=["release tracker"])
if not tracker_issues:
return []

# Figure out the tracker issue from the list by looking at the title
return [issue for issue in tracker_issues if version in issue.get("title", "")]


def cherry_pick(
github_actor: str,
repo: GitRepo,
@@ -105,49 +77,17 @@ def cherry_pick(
)

try:
org, project = repo.gh_owner_and_name()

cherry_pick_pr = ""
if not dry_run:
org, project = repo.gh_owner_and_name()
cherry_pick_pr = submit_pr(repo, pr, cherry_pick_branch, onto_branch)

tracker_issues_comments = []
tracker_issues = get_tracker_issues(org, project, onto_branch)
for issue in tracker_issues:
issue_number = int(str(issue.get("number", "0")))
if not issue_number:
continue
msg = f"The cherry pick PR is at {cherry_pick_pr}"
if fixes:
msg += f" and it is linked with issue {fixes}"
elif classification in REQUIRES_ISSUE:
msg += f" and it is recommended to link a {classification} cherry pick PR with an issue"

res = cast(
Dict[str, Any],
post_tracker_issue_comment(
org,
project,
issue_number,
pr.pr_num,
cherry_pick_pr,
classification,
fixes,
dry_run,
),
)

comment_url = res.get("html_url", "")
if comment_url:
tracker_issues_comments.append(comment_url)

msg = f"The cherry pick PR is at {cherry_pick_pr}"
if fixes:
msg += f" and it is linked with issue {fixes}."
elif classification in REQUIRES_ISSUE:
msg += f" and it is recommended to link a {classification} cherry pick PR with an issue."

if tracker_issues_comments:
msg += " The following tracker issues are updated:\n"
for tracker_issues_comment in tracker_issues_comments:
msg += f"* {tracker_issues_comment}\n"

post_pr_comment(org, project, pr.pr_num, msg, dry_run)
post_comment(org, project, pr.pr_num, msg)

finally:
if current_branch:
@@ -219,9 +159,7 @@ def submit_pr(
raise RuntimeError(msg) from error


def post_pr_comment(
org: str, project: str, pr_num: int, msg: str, dry_run: bool = False
) -> List[Dict[str, Any]]:
def post_comment(org: str, project: str, pr_num: int, msg: str) -> None:
"""
Post a comment on the PR itself to point to the cherry picking PR when success
or print the error when failure
@@ -244,35 +182,7 @@ def post_pr_comment(
comment = "\n".join(
(f"### Cherry picking #{pr_num}", f"{msg}", "", f"{internal_debugging}")
)
return gh_post_pr_comment(org, project, pr_num, comment, dry_run)


def post_tracker_issue_comment(
org: str,
project: str,
issue_num: int,
pr_num: int,
cherry_pick_pr: str,
classification: str,
fixes: str,
dry_run: bool = False,
) -> List[Dict[str, Any]]:
"""
Post a comment on the tracker issue (if any) to record the cherry pick
"""
comment = "\n".join(
(
"Link to landed trunk PR (if applicable):",
f"* https://github.com/{org}/{project}/pull/{pr_num}",
"",
"Link to release branch PR:",
f"* {cherry_pick_pr}",
"",
"Criteria Category:",
" - ".join((classification.capitalize(), fixes.capitalize())),
)
)
return gh_post_pr_comment(org, project, issue_num, comment, dry_run)
gh_post_pr_comment(org, project, pr_num, comment)


def main() -> None:
@@ -304,7 +214,7 @@ def main() -> None:

except RuntimeError as error:
if not args.dry_run:
post_pr_comment(org, project, pr_num, str(error))
post_comment(org, project, pr_num, str(error))
else:
raise error
@@ -347,6 +347,10 @@ def generate_wheels_matrix(
for python_version in python_versions:
for arch_version in arches:
gpu_arch_type = arch_type(arch_version)
# Disable py3.12 builds for ROCm because of triton dependency
# on llnl-hatchet, which doesn't have py3.12 wheels available
if gpu_arch_type == "rocm" and python_version == "3.12":
continue
gpu_arch_version = (
""
if arch_version == "cpu"
.github/scripts/get_workflow_type.py (vendored, 70 lines changed)
@@ -1,16 +1,14 @@
import json
from argparse import ArgumentParser
from typing import Any, Tuple
from typing import Any

from github import Auth, Github
from github.Issue import Issue


WORKFLOW_LABEL_META = "" # use meta runners
WORKFLOW_LABEL_LF = "lf." # use runners from the linux foundation
LABEL_TYPE_KEY = "label_type"
MESSAGE_KEY = "message"
MESSAGE = "" # Debug message to return to the caller
WORKFLOW_TYPE_LABEL = "label"
WORKFLOW_TYPE_RG = "rg"
WORKFLOW_TYPE_BOTH = "both"


def parse_args() -> Any:
@@ -50,50 +48,48 @@ def is_exception_branch(branch: str) -> bool:
return branch.split("/")[0] in {"main", "nightly", "release", "landchecks"}


def get_workflow_type(issue: Issue, username: str) -> Tuple[str, str]:
def get_workflow_type(issue: Issue, username: str) -> str:
user_list = issue.get_comments()[0].body.split("\r\n")
try:
user_list = issue.get_comments()[0].body.split()

if user_list[0] == "!":
MESSAGE = "LF Workflows are disabled for everyone. Using meta runners."
return WORKFLOW_LABEL_META, MESSAGE
elif user_list[0] == "*":
MESSAGE = "LF Workflows are enabled for everyone. Using LF runners."
return WORKFLOW_LABEL_LF, MESSAGE
elif username in user_list:
MESSAGE = f"LF Workflows are enabled for {username}. Using LF runners."
return WORKFLOW_LABEL_LF, MESSAGE
else:
MESSAGE = f"LF Workflows are disabled for {username}. Using meta runners."
return WORKFLOW_LABEL_META, MESSAGE
run_option = issue.get_comments()[1].body.split("\r\n")[0]
except Exception as e:
MESSAGE = f"Failed to get determine workflow type. Falling back to meta runners. Exception: {e}"
return WORKFLOW_LABEL_META, MESSAGE
run_option = "single"

if user_list[0] == "!":
# Use old runners for everyone
return WORKFLOW_TYPE_LABEL
elif user_list[1] == "*":
if run_option == WORKFLOW_TYPE_BOTH:
# Use ARC runners and old runners for everyone
return WORKFLOW_TYPE_BOTH
else:
# Use only ARC runners for everyone
return WORKFLOW_TYPE_RG
elif username in user_list:
if run_option == WORKFLOW_TYPE_BOTH:
# Use ARC runners and old runners for a specific user
return WORKFLOW_TYPE_BOTH
else:
# Use only ARC runners for a specific user
return WORKFLOW_TYPE_RG
else:
# Use old runners by default
return WORKFLOW_TYPE_LABEL


def main() -> None:
args = parse_args()

if is_exception_branch(args.github_branch):
output = {
LABEL_TYPE_KEY: WORKFLOW_LABEL_META,
MESSAGE_KEY: f"Exception branch: '{args.github_branch}', using meta runners",
}
output = {"workflow_type": WORKFLOW_TYPE_LABEL}
else:
try:
gh = get_gh_client(args.github_token)
# The default issue we use - https://github.com/pytorch/test-infra/issues/5132
issue = get_issue(gh, args.github_repo, args.github_issue)
label_type, message = get_workflow_type(issue, args.github_user)
output = {
LABEL_TYPE_KEY: label_type,
MESSAGE_KEY: message,
}

output = {"workflow_type": get_workflow_type(issue, args.github_user)}
except Exception as e:
output = {
LABEL_TYPE_KEY: WORKFLOW_LABEL_META,
MESSAGE_KEY: f"Failed to get issue. Falling back to meta runners. Exception: {e}",
}
output = {"workflow_type": WORKFLOW_TYPE_LABEL}

json_output = json.dumps(output)
print(json_output)
.github/scripts/github_utils.py (vendored, 9 lines changed)
@@ -202,12 +202,3 @@ def gh_update_pr_state(org: str, repo: str, pr_num: int, state: str = "open") ->
)
else:
raise


def gh_query_issues_by_labels(
org: str, repo: str, labels: List[str], state: str = "open"
) -> List[Dict[str, Any]]:
url = f"{GITHUB_API_URL}/repos/{org}/{repo}/issues"
return gh_fetch_json(
url, method="GET", params={"labels": ",".join(labels), "state": state}
)
.github/scripts/lintrunner.sh (vendored, 1 line changed)
@@ -29,7 +29,6 @@ python3 -m tools.pyi.gen_pyi \
--native-functions-path aten/src/ATen/native/native_functions.yaml \
--tags-path aten/src/ATen/native/tags.yaml \
--deprecated-functions-path "tools/autograd/deprecated.yaml"
python3 torch/utils/data/datapipes/gen_pyi.py

RC=0
# Run lintrunner on all files
.github/scripts/test_trymerge.py (vendored, 3 lines changed)
@@ -180,9 +180,6 @@ def mock_gh_get_info() -> Any:
return {
"closed": False,
"isCrossRepository": False,
"headRefName": "foo",
"baseRefName": "bar",
"baseRepository": {"defaultBranchRef": {"name": "bar"}},
"files": {"nodes": [], "pageInfo": {"hasNextPage": False}},
"changedFiles": 0,
}
.github/scripts/trymerge.py (vendored, 9 lines changed)
@@ -2330,15 +2330,6 @@ def main() -> None:
dry_run=args.dry_run,
)
return
if not pr.is_ghstack_pr() and pr.base_ref() != pr.default_branch():
gh_post_pr_comment(
org,
project,
args.pr_num,
f"PR targets {pr.base_ref()} rather than {pr.default_branch()}, refusing merge request",
dry_run=args.dry_run,
)
return

if args.check_mergeability:
if pr.is_ghstack_pr():
.github/workflows/_linux-build-label.yml (vendored, 8 lines changed)
@@ -56,13 +56,6 @@ on:
required: false
type: string
default: ""
use_split_build:
description: |
[Experimental] Build a libtorch only wheel and build pytorch such that
are built from the libtorch wheel.
required: false
type: boolean
default: false
secrets:
HUGGING_FACE_HUB_TOKEN:
required: false
@@ -114,4 +107,3 @@ jobs:
aws-role-to-assume: ${{ inputs.aws-role-to-assume }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
use_split_build: ${{ inputs.use_split_build }}
.github/workflows/_linux-build.yml (vendored, 10 lines changed)
@@ -64,14 +64,6 @@ on:
required: false
type: string
default: ""
use_split_build:
description: |
[Experimental] Build a libtorch only wheel and build pytorch such that
are built from the libtorch wheel.
required: false
type: boolean
default: false

secrets:
HUGGING_FACE_HUB_TOKEN:
required: false
@@ -189,7 +181,6 @@ jobs:
DEBUG: ${{ inputs.build-with-debug && '1' || '0' }}
OUR_GITHUB_JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
USE_SPLIT_BUILD: ${{ inputs.use_split_build }}
run: |
# detached container should get cleaned up by teardown_ec2_linux
container_name=$(docker run \
@@ -208,7 +199,6 @@ jobs:
-e PR_LABELS \
-e OUR_GITHUB_JOB_ID \
-e HUGGING_FACE_HUB_TOKEN \
-e USE_SPLIT_BUILD \
--env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
--security-opt seccomp=unconfined \
--cap-add=SYS_PTRACE \
.github/workflows/_runner-determinator.yml (vendored, 137 lines changed)
@@ -15,142 +15,27 @@ on:
required: false
type: string
default: "5132"
description: |
Fetch's GitHub Issue from pytorch/test-infra
Example: https://github.com/pytorch/test-infra/issues/5132

outputs:
label-type:
workflow-type:
description: Type of runners to use
value: ${{ jobs.runner-determinator.outputs.label-type }}
value: ${{ jobs.runner-determinator.outputs.workflow-type }}

jobs:
runner-determinator:
runs-on: linux.4xlarge
outputs:
label-type: ${{ steps.set-condition.outputs.label-type }}
workflow-type: ${{ steps.set-condition.outputs.workflow-type }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ISSUE_NUMBER: ${{ inputs.issue_number }}
USERNAME: ${{ inputs.user_name }}
steps:
# - name: Checkout PyTorch
# uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
# with:
# fetch-depth: 1
# submodules: true

# TODO: Remove the hardcoded step below
# Hardcoding below is temporary for testing ALI runners
# This file below should match the script found in .github/scripts/get_workflow_type.py
- name: Hardcode runner-determinator script
run: |
cat <<EOF > get_workflow_type.py
import json
from argparse import ArgumentParser
from typing import Any, Tuple

from github import Auth, Github
from github.Issue import Issue


WORKFLOW_LABEL_META = "" # use meta runners
WORKFLOW_LABEL_LF = "lf." # use runners from the linux foundation
LABEL_TYPE_KEY = "label_type"
MESSAGE_KEY = "message"
MESSAGE = "" # Debug message to return to the caller


def parse_args() -> Any:
parser = ArgumentParser("Get dynamic rollout settings")
parser.add_argument("--github-token", type=str, required=True, help="GitHub token")
parser.add_argument(
"--github-repo",
type=str,
required=False,
default="pytorch/test-infra",
help="GitHub repo to get the issue",
)
parser.add_argument(
"--github-issue", type=int, required=True, help="GitHub issue umber"
)
parser.add_argument(
"--github-user", type=str, required=True, help="GitHub username"
)
parser.add_argument(
"--github-branch", type=str, required=True, help="Current GitHub branch"
)

return parser.parse_args()


def get_gh_client(github_token: str) -> Github:
auth = Auth.Token(github_token)
return Github(auth=auth)


def get_issue(gh: Github, repo: str, issue_num: int) -> Issue:
repo = gh.get_repo(repo)
return repo.get_issue(number=issue_num)


def is_exception_branch(branch: str) -> bool:
return branch.split("/")[0] in {"main", "nightly", "release", "landchecks"}


def get_workflow_type(issue: Issue, username: str) -> Tuple[str, str]:
try:
user_list = issue.get_comments()[0].body.split()

if user_list[0] == "!":
MESSAGE = "LF Workflows are disabled for everyone. Using meta runners."
return WORKFLOW_LABEL_META, MESSAGE
elif user_list[0] == "*":
MESSAGE = "LF Workflows are enabled for everyone. Using LF runners."
return WORKFLOW_LABEL_LF, MESSAGE
elif username in user_list:
MESSAGE = f"LF Workflows are enabled for {username}. Using LF runners."
return WORKFLOW_LABEL_LF, MESSAGE
else:
MESSAGE = f"LF Workflows are disabled for {username}. Using meta runners."
return WORKFLOW_LABEL_META, MESSAGE
except Exception as e:
MESSAGE = f"Failed to get determine workflow type. Falling back to meta runners. Exception: {e}"
return WORKFLOW_LABEL_META, MESSAGE


def main() -> None:
args = parse_args()

if is_exception_branch(args.github_branch):
output = {
LABEL_TYPE_KEY: WORKFLOW_LABEL_META,
MESSAGE_KEY: f"Exception branch: '{args.github_branch}', using meta runners",
}
else:
try:
gh = get_gh_client(args.github_token)
# The default issue we use - https://github.com/pytorch/test-infra/issues/5132
issue = get_issue(gh, args.github_repo, args.github_issue)
label_type, message = get_workflow_type(issue, args.github_user)
output = {
LABEL_TYPE_KEY: label_type,
MESSAGE_KEY: message,
}
except Exception as e:
output = {
LABEL_TYPE_KEY: WORKFLOW_LABEL_META,
MESSAGE_KEY: f"Failed to get issue. Falling back to meta runners. Exception: {e}",
}

json_output = json.dumps(output)
print(json_output)


if __name__ == "__main__":
main()
EOF
cat get_workflow_type.py
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
fetch-depth: 1
submodules: true

- name: Install dependencies
run: python3 -m pip install urllib3==1.26.18 PyGithub==2.3.0
@@ -161,7 +46,7 @@ jobs:
curr_branch="${{ inputs.curr_branch }}"
echo "Current branch is '$curr_branch'"

output="$(python3 get_workflow_type.py \
output="$(python3 .github/scripts/get_workflow_type.py \
--github-token "$GITHUB_TOKEN" \
--github-issue "$ISSUE_NUMBER" \
--github-branch "$curr_branch" \
@@ -169,5 +54,5 @@ jobs:

echo "Output: '${output}'"

LABEL_TYPE=$(echo "${output}" | jq -r '.label_type')
echo "label-type=$LABEL_TYPE" >> "$GITHUB_OUTPUT"
WORKFLOW_TYPE=$(echo "${output}" | jq -r '.workflow_type')
echo "workflow-type=$WORKFLOW_TYPE" >> "$GITHUB_OUTPUT"
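Downstream, the workflow step reads the script's one-line JSON from stdout with jq and republishes a single field through GITHUB_OUTPUT so later jobs can pick the runner prefix; a minimal sketch of that plumbing, where the JSON literal stands in for the script's real output:

    # Stand-in for: output="$(python3 .github/scripts/get_workflow_type.py ...)"
    output='{"label_type": "lf.", "message": "LF Workflows are enabled for everyone. Using LF runners."}'

    LABEL_TYPE=$(echo "${output}" | jq -r '.label_type')

    # In GitHub Actions GITHUB_OUTPUT points at a file; default to /dev/stdout
    # here so the sketch can also be run locally.
    echo "label-type=${LABEL_TYPE}" >> "${GITHUB_OUTPUT:-/dev/stdout}"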
.github/workflows/_win-build.yml (vendored, 4 lines changed)
@@ -47,9 +47,6 @@ jobs:
timeout-minutes: 240
outputs:
test-matrix: ${{ steps.filter.outputs.test-matrix }}
defaults:
run:
shell: bash
steps:
# Duplicated in win-test because this MUST go before a checkout
- name: Enable git symlinks on Windows and disable fsmonitor daemon
@@ -92,7 +89,6 @@ jobs:

- name: Parse ref
id: parse-ref
shell: bash
run: python3 .github/scripts/parse_ref.py

- name: Get workflow job id
.github/workflows/_win-test.yml (vendored, 4 lines changed)
@@ -41,9 +41,6 @@ jobs:
fail-fast: false
runs-on: ${{ matrix.runner }}
timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
defaults:
run:
shell: bash
steps:
# Duplicated in win-build because this MUST go before a checkout
- name: Enable git symlinks on Windows and disable fsmonitor daemon
@@ -227,7 +224,6 @@ jobs:

- name: Parse ref
id: parse-ref
shell: bash
run: python3 .github/scripts/parse_ref.py

- name: Uninstall PyTorch
.github/workflows/docker-builds.yml (vendored, 1 line changed)
@@ -54,7 +54,6 @@ jobs:
pytorch-linux-focal-py3-clang9-android-ndk-r21e,
pytorch-linux-jammy-py3.8-gcc11,
pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks,
pytorch-linux-jammy-py3.12-halide,
pytorch-linux-jammy-xpu-2024.0-py3,
pytorch-linux-jammy-py3-clang15-asan,
pytorch-linux-focal-py3-clang10-onnx,
.github/workflows/generated-linux-binary-manywheel-nightly.yml (generated, vendored, 206 lines changed)
@@ -2410,209 +2410,3 @@ jobs:
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml

manywheel-py3_12-rocm6_0-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: rocm6.0
GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.12"
build_name: manywheel-py3_12-rocm6_0
build_environment: linux-binary-manywheel
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_12-rocm6_0-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
needs: manywheel-py3_12-rocm6_0-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: rocm6.0
GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.12"
steps:
- name: Setup ROCm
uses: ./.github/actions/setup-rocm
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
name: manywheel-py3_12-rocm6_0
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
- name: Clean PyTorch checkout
run: |
# Remove any artifacts from the previous checkouts
git clean -fxd
working-directory: pytorch
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
submodules: recursive
repository: pytorch/builder
path: builder
quiet-checkout: true
- name: Clean pytorch/builder checkout
run: |
# Remove any artifacts from the previous checkouts
git clean -fxd
working-directory: builder
- name: ROCm set GPU_FLAG
run: |
echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: pytorch/manylinux-builder:rocm6.0-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
manywheel-py3_12-rocm6_0-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
needs: manywheel-py3_12-rocm6_0-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: rocm6.0
GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.12"
build_name: manywheel-py3_12-rocm6_0
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml

manywheel-py3_12-rocm6_1-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: rocm6.1
GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.12"
build_name: manywheel-py3_12-rocm6_1
build_environment: linux-binary-manywheel
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_12-rocm6_1-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
needs: manywheel-py3_12-rocm6_1-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: rocm6.1
GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.12"
steps:
- name: Setup ROCm
uses: ./.github/actions/setup-rocm
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
name: manywheel-py3_12-rocm6_1
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
- name: Clean PyTorch checkout
run: |
# Remove any artifacts from the previous checkouts
git clean -fxd
working-directory: pytorch
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
submodules: recursive
repository: pytorch/builder
path: builder
quiet-checkout: true
- name: Clean pytorch/builder checkout
run: |
# Remove any artifacts from the previous checkouts
git clean -fxd
working-directory: builder
- name: ROCm set GPU_FLAG
run: |
echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: pytorch/manylinux-builder:rocm6.1-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
manywheel-py3_12-rocm6_1-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
needs: manywheel-py3_12-rocm6_1-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: rocm6.1
GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.12"
build_name: manywheel-py3_12-rocm6_1
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
.github/workflows/inductor-cu124.yml (vendored, 6 lines changed)
@@ -28,8 +28,7 @@ jobs:
cuda-arch-list: '8.6'
test-matrix: |
{ include: [
{ config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "inductor_distributed", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
{ config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
@@ -96,8 +95,7 @@ jobs:
cuda-arch-list: '8.6'
test-matrix: |
{ include: [
{ config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
]}

linux-focal-cuda12_4-py3_12-gcc9-inductor-test:
.github/workflows/inductor-periodic.yml (vendored, 26 lines changed)
@@ -56,29 +56,3 @@ jobs:
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.test-matrix }}
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp:
name: cuda12.1-py3.10-gcc9-sm80
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
cuda-arch-list: '8.0'
test-matrix: |
{ include: [
{ config: "inductor_torchbench_smoketest_perf", shard: 1, num_shards: 1, runner: "linux.gcp.a100" },
]}
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

linux-focal-cuda12_1-py3_10-gcc9-inductor-test-gcp:
name: cuda12.1-py3.10-gcc9-sm80
uses: ./.github/workflows/_linux-test.yml
needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp
with:
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.test-matrix }}
use-gha: anything-non-empty-to-use-gha
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
52  .github/workflows/inductor.yml  vendored
@ -48,8 +48,7 @@ jobs:
cuda-arch-list: '8.6'
test-matrix: |
{ include: [
{ config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "inductor_distributed", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
{ config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
@ -82,6 +81,32 @@ jobs:
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp:
name: cuda12.1-py3.10-gcc9-sm80
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
cuda-arch-list: '8.0'
test-matrix: |
{ include: [
{ config: "inductor_torchbench_smoketest_perf", shard: 1, num_shards: 1, runner: "linux.gcp.a100" },
]}
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

linux-focal-cuda12_1-py3_10-gcc9-inductor-test-gcp:
name: cuda12.1-py3.10-gcc9-sm80
uses: ./.github/workflows/_linux-test.yml
needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp
with:
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.test-matrix }}
use-gha: anything-non-empty-to-use-gha
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

linux-focal-cuda12_1-py3_12-gcc9-inductor-build:
name: cuda12.1-py3.12-gcc9-sm86
uses: ./.github/workflows/_linux-build.yml
@ -91,8 +116,7 @@ jobs:
cuda-arch-list: '8.6'
test-matrix: |
{ include: [
{ config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
{ config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
]}

linux-focal-cuda12_1-py3_12-gcc9-inductor-test:
@ -104,26 +128,6 @@ jobs:
docker-image: ${{ needs.linux-focal-cuda12_1-py3_12-gcc9-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_12-gcc9-inductor-build.outputs.test-matrix }}

linux-jammy-cpu-py3_12-inductor-halide-build:
name: linux-jammy-cpu-py3.12-gcc11-inductor-halide
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-jammy-py3.12-gcc11
docker-image-name: pytorch-linux-jammy-py3.12-halide
test-matrix: |
{ include: [
{ config: "inductor-halide", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
]}

linux-jammy-cpu-py3_12-inductor-halide-test:
name: linux-jammy-cpu-py3.12-gcc11-inductor-halide
uses: ./.github/workflows/_linux-test.yml
needs: linux-jammy-cpu-py3_12-inductor-halide-build
with:
build-environment: linux-jammy-py3.12-gcc11
docker-image: ${{ needs.linux-jammy-cpu-py3_12-inductor-halide-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-jammy-cpu-py3_12-inductor-halide-build.outputs.test-matrix }}

linux-focal-cuda12_4-py3_10-gcc9-inductor-build:
# Should be synced with the one in inductor-periodic.yml but this only runs inductor_timm
name: cuda12.4-py3.10-gcc9-sm86

8  .github/workflows/lint.yml  vendored
@ -19,10 +19,10 @@ jobs:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
with:
timeout: 120
runner: lf.linux.2xlarge
runner: linux.2xlarge
docker-image: pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter
# NB: A shallow checkout won't work here because calculate-docker-image requires a full checkout
# to run git rev-parse HEAD~:.ci/docker when a new image is needed.
# to run git rev-parse HEAD~:.ci/docker when a new image is needed
fetch-depth: 0
submodules: true
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@ -35,7 +35,7 @@ jobs:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
with:
timeout: 120
runner: lf.linux.2xlarge
runner: linux.2xlarge
docker-image: pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter
# NB: A shallow checkout won't work here because calculate-docker-image requires a full checkout
# to run git rev-parse HEAD~:.ci/docker when a new image is needed
@ -49,7 +49,7 @@ jobs:
quick-checks:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
with:
runner: lf.linux.2xlarge
runner: linux.2xlarge
docker-image: pytorch-linux-focal-linter
fetch-depth: 0
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}

1  .github/workflows/periodic.yml  vendored
@ -73,6 +73,7 @@ jobs:
{ config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "deploy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
{ config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
{ config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },

55  .github/workflows/pull.yml  vendored
@ -270,6 +270,7 @@ jobs:
{ config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "deploy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
]}

linux-focal-cuda12_1-py3_10-gcc9-test:
@ -487,57 +488,3 @@ jobs:
build-environment: linux-jammy-py3-clang12-executorch
docker-image: ${{ needs.linux-jammy-py3-clang12-executorch-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-jammy-py3-clang12-executorch-build.outputs.test-matrix }}

linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build:
name: linux-focal-cuda12.1-py3.10-gcc9-experimental-split-build
uses: ./.github/workflows/_linux-build-label.yml
with:
use_split_build: true
build-environment: linux-focal-cuda12.1-py3.10-gcc9
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 2, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
{ config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
]}

linux-focal-cuda12_4-py3_10-gcc9-experimental-split-build-test:
name: linux-focal-cuda12.1-py3.10-gcc9-experimental-split-build
uses: ./.github/workflows/_linux-test.yml
needs:
- linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build
- target-determination
with:
timeout-minutes: 360
build-environment: linux-focal-cuda12.1-py3.10-gcc9-experimental-split-build
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build.outputs.test-matrix }}

linux-focal-py3_12-clang10-experimental-split-build:
name: linux-focal-py3.12-clang10-experimental-split-build
uses: ./.github/workflows/_linux-build-label.yml
with:
use_split_build: True
build-environment: linux-focal-py3.12-clang10
docker-image-name: pytorch-linux-focal-py3.12-clang10
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
{ config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
{ config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
{ config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
{ config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
]}
linux-focal-py3_12-clang10-experimental-split-build-test:
name: linux-focal-py3.12-clang10-experimental-split-build
uses: ./.github/workflows/_linux-test.yml
needs: linux-focal-py3_12-clang10-experimental-split-build
with:
build-environment: linux-focal-py3.12-clang10-experimental-split-build
docker-image: ${{ needs.linux-focal-py3_12-clang10-experimental-split-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-py3_12-clang10-experimental-split-build.outputs.test-matrix }}
timeout-minutes: 600

6  .github/workflows/slow.yml  vendored
@ -97,8 +97,7 @@ jobs:
docker-image-name: pytorch-linux-focal-py3.8-clang10
test-matrix: |
{ include: [
{ config: "slow", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
{ config: "slow", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
{ config: "slow", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
]}

linux-focal-py3_8-clang10-test:
@ -120,8 +119,7 @@ jobs:
docker-image-name: pytorch-linux-focal-rocm-n-py3
test-matrix: |
{ include: [
{ config: "slow", shard: 1, num_shards: 2, runner: "linux.rocm.gpu" },
{ config: "slow", shard: 2, num_shards: 2, runner: "linux.rocm.gpu" },
{ config: "slow", shard: 1, num_shards: 1, runner: "linux.rocm.gpu" },
]}

linux-focal-rocm6_1-py3_8-test:

186  .lintrunner.toml
@ -68,7 +68,6 @@ include_patterns = [
|
||||
'aten/src/ATen/native/cudnn/*.cpp',
|
||||
'c10/**/*.h',
|
||||
'c10/**/*.cpp',
|
||||
'distributed/c10d/*SymmetricMemory.*',
|
||||
'torch/csrc/**/*.h',
|
||||
'torch/csrc/**/*.hpp',
|
||||
'torch/csrc/**/*.cpp',
|
||||
@ -137,7 +136,7 @@ init_command = [
|
||||
'numpy==1.24.3 ; python_version == "3.8"',
|
||||
'numpy==1.26.0 ; python_version >= "3.9"',
|
||||
'expecttest==0.1.6',
|
||||
'mypy==1.10.0',
|
||||
'mypy==1.9.0',
|
||||
'sympy==1.11.1',
|
||||
'types-requests==2.27.25',
|
||||
'types-PyYAML==6.0.7',
|
||||
@ -217,6 +216,7 @@ exclude_patterns = [
|
||||
'c10/util/complex_math.h',
|
||||
'c10/util/complex_utils.h',
|
||||
'c10/util/flat_hash_map.h',
|
||||
'c10/util/Float8*.h',
|
||||
'c10/util/logging*.h',
|
||||
'c10/util/hash.h',
|
||||
'c10/util/strong_type.h',
|
||||
@ -224,6 +224,7 @@ exclude_patterns = [
|
||||
'c10/util/win32-headers.h',
|
||||
'c10/util/*inl.h',
|
||||
'c10/test/**/*.h',
|
||||
'aten/src/ATen/core/TensorImpl_test.cpp',
|
||||
'third_party/**/*',
|
||||
'torch/csrc/api/**',
|
||||
'torch/csrc/autograd/generated/**',
|
||||
@ -234,6 +235,7 @@ exclude_patterns = [
|
||||
'torch/csrc/jit/serialization/import_legacy.cpp',
|
||||
'torch/csrc/jit/serialization/export.cpp',
|
||||
'torch/csrc/lazy/**/*',
|
||||
'torch/csrc/mps/**/*',
|
||||
]
|
||||
init_command = [
|
||||
'python3',
|
||||
@ -997,6 +999,7 @@ command = [
|
||||
]
|
||||
exclude_patterns = [
|
||||
'tools/gen_vulkan_spv.py',
|
||||
'torch/__init__.py', # Skip this file to format because it's part of the public API
|
||||
# We don't care too much about files in this directory, don't enforce
|
||||
# formatting on them
|
||||
'caffe2/**/*.py',
|
||||
@ -1391,15 +1394,115 @@ exclude_patterns = [
|
||||
'torch/cuda/_memory_viz.py', # mypy: Value of type "object" is not indexable
|
||||
'torch/distributed/__init__.py',
|
||||
'torch/distributed/_composable_state.py',
|
||||
'torch/distributed/_shard/__init__.py',
|
||||
'torch/distributed/_shard/_utils.py',
|
||||
'torch/distributed/_shard/api.py',
|
||||
'torch/distributed/_shard/checkpoint/__init__.py',
|
||||
'torch/distributed/_shard/common_op_utils.py',
|
||||
'torch/distributed/_shard/metadata.py',
|
||||
'torch/distributed/_shard/op_registry_utils.py',
|
||||
'torch/distributed/_shard/sharded_optim/__init__.py',
|
||||
'torch/distributed/_shard/sharded_optim/api.py',
|
||||
'torch/distributed/_shard/sharded_tensor/__init__.py',
|
||||
'torch/distributed/_shard/sharded_tensor/_ops/__init__.py',
|
||||
'torch/distributed/_shard/sharded_tensor/_ops/_common.py',
|
||||
'torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py',
|
||||
'torch/distributed/_shard/sharded_tensor/_ops/init.py',
|
||||
'torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py',
|
||||
'torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py',
|
||||
'torch/distributed/_shard/sharded_tensor/api.py',
|
||||
'torch/distributed/_shard/sharded_tensor/logger.py',
|
||||
'torch/distributed/_shard/sharded_tensor/logging_handlers.py',
|
||||
'torch/distributed/_shard/sharded_tensor/metadata.py',
|
||||
'torch/distributed/_shard/sharded_tensor/reshard.py',
|
||||
'torch/distributed/_shard/sharded_tensor/shard.py',
|
||||
'torch/distributed/_shard/sharded_tensor/utils.py',
|
||||
'torch/distributed/_shard/sharder.py',
|
||||
'torch/distributed/_shard/sharding_plan/__init__.py',
|
||||
'torch/distributed/_shard/sharding_plan/api.py',
|
||||
'torch/distributed/_shard/sharding_spec/__init__.py',
|
||||
'torch/distributed/_shard/sharding_spec/_internals.py',
|
||||
'torch/distributed/_shard/sharding_spec/api.py',
|
||||
'torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py',
|
||||
'torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__init__.py',
|
||||
'torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py',
|
||||
'torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py',
|
||||
'torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py',
|
||||
'torch/distributed/_sharded_tensor/__init__.py',
|
||||
'torch/distributed/_sharding_spec/__init__.py',
|
||||
'torch/distributed/_tools/__init__.py',
|
||||
'torch/distributed/_tools/memory_tracker.py',
|
||||
'torch/distributed/algorithms/__init__.py',
|
||||
'torch/distributed/algorithms/_checkpoint/__init__.py',
|
||||
'torch/distributed/algorithms/_checkpoint/checkpoint_wrapper.py',
|
||||
'torch/distributed/algorithms/_comm_hooks/__init__.py',
|
||||
'torch/distributed/algorithms/_comm_hooks/default_hooks.py',
|
||||
'torch/distributed/algorithms/_optimizer_overlap/__init__.py',
|
||||
'torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py',
|
||||
'torch/distributed/algorithms/_quantization/__init__.py',
|
||||
'torch/distributed/algorithms/_quantization/quantization.py',
|
||||
'torch/distributed/algorithms/ddp_comm_hooks/__init__.py',
|
||||
'torch/distributed/algorithms/ddp_comm_hooks/ddp_zero_hook.py',
|
||||
'torch/distributed/algorithms/ddp_comm_hooks/debugging_hooks.py',
|
||||
'torch/distributed/algorithms/ddp_comm_hooks/default_hooks.py',
|
||||
'torch/distributed/algorithms/ddp_comm_hooks/mixed_precision_hooks.py',
|
||||
'torch/distributed/algorithms/ddp_comm_hooks/optimizer_overlap_hooks.py',
|
||||
'torch/distributed/algorithms/ddp_comm_hooks/post_localSGD_hook.py',
|
||||
'torch/distributed/algorithms/ddp_comm_hooks/powerSGD_hook.py',
|
||||
'torch/distributed/algorithms/ddp_comm_hooks/quantization_hooks.py',
|
||||
'torch/distributed/algorithms/join.py',
|
||||
'torch/distributed/algorithms/model_averaging/__init__.py',
|
||||
'torch/distributed/algorithms/model_averaging/averagers.py',
|
||||
'torch/distributed/algorithms/model_averaging/hierarchical_model_averager.py',
|
||||
'torch/distributed/algorithms/model_averaging/utils.py',
|
||||
'torch/distributed/argparse_util.py',
|
||||
'torch/distributed/autograd/__init__.py',
|
||||
'torch/distributed/benchmarks/benchmark_ddp_rpc.py',
|
||||
'torch/distributed/c10d_logger.py',
|
||||
'torch/distributed/collective_utils.py',
|
||||
'torch/distributed/constants.py',
|
||||
'torch/distributed/distributed_c10d.py',
|
||||
'torch/distributed/elastic/__init__.py',
|
||||
'torch/distributed/elastic/agent/__init__.py',
|
||||
'torch/distributed/elastic/agent/server/__init__.py',
|
||||
'torch/distributed/elastic/agent/server/api.py',
|
||||
'torch/distributed/elastic/agent/server/local_elastic_agent.py',
|
||||
'torch/distributed/elastic/events/__init__.py',
|
||||
'torch/distributed/elastic/events/api.py',
|
||||
'torch/distributed/elastic/events/handlers.py',
|
||||
'torch/distributed/elastic/metrics/__init__.py',
|
||||
'torch/distributed/elastic/metrics/api.py',
|
||||
'torch/distributed/elastic/multiprocessing/__init__.py',
|
||||
'torch/distributed/elastic/multiprocessing/api.py',
|
||||
'torch/distributed/elastic/multiprocessing/errors/__init__.py',
|
||||
'torch/distributed/elastic/multiprocessing/errors/error_handler.py',
|
||||
'torch/distributed/elastic/multiprocessing/errors/handlers.py',
|
||||
'torch/distributed/elastic/multiprocessing/redirects.py',
|
||||
'torch/distributed/elastic/multiprocessing/tail_log.py',
|
||||
'torch/distributed/elastic/rendezvous/__init__.py',
|
||||
'torch/distributed/elastic/rendezvous/api.py',
|
||||
'torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py',
|
||||
'torch/distributed/elastic/rendezvous/dynamic_rendezvous.py',
|
||||
'torch/distributed/elastic/rendezvous/etcd_rendezvous.py',
|
||||
'torch/distributed/elastic/rendezvous/etcd_rendezvous_backend.py',
|
||||
'torch/distributed/elastic/rendezvous/etcd_server.py',
|
||||
'torch/distributed/elastic/rendezvous/etcd_store.py',
|
||||
'torch/distributed/elastic/rendezvous/registry.py',
|
||||
'torch/distributed/elastic/rendezvous/static_tcp_rendezvous.py',
|
||||
'torch/distributed/elastic/rendezvous/utils.py',
|
||||
'torch/distributed/elastic/timer/__init__.py',
|
||||
'torch/distributed/elastic/timer/api.py',
|
||||
'torch/distributed/elastic/timer/file_based_local_timer.py',
|
||||
'torch/distributed/elastic/timer/local_timer.py',
|
||||
'torch/distributed/elastic/utils/__init__.py',
|
||||
'torch/distributed/elastic/utils/api.py',
|
||||
'torch/distributed/elastic/utils/data/__init__.py',
|
||||
'torch/distributed/elastic/utils/data/cycling_iterator.py',
|
||||
'torch/distributed/elastic/utils/data/elastic_distributed_sampler.py',
|
||||
'torch/distributed/elastic/utils/distributed.py',
|
||||
'torch/distributed/elastic/utils/log_level.py',
|
||||
'torch/distributed/elastic/utils/logging.py',
|
||||
'torch/distributed/elastic/utils/store.py',
|
||||
'torch/distributed/examples/memory_tracker_example.py',
|
||||
'torch/distributed/launch.py',
|
||||
'torch/distributed/launcher/__init__.py',
|
||||
@ -1413,11 +1516,48 @@ exclude_patterns = [
|
||||
'torch/distributed/nn/jit/instantiator.py',
|
||||
'torch/distributed/nn/jit/templates/__init__.py',
|
||||
'torch/distributed/nn/jit/templates/remote_module_template.py',
|
||||
'torch/distributed/optim/__init__.py',
|
||||
'torch/distributed/optim/apply_optimizer_in_backward.py',
|
||||
'torch/distributed/optim/functional_adadelta.py',
|
||||
'torch/distributed/optim/functional_adagrad.py',
|
||||
'torch/distributed/optim/functional_adam.py',
|
||||
'torch/distributed/optim/functional_adamax.py',
|
||||
'torch/distributed/optim/functional_adamw.py',
|
||||
'torch/distributed/optim/functional_rmsprop.py',
|
||||
'torch/distributed/optim/functional_rprop.py',
|
||||
'torch/distributed/optim/functional_sgd.py',
|
||||
'torch/distributed/optim/named_optimizer.py',
|
||||
'torch/distributed/optim/optimizer.py',
|
||||
'torch/distributed/optim/post_localSGD_optimizer.py',
|
||||
'torch/distributed/optim/utils.py',
|
||||
'torch/distributed/optim/zero_redundancy_optimizer.py',
|
||||
'torch/distributed/remote_device.py',
|
||||
'torch/distributed/rendezvous.py',
|
||||
'torch/distributed/rpc/__init__.py',
|
||||
'torch/distributed/rpc/_testing/__init__.py',
|
||||
'torch/distributed/rpc/_testing/faulty_agent_backend_registry.py',
|
||||
'torch/distributed/rpc/_utils.py',
|
||||
'torch/distributed/rpc/api.py',
|
||||
'torch/distributed/rpc/backend_registry.py',
|
||||
'torch/distributed/rpc/constants.py',
|
||||
'torch/distributed/rpc/functions.py',
|
||||
'torch/distributed/rpc/internal.py',
|
||||
'torch/distributed/rpc/options.py',
|
||||
'torch/distributed/rpc/rref_proxy.py',
|
||||
'torch/distributed/rpc/server_process_global_profiler.py',
|
||||
'torch/distributed/run.py',
|
||||
'torch/distributed/tensor/__init__.py',
|
||||
'torch/distributed/tensor/parallel/__init__.py',
|
||||
'torch/distributed/tensor/parallel/_utils.py',
|
||||
'torch/distributed/tensor/parallel/_view_with_dim_change.py',
|
||||
'torch/distributed/tensor/parallel/api.py',
|
||||
'torch/distributed/tensor/parallel/fsdp.py',
|
||||
'torch/distributed/tensor/parallel/input_reshard.py',
|
||||
'torch/distributed/tensor/parallel/multihead_attention_tp.py',
|
||||
'torch/distributed/tensor/parallel/style.py',
|
||||
'torch/fft/__init__.py',
|
||||
'torch/func/__init__.py',
|
||||
'torch/functional.py',
|
||||
'torch/futures/__init__.py',
|
||||
'torch/fx/__init__.py',
|
||||
'torch/fx/_compatibility.py',
|
||||
@ -1503,11 +1643,15 @@ exclude_patterns = [
|
||||
'torch/fx/subgraph_rewriter.py',
|
||||
'torch/fx/tensor_type.py',
|
||||
'torch/fx/traceback.py',
|
||||
'torch/hub.py',
|
||||
'torch/library.py',
|
||||
'torch/linalg/__init__.py',
|
||||
'torch/monitor/__init__.py',
|
||||
'torch/nested/__init__.py',
|
||||
'torch/nn/__init__.py',
|
||||
'torch/nn/_reduction.py',
|
||||
'torch/nn/backends/__init__.py',
|
||||
'torch/nn/backends/thnn.py',
|
||||
'torch/nn/common_types.py',
|
||||
'torch/nn/cpp.py',
|
||||
'torch/nn/functional.py',
|
||||
@ -1555,6 +1699,13 @@ exclude_patterns = [
|
||||
'torch/nn/modules/transformer.py',
|
||||
'torch/nn/modules/upsampling.py',
|
||||
'torch/nn/modules/utils.py',
|
||||
'torch/nn/parallel/__init__.py',
|
||||
'torch/nn/parallel/_functions.py',
|
||||
'torch/nn/parallel/comm.py',
|
||||
'torch/nn/parallel/data_parallel.py',
|
||||
'torch/nn/parallel/parallel_apply.py',
|
||||
'torch/nn/parallel/replicate.py',
|
||||
'torch/nn/parallel/scatter_gather.py',
|
||||
'torch/nn/parameter.py',
|
||||
'torch/nn/qat/__init__.py',
|
||||
'torch/nn/qat/dynamic/__init__.py',
|
||||
@ -1593,6 +1744,35 @@ exclude_patterns = [
|
||||
'torch/nn/quantized/modules/normalization.py',
|
||||
'torch/nn/quantized/modules/rnn.py',
|
||||
'torch/nn/quantized/modules/utils.py',
|
||||
'torch/nn/utils/__init__.py',
|
||||
'torch/nn/utils/_deprecation_utils.py',
|
||||
'torch/nn/utils/_expanded_weights/__init__.py',
|
||||
'torch/nn/utils/_expanded_weights/conv_expanded_weights.py',
|
||||
'torch/nn/utils/_expanded_weights/conv_utils.py',
|
||||
'torch/nn/utils/_expanded_weights/embedding_expanded_weights.py',
|
||||
'torch/nn/utils/_expanded_weights/expanded_weights_impl.py',
|
||||
'torch/nn/utils/_expanded_weights/expanded_weights_utils.py',
|
||||
'torch/nn/utils/_expanded_weights/group_norm_expanded_weights.py',
|
||||
'torch/nn/utils/_expanded_weights/instance_norm_expanded_weights.py',
|
||||
'torch/nn/utils/_expanded_weights/layer_norm_expanded_weights.py',
|
||||
'torch/nn/utils/_expanded_weights/linear_expanded_weights.py',
|
||||
'torch/nn/utils/_per_sample_grad.py',
|
||||
'torch/nn/utils/clip_grad.py',
|
||||
'torch/nn/utils/convert_parameters.py',
|
||||
'torch/nn/utils/fusion.py',
|
||||
'torch/nn/utils/init.py',
|
||||
'torch/nn/utils/memory_format.py',
|
||||
'torch/nn/utils/parametrizations.py',
|
||||
'torch/nn/utils/parametrize.py',
|
||||
'torch/nn/utils/prune.py',
|
||||
'torch/nn/utils/rnn.py',
|
||||
'torch/nn/utils/spectral_norm.py',
|
||||
'torch/nn/utils/weight_norm.py',
|
||||
'torch/overrides.py',
|
||||
'torch/quasirandom.py',
|
||||
'torch/random.py',
|
||||
'torch/return_types.py',
|
||||
'torch/serialization.py',
|
||||
'torch/signal/__init__.py',
|
||||
'torch/signal/windows/__init__.py',
|
||||
'torch/signal/windows/windows.py',
|
||||
@ -1609,7 +1789,9 @@ exclude_patterns = [
|
||||
'torch/testing/_internal/codegen/__init__.py',
|
||||
'torch/testing/_internal/codegen/random_topo_test.py',
|
||||
'torch/testing/_internal/common_cuda.py',
|
||||
'torch/testing/_internal/common_device_type.py',
|
||||
'torch/testing/_internal/common_distributed.py',
|
||||
'torch/testing/_internal/common_dtype.py',
|
||||
'torch/testing/_internal/common_jit.py',
|
||||
'torch/testing/_internal/common_methods_invocations.py',
|
||||
'torch/testing/_internal/common_modules.py',
|
||||
|
||||
@ -461,6 +461,7 @@ filegroup(
filegroup(
name = "caffe2_perfkernels_srcs",
srcs = [
"caffe2/perfkernels/embedding_lookup.cc",
"caffe2/perfkernels/embedding_lookup_idx.cc",
],
)
@ -498,6 +499,7 @@ cc_library(
hdrs = [
"caffe2/core/common.h",
"caffe2/perfkernels/common.h",
"caffe2/perfkernels/embedding_lookup.h",
"caffe2/perfkernels/embedding_lookup_idx.h",
"caffe2/utils/fixed_divisor.h",
] + glob([
@ -744,7 +746,6 @@ cc_library(
"torch/csrc/cuda/python_nccl.cpp",
"torch/csrc/cuda/nccl.cpp",
"torch/csrc/distributed/c10d/intra_node_comm.cu",
"torch/csrc/distributed/c10d/CUDASymmetricMemory.cu",
"torch/csrc/distributed/c10d/Utils.cu",
"torch/csrc/distributed/c10d/quantization/quantization_gpu.cu",
],

22  CODEOWNERS
@ -43,12 +43,12 @@ nn/qat/ @jerryzh168
/torch/csrc/distributed/rpc/tensorpipe_agent.h @jiayisuse @osalpekar @lw

# ONNX Export
/torch/_dynamo/backends/onnxrt.py @wschin @xadupre
/torch/csrc/jit/passes/onnx.h @titaiwangms @shubhambhokare1 @xadupre
/torch/csrc/jit/passes/onnx.cpp @titaiwangms @shubhambhokare1 @xadupre
/torch/csrc/jit/passes/onnx/ @titaiwangms @shubhambhokare1 @xadupre
/torch/onnx/ @titaiwangms @shubhambhokare1 @justinchuby @wschin @xadupre
/test/onnx/ @titaiwangms @shubhambhokare1 @justinchuby @wschin @xadupre
/torch/_dynamo/backends/onnxrt.py @bowenbao @thiagocrepaldi @wschin
/torch/csrc/jit/passes/onnx.h @bowenbao @thiagocrepaldi
/torch/csrc/jit/passes/onnx.cpp @bowenbao @thiagocrepaldi
/torch/csrc/jit/passes/onnx/ @bowenbao @thiagocrepaldi
/torch/onnx/ @bowenbao @thiagocrepaldi @wschin
/test/onnx/ @bowenbao @thiagocrepaldi @wschin

# CI
/.ci @pytorch/pytorch-dev-infra
@ -57,7 +57,6 @@ nn/qat/ @jerryzh168
/.ci/docker/ @jeffdaily
/.ci/docker/ci_commit_pins/triton.txt @desertfire @Chillee @eellison @shunting314 @bertmaher @jeffdaily @jataylo @jithunnair-amd @pruthvistony
/.ci/docker/ci_commit_pins/triton-rocm.txt @jeffdaily @jataylo @jithunnair-amd @pruthvistony
/.ci/docker/ci_commit_pins/triton-xpu.txt @EikanWang @gujinghui

# Github Actions
# This list is for people wanting to be notified every time there's a change
@ -133,15 +132,6 @@ caffe2/operators/hip @jeffdaily @jithunnair-amd
caffe2/operators/rnn/hip @jeffdaily @jithunnair-amd
caffe2/utils/hip @jeffdaily @jithunnair-amd

# XPU-specific files
/aten/src/ATen/xpu/ @EikanWang @gujinghui
/c10/xpu/ @EikanWang @gujinghui
/torch/csrc/xpu/ @EikanWang @gujinghui
/torch/xpu/ @EikanWang @gujinghui
/test/xpu/ @EikanWang @gujinghui
/test/test_xpu.py @EikanWang @gujinghui
/third_party/xpu.txt @EikanWang @gujinghui

# torch.export
/torch/export/ @avikchaudhuri @gmagogsfm @tugsbayasgalan @zhxchen17
/torch/_export/ @avikchaudhuri @gmagogsfm @tugsbayasgalan @zhxchen17

@ -77,11 +77,6 @@ RUN case ${TARGETPLATFORM} in \
esac && \
/opt/conda/bin/conda clean -ya
RUN /opt/conda/bin/pip install torchelastic
RUN IS_CUDA=$(python -c 'import torch ; print(torch.cuda._is_compiled())'); \
echo "Is torch compiled with cuda: ${IS_CUDA}"; \
if test "${IS_CUDA}" != "True" -a ! -z "${CUDA_VERSION}"; then \
exit 1; \
fi

FROM ${BASE_IMAGE} as official
ARG PYTORCH_VERSION

@ -290,7 +290,7 @@ After the final RC is created. The following tasks should be performed :

* Create validation issue for the release, see for example [Validations for 2.1.2 release](https://github.com/pytorch/pytorch/issues/114904) and perform required validations.

* Run performance tests in [benchmark repository](https://github.com/pytorch/benchmark). Make sure there are no performance regressions.
* Run performance tests in [benchmark repository](https://github.com/pytorch/benchmark). Make sure there are no prerformance regressions.

* Prepare and stage PyPI binaries for promotion. This is done with this script:
[`pytorch/builder:release/pypi/promote_pypi_to_staging.sh`](https://github.com/pytorch/builder/blob/main/release/pypi/promote_pypi_to_staging.sh)
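
As an aside on the validation step in the diff above: the essence of validating a staged release candidate is a small import-and-run check against the candidate wheel. The sketch below is a hypothetical minimal version of such a check; the install channel (e.g. `pip install torch --index-url https://download.pytorch.org/whl/test/<cuda-tag>`) and the specific assertions are assumptions, and the linked validation issue remains the authoritative list of steps.

```python
# Hypothetical smoke check for an already-installed release candidate wheel.
import torch

print("version:", torch.__version__)
print("cuda available:", torch.cuda.is_available())

# Tiny numerical sanity check on CPU.
x = torch.randn(8, 8)
assert torch.allclose(x @ torch.eye(8), x)

# Exercise the CUDA path too when a GPU is present.
if torch.cuda.is_available():
    y = x.cuda()
    assert torch.allclose((y @ y.t()).cpu(), x @ x.t(), atol=1e-4)

print("basic smoke test passed")
```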
@ -429,12 +429,12 @@ need to support these particular versions of software.

## Operating Systems
Supported OS flavors are summarized in the table below:
| Operating System family | Architecture | Notes |
| Operating System family | Architectrue | Notes |
| --- | --- | --- |
| Linux | aarch64, x86_64 | Wheels are manylinux2014 compatible, i.e. they should be runnable on any Linux system with glibc-2.17 or above. |
| MacOS | arm64 | Builds should be compatible with MacOS 11 (Big Sur) or newer, but are actively tested against MacOS 14 (Sonoma). |
| MacOS | x86_64 | Requires MacOS Catalina or above, not supported after 2.2, see https://github.com/pytorch/pytorch/issues/114602 |
| Windows | x86_64 | Builds are compatible with Windows-10 or newer. |
| Windows | x86_64 | Buils are compatible with Windows-10 or newer. |

# Submitting Tutorials

26  SECURITY.md
@ -6,7 +6,7 @@
- [Untrusted inputs](#untrusted-inputs)
- [Data privacy](#data-privacy)
- [Using distributed features](#using-distributed-features)
- [**CI/CD security principles**](#cicd-security-principles)

## Reporting Security Issues

Beware that none of the topics under [Using Pytorch Securely](#using-pytorch-securely) are considered vulnerabilities of Pytorch.
@ -61,27 +61,3 @@ If applicable, prepare your model against bad inputs and prompt injections. Some
PyTorch can be used for distributed computing, and as such there is a `torch.distributed` package. PyTorch Distributed features are intended for internal communication only. They are not built for use in untrusted environments or networks.

For performance reasons, none of the PyTorch Distributed primitives (including c10d, RPC, and TCPStore) include any authorization protocol and will send messages unencrypted. They accept connections from anywhere, and execute the workload sent without performing any checks. Therefore, if you run a PyTorch Distributed program on your network, anybody with access to the network can execute arbitrary code with the privileges of the user running PyTorch.

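To make the paragraph above concrete, here is a minimal sketch (host, port, and key names are placeholders) of why exposure matters: a `torch.distributed.TCPStore` performs no authentication, so any process that can reach its port can read and write the store's keys.

```python
# Minimal sketch: a TCPStore has no authentication or encryption, so it must
# only ever be reachable from trusted hosts. Host/port below are placeholders.
from datetime import timedelta
import torch.distributed as dist

store = dist.TCPStore(
    "127.0.0.1",          # bind address; never expose this on an untrusted network
    29500,                # placeholder port
    world_size=1,
    is_master=True,
    wait_for_workers=False,
    timeout=timedelta(seconds=30),
)
store.set("job_config", "anything")

# Any other process that can reach 127.0.0.1:29500 can do the same, with no
# credentials required:
#   client = dist.TCPStore("127.0.0.1", 29500, is_master=False)
#   client.get("job_config")
```
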
## CI/CD security principles
_Audience_: Contributors and reviewers, especially if modifying the workflow files/build system.

PyTorch CI/CD security philosophy is based on finding a balance between open and transparent CI pipelines while keeping the environment efficient and safe.

PyTorch testing requirements are complex, and a large part of the code base can only be tested on specialized powerful hardware, such as GPU, making it a lucrative target for resource misuse. To prevent this, we require workflow run approval for PRs from non-member contributors. To keep the volume of those approvals relatively low, we easily extend write permissions to the repository to regular contributors.

More widespread write access to the repo presents challenges when it comes to reviewing changes, merging code into trunk, and creating releases. [Protected branches](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/about-protected-branches) are used to restrict the ability to merge to the trunk/release branches only to the repository administrators and merge bot. The merge bot is responsible for mechanistically merging the change and validating reviews against the path-based rules defined in [merge_rules.yml](https://github.com/pytorch/pytorch/blob/main/.github/merge_rules.yaml). Once a PR has been reviewed by person(s) mentioned in these rules, leaving a `@pytorchbot merge` comment on the PR will initiate the merge process. To protect merge bot credentials from leaking, merge actions must be executed only on ephemeral runners (see definition below) using a specialized deployment environment.

To speed up the CI system, build steps of the workflow rely on the distributed caching mechanism backed by [sccache](https://github.com/mozilla/sccache), making them susceptible to cache corruption compromises. For that reason binary artifacts generated during CI should not be executed in an environment that contains an access to any sensitive/non-public information and should not be published for use by general audience. One should not have any expectation about the lifetime of those artifacts, although in practice they likely remain accessible for about two weeks after the PR has been closed.

To speed up CI system setup, PyTorch relies heavily on Docker to pre-build and pre-install the dependencies. To prevent a potentially malicious PR from altering ones that were published in the past, ECR has been configured to use immutable tags.

To improve runner availability and more efficient resource utilization, some of the CI runners are non-ephemeral, i.e., workflow steps from completely unrelated PRs could be scheduled sequentially on the same runner, making them susceptible to reverse shell attacks. For that reason, PyTorch does not rely on the repository secrets mechanism, as these can easily be compromised in such attacks.

### Release pipelines security

To ensure safe binary releases, PyTorch release pipelines are built on the following principles:
- All binary builds/upload jobs must be run on ephemeral runners, i.e., on a machine that is allocated from the cloud to do the build and released back to the cloud after the build is finished. This protects those builds from interference from external actors, who potentially can get reverse shell access to a non-ephemeral runner and wait there for a binary build.
- All binary builds are cold-start builds, i.e., distributed caching/incremental builds are not permitted. This renders builds much slower than incremental CI builds but isolates them from potential compromises of the intermediate artifacts caching systems.
- All upload jobs are executed in a [deployment environments](https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment) that are restricted to protected branches
- Security credentials needed to upload binaries to PyPI/conda or stable indexes `download.pytorch.org/whl` are never uploaded to repo secrets storage/environment. This requires an extra manual step to publish the release but ensures that access to those would not be compromised by deliberate/accidental leaks of secrets stored in the cloud.
- No binary artifacts should be published to GitHub releases pages, as these are overwritable by anyone with write permission to the repo.

@ -53,6 +53,11 @@ if(NOT BUILD_LITE_INTERPRETER)
|
||||
file(GLOB_RECURSE ATen_CORE_TEST_SRCS "core/*_test.cpp")
|
||||
endif()
|
||||
EXCLUDE(ATen_CORE_SRCS "${ATen_CORE_SRCS}" ${ATen_CORE_TEST_SRCS})
|
||||
# Exclude TensorImpl_test.cpp if compiling without Caffe2
|
||||
if(NOT BUILD_LITE_INTERPRETER)
|
||||
file(GLOB_RECURSE ATen_CORE_EXCLUDED_TEST_SRCS "core/TensorImpl_test.cpp")
|
||||
EXCLUDE(ATen_CORE_TEST_SRCS "${ATen_CORE_TEST_SRCS}" ${ATen_CORE_EXCLUDED_TEST_SRCS})
|
||||
endif()
|
||||
|
||||
file(GLOB base_h "*.h" "detail/*.h" "cpu/*.h" "cpu/vec/vec512/*.h" "cpu/vec/vec256/*.h" "cpu/vec/vec256/vsx/*.h" "cpu/vec/vec256/zarch/*.h" "cpu/vec/*.h" "quantized/*.h" "functorch/*.h")
|
||||
file(GLOB base_cpp "*.cpp" "detail/*.cpp" "cpu/*.cpp" "functorch/*.cpp")
|
||||
@ -468,7 +473,6 @@ endif()
|
||||
|
||||
if(USE_CUDA AND NOT USE_ROCM)
|
||||
list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/include)
|
||||
list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/tools/util/include)
|
||||
if($ENV{ATEN_STATIC_CUDA})
|
||||
list(APPEND ATen_CUDA_DEPENDENCY_LIBS
|
||||
${CUDA_LIBRARIES}
|
||||
|
||||
@ -56,14 +56,6 @@ void Context::setDeterministicCuDNN(bool b) {
|
||||
deterministic_cudnn = b;
|
||||
}
|
||||
|
||||
bool Context::deterministicMkldnn() const {
|
||||
return deterministic_mkldnn;
|
||||
}
|
||||
|
||||
void Context::setDeterministicMkldnn(bool b) {
|
||||
deterministic_mkldnn = b;
|
||||
}
|
||||
|
||||
bool Context::deterministicAlgorithms() const {
|
||||
return _deterministic_algorithms;
|
||||
}
|
||||
|
||||
@ -188,8 +188,6 @@ class TORCH_API Context {
|
||||
void setBenchmarkLimitCuDNN(int);
|
||||
bool deterministicCuDNN() const;
|
||||
void setDeterministicCuDNN(bool);
|
||||
bool deterministicMkldnn() const;
|
||||
void setDeterministicMkldnn(bool);
|
||||
bool userEnabledNNPACK() const;
|
||||
void setUserEnabledNNPACK(bool e);
|
||||
|
||||
@ -360,14 +358,13 @@ class TORCH_API Context {
|
||||
c10::once_flag thp_init;
|
||||
bool enabled_cudnn = true;
|
||||
bool deterministic_cudnn = false;
|
||||
bool deterministic_mkldnn = false;
|
||||
bool _deterministic_algorithms = false;
|
||||
bool _deterministic_algorithms_warn_only = false;
|
||||
bool _deterministic_fill_uninitialized_memory = true;
|
||||
bool enabled_flashSDP = true;
|
||||
bool enabled_mem_efficientSDP = true;
|
||||
bool enabled_mathSDP = true;
|
||||
bool enabled_cudnnSDP = false;
|
||||
bool enabled_cudnnSDP = true;
|
||||
#ifdef USE_ROCM
|
||||
bool benchmark_cudnn = true;
|
||||
#else
|
||||
|
||||
@ -303,7 +303,7 @@ Tensor FunctionalInverses::_nested_view_from_buffer_inverse(const Tensor& base,
|
||||
return Tensor();
|
||||
}
|
||||
|
||||
Tensor FunctionalInverses::_nested_view_from_jagged_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, const Tensor& offsets, const Tensor& dummy, const std::optional<Tensor>& lengths, int64_t ragged_idx, const c10::optional<Tensor>& min_seqlen, const c10::optional<Tensor>& max_seqlen) {
|
||||
Tensor FunctionalInverses::_nested_view_from_jagged_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, const Tensor& offsets, const Tensor& dummy, const std::optional<Tensor>& lengths, int64_t ragged_idx) {
|
||||
auto values = at::_nested_get_values(mutated_view);
|
||||
if (inverse_return_mode != InverseReturnMode::NeverView) {
|
||||
return values;
|
||||
@ -317,12 +317,7 @@ Tensor FunctionalInverses::_nested_get_values_inverse(const Tensor& base, const
|
||||
auto lengths = at::_nested_get_lengths(base);
|
||||
auto ragged_idx = at::_nested_get_ragged_idx(base);
|
||||
auto dummy = at::_nested_get_jagged_dummy(base);
|
||||
auto min_seqlen = at::_nested_get_min_seqlen(base);
|
||||
auto max_seqlen = at::_nested_get_max_seqlen(base);
|
||||
auto nt = at::_nested_view_from_jagged(
|
||||
mutated_view, offsets, dummy, lengths, ragged_idx,
|
||||
(min_seqlen.defined() ? c10::optional<Tensor>(min_seqlen) : c10::nullopt),
|
||||
(max_seqlen.defined() ? c10::optional<Tensor>(max_seqlen) : c10::nullopt));
|
||||
auto nt = at::_nested_view_from_jagged(mutated_view, offsets, dummy, lengths, ragged_idx);
|
||||
|
||||
if (inverse_return_mode != InverseReturnMode::NeverView) {
|
||||
return nt;
|
||||
|
||||
@ -55,10 +55,6 @@ class TORCH_API MapAllocator {
|
||||
return base_ptr_;
|
||||
}
|
||||
|
||||
int flags() const {
|
||||
return flags_;
|
||||
}
|
||||
|
||||
static MapAllocator* fromDataPtr(const at::DataPtr&);
|
||||
static at::DataPtr makeDataPtr(
|
||||
c10::string_view filename,
|
||||
|
||||
@ -35,12 +35,6 @@ void SavedTensorDefaultHooks::enable() {
|
||||
tls.disabled_error_message = c10::nullopt;
|
||||
}
|
||||
|
||||
/* static */ bool SavedTensorDefaultHooks::set_tracing(bool is_tracing) {
|
||||
bool prior = tls.is_tracing;
|
||||
tls.is_tracing = is_tracing;
|
||||
return prior;
|
||||
}
|
||||
|
||||
const std::optional<std::string>& SavedTensorDefaultHooks::get_disabled_error_message() {
|
||||
return tls.disabled_error_message;
|
||||
}
|
||||
@ -65,20 +59,25 @@ void SavedTensorDefaultHooks::push_hooks(PyObject* pack_hook, PyObject* unpack_h
|
||||
tls.stack.emplace(pack_hook, unpack_hook);
|
||||
}
|
||||
|
||||
std::pair<PyObject*, PyObject*> SavedTensorDefaultHooks::pop_hooks() {
|
||||
void SavedTensorDefaultHooks::pop_hooks() {
|
||||
// Reference counting is handled by the caller of `pop_hooks`
|
||||
TORCH_INTERNAL_ASSERT(is_initialized && !tls.stack.empty());
|
||||
std::pair<PyObject*, PyObject*> hooks = tls.stack.top();
|
||||
tls.stack.pop();
|
||||
return hooks;
|
||||
}
|
||||
|
||||
std::pair<PyObject*, PyObject*> SavedTensorDefaultHooks::get_hooks() {
|
||||
// For tls.is_tracing, see NOTE: [Deferring tensor pack/unpack hooks until runtime]
|
||||
if (!is_initialized || tls.stack.empty() || tls.is_tracing) {
|
||||
if (!is_initialized || tls.stack.empty()) {
|
||||
return std::make_pair(nullptr, nullptr);
|
||||
}
|
||||
return tls.stack.top();
|
||||
}
|
||||
|
||||
std::stack<std::pair<PyObject*, PyObject*>> SavedTensorDefaultHooks::get_stack() {
|
||||
return tls.stack;
|
||||
}
|
||||
|
||||
void SavedTensorDefaultHooks::set_stack(std::stack<std::pair<PyObject*, PyObject*>> stack_) {
|
||||
tls.stack = std::move(stack_);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -22,18 +22,17 @@ struct TORCH_API SavedTensorDefaultHooksTLS {
|
||||
// We did this for efficiency (so we didn't have to keep a separate bool
|
||||
// around)
|
||||
std::optional<std::string> disabled_error_message;
|
||||
|
||||
// See NOTE: [Deferring tensor pack/unpack hooks until runtime]
|
||||
bool is_tracing = false;
|
||||
};
|
||||
|
||||
} // namespace impl
|
||||
|
||||
struct TORCH_API SavedTensorDefaultHooks {
|
||||
static void push_hooks(PyObject* pack_hook, PyObject* unpack_hook);
|
||||
static std::pair<PyObject*, PyObject*> pop_hooks();
|
||||
static void pop_hooks();
|
||||
static std::pair<PyObject*, PyObject*> get_hooks();
|
||||
static void lazy_initialize();
|
||||
static std::stack<std::pair<PyObject*, PyObject*>> get_stack();
|
||||
static void set_stack(std::stack<std::pair<PyObject*, PyObject*>>);
|
||||
|
||||
static const impl::SavedTensorDefaultHooksTLS& get_tls_state();
|
||||
static void set_tls_state(const impl::SavedTensorDefaultHooksTLS& tls);
|
||||
@ -43,20 +42,11 @@ struct TORCH_API SavedTensorDefaultHooks {
|
||||
// hooks, especially if their feature does not work with it. If they are
|
||||
// disabled, then the following will raise an error:
|
||||
// - Attempting to push_hooks
|
||||
// - calling disable(message) with a non-zero stack (hooks) size
|
||||
// - calling disable(message) with a non-zero stack (from get_stack) size
|
||||
static void disable(const std::string& error_message);
|
||||
static void enable();
|
||||
static bool is_enabled();
|
||||
static const std::optional<std::string>& get_disabled_error_message();
|
||||
|
||||
// NOTE: [Deferring tensor pack/unpack hooks until runtime]
|
||||
// To preserve eager semantics of pack/unpack hooks firing only once per saved
|
||||
// variable, Dynamo/AOTAutograd need to defer hook firing until runtime. Using
|
||||
// disable() would loud error at trace time, and pushing a no-op hook would
|
||||
// fail when the traced code is wrapped in a disable_saved_tensors_hooks ctx.
|
||||
// To do so, we disable these hooks during tracing. See
|
||||
// https://github.com/pytorch/pytorch/issues/113263.
|
||||
static bool set_tracing(bool is_tracing);
|
||||
};
|
||||
|
||||
} // namespace at
|
||||
|
||||
@ -478,6 +478,8 @@ namespace impl {
|
||||
// (maybe except for some internal prim ops).
|
||||
using GenericList = List<IValue>;
|
||||
|
||||
const IValue* ptr_to_first_element(const GenericList& list);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -350,4 +350,11 @@ void List<T>::unsafeSetElementType(TypePtr t) {
|
||||
impl_->elementType = std::move(t);
|
||||
}
|
||||
|
||||
namespace impl {
|
||||
|
||||
inline const IValue* ptr_to_first_element(const GenericList& list) {
|
||||
return &list.impl_->list[0];
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
7
aten/src/ATen/core/TensorImpl_test.cpp
Normal file
7
aten/src/ATen/core/TensorImpl_test.cpp
Normal file
@ -0,0 +1,7 @@
|
||||
#include <caffe2/core/tensor.h>
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
TEST(TensorImplTest, Caffe2Constructor) {
|
||||
caffe2::Tensor tensor(caffe2::CPU);
|
||||
ASSERT_EQ(tensor.strides()[0], 1);
|
||||
}
|
||||
@ -2,10 +2,6 @@
|
||||
#if !defined(__s390x__ ) && !defined(__powerpc__)
|
||||
#include <cpuinfo.h>
|
||||
#endif
|
||||
#if defined(__linux__)
|
||||
#include <sys/syscall.h>
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
|
||||
namespace at::cpu {
|
||||
bool is_cpu_support_avx2() {
|
||||
@ -24,7 +20,7 @@ bool is_cpu_support_avx512() {
|
||||
#endif
|
||||
}
|
||||
|
||||
bool is_cpu_support_avx512_vnni() {
|
||||
bool is_cpu_support_vnni() {
|
||||
#if !defined(__s390x__) && !defined(__powerpc__)
|
||||
return cpuinfo_initialize() && cpuinfo_has_x86_avx512vnni();
|
||||
#else
|
||||
@ -32,47 +28,4 @@ bool is_cpu_support_avx512_vnni() {
|
||||
#endif
|
||||
}
|
||||
|
||||
bool is_cpu_support_amx_tile() {
|
||||
#if !defined(__s390x__) && !defined(__powerpc__)
|
||||
return cpuinfo_initialize() && cpuinfo_has_x86_amx_tile();
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
bool init_amx() {
|
||||
if (!is_cpu_support_amx_tile()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
#if defined(__linux__) && !defined(__ANDROID__)
|
||||
#define XFEATURE_XTILECFG 17
|
||||
#define XFEATURE_XTILEDATA 18
|
||||
#define XFEATURE_MASK_XTILECFG (1 << XFEATURE_XTILECFG)
|
||||
#define XFEATURE_MASK_XTILEDATA (1 << XFEATURE_XTILEDATA)
|
||||
#define XFEATURE_MASK_XTILE (XFEATURE_MASK_XTILECFG | XFEATURE_MASK_XTILEDATA)
|
||||
|
||||
#define ARCH_GET_XCOMP_PERM 0x1022
|
||||
#define ARCH_REQ_XCOMP_PERM 0x1023
|
||||
|
||||
unsigned long bitmask = 0;
|
||||
// Request permission to use AMX instructions
|
||||
long rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
|
||||
if (rc) {
|
||||
return false;
|
||||
}
|
||||
// Check if the system supports AMX instructions
|
||||
rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &bitmask);
|
||||
if (rc) {
|
||||
return false;
|
||||
}
|
||||
if (bitmask & XFEATURE_MASK_XTILE) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace at::cpu
|
||||
|
||||
@ -8,12 +8,6 @@ TORCH_API bool is_cpu_support_avx2();
|
||||
TORCH_API bool is_cpu_support_avx512();
|
||||
|
||||
// Detect if CPU support Vector Neural Network Instruction.
|
||||
TORCH_API bool is_cpu_support_avx512_vnni();
|
||||
|
||||
// Detect if CPU support Advanced Matrix Extension.
|
||||
TORCH_API bool is_cpu_support_amx_tile();
|
||||
|
||||
// Enable the system to use AMX instructions.
|
||||
TORCH_API bool init_amx();
|
||||
TORCH_API bool is_cpu_support_vnni();
|
||||
|
||||
} // namespace at::cpu
|
||||
|
||||
@ -794,16 +794,12 @@ Vectorized<BFloat16> inline clamp_min(const Vectorized<BFloat16>& a, const Vecto
|
||||
template <>
|
||||
inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) {
|
||||
int64_t i;
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (i = 0; i <= (n - Vectorized<BFloat16>::size()); i += Vectorized<BFloat16>::size()) {
|
||||
auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i)));
|
||||
_mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc);
|
||||
}
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (; i < n; i++) {
|
||||
dst[i] = src[i];
|
||||
}
|
||||
@ -996,16 +992,12 @@ Vectorized<Half> inline clamp_min(const Vectorized<Half>& a, const Vectorized<Ha
|
||||
template <>
|
||||
inline void convert(const Half* src, Half* dst, int64_t n) {
|
||||
int64_t i;
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (i = 0; i <= (n - Vectorized<Half>::size()); i += Vectorized<Half>::size()) {
|
||||
auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i)));
|
||||
_mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc);
|
||||
}
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (; i < n; i++) {
|
||||
dst[i] = src[i];
|
||||
}
|
||||
|
||||
@ -416,15 +416,11 @@ inline Vectorized<double> Vectorized<double>::le(const Vectorized<double>& other
|
||||
template <>
|
||||
inline void convert(const double* src, double* dst, int64_t n) {
|
||||
int64_t i;
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
|
||||
_mm256_storeu_pd(dst + i, _mm256_loadu_pd(src + i));
|
||||
}
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (; i < n; i++) {
|
||||
dst[i] = src[i];
|
||||
}
|
||||
|
||||
@ -512,15 +512,11 @@ inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) c
|
||||
template <>
|
||||
inline void convert(const float* src, float* dst, int64_t n) {
|
||||
int64_t i;
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
|
||||
_mm256_storeu_ps(dst + i, _mm256_loadu_ps(src + i));
|
||||
}
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (; i < n; i++) {
|
||||
dst[i] = src[i];
|
||||
}
|
||||
|
||||
@ -823,16 +823,12 @@ inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) c
|
||||
template <>
|
||||
inline void convert(const float* src, int32_t* dst, int64_t n) {
|
||||
int64_t i;
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
|
||||
vst1q_s32(dst + i, vcvtq_s32_f32(vld1q_f32(src + i)));
|
||||
vst1q_s32(dst + i + 4, vcvtq_s32_f32(vld1q_f32(src + i + 4)));
|
||||
}
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (; i < n; i++) {
|
||||
dst[i] = static_cast<int32_t>(src[i]);
|
||||
}
|
||||
@ -841,16 +837,12 @@ inline void convert(const float* src, int32_t* dst, int64_t n) {
|
||||
template <>
|
||||
inline void convert(const int32_t* src, float* dst, int64_t n) {
|
||||
int64_t i;
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
|
||||
vst1q_f32(dst + i, vcvtq_f32_s32(vld1q_s32(src + i)));
|
||||
vst1q_f32(dst + i + 4, vcvtq_f32_s32(vld1q_s32(src + i + 4)));
|
||||
}
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (; i < n; i++) {
|
||||
dst[i] = static_cast<float>(src[i]);
|
||||
}
|
||||
|
||||
@ -765,17 +765,13 @@ inline Vectorized<c10::Half> Vectorized<c10::Half>::le(
|
||||
template <>
|
||||
inline void convert(const float16_t* src, int16_t* dst, int64_t n) {
|
||||
int64_t i;
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (i = 0; i <= (n - Vectorized<c10::Half>::size());
|
||||
i += Vectorized<c10::Half>::size()) {
|
||||
vst1q_s16(dst + i, vcvtq_s16_f16(vld1q_f16(src + i)));
|
||||
vst1q_s16(dst + i + 8, vcvtq_s16_f16(vld1q_f16(src + i + 8)));
|
||||
}
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (; i < n; i++) {
|
||||
dst[i] = static_cast<int16_t>(src[i]);
|
||||
}
|
||||
@ -784,17 +780,13 @@ inline void convert(const float16_t* src, int16_t* dst, int64_t n) {
|
||||
template <>
|
||||
inline void convert(const int16_t* src, float16_t* dst, int64_t n) {
|
||||
int64_t i;
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (i = 0; i <= (n - Vectorized<c10::Half>::size());
|
||||
i += Vectorized<c10::Half>::size()) {
|
||||
vst1q_f16(dst + i, vcvtq_f16_s16(vld1q_s16(src + i)));
|
||||
vst1q_f16(dst + i + 8, vcvtq_f16_s16(vld1q_s16(src + i + 8)));
|
||||
}
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (; i < n; i++) {
|
||||
dst[i] = static_cast<float16_t>(src[i]);
|
||||
}
|
||||
|
||||
@ -765,10 +765,115 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
|
||||
const ElementType& operator[](int idx) const = delete;
|
||||
ElementType& operator[](int idx) = delete;
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator+(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec0 + other._vec0, _vec1 + other._vec1};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator-(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec0 - other._vec0, _vec1 - other._vec1};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator*(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec0 * other._vec0, _vec1 * other._vec1};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator/(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec0 / other._vec0, _vec1 / other._vec1};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator&(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{
|
||||
(vtype)(vecb0() & other.vecb0()), (vtype)(vecb1() & other.vecb1())};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator|(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{
|
||||
(vtype)(vecb0() | other.vecb0()), (vtype)(vecb1() | other.vecb1())};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator^(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{
|
||||
(vtype)(vecb0() ^ other.vecb0()), (vtype)(vecb1() ^ other.vecb1())};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator<<(const Vectorized<T> &other) const {
|
||||
constexpr ElementType max_shift = sizeof(ElementType) * CHAR_BIT;
|
||||
|
||||
ElementType a_array[Vectorized<T>::size()];
|
||||
ElementType b_array[Vectorized<T>::size()];
|
||||
ElementType c_array[Vectorized<T>::size()];
|
||||
|
||||
store(a_array);
|
||||
other.store(b_array);
|
||||
|
||||
for (int i = 0; i != Vectorized<T>::size(); i++) {
|
||||
T shift = b_array[i];
|
||||
if ((static_cast<std::make_signed_t<T>>(shift) < 0) || (shift >= max_shift)) {
|
||||
c_array[i] = 0;
|
||||
} else {
|
||||
c_array[i] = static_cast<std::make_unsigned_t<T>>(a_array[i]) << shift;
|
||||
}
|
||||
}
|
||||
|
||||
return loadu(c_array);
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator>>(const Vectorized<T> &other) const {
|
||||
// right shift value to retain sign bit for signed and no bits for unsigned
|
||||
constexpr ElementType max_shift = sizeof(T) * CHAR_BIT - std::is_signed_v<T>;
|
||||
|
||||
ElementType a_array[Vectorized<T>::size()];
|
||||
ElementType b_array[Vectorized<T>::size()];
|
||||
ElementType c_array[Vectorized<T>::size()];
|
||||
|
||||
store(a_array);
|
||||
other.store(b_array);
|
||||
|
||||
for (int i = 0; i != Vectorized<T>::size(); i++) {
|
||||
T shift = b_array[i];
|
||||
if ((static_cast<std::make_signed_t<T>>(shift) < 0) || (shift >= max_shift)) {
|
||||
c_array[i] = a_array[i] >> max_shift;
|
||||
} else {
|
||||
c_array[i] = a_array[i] >> shift;
|
||||
}
|
||||
}
|
||||
|
||||
return loadu(c_array);
|
||||
}
|
||||
|
||||
Vectorized<T> _not() const {
|
||||
return {(vtype)vec_nor(vecb0(), vecb0()), (vtype)vec_nor(vecb1(), vecb1())};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator==(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{
|
||||
vec_cmpeq(_vec0, other._vec0), vec_cmpeq(_vec1, other._vec1)};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator!=(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{
|
||||
vec_cmpeq(_vec0, other._vec0), vec_cmpeq(_vec1, other._vec1)}
|
||||
._not();
|
||||
}
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator>(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{
|
||||
vec_cmpgt(_vec0, other._vec0), vec_cmpgt(_vec1, other._vec1)};
|
||||
}
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator>=(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{
|
||||
vec_cmpge(_vec0, other._vec0), vec_cmpge(_vec1, other._vec1)};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator<(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{
|
||||
vec_cmplt(_vec0, other._vec0), vec_cmplt(_vec1, other._vec1)};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator<=(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{
|
||||
vec_cmple(_vec0, other._vec0), vec_cmple(_vec1, other._vec1)};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE eq(const Vectorized<T>& other) const {
|
||||
return (*this == other) & Vectorized<T>((T)1.0);
|
||||
}
|
||||
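The shift operators above do not rely on hardware shift semantics directly; they spill both operands to arrays and clamp the shift count per element so that out-of-range counts stay well defined. A scalar sketch of that per-element rule, assuming a built-in integer element type; the function names are illustrative, and the unsigned over-shift case is written as an explicit 0 to stay within defined C++ behaviour:

#include <climits>
#include <type_traits>

template <typename T>
T shift_left_elem(T a, T shift) {
  constexpr T max_shift = sizeof(T) * CHAR_BIT;
  // Negative or too-large shift counts yield 0 instead of undefined behaviour.
  if (static_cast<std::make_signed_t<T>>(shift) < 0 || shift >= max_shift)
    return 0;
  return static_cast<T>(static_cast<std::make_unsigned_t<T>>(a) << shift);
}

template <typename T>
T shift_right_elem(T a, T shift) {
  // Signed types keep the sign bit on over-shift; unsigned types lose all bits.
  constexpr T max_shift = sizeof(T) * CHAR_BIT - std::is_signed_v<T>;
  if (static_cast<std::make_signed_t<T>>(shift) < 0 || shift >= max_shift) {
    if constexpr (std::is_signed_v<T>)
      return a >> max_shift;  // all copies of the sign bit: 0 or -1
    else
      return 0;
  }
  return a >> shift;
}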
@ -1305,153 +1410,30 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
|
||||
}
|
||||
};
|
||||
|
||||
#define ZVECTOR_OPERATORS(typex) \
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator+(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec0() + b.vec0(), a.vec1() + b.vec1()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator-(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec0() - b.vec0(), a.vec1() - b.vec1()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator*(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec0() * b.vec0(), a.vec1() * b.vec1()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator/(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec0() / b.vec0(), a.vec1() / b.vec1()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator&(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{ \
|
||||
(Vectorized<typex>::vtype)(a.vecb0() & b.vecb0()), \
|
||||
(Vectorized<typex>::vtype)(a.vecb1() & b.vecb1())}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator|(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{ \
|
||||
(Vectorized<typex>::vtype)(a.vecb0() | b.vecb0()), \
|
||||
(Vectorized<typex>::vtype)(a.vecb1() | b.vecb1())}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator^(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{ \
|
||||
(Vectorized<typex>::vtype)(a.vecb0() ^ b.vecb0()), \
|
||||
(Vectorized<typex>::vtype)(a.vecb1() ^ b.vecb1())}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator==(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{ \
|
||||
vec_cmpeq(a.vec0(), b.vec0()), vec_cmpeq(a.vec1(), b.vec1())}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator!=(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{ \
|
||||
vec_cmpeq(a.vec0(), b.vec0()), vec_cmpeq(a.vec1(), b.vec1())} \
|
||||
._not(); \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator>(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{ \
|
||||
vec_cmpgt(a.vec0(), b.vec0()), vec_cmpgt(a.vec1(), b.vec1())}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator>=(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{ \
|
||||
vec_cmpge(a.vec0(), b.vec0()), vec_cmpge(a.vec1(), b.vec1())}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator<(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{ \
|
||||
vec_cmplt(a.vec0(), b.vec0()), vec_cmplt(a.vec1(), b.vec1())}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator<=(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{ \
|
||||
vec_cmple(a.vec0(), b.vec0()), vec_cmple(a.vec1(), b.vec1())}; \
|
||||
}
|
||||
template <>
|
||||
inline Vectorized<int64_t> operator~(const Vectorized<int64_t>& a) {
|
||||
return a._not();
|
||||
}
|
||||
|
||||
ZVECTOR_OPERATORS(float)
|
||||
ZVECTOR_OPERATORS(double)
|
||||
ZVECTOR_OPERATORS(int8_t)
|
||||
ZVECTOR_OPERATORS(uint8_t)
|
||||
ZVECTOR_OPERATORS(uint16_t)
|
||||
ZVECTOR_OPERATORS(int16_t)
|
||||
ZVECTOR_OPERATORS(int32_t)
|
||||
ZVECTOR_OPERATORS(int64_t)
|
||||
template <>
|
||||
inline Vectorized<int32_t> operator~(const Vectorized<int32_t>& a) {
|
||||
return a._not();
|
||||
}
|
||||
|
||||
#undef ZVECTOR_OPERATORS
|
||||
template <>
|
||||
inline Vectorized<int16_t> operator~(const Vectorized<int16_t>& a) {
|
||||
return a._not();
|
||||
}
|
||||
|
||||
#define ZVECTOR_OPERATORS(typex) \
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator<<(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
constexpr Vectorized<typex>::ElementType max_shift \
|
||||
= sizeof(Vectorized<typex>::ElementType) * CHAR_BIT; \
|
||||
\
|
||||
Vectorized<typex>::ElementType a_array[Vectorized<typex>::size()]; \
|
||||
Vectorized<typex>::ElementType b_array[Vectorized<typex>::size()]; \
|
||||
Vectorized<typex>::ElementType c_array[Vectorized<typex>::size()]; \
|
||||
\
|
||||
a.store(a_array); \
|
||||
b.store(b_array); \
|
||||
\
|
||||
for (int i = 0; i != Vectorized<typex>::size(); i++) { \
|
||||
typex shift = b_array[i]; \
|
||||
if ((static_cast<std::make_signed_t<typex>>(shift) < 0) || (shift >= max_shift)) { \
|
||||
c_array[i] = 0; \
|
||||
} else { \
|
||||
c_array[i] = static_cast<std::make_unsigned_t<typex>>(a_array[i]) << shift; \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
return Vectorized<typex>::loadu(c_array); \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator>>(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
/* right shift value to retain sign bit for signed and no bits for unsigned */ \
|
||||
constexpr Vectorized<typex>::ElementType max_shift \
|
||||
= sizeof(typex) * CHAR_BIT - std::is_signed_v<typex>; \
|
||||
\
|
||||
Vectorized<typex>::ElementType a_array[Vectorized<typex>::size()]; \
|
||||
Vectorized<typex>::ElementType b_array[Vectorized<typex>::size()]; \
|
||||
Vectorized<typex>::ElementType c_array[Vectorized<typex>::size()]; \
|
||||
\
|
||||
a.store(a_array); \
|
||||
b.store(b_array); \
|
||||
\
|
||||
for (int i = 0; i != Vectorized<typex>::size(); i++) { \
|
||||
typex shift = b_array[i]; \
|
||||
if ((static_cast<std::make_signed_t<typex>>(shift) < 0) || (shift >= max_shift)) { \
|
||||
c_array[i] = a_array[i] >> max_shift; \
|
||||
} else { \
|
||||
c_array[i] = a_array[i] >> shift; \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
return Vectorized<typex>::loadu(c_array); \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
inline Vectorized<typex> operator~(const Vectorized<typex>& a) { \
|
||||
return a._not(); \
|
||||
}
|
||||
template <>
|
||||
inline Vectorized<int8_t> operator~(const Vectorized<int8_t>& a) {
|
||||
return a._not();
|
||||
}
|
||||
|
||||
ZVECTOR_OPERATORS(int8_t)
|
||||
ZVECTOR_OPERATORS(uint8_t)
|
||||
ZVECTOR_OPERATORS(uint16_t)
|
||||
ZVECTOR_OPERATORS(int16_t)
|
||||
ZVECTOR_OPERATORS(int32_t)
|
||||
ZVECTOR_OPERATORS(int64_t)
|
||||
|
||||
#undef ZVECTOR_OPERATORS
|
||||
template <>
|
||||
inline Vectorized<uint8_t> operator~(const Vectorized<uint8_t>& a) {
|
||||
return a._not();
|
||||
}
|
||||
|
||||
#define DEFINE_MAXMIN_FUNCS(operand_type) \
|
||||
template <> \
|
||||
@ -1994,6 +1976,55 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> {
|
||||
return Vectorized<U>{ret};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator+(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec + other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator-(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec - other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator*(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec * other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator/(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec / other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator&(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec & other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator|(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec | other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator^(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec ^ other._vec};
|
||||
}
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator==(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec == other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator!=(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec != other._vec};
|
||||
}
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator>(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec > other._vec};
|
||||
}
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator>=(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec >= other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator<(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec < other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator<=(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec <= other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE eq(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec.eq(other._vec)};
|
||||
}
|
||||
@ -2030,72 +2061,6 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> {
|
||||
}
|
||||
};
|
||||
|
||||
#define ZVECTOR_OPERATORS(typex) \
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator+(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() + b.vec()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator-(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() - b.vec()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator*(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() * b.vec()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator/(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() / b.vec()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator&(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() & b.vec()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator|(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() | b.vec()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator^(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() ^ b.vec()}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator==(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() == b.vec()}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator!=(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() != b.vec()}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator>(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() > b.vec()}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator>=(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() >= b.vec()}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator<(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() < b.vec()}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator<=(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() <= b.vec()}; \
|
||||
}
|
||||
|
||||
ZVECTOR_OPERATORS(c10::qint32)
|
||||
ZVECTOR_OPERATORS(c10::qint8)
|
||||
ZVECTOR_OPERATORS(c10::quint8)
|
||||
|
||||
#undef ZVECTOR_OPERATORS
|
||||
|
||||
DEFINE_CLAMP_MAXMIN_FUNCS(c10::quint8)
|
||||
DEFINE_CLAMP_MAXMIN_FUNCS(c10::qint8)
|
||||
DEFINE_CLAMP_MAXMIN_FUNCS(c10::qint32)
|
||||
@ -2399,6 +2364,35 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_complex<T>()>> {
|
||||
return Vectorized<T>{a00, a01};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator+(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec + other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator-(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec - other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> inline operator*(const Vectorized<T>& b) const {
|
||||
//(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
|
||||
vinner_type bv = b.vec();
|
||||
#if !defined(ZVECTOR_SIMULATE_X86_MULT)
|
||||
// this is more z arch friendly than simulating horizontal from x86
|
||||
vinner_type vi = bv.mergeo();
|
||||
vinner_type vr = bv.mergee();
|
||||
vi = vi ^ rsign_mask<underline_type>();
|
||||
vinner_type ret = _vec * vr;
|
||||
vinner_type vx_swapped = _vec.swapped();
|
||||
ret = fmadd(vx_swapped, vi, ret);
|
||||
#else
|
||||
vinner_type ac_bd = _vec * b;
|
||||
vinner_type d_c = bv.swapped();
|
||||
d_c = d_c ^ isign_mask<underline_type>();
|
||||
vinner_type ad_bc = _vec * d_c;
|
||||
vinner_type ret = vinner_type::horizontal_sub_perm(ac_bd, ad_bc);
|
||||
#endif
|
||||
return Vectorized<T>{ret};
|
||||
}
|
||||
|
||||
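The operator* above uses the textbook identity noted in its comment, (a + bi)(c + di) = (ac - bd) + (ad + bc)i, built from a mergee/mergeo split of b plus a swapped fused multiply-add. A scalar reference of the same identity, useful for checking the lane arithmetic; purely illustrative, not library code:

#include <cassert>
#include <complex>

std::complex<float> cmul_ref(std::complex<float> x, std::complex<float> y) {
  float a = x.real(), b = x.imag(), c = y.real(), d = y.imag();
  // The vector path computes (a*c, b*c) first, then the swapped fmadd adds
  // (-b*d, a*d) because the real lane of the imaginary broadcast has its sign flipped.
  return {a * c - b * d, a * d + b * c};
}

int main() {
  std::complex<float> x{1.5f, -2.0f}, y{0.25f, 4.0f};
  assert(std::abs(cmul_ref(x, y) - x * y) < 1e-5f);
  return 0;
}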
template <
|
||||
typename U = T,
|
||||
std::enable_if_t<std::is_same<U, c10::complex<float>>::value, int> = 0>
|
||||
@ -2424,6 +2418,29 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_complex<T>()>> {
|
||||
return { v0, v1 };
|
||||
}
|
||||
|
||||
Vectorized<T> inline operator/(const Vectorized<T>& b) const {
|
||||
// Unfortunately, this breaks some tests
|
||||
// Implement it like it's done for avx2
|
||||
auto fabs_cd = b.vec().abs(); // |c| |d|
|
||||
auto fabs_dc = fabs_cd.swapped(); // |d| |c|
|
||||
auto scale = vinner_type {1.0} / maximum(fabs_cd, fabs_dc); // 1/sc 1/sc
|
||||
auto a2 = vec() * scale; // a/sc b/sc
|
||||
auto b2 = b.vec() * scale; // c/sc d/sc
|
||||
auto acbd2 = a2 * b2; // ac/sc^2 bd/sc^2
|
||||
|
||||
auto dc2 = b2.swapped(); // d/sc c/sc
|
||||
dc2 = Vectorized<T>::real_neg(dc2); // -d/|c,d| c/sc
|
||||
auto adbc2 = a2 * dc2; // -ad/sc^2 bc/sc^2
|
||||
auto sum1 = acbd2 + acbd2.swapped(); // (ac+bd)/sc^2 (ac+bd)/sc^2
|
||||
auto sum2 = adbc2 + adbc2.swapped(); // (bc-ad)/sc^2 (bc-ad)/sc^2
|
||||
auto res2 = vinner_type::mergee(sum1, sum2); // (ac+bd)/sc^2 (bc-ad)/sc^2
|
||||
|
||||
// get the denominator
|
||||
auto denom2 = Vectorized<T>{b2}.abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
|
||||
res2 = res2 / denom2;
|
||||
return Vectorized<T>{ res2 };
|
||||
}
|
||||
|
||||
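The operator/ above first rescales both operands by 1/max(|c|, |d|), as the AVX2 path does, so that c^2 + d^2 cannot overflow or underflow before the final divide; the quotient is (ac + bd)/(c^2 + d^2) + i(bc - ad)/(c^2 + d^2). A scalar sketch of the same scaled division, for illustration only:

#include <algorithm>
#include <cmath>
#include <complex>

std::complex<float> cdiv_scaled(std::complex<float> x, std::complex<float> y) {
  float a = x.real(), b = x.imag(), c = y.real(), d = y.imag();
  float sc = std::max(std::fabs(c), std::fabs(d));   // shared scale factor
  float a2 = a / sc, b2 = b / sc, c2 = c / sc, d2 = d / sc;
  float denom = c2 * c2 + d2 * d2;                   // (c^2 + d^2) / sc^2
  return {(a2 * c2 + b2 * d2) / denom,               // (ac + bd) / (c^2 + d^2)
          (b2 * c2 - a2 * d2) / denom};              // (bc - ad) / (c^2 + d^2)
}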
Vectorized<T> angle2_() const {
|
||||
auto b_a = _vec.swapped(); // b a
|
||||
return Vectorized<T>{_vec.atan2(b_a).swapped()};
|
||||
@ -2511,6 +2528,25 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_complex<T>()>> {
|
||||
return Vectorized<T>{_vec.trunc()};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator&(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec & other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator|(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec | other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator^(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec ^ other._vec};
|
||||
}
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator==(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec == other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE operator!=(const Vectorized<T>& other) const {
|
||||
return Vectorized<T>{_vec != other._vec};
|
||||
}
|
||||
|
||||
Vectorized<T> C10_ALWAYS_INLINE eq(const Vectorized<T>& other) const {
|
||||
auto eq = _vec.eq(other._vec); // compares real and imag individually
|
||||
// If both real numbers and imag numbers are equal, then the complex numbers are equal
|
||||
@ -2612,6 +2648,22 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_complex<T>()>> {
|
||||
return sqrt().reciprocal();
|
||||
}
|
||||
|
||||
Vectorized<T> operator<(const Vectorized<T>& other) const {
|
||||
TORCH_CHECK(false, "not supported for complex numbers");
|
||||
}
|
||||
|
||||
Vectorized<T> operator<=(const Vectorized<T>& other) const {
|
||||
TORCH_CHECK(false, "not supported for complex numbers");
|
||||
}
|
||||
|
||||
Vectorized<T> operator>(const Vectorized<T>& other) const {
|
||||
TORCH_CHECK(false, "not supported for complex numbers");
|
||||
}
|
||||
|
||||
Vectorized<T> operator>=(const Vectorized<T>& other) const {
|
||||
TORCH_CHECK(false, "not supported for complex numbers");
|
||||
}
|
||||
|
||||
Vectorized<T> lt(const Vectorized<T>& other) const {
|
||||
TORCH_CHECK(false, "not supported for complex numbers");
|
||||
}
|
||||
@ -2629,101 +2681,6 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_complex<T>()>> {
|
||||
}
|
||||
};
|
||||
|
||||
#define ZVECTOR_OPERATORS(typex) \
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator+(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() + b.vec()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator-(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() - b.vec()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> inline operator*(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
/* (a + bi) * (c + di) = (ac - bd) + (ad + bc)i */ \
|
||||
Vectorized<typex>::vinner_type bv = b.vec(); \
|
||||
\
|
||||
/* this is more z arch friendly than simulating horizontal from x86 */ \
|
||||
Vectorized<typex>::vinner_type vi = bv.mergeo(); \
|
||||
Vectorized<typex>::vinner_type vr = bv.mergee(); \
|
||||
vi = vi ^ Vectorized<typex>::vinner_type(rsign_mask<Vectorized<typex>::underline_type>()); \
|
||||
Vectorized<typex>::vinner_type ret = a.vec() * vr; \
|
||||
Vectorized<typex>::vinner_type vx_swapped = a.vec().swapped(); \
|
||||
ret = fmadd(vx_swapped, vi, ret); \
|
||||
\
|
||||
return Vectorized<typex>{ret}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> inline operator/(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
/* Unfortunately, this breaks some tests */ \
|
||||
/* Implement it like it's done for avx2 */ \
|
||||
auto fabs_cd = b.vec().abs(); /* |c| |d| */ \
|
||||
auto fabs_dc = fabs_cd.swapped(); /* |d| |c| */ \
|
||||
auto scale = Vectorized<typex>::vinner_type {1.0} / maximum(fabs_cd, fabs_dc); /* 1/sc 1/sc */ \
|
||||
auto a2 = a.vec() * scale; /* a/sc b/sc */ \
|
||||
auto b2 = b.vec() * scale; /* c/sc d/sc */ \
|
||||
auto acbd2 = a2 * b2; /* ac/sc^2 bd/sc^2 */ \
|
||||
\
|
||||
auto dc2 = b2.swapped(); /* d/sc c/sc */ \
|
||||
dc2 = Vectorized<typex>::real_neg(dc2); /* -d/|c,d| c/sc */ \
|
||||
auto adbc2 = a2 * dc2; /* -ad/sc^2 bc/sc^2 */ \
|
||||
auto sum1 = acbd2 + acbd2.swapped(); /* (ac+bd)/sc^2 (ac+bd)/sc^2 */ \
|
||||
auto sum2 = adbc2 + adbc2.swapped(); /* (bc-ad)/sc^2 (bc-ad)/sc^2 */ \
|
||||
auto res2 = Vectorized<typex>::vinner_type::mergee(sum1, sum2); /* (ac+bd)/sc^2 (bc-ad)/sc^2 */ \
|
||||
\
|
||||
/* get the denominator */ \
|
||||
Vectorized<typex>::vinner_type denom2 = Vectorized<typex>{b2}.abs_2_(); /* (c^2+d^2)/sc^2 (c^2+d^2)/sc^2 */ \
|
||||
res2 = res2 / denom2; \
|
||||
return Vectorized<typex>{ res2 }; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator&(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() & b.vec()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator|(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() | b.vec()}; \
|
||||
} \
|
||||
\
|
||||
template <> \
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator^(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() ^ b.vec()}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator==(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() == b.vec()}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator!=(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
return Vectorized<typex>{a.vec() != b.vec()}; \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator<(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
TORCH_CHECK(false, "not supported for complex numbers"); \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator<=(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
TORCH_CHECK(false, "not supported for complex numbers"); \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator>(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
TORCH_CHECK(false, "not supported for complex numbers"); \
|
||||
} \
|
||||
\
|
||||
Vectorized<typex> C10_ALWAYS_INLINE operator>=(const Vectorized<typex>& a, const Vectorized<typex>& b) { \
|
||||
TORCH_CHECK(false, "not supported for complex numbers"); \
|
||||
}
|
||||
|
||||
ZVECTOR_OPERATORS(c10::complex<float>)
|
||||
ZVECTOR_OPERATORS(c10::complex<double>)
|
||||
|
||||
#undef ZVECTOR_OPERATORS
|
||||
|
||||
template <typename T, std::enable_if_t<(sizeof(T) == 8), int> = 0>
|
||||
std::pair<Vectorized<T>, Vectorized<T>> inline inner_interleave2(
|
||||
const Vectorized<T>& a,
|
||||
|
||||
@ -914,16 +914,12 @@ Vectorized<BFloat16> inline clamp_min(const Vectorized<BFloat16>& a, const Vecto
|
||||
template <>
|
||||
inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) {
|
||||
int64_t i;
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (i = 0; i <= (n - Vectorized<BFloat16>::size()); i += Vectorized<BFloat16>::size()) {
|
||||
auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i)));
|
||||
_mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc);
|
||||
}
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (; i < n; i++) {
|
||||
dst[i] = src[i];
|
||||
}
|
||||
@ -990,9 +986,7 @@ static inline void _transpose_mxn_half_16_16(__m256i t[], __m512i u[]) {
|
||||
// j0-j15 n0-n15
|
||||
// k0-k15 o0-o15
|
||||
// l0-l15 p0-p15
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(4)
|
||||
#endif
|
||||
for (int i = 0; i < 4; i++) {
|
||||
r[i] = _mm512_inserti64x4(_mm512_castsi256_si512(t[i]), t[i + 4], 0x01);
|
||||
r[i + 4] = _mm512_inserti64x4(_mm512_castsi256_si512(t[i + 8]), t[i + 12], 0x01);
|
||||
@ -1004,9 +998,7 @@ static inline void _transpose_mxn_half_16_16(__m256i t[], __m512i u[]) {
|
||||
// u3: c4c5 d4b5 c6c7 d6b7 c12c13 d12d13 c14c15 d14d15 g4g5 h4h5 g6g7 h6h7 g12g13 h12h13 g14g15 h14h15
|
||||
// i j m n
|
||||
// k l o p
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(4)
|
||||
#endif
|
||||
for (int i = 0; i < 8; i += 2) {
|
||||
u[i] = _mm512_unpacklo_epi32(r[i], r[i + 1]);
|
||||
u[i + 1] = _mm512_unpackhi_epi32(r[i], r[i + 1]);
|
||||
@ -1069,9 +1061,7 @@ static inline void _transpose_mxn_half_16_16(__m256i t[], __m512i u[]) {
|
||||
// 12-- 13--
|
||||
// 6-- 7--
|
||||
// 14-- 15--
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(4)
|
||||
#endif
|
||||
for (int i = 0; i < 4; i++) {
|
||||
u[i] = _mm512_permutex2var_epi16(r[i], const1, r[i + 4]);
|
||||
u[i + 4] = _mm512_permutex2var_epi16(r[i], const2, r[i + 4]);
|
||||
@ -1105,9 +1095,7 @@ inline void transpose_mxn<BFloat16, 16, 16>(
|
||||
// n: n0 n1 n2 n3 n4 n5 n6 n7 n8 n9 n10 n11 n12 n13 n14 n15
|
||||
// o: o0 o1 o2 o3 o4 o5 o6 o7 o8 o9 o10 o11 o12 o13 o14 o15
|
||||
// p: p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(16)
|
||||
#endif
|
||||
for (int i = 0; i < 16; i++) {
|
||||
t[i] = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i * ld_src));
|
||||
}
|
||||
@ -1115,9 +1103,7 @@ inline void transpose_mxn<BFloat16, 16, 16>(
|
||||
__m512i u[8];
|
||||
_transpose_mxn_half_16_16(t, u);
|
||||
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(8)
|
||||
#endif
|
||||
for (int i = 0; i < 8; i++) {
|
||||
_mm256_storeu_si256(
|
||||
reinterpret_cast<__m256i*>(dst + (i * 2) * ld_dst),
|
||||
@ -1139,9 +1125,7 @@ inline void transpose_mxn<Half, 16, 16>(
|
||||
__m256i t[16];
|
||||
// load from src to registers
|
||||
// Same matrix indices as above transpose_mxn<BFloat16, 16, 16>
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(16)
|
||||
#endif
|
||||
for (int i = 0; i < 16; i++) {
|
||||
t[i] = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i * ld_src));
|
||||
}
|
||||
@ -1149,9 +1133,7 @@ inline void transpose_mxn<Half, 16, 16>(
|
||||
__m512i u[8];
|
||||
_transpose_mxn_half_16_16(t, u);
|
||||
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(8)
|
||||
#endif
|
||||
for (int i = 0; i < 8; i++) {
|
||||
_mm256_storeu_si256(
|
||||
reinterpret_cast<__m256i*>(dst + (i * 2) * ld_dst),
|
||||
@ -1182,9 +1164,7 @@ static inline void _transpose_mxn_half_32_32(__m512i r[], __m512i d[]) {
|
||||
// t[16]: 512 544 513 545 514 546 515 547 520 552 521 553 522 554 523 555 528 ... 571
|
||||
// ...
|
||||
// t[31]: 964 996 965 997 966 998 967 999 972 1004 973 1005 974 1006 975 1007 980 ... 1023
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(16)
|
||||
#endif
|
||||
for (int i = 0; i < 16; ++i) {
|
||||
d[i * 2] = _mm512_unpacklo_epi16(r[i * 2], r[i * 2 + 1]);
|
||||
d[i * 2 + 1] = _mm512_unpackhi_epi16(r[i * 2], r[i * 2 + 1]);
|
||||
@ -1209,9 +1189,7 @@ static inline void _transpose_mxn_half_32_32(__m512i r[], __m512i d[]) {
|
||||
// t[16]: 512 544 576 608 513 545 577 609 520 552 584 616 521 553 585 617 528 ... 633
|
||||
// ...
|
||||
// t[31]: 902 934 966 998 903 935 967 999 910 942 974 1006 911 943 975 1007 918 ... 1023
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(8)
|
||||
#endif
|
||||
for (int i = 0; i < 8; ++i) {
|
||||
r[i * 4] = _mm512_unpacklo_epi32(d[i * 4], d[i * 4 + 2]);
|
||||
r[i * 4 + 1] = _mm512_unpackhi_epi32(d[i * 4], d[i * 4 + 2]);
|
||||
@ -1238,9 +1216,7 @@ static inline void _transpose_mxn_half_32_32(__m512i r[], __m512i d[]) {
|
||||
// t[16]: 512 544 576 608 640 672 704 736 520 552 584 616 648 680 712 744 528 ... 760
|
||||
// ...
|
||||
// t[31]: 775 807 839 871 903 935 967 999 783 815 847 879 911 943 975 1007 791 ... 1023
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(4)
|
||||
#endif
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
d[i * 8] = _mm512_unpacklo_epi64(r[i * 8], r[i * 8 + 4]);
|
||||
d[i * 8 + 1] = _mm512_unpackhi_epi64(r[i * 8], r[i * 8 + 4]);
|
||||
@ -1289,9 +1265,7 @@ static inline void _transpose_mxn_half_32_32(__m512i r[], __m512i d[]) {
|
||||
0x000000000000000a,
|
||||
0x0000000000000003,
|
||||
0x0000000000000002);
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(8)
|
||||
#endif
|
||||
for (int i = 0; i < 8; ++i) {
|
||||
r[i] = _mm512_permutex2var_epi64(d[i], /*idx*/const1, d[i + 8]);
|
||||
r[i + 8] = _mm512_permutex2var_epi64(d[i], /*idx*/const2, d[i + 8]);
|
||||
@ -1336,9 +1310,7 @@ static inline void _transpose_mxn_half_32_32(__m512i r[], __m512i d[]) {
|
||||
0x0000000000000006,
|
||||
0x0000000000000005,
|
||||
0x0000000000000004);
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(16)
|
||||
#endif
|
||||
for (int i = 0; i < 16; ++i) {
|
||||
d[i] = _mm512_permutex2var_epi64(r[i], /*idx*/const3, r[i + 16]);
|
||||
d[i + 16] = _mm512_permutex2var_epi64(r[i], /*idx*/const4, r[i + 16]);
|
||||
@ -1355,9 +1327,7 @@ inline void transpose_mxn<BFloat16, 32, 32>(
|
||||
int64_t ld_dst) {
|
||||
// Load from memory
|
||||
__m512i r[32];
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(32)
|
||||
#endif
|
||||
for (int i = 0; i < 32; ++i) {
|
||||
r[i] = _mm512_loadu_si512(reinterpret_cast<const __m512i*>(src + i* ld_src));
|
||||
}
|
||||
@ -1366,9 +1336,7 @@ inline void transpose_mxn<BFloat16, 32, 32>(
|
||||
_transpose_mxn_half_32_32(r, d);
|
||||
|
||||
// Store to dst
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(32)
|
||||
#endif
|
||||
for (int i = 0; i < 32; ++i) {
|
||||
_mm512_storeu_si512(dst + i* ld_dst, d[i]);
|
||||
}
|
||||
@ -1382,9 +1350,7 @@ inline void transpose_mxn<Half, 32, 32>(
|
||||
int64_t ld_dst) {
|
||||
// Load from memory
|
||||
__m512i r[32];
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(32)
|
||||
#endif
|
||||
for (int i = 0; i < 32; ++i) {
|
||||
r[i] = _mm512_loadu_si512(reinterpret_cast<const __m512i*>(src + i* ld_src));
|
||||
}
|
||||
@ -1393,9 +1359,7 @@ inline void transpose_mxn<Half, 32, 32>(
|
||||
_transpose_mxn_half_32_32(r, d);
|
||||
|
||||
// Store to dst
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll(32)
|
||||
#endif
|
||||
for (int i = 0; i < 32; ++i) {
|
||||
_mm512_storeu_si512(dst + i* ld_dst, d[i]);
|
||||
}
|
||||
@ -1550,16 +1514,12 @@ Vectorized<Half> inline clamp_min(const Vectorized<Half>& a, const Vectorized<Ha
|
||||
template <>
|
||||
inline void convert(const Half* src, Half* dst, int64_t n) {
|
||||
int64_t i;
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (i = 0; i <= (n - Vectorized<Half>::size()); i += Vectorized<Half>::size()) {
|
||||
auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i)));
|
||||
_mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc);
|
||||
}
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (; i < n; i++) {
|
||||
dst[i] = src[i];
|
||||
}
|
||||
|
||||
@ -443,15 +443,11 @@ inline Vectorized<double> Vectorized<double>::le(const Vectorized<double>& other
|
||||
template <>
|
||||
inline void convert(const double* src, double* dst, int64_t n) {
|
||||
int64_t i;
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
|
||||
_mm512_storeu_pd(dst + i, _mm512_loadu_pd(src + i));
|
||||
}
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (; i < n; i++) {
|
||||
dst[i] = src[i];
|
||||
}
|
||||
|
||||
@ -552,15 +552,11 @@ inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) c
|
||||
template <>
|
||||
inline void convert(const float* src, float* dst, int64_t n) {
|
||||
int64_t i;
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
|
||||
_mm512_storeu_ps(dst + i, _mm512_loadu_ps(src + i));
|
||||
}
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (; i < n; i++) {
|
||||
dst[i] = src[i];
|
||||
}
|
||||
|
||||
@ -42,15 +42,6 @@
|
||||
#define __FORCE_INLINE __forceinline
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_FULL_VER)
|
||||
/*
|
||||
https://learn.microsoft.com/en-us/cpp/overview/compiler-versions?view=msvc-170
|
||||
Use _MSC_FULL_VER to identify that the current compiler is MSVC;
Windows LLVM will not have this definition.
|
||||
*/
|
||||
#define __msvc_cl__
|
||||
#endif
|
||||
|
||||
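This is the macro that guards the many "#ifndef __msvc_cl__ / #pragma unroll" blocks in the hunks above: the unroll hint is only emitted for compilers other than classic MSVC, which does not support it and would otherwise warn about an unknown pragma. A small usage sketch with an illustrative function:

#include <cstddef>

float sum4(const float* x) {
  float s = 0.f;
#ifndef __msvc_cl__
#pragma unroll
#endif
  for (std::size_t i = 0; i < 4; ++i) {
    s += x[i];
  }
  return s;
}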
// These macros helped us unify vec_base.h
|
||||
#ifdef CPU_CAPABILITY_AVX512
|
||||
#if defined(__GNUC__)
|
||||
|
||||
@ -127,9 +127,7 @@ class VecMask {
|
||||
static VecMask<T, N> from(U* b) {
|
||||
using int_t = int_same_size_t<T>;
|
||||
__at_align__ T mask[size()];
|
||||
#ifndef __msvc_cl__
|
||||
#pragma unroll
|
||||
#endif
|
||||
for (int i = 0; i < size(); i++) {
|
||||
*(int_t*)(mask + i) = b[i] ? ~(int_t)0 : (int_t)0;
|
||||
}
|
||||
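VecMask::from above turns a buffer of boolean-like values into per-lane masks: a true lane becomes an all-ones bit pattern of the element width, a false lane becomes zero, which is what the blend and compare machinery expects. A scalar sketch of that conversion; the names are illustrative:

#include <cstdint>

template <typename IntT>
void bools_to_lane_masks(const bool* b, IntT* mask, int n) {
  for (int i = 0; i < n; ++i) {
    // ~IntT(0) is the all-ones pattern for this lane width.
    mask[i] = b[i] ? static_cast<IntT>(~IntT(0)) : IntT(0);
  }
}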
@ -259,7 +257,6 @@ VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator<, ~a& b)
|
||||
VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator==, ~(a ^ b))
|
||||
VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator>=, (a == b) | (a > b))
|
||||
VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator<=, (a == b) | (a < b))
|
||||
VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator!=, (a ^ b))
|
||||
|
||||
#undef VEC_MASK_DEFINE_UNARY_OP_GLOBAL
|
||||
#undef VEC_MASK_DEFINE_BINARY_OP_GLOBAL
|
||||
|
||||
@ -334,13 +334,7 @@ static inline __device__ void gpuAtomicAddNoReturn(double *address, double val)
|
||||
|
||||
/* Special case fp32 atomic. */
#if defined(USE_ROCM)
static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) {
#if defined(__gfx908__)
  atomicAddNoRet(address, val);
#else
  (void)unsafeAtomicAdd(address, val);
#endif
}
static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { atomicAddNoRet(address, val); }
#else
static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { gpuAtomicAdd(address, val); }
#endif
|
||||
|
||||
@ -152,6 +152,9 @@ void CUDAGeneratorState::register_graph(cuda::CUDAGraph* graph) {
|
||||
* Unregisters a CUDA graph from the RNG state.
|
||||
*/
|
||||
void CUDAGeneratorState::unregister_graph(cuda::CUDAGraph* graph) {
|
||||
// Ensures that the RNG state is not currently being captured.
|
||||
at::cuda::assertNotCapturing(
|
||||
"Cannot unregister the state during capturing stage.");
|
||||
// Verify the graph was previously registered.
|
||||
TORCH_CHECK(
|
||||
registered_graphs_.find(graph) != registered_graphs_.end(),
|
||||
|
||||
@ -170,43 +170,6 @@ CUDA_STUB3(cuLinkComplete, CUlinkState, void **, size_t *);
|
||||
CUDA_STUB3(cuFuncSetAttribute, CUfunction, CUfunction_attribute, int);
|
||||
CUDA_STUB3(cuFuncGetAttribute, int*, CUfunction_attribute, CUfunction);
|
||||
|
||||
#if defined(CUDA_VERSION) && CUDA_VERSION >= 12000
|
||||
CUresult CUDAAPI
|
||||
cuTensorMapEncodeTiled(
|
||||
CUtensorMap* tensorMap,
|
||||
CUtensorMapDataType tensorDataType,
|
||||
cuuint32_t tensorRank,
|
||||
void* globalAddress,
|
||||
const cuuint64_t* globalDim,
|
||||
const cuuint64_t* globalStrides,
|
||||
const cuuint32_t* boxDim,
|
||||
const cuuint32_t* elementStrides,
|
||||
CUtensorMapInterleave interleave,
|
||||
CUtensorMapSwizzle swizzle,
|
||||
CUtensorMapL2promotion l2Promotion,
|
||||
CUtensorMapFloatOOBfill oobFill) {
|
||||
auto fn = reinterpret_cast<decltype(&cuTensorMapEncodeTiled)>(
|
||||
getCUDALibrary().sym(__func__));
|
||||
if (!fn)
|
||||
throw std::runtime_error("Can't get cuTensorMapEncodeTiled");
|
||||
lazyNVRTC.cuTensorMapEncodeTiled = fn;
|
||||
return fn(
|
||||
tensorMap,
|
||||
tensorDataType,
|
||||
tensorRank,
|
||||
globalAddress,
|
||||
globalDim,
|
||||
globalStrides,
|
||||
boxDim,
|
||||
elementStrides,
|
||||
interleave,
|
||||
swizzle,
|
||||
l2Promotion,
|
||||
oobFill);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
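The cuTensorMapEncodeTiled stub above follows the lazyNVRTC pattern: on first call it resolves the real entry point from the driver library by name, caches the pointer, then forwards the arguments. A minimal sketch of that resolve-and-cache pattern, using dlopen/dlsym in place of getCUDALibrary().sym(); the library and symbol names here are hypothetical:

#include <dlfcn.h>
#include <stdexcept>

using add_fn = int (*)(int, int);

int lazy_add(int a, int b) {
  static add_fn cached = nullptr;  // resolved once, then reused
  if (!cached) {
    void* handle = dlopen("libexample.so", RTLD_LAZY);  // hypothetical library
    if (!handle) throw std::runtime_error("Can't open libexample.so");
    cached = reinterpret_cast<add_fn>(dlsym(handle, "example_add"));  // hypothetical symbol
    if (!cached) throw std::runtime_error("Can't get example_add");
  }
  return cached(a, b);
}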
// Irregularly shaped functions
|
||||
CUresult CUDAAPI cuLaunchKernel(CUfunction f,
|
||||
unsigned int gridDimX,
|
||||
|
||||
@ -59,25 +59,16 @@ namespace at { namespace cuda {
|
||||
_(cuLinkAddData) \
|
||||
_(cuLinkComplete) \
|
||||
_(cuFuncSetAttribute) \
|
||||
_(cuFuncGetAttribute) \
|
||||
|
||||
#if defined(CUDA_VERSION) && CUDA_VERSION >= 12000
|
||||
#define AT_FORALL_NVRTC_EXTENDED(_) \
|
||||
AT_FORALL_NVRTC_BASE(_) \
|
||||
_(cuTensorMapEncodeTiled)
|
||||
#else
|
||||
#define AT_FORALL_NVRTC_EXTENDED(_) \
|
||||
AT_FORALL_NVRTC_BASE(_)
|
||||
#endif
|
||||
_(cuFuncGetAttribute)
|
||||
|
||||
#if defined(CUDA_VERSION) && CUDA_VERSION >= 11010
|
||||
#define AT_FORALL_NVRTC(_) \
|
||||
AT_FORALL_NVRTC_EXTENDED(_) \
|
||||
AT_FORALL_NVRTC_BASE(_) \
|
||||
_(nvrtcGetCUBINSize) \
|
||||
_(nvrtcGetCUBIN)
|
||||
#else
|
||||
#define AT_FORALL_NVRTC(_) \
|
||||
AT_FORALL_NVRTC_EXTENDED(_)
|
||||
AT_FORALL_NVRTC_BASE(_)
|
||||
#endif
|
||||
|
||||
#else
|
||||
|
||||
@ -57,9 +57,6 @@ struct TORCH_API MPSHooksInterface : AcceleratorHooksInterface {
|
||||
virtual size_t getDriverAllocatedMemory() const {
|
||||
FAIL_MPSHOOKS_FUNC(__func__);
|
||||
}
|
||||
virtual size_t getRecommendedMaxMemory() const {
|
||||
FAIL_MPSHOOKS_FUNC(__func__);
|
||||
}
|
||||
virtual void setMemoryFraction(double /*ratio*/) const {
|
||||
FAIL_MPSHOOKS_FUNC(__func__);
|
||||
}
|
||||
|
||||
@ -324,8 +324,6 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchBatchedDecomposition, m) {
|
||||
OP_DECOMPOSE(type_as);
|
||||
OP_DECOMPOSE(linalg_diagonal);
|
||||
OP_DECOMPOSE(diagonal_copy);
|
||||
OP_DECOMPOSE(alias_copy);
|
||||
m.impl("as_strided_copy", native::as_strided_copy_symint);
|
||||
m.impl("pad", native::pad_symint);
|
||||
m.impl("_pad_circular", native::_pad_circular_symint);
|
||||
OP_DECOMPOSE(swapdims_);
|
||||
|
||||
@ -308,8 +308,6 @@ public:
|
||||
// total GPU memory allocated in the process by Metal driver; including
|
||||
// implicit allocations from MPS/MPSGraph frameworks and MPSHeapAllocatorImpl.
|
||||
size_t getDriverAllocatedMemory() const { return current_allocated_size(); }
|
||||
// recommended Max memory for Metal
|
||||
size_t getRecommendedMaxMemory() const { return max_device_size(); }
|
||||
// (see enum DebugVerbosity for description)
|
||||
uint32_t getDebugVerbosity() const { return m_debug_verbosity; }
|
||||
// returns the device that we allocate from
|
||||
|
||||
@ -794,9 +794,6 @@ struct TORCH_API MPSAllocator final : public IMPSAllocator {
|
||||
size_t getDriverAllocatedMemory() const override {
|
||||
return _getAllocImpl().getDriverAllocatedMemory();
|
||||
}
|
||||
size_t getRecommendedMaxMemory() const override {
|
||||
return _getAllocImpl().getRecommendedMaxMemory();
|
||||
}
|
||||
ssize_t getLowWatermarkValue() const override {
|
||||
return _getAllocImpl().getLowWatermarkValue();
|
||||
}
|
||||
|
||||
@ -33,7 +33,6 @@ public:
|
||||
virtual size_t getTotalAllocatedMemory() const = 0;
|
||||
virtual size_t getCurrentAllocatedMemory() const = 0;
|
||||
virtual size_t getDriverAllocatedMemory() const = 0;
|
||||
virtual size_t getRecommendedMaxMemory() const = 0;
|
||||
virtual std::pair<const void*, uint32_t> getSharedBufferPtr(const void* ptr) const = 0;
|
||||
virtual bool recordEvents(c10::ArrayRef<const void*> buffers) const = 0;
|
||||
virtual bool waitForEvents(c10::ArrayRef<const void*> buffers) const = 0;
|
||||
|
||||
@ -32,7 +32,6 @@ struct MPSHooks : public at::MPSHooksInterface {
|
||||
void emptyCache() const override;
|
||||
size_t getCurrentAllocatedMemory() const override;
|
||||
size_t getDriverAllocatedMemory() const override;
|
||||
size_t getRecommendedMaxMemory() const override;
|
||||
void setMemoryFraction(double ratio) const override;
|
||||
|
||||
// MPSProfiler interface
|
||||
|
||||
@ -80,10 +80,6 @@ size_t MPSHooks::getDriverAllocatedMemory() const {
|
||||
return at::mps::getIMPSAllocator()->getDriverAllocatedMemory();
|
||||
}
|
||||
|
||||
size_t MPSHooks::getRecommendedMaxMemory() const {
|
||||
return at::mps::getIMPSAllocator()->getRecommendedMaxMemory();
|
||||
}
|
||||
|
||||
void MPSHooks::setMemoryFraction(double ratio) const {
|
||||
at::mps::getIMPSAllocator()->setHighWatermarkRatio(ratio);
|
||||
}
|
||||
|
||||
@ -4,7 +4,6 @@
|
||||
#include <ATen/OpMathType.h>
|
||||
#include <ATen/Parallel.h>
|
||||
#include <c10/core/ScalarType.h>
|
||||
#include <c10/macros/Macros.h>
|
||||
#include <c10/util/Exception.h>
|
||||
#include <c10/util/Unroll.h>
|
||||
#include <c10/util/complex.h>
|
||||
@ -17,7 +16,6 @@
|
||||
#include <arm_neon.h>
|
||||
#endif
|
||||
|
||||
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-function")
|
||||
namespace {
|
||||
|
||||
/// Wrapper for const_cast<T*> with type-inference.
|
||||
@ -969,4 +967,3 @@ INSTANTIATE_VDOT_IMPL(c10::complex<double>);
|
||||
#undef INSTANTIATE_DOT_IMPL
|
||||
|
||||
} // namespace at::native
|
||||
C10_DIAGNOSTIC_POP()
|
||||
|
||||
@ -18,8 +18,10 @@ enum class GridSamplerPadding {Zeros, Border, Reflection};
|
||||
using detail::GridSamplerInterpolation;
|
||||
using detail::GridSamplerPadding;
|
||||
|
||||
namespace {
|
||||
|
||||
// See NOTE [ grid_sampler Native Functions ].
|
||||
inline void check_grid_sampler_common(
|
||||
void check_grid_sampler_common(
|
||||
const TensorBase& input,
|
||||
const TensorBase& grid
|
||||
) {
|
||||
@ -58,7 +60,7 @@ inline void check_grid_sampler_common(
|
||||
}
|
||||
|
||||
// See NOTE [ grid_sampler Native Functions ].
|
||||
inline void check_grid_sampler_2d(
|
||||
void check_grid_sampler_2d(
|
||||
const TensorBase& input,
|
||||
const TensorBase& grid
|
||||
) {
|
||||
@ -70,7 +72,7 @@ inline void check_grid_sampler_2d(
|
||||
}
|
||||
|
||||
// See NOTE [ grid_sampler Native Functions ].
|
||||
inline void check_grid_sampler_3d(
|
||||
void check_grid_sampler_3d(
|
||||
const TensorBase& input,
|
||||
const TensorBase& grid,
|
||||
int64_t interpolation_mode
|
||||
@ -89,7 +91,7 @@ inline void check_grid_sampler_3d(
|
||||
|
||||
// See NOTE [ grid_sampler Native Functions ].
|
||||
// cudnn does not support inputs larger than 1024.
|
||||
inline bool cond_cudnn_grid_sampler(
|
||||
bool cond_cudnn_grid_sampler(
|
||||
const TensorBase& input,
|
||||
const TensorBase& grid
|
||||
) {
|
||||
@ -102,4 +104,6 @@ inline bool cond_cudnn_grid_sampler(
|
||||
input.sym_size(1) <= 1024);
|
||||
}
|
||||
|
||||
} // anonymous namespace
|
||||
|
||||
} // namespace at::native
|
||||
|
||||
@ -5,7 +5,8 @@
|
||||
#include <ATen/TensorUtils.h>
|
||||
|
||||
namespace at::native {
|
||||
inline void multilabel_margin_loss_shape_check(
|
||||
namespace {
|
||||
static C10_UNUSED void multilabel_margin_loss_shape_check(
|
||||
int64_t& nframe,
|
||||
int64_t& dim,
|
||||
const int64_t& ndims,
|
||||
@ -34,7 +35,7 @@ namespace at::native {
|
||||
}
|
||||
}
|
||||
|
||||
inline void multi_margin_loss_shape_check(
|
||||
static C10_UNUSED void multi_margin_loss_shape_check(
|
||||
int64_t& nframe,
|
||||
int64_t& dim,
|
||||
const int64_t& ndims,
|
||||
@ -66,4 +67,6 @@ namespace at::native {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
} // anonymous namespace
|
||||
} // namespace at::native
|
||||
|
||||
@ -525,10 +525,10 @@ static Tensor cross_entropy_loss_prob_target(
|
||||
|
||||
switch (reduction) {
|
||||
case Reduction::Mean:
|
||||
if (input.sym_numel()==0){
|
||||
if (input.numel()==0){
|
||||
return -(input * target * weight_).sum().fill_(std::numeric_limits<double>::quiet_NaN());
|
||||
} else {
|
||||
return -(input * target * weight_).sum() / (input.sym_numel() / n_classes);
|
||||
return -(input * target * weight_).sum() / (input.numel() / n_classes);
|
||||
}
|
||||
case Reduction::Sum:
|
||||
return -(input * target * weight_).sum();
|
||||
@ -540,10 +540,10 @@ static Tensor cross_entropy_loss_prob_target(
|
||||
} else {
|
||||
switch (reduction) {
|
||||
case Reduction::Mean:
|
||||
if (input.sym_numel()==0){
|
||||
if (input.numel()==0){
|
||||
return -(input * target).sum().fill_(std::numeric_limits<double>::quiet_NaN());
|
||||
} else {
|
||||
return -(input * target).sum() / (input.sym_numel() / n_classes);
|
||||
return -(input * target).sum() / (input.numel() / n_classes);
|
||||
}
|
||||
case Reduction::Sum:
|
||||
return -(input * target).sum();
|
||||
|
||||
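In both Mean branches above the summed loss is divided by input.numel() / n_classes, i.e. the number of batch-and-spatial positions N, not by the total element count; the empty-input case fills the result with NaN so the mean of an empty loss is NaN rather than a division by zero. Written out, under the assumption (taken from the surrounding function, not this hunk) that input holds log-probabilities x and target holds class probabilities t with per-class weights w:

L_mean = - (1 / N) * sum_{n=1..N} sum_{c=1..C} w_c * t_{n,c} * x_{n,c},   where N = numel(x) / C

The second switch is the unweighted form of the same expression, with w_c = 1.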
@ -7,7 +7,7 @@
|
||||
|
||||
namespace at::native {
|
||||
|
||||
inline void check_max_pool1d(
|
||||
static void check_max_pool1d(
|
||||
const Tensor& self,
|
||||
IntArrayRef kernel_size,
|
||||
IntArrayRef stride,
|
||||
|
||||
@ -1195,6 +1195,15 @@ Tensor istft(const Tensor& self, const int64_t n_fft, const optional<int64_t> ho
|
||||
#undef REPR
|
||||
}
|
||||
|
||||
static Tensor istft(const Tensor& self, const int64_t n_fft, const optional<int64_t> hop_lengthOpt,
|
||||
const optional<int64_t> win_lengthOpt, const Tensor& window,
|
||||
const bool center, const bool normalized, const optional<bool> onesidedOpt,
|
||||
const optional<int64_t> lengthOpt) {
|
||||
return at::native::istft(
|
||||
self, n_fft, hop_lengthOpt, win_lengthOpt, window, center, normalized,
|
||||
onesidedOpt, lengthOpt, /*return_complex=*/false);
|
||||
}
|
||||
|
||||
void _fft_fill_with_conjugate_symmetry_(const Tensor& input, IntArrayRef dim_) {
|
||||
const auto input_sizes = input.sizes();
|
||||
const auto input_strides = input.strides();
|
||||
|
||||
@ -103,10 +103,10 @@ inline void check_supported_max_int_with_precision(int64_t n, const Tensor& tens
|
||||
// with max value if it is integer type
|
||||
inline Tensor& fill_empty_deterministic_(Tensor& tensor) {
|
||||
if (tensor.is_floating_point() || tensor.is_complex()) {
|
||||
AT_DISPATCH_V2(
|
||||
tensor.scalar_type(), "fill_empty_deterministic_", AT_WRAP([&]() {
|
||||
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
|
||||
kBFloat16, kHalf, tensor.scalar_type(), "fill_empty_deterministic_", [&]() {
|
||||
tensor.fill_(std::numeric_limits<scalar_t>::quiet_NaN());
|
||||
}), AT_EXPAND(AT_FLOATING_TYPES), AT_EXPAND(AT_COMPLEX_TYPES), AT_EXPAND(AT_FLOAT8_TYPES), kBFloat16, kHalf);
|
||||
});
|
||||
} else {
|
||||
AT_DISPATCH_V2(
|
||||
tensor.scalar_type(), "fill_empty_deterministic_", AT_WRAP([&]() {
|
||||
|
||||
@ -210,6 +210,7 @@
|
||||
#include <ATen/ops/zeros_native.h>
|
||||
#endif
|
||||
|
||||
#include <c10/util/StringUtil.h>
|
||||
#include <algorithm>
|
||||
#include <cstdint>
|
||||
#include <utility>
|
||||
@ -420,9 +421,8 @@ Tensor& set_storage_meta__symint(Tensor& result, Storage storage, c10::SymInt st
|
||||
// it. TODO: Actually this might not quite be correct if we use special
|
||||
// pointers to track whether or not fake cuda tensors are pinned or not
|
||||
const auto itemsize = result.dtype().itemsize();
|
||||
c10::SymInt new_size_bytes = result.is_contiguous()
|
||||
? at::detail::computeStorageNbytesContiguous(size, itemsize, std::move(storage_offset))
|
||||
: at::detail::computeStorageNbytes(size, stride, itemsize, std::move(storage_offset));
|
||||
c10::SymInt new_size_bytes = at::detail::computeStorageNbytes(
|
||||
size, stride, itemsize, std::move(storage_offset));
|
||||
// TODO: When there are unbacked SymInts, we unconditionally skip the
|
||||
// setter. This is technically wrong, but we cannot conveniently test
|
||||
// the real condition in many cases, because a lot of people are using
|
||||
@ -1619,7 +1619,7 @@ Tensor alias_with_sizes_and_strides(
|
||||
|
||||
Tensor reshape_symint(const Tensor& self, c10::SymIntArrayRef proposed_shape) {
|
||||
if (self.is_sparse()) {
|
||||
TORCH_CHECK(false, "reshape is not implemented for sparse tensors");
|
||||
AT_ERROR("reshape is not implemented for sparse tensors");
|
||||
}
|
||||
|
||||
if (self.is_contiguous() && !self.is_mkldnn()) {
|
||||
@ -1682,7 +1682,7 @@ Tensor _reshape_copy_symint(const Tensor& self, c10::SymIntArrayRef proposed_sha
|
||||
// minimize breakages.
|
||||
Tensor reshape(const Tensor& self, IntArrayRef proposed_shape) {
|
||||
if (self.is_sparse()) {
|
||||
TORCH_CHECK(false, "reshape is not implemented for sparse tensors");
|
||||
AT_ERROR("reshape is not implemented for sparse tensors");
|
||||
}
|
||||
DimVector shape = infer_size_dv(proposed_shape, self.numel());
|
||||
|
||||
|
||||
@ -103,7 +103,7 @@ DECLARE_DISPATCH(upsampling_bicubic2d, upsample_bicubic2d_kernel);
|
||||
DECLARE_DISPATCH(_upsampling_bicubic2d_aa, _upsample_bicubic2d_aa_kernel);
|
||||
DECLARE_DISPATCH(_upsampling_bicubic2d_aa, _upsample_bicubic2d_aa_backward_kernel);
|
||||
|
||||
inline C10_UNUSED std::array<int64_t, 3> upsample_1d_common_check(IntArrayRef input_size, IntArrayRef output_size) {
|
||||
static C10_UNUSED std::array<int64_t, 3> upsample_1d_common_check(IntArrayRef input_size, IntArrayRef output_size) {
|
||||
TORCH_CHECK(
|
||||
output_size.size() == 1,
|
||||
"It is expected output_size equals to 1, but got size ",
|
||||
@ -131,7 +131,7 @@ inline C10_UNUSED std::array<int64_t, 3> upsample_1d_common_check(IntArrayRef in
|
||||
return {nbatch, channels, output_width};
|
||||
}
|
||||
|
||||
inline C10_UNUSED std::array<int64_t, 4> upsample_2d_common_check(IntArrayRef input_size, IntArrayRef output_size) {
|
||||
static C10_UNUSED std::array<int64_t, 4> upsample_2d_common_check(IntArrayRef input_size, IntArrayRef output_size) {
|
||||
TORCH_CHECK(
|
||||
output_size.size() == 2,
|
||||
"It is expected output_size equals to 2, but got size ",
|
||||
@ -167,7 +167,7 @@ inline C10_UNUSED std::array<int64_t, 4> upsample_2d_common_check(IntArrayRef in
|
||||
return {nbatch, channels, output_height, output_width};
|
||||
}
|
||||
|
||||
inline C10_UNUSED
|
||||
static C10_UNUSED
|
||||
std::array<int64_t, 5> upsample_3d_common_check(IntArrayRef input_size, IntArrayRef output_size) {
|
||||
TORCH_CHECK(
|
||||
output_size.size() == 3,
|
||||
@ -365,7 +365,7 @@ inline int64_t nearest_exact_idx(
|
||||
typedef int64_t (*nearest_idx_fn_t)(int64_t, int64_t, int64_t, std::optional<double>);
|
||||
|
||||
template <typename scalar_t>
|
||||
scalar_t upsample_get_value_bounded(
|
||||
static scalar_t upsample_get_value_bounded(
|
||||
scalar_t* data,
|
||||
int64_t width,
|
||||
int64_t height,
|
||||
@ -377,7 +377,7 @@ scalar_t upsample_get_value_bounded(
|
||||
}
|
||||
|
||||
template <typename scalar_t>
|
||||
void upsample_increment_value_bounded(
|
||||
static void upsample_increment_value_bounded(
|
||||
scalar_t* data,
|
||||
int64_t width,
|
||||
int64_t height,
|
||||
@ -392,17 +392,17 @@ void upsample_increment_value_bounded(
|
||||
// Based on
|
||||
// https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm
|
||||
template <typename scalar_t>
|
||||
scalar_t cubic_convolution1(scalar_t x, scalar_t A) {
|
||||
inline scalar_t cubic_convolution1(scalar_t x, scalar_t A) {
|
||||
return ((A + 2) * x - (A + 3)) * x * x + 1;
|
||||
}
|
||||
|
||||
template <typename scalar_t>
|
||||
scalar_t cubic_convolution2(scalar_t x, scalar_t A) {
|
||||
inline scalar_t cubic_convolution2(scalar_t x, scalar_t A) {
|
||||
return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A;
|
||||
}
|
||||
|
||||
template <typename scalar_t>
|
||||
void get_cubic_upsample_coefficients(
|
||||
inline void get_cubic_upsample_coefficients(
|
||||
scalar_t coeffs[4],
|
||||
scalar_t t) {
|
||||
scalar_t A = -0.75;
|
||||
|
||||
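cubic_convolution1 and cubic_convolution2 above are the two branches of the bicubic convolution kernel from the Wikipedia reference in the comment, written in Horner form and evaluated with A = -0.75; get_cubic_upsample_coefficients fills the four interpolation weights from this kernel at the offsets of the neighbouring samples (the remainder of that function falls outside this hunk). The kernel itself is:

W(x) = (A + 2)|x|^3 - (A + 3)|x|^2 + 1        for |x| <= 1
W(x) = A|x|^3 - 5A|x|^2 + 8A|x| - 4A          for 1 < |x| < 2
W(x) = 0                                      otherwise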
@ -190,7 +190,8 @@ void gemm_transa_(
|
||||
}
|
||||
|
||||
template <typename scalar_t, typename opmath_t>
|
||||
void gemm_transb_impl(
|
||||
typename std::enable_if<std::is_same<scalar_t, opmath_t>::value, void>::type
|
||||
gemm_transb_(
|
||||
TransposeType transb,
|
||||
int64_t m,
|
||||
int64_t n,
|
||||
@ -200,9 +201,12 @@ void gemm_transb_impl(
|
||||
int64_t lda,
|
||||
const scalar_t* b,
|
||||
int64_t ldb,
|
||||
/* we expect pre-applied beta */
|
||||
opmath_t* c,
|
||||
opmath_t beta,
|
||||
scalar_t* c,
|
||||
int64_t ldc) {
|
||||
// c *= beta
|
||||
scale_(m, n, beta, c, ldc);
|
||||
|
||||
// c += alpha * (a @ b.T)
|
||||
for (const auto l : c10::irange(k)) {
|
||||
for (const auto j : c10::irange(n)) {
|
||||
@ -221,27 +225,6 @@ void gemm_transb_impl(
|
||||
}
|
||||
}
|
||||
|
||||
template <typename scalar_t, typename opmath_t>
|
||||
typename std::enable_if<std::is_same<scalar_t, opmath_t>::value, void>::type
|
||||
gemm_transb_(
|
||||
TransposeType transb,
|
||||
int64_t m,
|
||||
int64_t n,
|
||||
int64_t k,
|
||||
opmath_t alpha,
|
||||
const scalar_t* a,
|
||||
int64_t lda,
|
||||
const scalar_t* b,
|
||||
int64_t ldb,
|
||||
opmath_t beta,
|
||||
scalar_t* c,
|
||||
int64_t ldc) {
|
||||
// c *= beta
|
||||
scale_(m, n, beta, c, ldc);
|
||||
|
||||
gemm_transb_impl(transb, m, n, k, alpha, a, lda, b, ldb, c, ldc);
|
||||
}
|
||||
|
||||
// std::is_same<scalar_t, at::BFloat16> || std::is_same<scalar_t, at::Half>
|
||||
template <typename scalar_t, typename opmath_t>
|
||||
typename std::enable_if<!std::is_same<scalar_t, opmath_t>::value, void>::type
|
||||
@ -258,45 +241,19 @@ gemm_transb_(
|
||||
opmath_t beta,
|
||||
scalar_t* c,
|
||||
int64_t ldc) {
|
||||
// We need to calculate full-precision dot products for correctness;
|
||||
// users notice error accumulation with reduced-width types (e.g.,
|
||||
// https://github.com/pytorch/pytorch/issues/95125 and
|
||||
// https://github.com/pytorch/pytorch/issues/83863, which were filed
|
||||
// when we used gemm_transb_impl naively, accumulating into
|
||||
// float16/bfloat16). The straightforward way to do this is to use
|
||||
// the vector dot column algorithm anyway, but this gives terrible
|
||||
// performance because of the non-contiguous matrix
|
||||
// access. Therefore, we instead elect to allocate temporary space
|
||||
// to hold the output at higher-precision so that we can accumulate
|
||||
// into it using the above cache-friendly "load one vector element,
|
||||
// FMA it with an entire matrix row into the entire result vector"
|
||||
// algorithm instead.
|
||||
const auto c_size = m * n;
|
||||
auto c_accum = std::make_unique<opmath_t[]>(c_size);
|
||||
if (beta == 1) {
|
||||
// c += alpha * (a @ b.T)
|
||||
for (const auto i : c10::irange(m)) {
|
||||
for (const auto j : c10::irange(n)) {
|
||||
for (const auto i : c10::irange(m)) {
|
||||
c_accum[j * m + i] = c[j * ldc + i];
|
||||
const auto dot = sum(k, [&](int64_t l) -> opmath_t {
|
||||
return static_cast<opmath_t>(a[l * lda + i]) *
|
||||
static_cast<opmath_t>(transb == TransposeType::ConjTranspose ? conj_impl(b[l * ldb + j]) : b[l * ldb + j]);
|
||||
});
|
||||
if (beta == opmath_t(0)) {
|
||||
c[j * ldc + i] = alpha * dot;
|
||||
} else {
|
||||
c[j * ldc + i] = beta * c[j * ldc + i] + alpha * dot;
|
||||
}
|
||||
}
|
||||
} else if (beta == 0) {
|
||||
for (const auto j : c10::irange(n)) {
|
||||
for (const auto i : c10::irange(m)) {
|
||||
c_accum[j * m + i] = 0;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (const auto j : c10::irange(n)) {
|
||||
for (const auto i : c10::irange(m)) {
|
||||
c_accum[j * m + i] = beta * c[j * ldc + i];
|
||||
}
|
||||
}
|
||||
}
|
||||
gemm_transb_impl(transb, m, n, k, alpha, a, lda, b, ldb, c_accum.get(), m);
|
||||
for (const auto j : c10::irange(n)) {
|
||||
for (const auto i : c10::irange(m)) {
|
||||
c[j * ldc + i] = c_accum[j * m + i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -43,14 +43,6 @@ void fill_kernel(TensorIterator& iter, const Scalar& value_scalar) {
|
||||
fill_non_native_type<at::BFloat16>(iter, value_scalar);
|
||||
} else if (iter.dtype() == ScalarType::ComplexHalf) {
|
||||
fill_non_native_type<c10::complex<at::Half>>(iter, value_scalar);
|
||||
} else if (iter.dtype() == ScalarType::Float8_e4m3fn) {
|
||||
fill_non_native_type<at::Float8_e4m3fn>(iter, value_scalar);
|
||||
} else if (iter.dtype() == ScalarType::Float8_e5m2) {
|
||||
fill_non_native_type<at::Float8_e5m2>(iter, value_scalar);
|
||||
} else if (iter.dtype() == ScalarType::Float8_e4m3fnuz) {
|
||||
fill_non_native_type<at::Float8_e4m3fnuz>(iter, value_scalar);
|
||||
} else if (iter.dtype() == ScalarType::Float8_e5m2fnuz) {
|
||||
fill_non_native_type<at::Float8_e5m2fnuz>(iter, value_scalar);
|
||||
} else {
|
||||
AT_DISPATCH_V2(
|
||||
iter.dtype(), "fill_cpu", AT_WRAP([&]() {
|
||||
|
||||
@ -22,9 +22,7 @@ namespace at::native {
|
||||
namespace {
|
||||
|
||||
// out = val * a + b
// is_b_stride_zero: If the stride of b is 0 (mask broadcasting case),
// take b as a scalar pointer.
|
||||
template <bool is_b_stride_zero, typename T1, typename T2>
|
||||
template <typename T1, typename T2>
|
||||
inline void _scale_attn_mask_fusion_kernel(
|
||||
T1* a,
|
||||
T2* b,
|
||||
@ -33,31 +31,20 @@ inline void _scale_attn_mask_fusion_kernel(
|
||||
T1& val) {
|
||||
const auto vec_size1 = at::vec::Vectorized<T1>::size();
|
||||
const auto vec_size2 = at::vec::Vectorized<T2>::size();
|
||||
constexpr int64_t T1_n =
|
||||
(vec_size2 == vec_size1 * 2 && is_reduced_floating_point_v<T2>) ? 2 : 1;
|
||||
constexpr int64_t T1_n = (vec_size2 == vec_size1 * 2 && is_reduced_floating_point_v<T2>) ? 2 : 1;
|
||||
constexpr int64_t T2_n = 1;
|
||||
auto vec_scale = at::vec::VectorizedN<T1, T1_n>(val);
|
||||
int64_t i = 0;
|
||||
for (; i < size - (size % vec_size2); i += vec_size2) {
|
||||
auto a_n = at::vec::VectorizedN<T1, T1_n>::loadu(a + i);
|
||||
at::vec::VectorizedN<T2, T2_n> b_n;
|
||||
if constexpr(is_b_stride_zero) {
|
||||
b_n = at::vec::VectorizedN<T2, T2_n>((T1)b[0]);
|
||||
} else {
|
||||
b_n = at::vec::VectorizedN<T2, T2_n>::loadu(b + i);
|
||||
}
|
||||
auto b_n = at::vec::VectorizedN<T2, T2_n>::loadu(b + i);
|
||||
auto b_n_convert = at::vec::convert<T1, T1_n, T2, T2_n, true>(b_n);
|
||||
auto res = a_n * vec_scale + b_n_convert;
|
||||
res.store(out + i);
|
||||
}
|
||||
for (; i < size; i++) {
|
||||
auto tmp0 = a[i];
|
||||
T1 tmp1;
|
||||
if constexpr(is_b_stride_zero) {
|
||||
tmp1 = (T1)b[0];
|
||||
} else {
|
||||
tmp1 = (T1)b[i];
|
||||
}
|
||||
auto tmp1 = (T1) b[i];
|
||||
out[i] = tmp0 * val + tmp1;
|
||||
}
|
||||
}
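// Illustrative aside (hedged sketch, not part of the diff above): a plain scalar
// version of the fused "out = val * a + b" mask kernel. A zero stride on b
// corresponds to broadcasting a single mask value across the whole row.
#include <cstdio>

template <bool is_b_stride_zero>
void scale_mask_add(const float* a, const float* b, float* out, int size, float val) {
  for (int i = 0; i < size; ++i) {
    // With a zero stride every element reads the same b[0]; otherwise b[i].
    const float m = is_b_stride_zero ? b[0] : b[i];
    out[i] = a[i] * val + m;
  }
}

int main() {
  const float a[4] = {1.f, 2.f, 3.f, 4.f};
  const float mask_row[4] = {0.f, -1.f, 0.f, -1.f};
  const float mask_scalar[1] = {-1.f};
  float out[4];

  scale_mask_add<false>(a, mask_row, out, 4, 0.5f);   // per-element mask
  std::printf("per-element: %g %g %g %g\n", out[0], out[1], out[2], out[3]);

  scale_mask_add<true>(a, mask_scalar, out, 4, 0.5f); // broadcast mask
  std::printf("broadcast:   %g %g %g %g\n", out[0], out[1], out[2], out[3]);
  return 0;
}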
|
||||
@ -249,13 +236,7 @@ void cpu_flash_attention(
|
||||
? attn_mask.value().stride(1)
|
||||
: 0;
|
||||
int64_t mStrideM =
|
||||
(has_attn_mask && attn_mask.value().size(2) > 1)
|
||||
? attn_mask.value().stride(2)
|
||||
: 0;
|
||||
int64_t mStrideN =
|
||||
(has_attn_mask && attn_mask.value().size(3) > 1)
|
||||
? attn_mask.value().stride(3)
|
||||
: 0;
|
||||
has_attn_mask ? attn_mask.value().stride(2) : 0;
|
||||
|
||||
int64_t qSplitSize = q_split_size > qSize ? qSize : q_split_size;
|
||||
int64_t kvSplitSize = kv_split_size > kvSize ? kvSize : kv_split_size;
|
||||
@ -342,23 +323,13 @@ void cpu_flash_attention(
|
||||
// qk <- qk * scaling + attn_mask
|
||||
if (has_attn_mask) {
|
||||
for (int64_t row = 0; row < qBlockSize; ++row) {
|
||||
if (mStrideN == 0) {
|
||||
_scale_attn_mask_fusion_kernel</*is_stride_0*/ true>(
|
||||
_scale_attn_mask_fusion_kernel(
|
||||
qk_data + row * kvBlockSize,
|
||||
mask_data + i * mStrideB + j * mStrideH +
|
||||
(m + row) * mStrideM,
|
||||
(m + row) * mStrideM + n,
|
||||
kvBlockSize,
|
||||
qk_data + row * kvBlockSize,
|
||||
scaling_factor);
|
||||
} else {
|
||||
_scale_attn_mask_fusion_kernel</*is_stride_0*/ false>(
|
||||
qk_data + row * kvBlockSize,
|
||||
mask_data + i * mStrideB + j * mStrideH +
|
||||
(m + row) * mStrideM + n,
|
||||
kvBlockSize,
|
||||
qk_data + row * kvBlockSize,
|
||||
scaling_factor);
|
||||
}
|
||||
}
|
||||
}
|
||||
// Update coefficients with Softmax
|
||||
@ -502,13 +473,7 @@ void cpu_flash_attention_backward(
|
||||
? attn_mask.value().stride(1)
|
||||
: 0;
|
||||
int64_t mStrideM =
|
||||
(has_attn_mask && attn_mask.value().size(2) > 1)
|
||||
? attn_mask.value().stride(2)
|
||||
: 0;
|
||||
int64_t mStrideN =
|
||||
(has_attn_mask && attn_mask.value().size(3) > 1)
|
||||
? attn_mask.value().stride(3)
|
||||
: 0;
|
||||
has_attn_mask ? attn_mask.value().stride(2) : 0;
|
||||
|
||||
int64_t grad_qStrideB = grad_q.stride(0);
|
||||
int64_t grad_qStrideM = grad_q.stride(1);
|
||||
@ -611,23 +576,13 @@ void cpu_flash_attention_backward(
|
||||
if (has_attn_mask) {
|
||||
accum_t one = accum_t(1);
|
||||
for (const auto row : c10::irange(qBlockSize)) {
|
||||
if (mStrideN == 0) {
|
||||
_scale_attn_mask_fusion_kernel</*is_stride_0*/ true>(
|
||||
attn_data + row * kvBlockSize,
|
||||
mask_data + i * mStrideB + j * mStrideH +
|
||||
(m + row) * mStrideM,
|
||||
kvBlockSize,
|
||||
attn_data + row * kvBlockSize,
|
||||
one);
|
||||
} else {
|
||||
_scale_attn_mask_fusion_kernel</*is_stride_0*/ false>(
|
||||
_scale_attn_mask_fusion_kernel(
|
||||
attn_data + row * kvBlockSize,
|
||||
mask_data + i * mStrideB + j * mStrideH +
|
||||
(m + row) * mStrideM + n,
|
||||
kvBlockSize,
|
||||
attn_data + row * kvBlockSize,
|
||||
one);
|
||||
}
|
||||
}
|
||||
}
|
||||
// restore self attention after softmax from logsumexp
|
||||
|
||||
@ -1,7 +1,3 @@
|
||||
#include <cstdint>
|
||||
#include <c10/util/Exception.h>
|
||||
#include <c10/core/Scalar.h>
|
||||
#include <c10/core/ScalarType.h>
|
||||
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
|
||||
#include <ATen/core/Tensor.h>
|
||||
#include <ATen/core/NamedTensor.h>
|
||||
@ -14,7 +10,6 @@
|
||||
#include <ATen/cuda/tunable/TunableGemm.h>
|
||||
#include <ATen/native/Resize.h>
|
||||
#include <c10/util/MaybeOwned.h>
|
||||
#include <ATen/native/cuda/RowwiseScaledMM.h>
|
||||
|
||||
#ifndef AT_PER_OPERATOR_HEADERS
|
||||
#include <ATen/Functions.h>
|
||||
@ -824,116 +819,34 @@ static bool _scaled_mm_allowed_device() {
|
||||
#endif
|
||||
}
|
||||
|
||||
namespace{
|
||||
|
||||
enum class ScalingType {
  TensorWise,
  RowWise,
  Error
};
/*
 * Scaling Type Determination:
 * ---------------------------
 * Conditions and corresponding Scaling Types:
 *
 * - If scale_a.numel() == 1 && scale_b.numel() == 1:
 *   - Returns TensorWise.
 *
 * - Else if scale_a.dim() == 1 && scale_a.size(0) == dim_m && scale_b.size(0) == dim_n:
 *   - Returns RowWise.
 *
 * - Otherwise:
 *   - Returns Error.
 */
|
||||
|
||||
// Validates the scale tensors to scaled_mm
|
||||
// And returns the type of scaling/which kernel to use
|
||||
ScalingType get_scaling_type(
|
||||
const at::Tensor& scale_a,
|
||||
const at::Tensor& scale_b,
|
||||
int64_t dim_m,
|
||||
int64_t dim_n) {
|
||||
// Both Per-Tensor and Row-wise scaling expect fp32 tensors
|
||||
TORCH_CHECK(
|
||||
scale_a.scalar_type() == kFloat && scale_b.scalar_type() == kFloat,
|
||||
"Both scale_a and scale_b must be float (fp32) tensors.");
|
||||
|
||||
|
||||
// Check the singular scale case for per-tensor scaling
|
||||
if (scale_a.numel() == 1 && scale_b.numel() == 1) {
|
||||
return ScalingType::TensorWise;
|
||||
} else if (scale_a.dim() == 1 && scale_a.size(0) == dim_m) {
|
||||
// Check the per-row scaling case
|
||||
#if !defined(USE_ROCM) && !defined(_MSC_VER) || \
|
||||
(defined(USE_ROCM) && ROCM_VERSION >= 60000)
|
||||
TORCH_CHECK(
|
||||
scale_a.dim() == 1 && scale_b.dim() == 1,
|
||||
"Both scale_a and scale_b must be 1-dimensional tensors");
|
||||
TORCH_CHECK(
|
||||
scale_b.size(0) == dim_n,
|
||||
"For row-wise scaling, scale_b must have size ",
|
||||
dim_n,
|
||||
" but got ",
|
||||
scale_b.size(0),
|
||||
".");
|
||||
TORCH_CHECK(
|
||||
scale_a.is_contiguous() && scale_b.is_contiguous(),
|
||||
"Both scale_a and scale_b must be contiguous.");
|
||||
return ScalingType::RowWise;
|
||||
#else
|
||||
TORCH_CHECK(false, "Per-row scaling is not supported for this platform!");
|
||||
return ScalingType::Error;
|
||||
#endif // !defined(USE_ROCM) && !defined(_MSC_VER) || (defined(USE_ROCM) &&
|
||||
// ROCM_VERSION >= 60000)
|
||||
} else {
|
||||
// Prettier Error Case messaging
|
||||
TORCH_CHECK(
|
||||
false,
|
||||
"For row-wise scaling, scale_a must be size ",
|
||||
dim_m,
|
||||
" but got ",
|
||||
scale_a.numel(),
|
||||
" and scale_b must be size ",
|
||||
dim_n,
|
||||
" but got ",
|
||||
scale_b.numel(),
|
||||
".");
|
||||
// Unreachable
|
||||
return ScalingType::RowWise;
|
||||
}
|
||||
return ScalingType::Error;
|
||||
}
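// Illustrative aside (hedged sketch, not part of the diff above): the same
// decision on plain shapes, with simple integers standing in for the tensor
// metadata that get_scaling_type inspects.
#include <cstdio>

enum class ScalingType { TensorWise, RowWise, Error };

ScalingType pick_scaling(long a_numel, long a_dim, long a_size0,
                         long b_numel, long b_size0,
                         long dim_m, long dim_n) {
  if (a_numel == 1 && b_numel == 1) {
    return ScalingType::TensorWise;  // one scale per matrix
  }
  if (a_dim == 1 && a_size0 == dim_m && b_size0 == dim_n) {
    return ScalingType::RowWise;     // one scale per row of A / column of B
  }
  return ScalingType::Error;
}

int main() {
  // TensorWise: both scales are single elements.
  std::printf("%d\n", static_cast<int>(pick_scaling(1, 0, 1, 1, 1, 128, 256)));
  // RowWise: scale_a has dim_m entries, scale_b has dim_n entries.
  std::printf("%d\n", static_cast<int>(pick_scaling(128, 1, 128, 256, 256, 128, 256)));
  return 0;
}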
|
||||
|
||||
} // namespace
|
||||
|
||||
// Computes matrix multiply + bias while applying scaling to input and output matrices and computes amax
|
||||
// Scales are only applicable when matrices are of Float8 type and assumed to be equal to 1.0 by default.
|
||||
// If output matrix type is 16 or 32-bit type, neither scale_result is applied nor amax is computed.
|
||||
// Known limitations:
|
||||
// - Only works if mat1 is row-major and mat2 is column-major
|
||||
// - Only works if matrices sizes are divisible by 32
|
||||
// - If 1-dimensional tensors are used then scale_a should be size = mat1.size(0)
|
||||
// and scale_b should have size = to mat2.size(1)
|
||||
//
|
||||
// Arguments:
|
||||
// - `mat1`: the first operand of the matrix multiply, can be type `torch.float8_e4m3fn` or `torch.float8_e5m2`
|
||||
// - `mat2`: the second operand of the matrix multiply, can be type `torch.float8_e4m3fn` or `torch.float8_e5m2`
|
||||
// - `bias`: the bias, can be type `torch.float16` or `torch.bfloat16`
|
||||
// - `out_dtype`: the output dtype, can either be a float8 or a higher precision floating point type
|
||||
// - `scale_a`: a scalar or 1-dimensional tensor with the inverse scale of `mat1`, only needed if `mat1` is a float8 type
|
||||
// - `scale_b`: a scalar or 1-dimensional tensor with the inverse scale of `mat2`, only needed if `mat2` is a float8 type
|
||||
// - `scale_result`: a scalar tensor with the scale of the output, only utilized if the output is a float8 type
|
||||
// - `scale_a`: a scalar tensor with the inverse scale of `mat1`, only needed if `mat1` is a float8 type
|
||||
// - `scale_b`: a scalar tensor with the inverse scale of `mat2`, only needed if `mat2` is a float8 type
|
||||
// - `scale_result`: a scalar tensor with the scale of the output, only set if the output is a float8 type
|
||||
// - `use_fast_accum`: if true, enables fast float8 accumulation
|
||||
// - `out`: a reference to the output tensor
|
||||
// - `amax`: a reference to the amax tensor of the output, only needed if the output is a float8 type and will be updated inplace
|
||||
|
||||
Tensor&
|
||||
std::tuple<Tensor&, Tensor&>
|
||||
_scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
|
||||
const Tensor& scale_a,
|
||||
const Tensor& scale_b,
|
||||
const std::optional<at::Tensor>& bias,
|
||||
const std::optional<at::Tensor>& scale_result,
|
||||
std::optional<c10::ScalarType> out_dtype,
|
||||
const std::optional<at::Tensor>& scale_a,
|
||||
const std::optional<at::Tensor>& scale_b,
|
||||
const std::optional<at::Tensor>& scale_result,
|
||||
bool use_fast_accum,
|
||||
Tensor& out) {
|
||||
Tensor& out, Tensor& amax) {
|
||||
// Check sizes
|
||||
bool allowed_device = _scaled_mm_allowed_device();
|
||||
TORCH_CHECK(allowed_device, "torch._scaled_mm is only supported on CUDA devices with compute capability >= 9.0 or 8.9, or ROCm MI300+");
|
||||
@ -942,11 +855,10 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
|
||||
TORCH_CHECK(
|
||||
mat1.sizes()[1] == mat2.sizes()[0], "mat1 and mat2 shapes cannot be multiplied (",
|
||||
mat1.sizes()[0], "x", mat1.sizes()[1], " and ", mat2.sizes()[0], "x", mat2.sizes()[1], ")");
|
||||
|
||||
// Check what type of scaling we are doing based on inputs
|
||||
ScalingType scaling_choice = get_scaling_type(scale_a, scale_b, mat1.size(0), mat2.size(1));
|
||||
TORCH_INTERNAL_ASSERT(scaling_choice != ScalingType::Error, "Scaling type not supported");
|
||||
|
||||
TORCH_CHECK(!scale_a || (scale_a->numel() == 1 && scale_a->scalar_type() == kFloat),
|
||||
"scale_a must be float scalar");
|
||||
TORCH_CHECK(!scale_b || (scale_b->numel() == 1 && scale_b->scalar_type() == kFloat),
|
||||
"scale_b must be a float scalar");
|
||||
TORCH_CHECK(!scale_result || (scale_result->numel() == 1 && scale_result->scalar_type() == kFloat),
|
||||
"scale_result must be a float scalar");
|
||||
TORCH_CHECK(!bias || bias->numel() == mat2.sizes()[1], "Bias must be size ", mat2.sizes()[1],
|
||||
@ -963,6 +875,7 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
|
||||
mat2.sizes()[1], " must be divisible by 16");
|
||||
// Check types
|
||||
TORCH_CHECK(!out_dtype || *out_dtype == out.scalar_type(), "out_dtype must match output matrix type");
|
||||
TORCH_CHECK(amax.scalar_type() == kFloat, "amax must be a float scalar");
|
||||
TORCH_CHECK(isFloat8Type(mat1.scalar_type()), "Expected mat1 to be Float8 matrix got ", mat1.scalar_type());
|
||||
TORCH_CHECK(isFloat8Type(mat2.scalar_type()), "Expected mat2 to be Float8 matrix got ", mat2.scalar_type());
|
||||
// Type restrictions imposed by CuBLASLt as of CUDA-12.1
|
||||
@ -980,39 +893,23 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
|
||||
}
|
||||
{
|
||||
auto bias_ = bias.value_or(Tensor());
|
||||
auto scale_a_ = scale_a.value_or(Tensor());
|
||||
auto scale_b_ = scale_b.value_or(Tensor());
|
||||
auto scale_result_ = scale_result.value_or(Tensor());
|
||||
|
||||
TensorArg targs[]{{out, "out", 0}, {mat1, "mat1", 1}, {mat2, "mat2", 2},
|
||||
{bias_, "bias", 3}, {scale_a, "scale_a", 4}, {scale_b, "scale_b", 5},
|
||||
{scale_result_, "scale_result", 6}};
|
||||
TensorArg targs[]{{out, "out", 0}, {amax, "amax", 1}, {mat1, "mat1", 2}, {mat2, "mat2", 3},
|
||||
{bias_, "bias", 4}, {scale_a_, "scale_a", 5}, {scale_b_, "scale_b", 6},
|
||||
{scale_result_, "scale_result", 7}};
|
||||
checkAllSameGPU(__func__, targs);
|
||||
}
|
||||
// Validation checks have passed, so let's resize the output to its actual size
|
||||
|
||||
IntArrayRef mat1_sizes = mat1.sizes();
|
||||
IntArrayRef mat2_sizes = mat2.sizes();
|
||||
at::native::resize_output(out, {mat1_sizes[0], mat2_sizes[1]});
|
||||
|
||||
// We are doing row-wise scaling
|
||||
if (scaling_choice == ScalingType::RowWise) {
|
||||
TORCH_CHECK(out.dtype() == kBFloat16, "Only bf16 high precision output types are supported for row-wise scaling.");
|
||||
at::cuda::detail::f8f8bf16_rowwise(
|
||||
mat1,
|
||||
mat2,
|
||||
scale_a,
|
||||
scale_b,
|
||||
bias,
|
||||
use_fast_accum,
|
||||
out);
|
||||
return out;
|
||||
}
|
||||
at::native::resize_output(amax, {});
|
||||
|
||||
cublasCommonArgs args(mat1, mat2, out);
|
||||
const auto out_dtype_ = args.result->scalar_type();
|
||||
TORCH_CHECK(args.transa == 't' && args.transb == 'n', "Only multiplication of row-major and column-major matrices is supported by cuBLASLt");
|
||||
|
||||
// Some scaled_gemms require an amax to populate, so let's create one here
|
||||
Tensor amax = at::empty({0}, mat1.options().dtype(ScalarType::Float));
|
||||
|
||||
#ifdef USE_ROCM
|
||||
auto tuning_ctx = at::cuda::tunable::getTuningContext();
|
||||
if (tuning_ctx->IsTunableOpEnabled()) {
|
||||
@ -1055,11 +952,11 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
|
||||
params.n = args.n;
|
||||
params.k = args.k;
|
||||
params.a = args.mata->data_ptr();
|
||||
params.a_scale_ptr = scale_a.data_ptr();
|
||||
params.a_scale_ptr = scale_a ? scale_a->data_ptr() : nullptr;
|
||||
params.lda = args.lda;
|
||||
params.a_dtype = args.mata->scalar_type();
|
||||
params.b = args.matb->data_ptr();
|
||||
params.b_scale_ptr = scale_b.data_ptr();
|
||||
params.b_scale_ptr = scale_b ? scale_b->data_ptr() : nullptr;
|
||||
params.ldb = args.ldb;
|
||||
params.b_dtype = args.matb->scalar_type();
|
||||
params.bias_ptr = bias ? bias->data_ptr(): nullptr;
|
||||
@ -1104,11 +1001,11 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
|
||||
args.n,
|
||||
args.k,
|
||||
args.mata->data_ptr(),
|
||||
scale_a.data_ptr(),
|
||||
scale_a ? scale_a->data_ptr() : nullptr,
|
||||
args.lda,
|
||||
args.mata->scalar_type(),
|
||||
args.matb->data_ptr(),
|
||||
scale_b.data_ptr(),
|
||||
scale_b ? scale_b->data_ptr() : nullptr,
|
||||
args.ldb,
|
||||
args.matb->scalar_type(),
|
||||
bias ? bias->data_ptr(): nullptr,
|
||||
@ -1125,20 +1022,26 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
|
||||
use_fast_accum);
|
||||
}
|
||||
|
||||
return out;
|
||||
#if defined(USE_ROCM) && ROCM_VERSION >= 60000 && ROCM_VERSION < 60200
|
||||
// ROCm's hipBLASLt does not support amax before 6.2, so calculate separately
|
||||
amax = at::max(at::abs(out.to(kFloat)));
|
||||
#endif
|
||||
|
||||
return {out, amax};
|
||||
}
|
||||
|
||||
Tensor
|
||||
std::tuple<Tensor, Tensor>
|
||||
_scaled_mm_cuda(const Tensor& mat_a, const Tensor& mat_b,
|
||||
const Tensor& scale_a,
|
||||
const Tensor& scale_b,
|
||||
const std::optional<at::Tensor>& bias,
|
||||
const std::optional<at::Tensor>& scale_result,
|
||||
std::optional<c10::ScalarType> out_dtype,
|
||||
const std::optional<at::Tensor>& scale_a,
|
||||
const std::optional<at::Tensor>& scale_b,
|
||||
const std::optional<at::Tensor>& scale_result,
|
||||
bool use_fast_accum) {
|
||||
const auto out_dtype_ = out_dtype.value_or(mat_a.scalar_type());
|
||||
Tensor out = at::empty({0}, mat_a.options().dtype(out_dtype_));
|
||||
return _scaled_mm_out_cuda(mat_a, mat_b, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum, out);
|
||||
Tensor amax = at::empty({0}, mat_a.options().dtype(ScalarType::Float));
|
||||
return _scaled_mm_out_cuda(mat_a, mat_b, bias, out_dtype, scale_a, scale_b, scale_result, use_fast_accum, out, amax);
|
||||
}
|
||||
|
||||
} // namespace at::native
|
||||
|
||||
@ -191,47 +191,11 @@ std::vector<Tensor> foreach_scalar_pow_list_kernel_cuda(
|
||||
// In the case of division, integer inputs will result in float.
// Currently multi tensor apply can only return result of the same type as
// input.
//
// Implement via multiply with reciprocal as it's faster and makes it match
// the behavior of regular Tensor div by scalar. Loses one bit of
// precision.
|
||||
Scalar scalar_reciprocal(const Scalar& scalar) {
|
||||
if (scalar.isFloatingPoint()) {
|
||||
return Scalar(1. / scalar.toDouble());
|
||||
} else if (scalar.isIntegral(/*includeBool*/ true)) {
|
||||
return Scalar(1. / static_cast<double>(scalar.toLong()));
|
||||
} else if (scalar.isComplex()) {
|
||||
return Scalar(1. / scalar.toComplexDouble());
|
||||
}
|
||||
TORCH_INTERNAL_ASSERT(
|
||||
false, "divison with ", scalar.type(), " not supported");
|
||||
}
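// Illustrative aside (hedged sketch, not part of the diff above): dividing a
// list of values by a scalar via one reciprocal and many multiplies. This is
// cheaper than dividing every element, but can differ in the last bit because
// 1/s is itself rounded before the multiply.
#include <cstdio>

int main() {
  const double s = 3.0;
  const double recip = 1.0 / s;  // computed once, already rounded
  const double xs[3] = {1.0, 2.0, 10.0};
  for (double x : xs) {
    std::printf("x/s = %.17g   x*(1/s) = %.17g\n", x / s, x * recip);
  }
  return 0;
}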
|
||||
|
||||
void foreach_tensor_div_scalar_kernel_cuda_(
|
||||
TensorList tensors,
|
||||
const Scalar& scalar) {
|
||||
check_foreach_api_restrictions(tensors);
|
||||
if (!can_use_fast_route(tensors, scalar, true)) {
|
||||
return at::native::foreach_tensor_mul_scalar_kernel_slow_(
|
||||
tensors, scalar_reciprocal(scalar));
|
||||
}
|
||||
|
||||
all_types_complex_bool_half_bfloat16_<std::multiplies>(
|
||||
tensors, scalar_reciprocal(scalar));
|
||||
}
|
||||
|
||||
std::vector<Tensor> foreach_tensor_div_scalar_kernel_cuda(
|
||||
TensorList tensors,
|
||||
const Scalar& scalar) {
|
||||
check_foreach_api_restrictions(tensors);
|
||||
if (!can_use_fast_route(tensors, scalar, true)) {
|
||||
return at::native::foreach_tensor_mul_scalar_kernel_slow(
|
||||
tensors, scalar_reciprocal(scalar));
|
||||
}
|
||||
|
||||
return all_types_complex_bool_half_bfloat16<std::multiplies>(
|
||||
tensors, scalar_reciprocal(scalar));
|
||||
}
|
||||
FOREACH_BINARY_OP_SCALAR(
|
||||
all_types_complex_bool_half_bfloat16,
|
||||
div,
|
||||
std::divides,
|
||||
/*div_op*/ true);
|
||||
|
||||
// In the case of subtraction, we don't allow scalar to be boolean following the
|
||||
// torch.sub logic
|
||||
|
||||
@ -807,7 +807,6 @@ struct ReduceOp {
|
||||
bool is_last_block_done = mark_block_finished();
|
||||
|
||||
if (is_last_block_done) {
|
||||
__threadfence(); // complete the acquire pattern after atomic
|
||||
value = ident;
|
||||
if (config.should_block_x_reduce()) {
|
||||
index_t input_offset = threadIdx.x + threadIdx.y * blockDim.x;
|
||||
|
||||
@ -1,536 +0,0 @@
|
||||
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
|
||||
#include <ATen/Dispatch.h>
|
||||
#include <ATen/core/Tensor.h>
|
||||
#include <ATen/cuda/CUDAContext.h>
|
||||
#include <ATen/cuda/nvrtc_stub/ATenNVRTC.h>
|
||||
|
||||
// Determine if the architecture supports rowwise scaled mm
|
||||
// Currently failing on windows with: https://github.com/NVIDIA/cutlass/issues/1571
|
||||
#if !defined(USE_ROCM) && !defined(_WIN32) && defined(CUDA_VERSION) && CUDA_VERSION >= 12000
|
||||
|
||||
#define BUILD_ROWWISE_FP8_KERNEL
|
||||
#endif
|
||||
|
||||
#if defined(BUILD_ROWWISE_FP8_KERNEL)
|
||||
|
||||
// We are going to override the cuTensorMapEncodeTiled driver api with our lazy loader
|
||||
static CUresult CUDAAPI nvrtc_cuTensorMapEncodeTiled(
|
||||
CUtensorMap* tensorMap,
|
||||
CUtensorMapDataType tensorDataType,
|
||||
cuuint32_t tensorRank,
|
||||
void* globalAddress,
|
||||
const cuuint64_t* globalDim,
|
||||
const cuuint64_t* globalStrides,
|
||||
const cuuint32_t* boxDim,
|
||||
const cuuint32_t* elementStrides,
|
||||
CUtensorMapInterleave interleave,
|
||||
CUtensorMapSwizzle swizzle,
|
||||
CUtensorMapL2promotion l2Promotion,
|
||||
CUtensorMapFloatOOBfill oobFill) {
|
||||
return at::globalContext().getNVRTC().cuTensorMapEncodeTiled(
|
||||
tensorMap,
|
||||
tensorDataType,
|
||||
tensorRank,
|
||||
globalAddress,
|
||||
globalDim,
|
||||
globalStrides,
|
||||
boxDim,
|
||||
elementStrides,
|
||||
interleave,
|
||||
swizzle,
|
||||
l2Promotion,
|
||||
oobFill);
|
||||
}
|
||||
|
||||
|
||||
#include <cutlass/core_io.h>
|
||||
#include <cutlass/cutlass.h>
|
||||
#include <cutlass/gemm/device/gemm.h>
|
||||
#include <cutlass/half.h>
|
||||
#include <cutlass/numeric_types.h>
|
||||
#include <cutlass/trace.h>
|
||||
#include <cutlass/util/host_tensor.h>
|
||||
|
||||
// Rename the global function symbol
#define cuTensorMapEncodeTiled nvrtc_cuTensorMapEncodeTiled
#include <cute/tensor.hpp>
#undef cuTensorMapEncodeTiled
// Set everything back to normal
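// Illustrative aside (hedged sketch, not part of the diff above): the rename
// trick used here in miniature -- redirect a symbol that a header would call
// to our own wrapper by #define-ing it before the include and #undef-ing it
// right after, leaving the rest of the translation unit untouched.
#include <cstdio>

static int lazy_loaded_call(int x) {
  std::printf("wrapper handled %d\n", x);
  return x + 1;
}

// "third_party_header.hpp" is hypothetical; inside it, calls to real_call(x)
// would compile against lazy_loaded_call(x) while the #define is live.
#define real_call lazy_loaded_call
// #include "third_party_header.hpp"
#undef real_call

int main() {
  return lazy_loaded_call(41) == 42 ? 0 : 1;
}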
|
||||
|
||||
#include <cutlass/gemm/collective/collective_builder.hpp>
|
||||
#include <cutlass/gemm/device/gemm_universal_adapter.h>
|
||||
#include <cutlass/epilogue/collective/collective_builder.hpp>
|
||||
|
||||
#include <cute/atom/mma_atom.hpp>
|
||||
#include <cutlass/gemm/dispatch_policy.hpp>
|
||||
#include <cutlass/gemm/kernel/gemm_universal.hpp>
|
||||
#include <cutlass/util/packed_stride.hpp>
|
||||
|
||||
|
||||
namespace {
|
||||
// Cutlass rowwise kernel
|
||||
template <
|
||||
int TB_M,
|
||||
int TB_N,
|
||||
int TB_K,
|
||||
int TBS_M,
|
||||
int TBS_N,
|
||||
int TBS_K,
|
||||
bool PONG,
|
||||
bool FAST_ACCUM,
|
||||
bool USE_BIAS,
|
||||
typename INPUT_DTYPE,
|
||||
typename BIAS_DTYPE>
|
||||
void f8f8bf16_rowwise_impl(
|
||||
at::Tensor XQ, // FP8
|
||||
at::Tensor WQ, // FP8
|
||||
at::Tensor x_scale,
|
||||
at::Tensor w_scale,
|
||||
c10::optional<at::Tensor> bias,
|
||||
at::Tensor out) {
|
||||
int M = XQ.size(0);
|
||||
int N = WQ.size(1);
|
||||
int K = XQ.size(1);
|
||||
|
||||
TORCH_CHECK(XQ.is_cuda() && XQ.is_contiguous());
|
||||
TORCH_CHECK(
|
||||
WQ.is_cuda() && WQ.ndimension() == 2 && WQ.stride(1) == WQ.size(0) &&
|
||||
WQ.stride(0) == 1);
|
||||
|
||||
// auto Y = at::empty({M, N}, XQ.options().dtype(at::kBFloat16));
|
||||
|
||||
using ElementInputA = INPUT_DTYPE;
|
||||
using LayoutInputA = cutlass::layout::RowMajor;
|
||||
constexpr int AlignmentInputA = 16 / sizeof(ElementInputA);
|
||||
|
||||
using ElementInputB = cutlass::float_e4m3_t;
|
||||
using LayoutInputB = cutlass::layout::ColumnMajor;
|
||||
constexpr int AlignmentInputB = 16 / sizeof(ElementInputB);
|
||||
|
||||
using ElementBias = BIAS_DTYPE;
|
||||
|
||||
using ElementOutput = cutlass::bfloat16_t;
|
||||
using LayoutOutput = cutlass::layout::RowMajor;
|
||||
constexpr int AlignmentOutput = 16 / sizeof(ElementOutput);
|
||||
|
||||
using ElementAccumulator = float;
|
||||
using ElementComputeEpilogue = float;
|
||||
using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that
|
||||
// supports the intended feature
|
||||
using OperatorClass = cutlass::arch::OpClassTensorOp;
|
||||
using TileShape = cute::Shape<
|
||||
cute::Int<TB_M>,
|
||||
cute::Int<TB_N>,
|
||||
cute::Int<TB_K>>; // Threadblock-level
|
||||
// tile size
|
||||
using ClusterShape = cute::Shape<
|
||||
cute::Int<TBS_M>,
|
||||
cute::Int<TBS_N>,
|
||||
cute::Int<TBS_K>>; // Shape of the
|
||||
// threadblocks in a
|
||||
// cluster
|
||||
using KernelSchedule = cutlass::gemm::collective::
|
||||
KernelScheduleAuto; // Kernel to launch based on the default setting in
|
||||
// the Collective Builder
|
||||
|
||||
// Implement rowwise scaling epilogue.
|
||||
using XScale = cutlass::epilogue::fusion::Sm90ColBroadcast<
|
||||
0,
|
||||
TileShape,
|
||||
ElementComputeEpilogue,
|
||||
cute::Stride<cute::Int<1>, cute::Int<0>, cute::Int<0>>>;
|
||||
|
||||
using WScale = cutlass::epilogue::fusion::Sm90RowBroadcast<
|
||||
PONG ? 2 : 1,
|
||||
TileShape,
|
||||
ElementComputeEpilogue,
|
||||
cute::Stride<cute::Int<0>, cute::Int<1>, cute::Int<0>>>;
|
||||
|
||||
using Bias = cutlass::epilogue::fusion::Sm90RowBroadcast<
|
||||
PONG ? 2 : 1,
|
||||
TileShape,
|
||||
ElementBias,
|
||||
cute::Stride<cute::Int<0>, cute::Int<1>, cute::Int<0>>>;
|
||||
|
||||
using Accum = cutlass::epilogue::fusion::Sm90AccFetch;
|
||||
|
||||
using Compute0 = cutlass::epilogue::fusion::Sm90Compute<
|
||||
cutlass::multiplies,
|
||||
ElementComputeEpilogue, // First stage output type.
|
||||
ElementComputeEpilogue, // First stage input types.
|
||||
cutlass::FloatRoundStyle::round_to_nearest>;
|
||||
|
||||
using EVTCompute0 =
|
||||
cutlass::epilogue::fusion::Sm90EVT<Compute0, WScale, Accum>;
|
||||
|
||||
using Compute1 = cutlass::epilogue::fusion::Sm90Compute<
|
||||
cutlass::multiplies,
|
||||
cute::conditional_t< // Second stage output type.
|
||||
USE_BIAS,
|
||||
ElementBias,
|
||||
ElementOutput>,
|
||||
ElementComputeEpilogue, // Second stage input types.
|
||||
cutlass::FloatRoundStyle::round_to_nearest>;
|
||||
|
||||
using EVTCompute1 =
|
||||
cutlass::epilogue::fusion::Sm90EVT<Compute1, XScale, EVTCompute0>;
|
||||
|
||||
using ComputeBias = cutlass::epilogue::fusion::Sm90Compute<
|
||||
cutlass::plus,
|
||||
ElementOutput, // Final (optional) stage output type.
|
||||
ElementBias, // Final stage input types.
|
||||
cutlass::FloatRoundStyle::round_to_nearest>;
|
||||
|
||||
using EVTComputeBias =
|
||||
cutlass::epilogue::fusion::Sm90EVT<ComputeBias, Bias, EVTCompute1>;
|
||||
|
||||
using EpilogueEVT =
|
||||
cute::conditional_t<USE_BIAS, EVTComputeBias, EVTCompute1>;
|
||||
|
||||
using CollectiveEpilogue =
|
||||
typename cutlass::epilogue::collective::CollectiveBuilder<
|
||||
cutlass::arch::Sm90,
|
||||
cutlass::arch::OpClassTensorOp,
|
||||
TileShape,
|
||||
ClusterShape,
|
||||
cutlass::epilogue::collective::EpilogueTileAuto,
|
||||
ElementAccumulator,
|
||||
ElementComputeEpilogue,
|
||||
ElementOutput,
|
||||
LayoutOutput,
|
||||
AlignmentOutput,
|
||||
ElementOutput,
|
||||
LayoutOutput,
|
||||
AlignmentOutput,
|
||||
cutlass::epilogue::TmaWarpSpecialized,
|
||||
EpilogueEVT>::CollectiveOp;
|
||||
|
||||
using DefaultSchedule = cutlass::gemm::KernelTmaWarpSpecialized;
|
||||
using PongSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong;
|
||||
using FastDefaultSchedule =
|
||||
cutlass::gemm::KernelTmaWarpSpecializedFP8FastAccum;
|
||||
using FastPongSchedule =
|
||||
cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum;
|
||||
using SlowAccum = cute::conditional_t<PONG, PongSchedule, DefaultSchedule>;
|
||||
using FastAccum =
|
||||
cute::conditional_t<PONG, FastPongSchedule, FastDefaultSchedule>;
|
||||
using MainLoopSchedule =
|
||||
cute::conditional_t<FAST_ACCUM, FastAccum, SlowAccum>;
|
||||
|
||||
using CollectiveMainloop =
|
||||
typename cutlass::gemm::collective::CollectiveBuilder<
|
||||
ArchTag,
|
||||
OperatorClass,
|
||||
ElementInputA,
|
||||
LayoutInputA,
|
||||
AlignmentInputA,
|
||||
ElementInputB,
|
||||
LayoutInputB,
|
||||
AlignmentInputB,
|
||||
ElementAccumulator,
|
||||
TileShape,
|
||||
ClusterShape,
|
||||
cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(
|
||||
sizeof(typename CollectiveEpilogue::SharedStorage))>,
|
||||
MainLoopSchedule>::CollectiveOp;
|
||||
|
||||
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
|
||||
cute::Shape<int, int, int>,
|
||||
CollectiveMainloop,
|
||||
CollectiveEpilogue>;
|
||||
|
||||
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
|
||||
|
||||
using StrideInputA = typename Gemm::GemmKernel::StrideA;
|
||||
using StrideInputB = typename Gemm::GemmKernel::StrideB;
|
||||
using StrideOutput = typename Gemm::GemmKernel::StrideC;
|
||||
|
||||
StrideInputA stride_a = cutlass::make_cute_packed_stride(
|
||||
StrideInputA{}, cute::make_shape(M, K, 1));
|
||||
StrideInputB stride_b = cutlass::make_cute_packed_stride(
|
||||
StrideInputB{}, cute::make_shape(N, K, 1));
|
||||
StrideOutput stride_output = cutlass::make_cute_packed_stride(
|
||||
StrideOutput{}, cute::make_shape(M, N, 1));
|
||||
|
||||
typename Gemm::Arguments arguments{
|
||||
cutlass::gemm::GemmUniversalMode::kGemm,
|
||||
{M, N, K},
|
||||
{reinterpret_cast<ElementInputA*>(XQ.data_ptr()),
|
||||
stride_a,
|
||||
reinterpret_cast<ElementInputB*>(WQ.data_ptr()),
|
||||
stride_b},
|
||||
{{}, // Epilogue thread we populate below.
|
||||
(ElementOutput*)out.data_ptr<at::BFloat16>(),
|
||||
stride_output,
|
||||
(ElementOutput*)out.data_ptr<at::BFloat16>(),
|
||||
stride_output}};
|
||||
|
||||
if constexpr (USE_BIAS) {
|
||||
arguments.epilogue.thread = {
|
||||
{reinterpret_cast<ElementBias*>(bias.value().data_ptr())}, // bias
|
||||
// compute_1
|
||||
{
|
||||
{reinterpret_cast<ElementComputeEpilogue*>(
|
||||
x_scale.data_ptr())}, // x_scale
|
||||
// compute_0
|
||||
{
|
||||
{reinterpret_cast<ElementComputeEpilogue*>(
|
||||
w_scale.data_ptr())}, // w_scale
|
||||
{}, // Accumulator
|
||||
{} // Multiplies
|
||||
},
|
||||
{}, // Multiplies
|
||||
},
|
||||
{}, // Plus
|
||||
};
|
||||
} else {
|
||||
arguments.epilogue.thread = {
|
||||
{reinterpret_cast<ElementComputeEpilogue*>(
|
||||
x_scale.data_ptr())}, // x_scale
|
||||
// compute_0
|
||||
{
|
||||
{reinterpret_cast<ElementComputeEpilogue*>(
|
||||
w_scale.data_ptr())}, // w_scale
|
||||
{}, // Accumulator
|
||||
{} // Multiplies
|
||||
},
|
||||
{}, // Multiplies
|
||||
};
|
||||
}
|
||||
|
||||
Gemm gemm;
|
||||
|
||||
// Using the arguments, query for extra workspace required for matrix
|
||||
// multiplication computation
|
||||
size_t workspace_size = Gemm::get_workspace_size(arguments);
|
||||
|
||||
// Allocate workspace memory
|
||||
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
|
||||
|
||||
// Check the problem size is supported or not
|
||||
cutlass::Status status = gemm.can_implement(arguments);
|
||||
if (status != cutlass::Status::kSuccess) {
|
||||
throw std::runtime_error("cutlass cannot implement");
|
||||
}
|
||||
|
||||
// Initialize CUTLASS kernel with arguments and workspace pointer
|
||||
status = gemm.initialize(arguments, workspace.get());
|
||||
if (status != cutlass::Status::kSuccess) {
|
||||
throw std::runtime_error("cutlass cannot initialize");
|
||||
}
|
||||
|
||||
status = gemm(at::cuda::getCurrentCUDAStream());
|
||||
if (status != cutlass::Status::kSuccess) {
|
||||
throw std::runtime_error(
|
||||
std::string("cutlass cannot run") +
|
||||
cutlass::cutlassGetStatusString(status));
|
||||
}
|
||||
C10_CUDA_KERNEL_LAUNCH_CHECK();
|
||||
}
|
||||
|
||||
// FP8 Rowwise Cutlass kernel dispatch.
|
||||
enum class KernelMode { Small, Large, Default };
|
||||
|
||||
KernelMode get_kernel_mode(at::Tensor XQ, at::Tensor WQ) {
|
||||
auto M = XQ.size(0);
|
||||
auto K = XQ.size(1);
|
||||
auto N = WQ.size(0);
|
||||
// Use a large kernel if at least two of the problem dimensions are large.
|
||||
bool use_large_kernel =
|
||||
((M >= 2048 && K >= 2048) || (M >= 2048 && N >= 2048) ||
|
||||
(K >= 2048 && N >= 2048));
|
||||
if (M <= 128 || N <= 128) {
|
||||
return KernelMode::Small;
|
||||
} else if (use_large_kernel) {
|
||||
return KernelMode::Large;
|
||||
} else {
|
||||
return KernelMode::Default;
|
||||
}
|
||||
}
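// Illustrative aside (hedged sketch, not part of the diff above): how the
// shape heuristic above classifies a few example problem sizes.
#include <cstdio>

enum class KernelMode { Small, Large, Default };

KernelMode pick_mode(long M, long K, long N) {
  const bool use_large = (M >= 2048 && K >= 2048) ||
                         (M >= 2048 && N >= 2048) ||
                         (K >= 2048 && N >= 2048);
  if (M <= 128 || N <= 128) return KernelMode::Small;
  if (use_large) return KernelMode::Large;
  return KernelMode::Default;
}

int main() {
  std::printf("%d %d %d\n",
              static_cast<int>(pick_mode(64, 4096, 4096)),    // Small: skinny M
              static_cast<int>(pick_mode(4096, 4096, 4096)),  // Large: several big dims
              static_cast<int>(pick_mode(512, 512, 512)));    // Default
  return 0;
}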
|
||||
|
||||
template <typename InputDType, bool FastAccum, bool UseBias, typename BiasDType>
|
||||
void dispatch_fp8_rowwise_kernel(
|
||||
at::Tensor XQ,
|
||||
at::Tensor WQ,
|
||||
at::Tensor x_scale,
|
||||
at::Tensor w_scale,
|
||||
c10::optional<at::Tensor> bias,
|
||||
at::Tensor out) {
|
||||
KernelMode kernel = get_kernel_mode(XQ, WQ);
|
||||
if (kernel == KernelMode::Small) {
|
||||
return f8f8bf16_rowwise_impl<
|
||||
64,
|
||||
128,
|
||||
128,
|
||||
2,
|
||||
1,
|
||||
1,
|
||||
false,
|
||||
FastAccum,
|
||||
UseBias,
|
||||
InputDType,
|
||||
BiasDType>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
} else if (kernel == KernelMode::Large) {
|
||||
return f8f8bf16_rowwise_impl<
|
||||
128,
|
||||
128,
|
||||
128,
|
||||
2,
|
||||
1,
|
||||
1,
|
||||
true,
|
||||
FastAccum,
|
||||
UseBias,
|
||||
InputDType,
|
||||
BiasDType>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
} else {
|
||||
return f8f8bf16_rowwise_impl<
|
||||
128,
|
||||
128,
|
||||
128,
|
||||
1,
|
||||
2,
|
||||
1,
|
||||
false,
|
||||
FastAccum,
|
||||
UseBias,
|
||||
InputDType,
|
||||
BiasDType>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
#endif // !defined(USE_ROCM)
|
||||
|
||||
namespace at::cuda::detail {
|
||||
void f8f8bf16_rowwise(
|
||||
at::Tensor XQ, // FP8
|
||||
at::Tensor WQ, // FP8
|
||||
at::Tensor x_scale, // FP32
|
||||
at::Tensor w_scale, // FP32
|
||||
c10::optional<at::Tensor> bias, // BF16
|
||||
bool use_fast_accum,
|
||||
at::Tensor& out) {
|
||||
#if defined(BUILD_ROWWISE_FP8_KERNEL)
|
||||
// Check datatypes.
|
||||
TORCH_CHECK(
|
||||
x_scale.dtype() == at::kFloat && w_scale.dtype() == at::kFloat,
|
||||
"Scale tensors must be float32.");
|
||||
if (bias.has_value()) {
|
||||
TORCH_CHECK(
|
||||
bias.value().dtype() == at::kFloat ||
|
||||
bias.value().dtype() == at::kBFloat16,
|
||||
"Bias type must be bfloat16 or float32 if provided.");
|
||||
}
|
||||
// Extract problem size.
|
||||
int M = XQ.size(0);
|
||||
int N = WQ.size(1);
|
||||
int K = XQ.size(1);
|
||||
|
||||
bool use_bias = bias.has_value();
|
||||
bool bf16_bias = use_bias && bias.value().dtype() == at::kBFloat16;
|
||||
|
||||
// Templatize based on input dtype.
|
||||
bool use_e5m2 = XQ.dtype() == at::kFloat8_e5m2;
|
||||
TORCH_CHECK(WQ.dtype() == at::kFloat8_e4m3fn, "For row-wise scaling the second input is required to be a float8_e4m3fn dtype.");
|
||||
|
||||
if (use_bias) {
|
||||
if (bf16_bias) {
|
||||
if (use_fast_accum) {
|
||||
if (use_e5m2) {
|
||||
return dispatch_fp8_rowwise_kernel<
|
||||
cutlass::float_e5m2_t,
|
||||
true,
|
||||
true,
|
||||
cutlass::bfloat16_t>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
} else {
|
||||
return dispatch_fp8_rowwise_kernel<
|
||||
cutlass::float_e4m3_t,
|
||||
true,
|
||||
true,
|
||||
cutlass::bfloat16_t>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
}
|
||||
} else {
|
||||
if (use_e5m2) {
|
||||
return dispatch_fp8_rowwise_kernel<
|
||||
cutlass::float_e5m2_t,
|
||||
false,
|
||||
true,
|
||||
cutlass::bfloat16_t>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
} else {
|
||||
return dispatch_fp8_rowwise_kernel<
|
||||
cutlass::float_e4m3_t,
|
||||
false,
|
||||
true,
|
||||
cutlass::bfloat16_t>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (use_fast_accum) {
|
||||
if (use_e5m2) {
|
||||
return dispatch_fp8_rowwise_kernel<
|
||||
cutlass::float_e5m2_t,
|
||||
true,
|
||||
true,
|
||||
float>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
} else {
|
||||
return dispatch_fp8_rowwise_kernel<
|
||||
cutlass::float_e4m3_t,
|
||||
true,
|
||||
true,
|
||||
float>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
}
|
||||
} else {
|
||||
if (use_e5m2) {
|
||||
return dispatch_fp8_rowwise_kernel<
|
||||
cutlass::float_e5m2_t,
|
||||
false,
|
||||
true,
|
||||
float>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
} else {
|
||||
return dispatch_fp8_rowwise_kernel<
|
||||
cutlass::float_e4m3_t,
|
||||
false,
|
||||
true,
|
||||
float>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (use_fast_accum) {
|
||||
if (use_e5m2) {
|
||||
return dispatch_fp8_rowwise_kernel<
|
||||
cutlass::float_e5m2_t,
|
||||
true,
|
||||
false,
|
||||
float>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
} else {
|
||||
return dispatch_fp8_rowwise_kernel<
|
||||
cutlass::float_e4m3_t,
|
||||
true,
|
||||
false,
|
||||
float>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
}
|
||||
} else {
|
||||
if (use_e5m2) {
|
||||
return dispatch_fp8_rowwise_kernel<
|
||||
cutlass::float_e5m2_t,
|
||||
false,
|
||||
false,
|
||||
float>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
} else {
|
||||
return dispatch_fp8_rowwise_kernel<
|
||||
cutlass::float_e4m3_t,
|
||||
false,
|
||||
false,
|
||||
float>(XQ, WQ, x_scale, w_scale, bias, out);
|
||||
}
|
||||
}
|
||||
}
|
||||
#else // BUILD_ROWWISE_FP8_KERNEL
|
||||
TORCH_CHECK(false, "Rowwise scaling is not currenlty supported on your device");
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace at::cuda::detail
|
||||
@ -1,15 +0,0 @@
|
||||
#pragma once
|
||||
#include <ATen/core/TensorBase.h>
|
||||
#include <c10/util/Optional.h>
|
||||
|
||||
|
||||
namespace at::cuda::detail {
|
||||
TORCH_API void f8f8bf16_rowwise(
|
||||
at::Tensor XQ, // FP8
|
||||
at::Tensor WQ, // FP8
|
||||
at::Tensor x_scale, // FP32
|
||||
at::Tensor w_scale, // FP32
|
||||
c10::optional<at::Tensor> bias, // BF16
|
||||
bool use_fast_accum,
|
||||
at::Tensor& out);
|
||||
} // at::cuda::detail
|
||||
@ -863,8 +863,8 @@ Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_t
|
||||
auto max_elements_per_smem = (at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock -
|
||||
smem_reduction_sz) / sizeof(scalar_t);
|
||||
|
||||
bool can_use_smem = (size_t) dim_size < max_elements_per_smem;
|
||||
can_use_smem &= !(reinterpret_cast<uintptr_t>(input_ptr) % ALIGN_BYTES);
|
||||
bool can_use_smem = dim_size < max_elements_per_smem;
|
||||
can_use_smem &= !(reinterpret_cast<const uintptr_t>(input_ptr) % ALIGN_BYTES);
|
||||
can_use_smem &= (!(reinterpret_cast<uintptr_t>(output_ptr) % ALIGN_BYTES));
|
||||
can_use_smem &= !(dim_size % ILP);
|
||||
|
||||
@ -899,8 +899,8 @@ Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_t
|
||||
auto max_elements_per_smem = (at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock -
|
||||
smem_reduction_sz) / sizeof(scalar_t);
|
||||
|
||||
bool can_use_smem = (size_t) dim_size < max_elements_per_smem;
|
||||
can_use_smem &= !(reinterpret_cast<uintptr_t>(input_ptr) % ALIGN_BYTES);
|
||||
bool can_use_smem = dim_size < max_elements_per_smem;
|
||||
can_use_smem &= !(reinterpret_cast<const uintptr_t>(input_ptr) % ALIGN_BYTES);
|
||||
can_use_smem &= (!(reinterpret_cast<uintptr_t>(output_ptr) % ALIGN_BYTES));
|
||||
can_use_smem &= !(dim_size % ILP);
|
||||
|
||||
|
||||
@ -595,7 +595,6 @@ struct ReduceJitOp {
|
||||
bool is_last_block_done = mark_block_finished();
|
||||
|
||||
if (is_last_block_done) {
|
||||
__threadfence(); //complete acquire pattern
|
||||
value = ident;
|
||||
if (config.should_block_x_reduce()) {
|
||||
uint32_t input_offset = threadIdx.x + threadIdx.y * blockDim.x;
|
||||
|
||||
@ -14,7 +14,6 @@ using namespace at::cuda::detail;
|
||||
|
||||
// Kernel for fast unfold+copy on volumes
|
||||
template <typename T>
|
||||
C10_LAUNCH_BOUNDS_1(1024)
|
||||
__global__ void vol2col_kernel(
|
||||
const int64_t n,
|
||||
const T* data_vol,