Mirror of https://github.com/pytorch/pytorch.git, synced 2025-11-06 17:24:59 +08:00

Compare commits (170 commits): ciflow/bin ... cpp-docs-d
| SHA1 | Author | Date | |
|---|---|---|---|
| 2913cdf29d | |||
| 0661a232a5 | |||
| 5db844dafa | |||
| 73efad99d7 | |||
| df1268c311 | |||
| 84f9f1541d | |||
| 27c0c126bf | |||
| 670873155a | |||
| 923737c510 | |||
| 13d5b14a73 | |||
| a35a42b21c | |||
| 15956bc1e8 | |||
| b319ea1111 | |||
| ce4c68a5f6 | |||
| c6da4a59a3 | |||
| 53f75cd5ba | |||
| 527b1109a8 | |||
| 3144713325 | |||
| eefa16342c | |||
| d02f68f484 | |||
| 68eb55c4b2 | |||
| 8d4b8ab430 | |||
| afd50bdd29 | |||
| 56dfd4c74b | |||
| 24db5c4451 | |||
| cc8bfd1206 | |||
| c45b156605 | |||
| 8fff7e36b4 | |||
| 82fa2aa269 | |||
| 09e0285608 | |||
| d980d8dc79 | |||
| c7d00de115 | |||
| d3cf90ada5 | |||
| 0e1a88904f | |||
| 3232caa078 | |||
| a6c6acea9d | |||
| 55be1cc739 | |||
| 344cebda52 | |||
| ba72c6b981 | |||
| 888efcc453 | |||
| 24aa9a2ef7 | |||
| f70faf2b9a | |||
| 167e64ba1a | |||
| 875b18d53c | |||
| eec3749c44 | |||
| 40133fe966 | |||
| f288433d3e | |||
| 864633fca0 | |||
| c21868b435 | |||
| a0a8eca01a | |||
| 0958f307d9 | |||
| 7551507c41 | |||
| f92834d477 | |||
| e1fc01bef8 | |||
| 22a745737a | |||
| ee708ea96c | |||
| 64819e3701 | |||
| 79ff2c66c8 | |||
| 665a411351 | |||
| 5c89bdb461 | |||
| 7b64ad906c | |||
| d944279def | |||
| 5048e4701d | |||
| 616314cfd5 | |||
| 2b7e4c3ef2 | |||
| 6c98657239 | |||
| 86b2d82e84 | |||
| eea8ff2d34 | |||
| 11f73d78c8 | |||
| 7d1b976146 | |||
| 27cfdd9e77 | |||
| 01d8d8584b | |||
| b8855e7b0b | |||
| 6725ee89c8 | |||
| 3a38ec78e1 | |||
| 77b9399d83 | |||
| 83cd626365 | |||
| 5125872aeb | |||
| c10975d2e6 | |||
| 68e31e2f81 | |||
| ee1bc3f0d5 | |||
| 612ead1619 | |||
| 3af1f7bbf4 | |||
| 71a2e93547 | |||
| c76199980d | |||
| e3bd7bd1f4 | |||
| aa4a8c9b92 | |||
| fa0fd6be13 | |||
| 2f3f88f445 | |||
| d67d807270 | |||
| bcad4f2e68 | |||
| 5b17ef30d0 | |||
| 7b2992685b | |||
| f3fa560dec | |||
| 984b096d10 | |||
| 104b868618 | |||
| 94f2657c4b | |||
| 3f6538febd | |||
| f33abae695 | |||
| 73da7a40b6 | |||
| 335b5c7d4b | |||
| 76bb27e248 | |||
| a2da69385a | |||
| d177900723 | |||
| 61bcc8d75a | |||
| 1656b253c5 | |||
| 5d6230779d | |||
| a4077b568f | |||
| ae038f871b | |||
| defac66e39 | |||
| 061fa73c97 | |||
| 9501405de6 | |||
| e0791fc11d | |||
| e1d011d6eb | |||
| 3f5401020b | |||
| 5a3930abbc | |||
| a5f00077fc | |||
| 69fb3ebb5d | |||
| 1c4ced2eaf | |||
| 392acee68a | |||
| fee1ac927d | |||
| 4a7fefd7c7 | |||
| 3b4315940d | |||
| 3eddf04922 | |||
| 7c203b8420 | |||
| 3ca216ae17 | |||
| 9c22bbb2dc | |||
| 6268883f9c | |||
| 16212f0d6b | |||
| c8adc08b3b | |||
| 23b57a445c | |||
| 6c7cad6972 | |||
| bb54296258 | |||
| 5e05a0ae99 | |||
| 298666631b | |||
| e471800dce | |||
| 18f4259626 | |||
| d962bed157 | |||
| 76780b1a3d | |||
| cee03634da | |||
| bc03d7c974 | |||
| f013e804c8 | |||
| 0674e0a0f1 | |||
| b7d348a907 | |||
| 9f9dbe0a9a | |||
| a19e92d433 | |||
| c3dc0c7089 | |||
| 04d6a6f339 | |||
| 0573747b6a | |||
| a663eb9c80 | |||
| 764c54ecae | |||
| 0d81bb7f9c | |||
| 82fafb3304 | |||
| 401c2f9657 | |||
| 13549e0e10 | |||
| 82d86bacf3 | |||
| 3b5d38a3bc | |||
| 84776e1374 | |||
| b3861ac8e7 | |||
| 4cc64d6234 | |||
| 1aef88c72d | |||
| f0745ddb11 | |||
| 4316df857c | |||
| 9d6597b1e9 | |||
| e8fadba28c | |||
| 60333de85d | |||
| 3dc92d69ed | |||
| f91899ca6c | |||
| e2dc32f4ba | |||
| 83cc38d9c1 |
@@ -13,3 +13,4 @@ exclude:
 - "**/benchmarks/**"
 - "**/test_*.py"
 - "**/*_test.py"
+- "tools/**"
@@ -149,7 +149,7 @@ FROM cpu_final as rocm_final
 ARG ROCM_VERSION=6.0
 ARG PYTORCH_ROCM_ARCH
 ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
-ARG DEVTOOLSET_VERSION=11
+ARG DEVTOOLSET_VERSION=13
 ENV LDFLAGS="-Wl,-rpath=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64 -Wl,-rpath=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib"
 # Somewhere in ROCm stack, we still use non-existing /opt/rocm/hip path,
 # below workaround helps avoid error
@@ -1,15 +1,11 @@
-sphinx==5.3.0
+sphinx==7.2.6
 #Description: This is used to generate PyTorch docs
-#Pinned versions: 5.3.0
+#Pinned versions: 7.2.6
 
-standard-imghdr==3.13.0; python_version >= "3.13"
-#Description: This is needed by Sphinx, so it needs to be added here.
-# The reasons are as follows:
-# 1) This module has been removed from the Python standard library since Python 3.13(https://peps.python.org/pep-0594/#imghdr);
-# 2) The current version of Sphinx (5.3.0) is not compatible with Python 3.13.
-# Once Sphinx is upgraded to a version compatible with Python 3.13 or later, we can remove this dependency.
+pytorch_sphinx_theme2==0.2.0
+#Description: This is needed to generate PyTorch docs
+#Pinned versions: 0.2.0
 
--e git+https://github.com/pytorch/pytorch_sphinx_theme.git@71e55749be14ceb56e7f8211a9fb649866b87ad4#egg=pytorch_sphinx_theme2
 # TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering
 # but it doesn't seem to work and hangs around idly. The initial thought that it is probably
 # something related to Docker setup. We can investigate this later.
@@ -36,17 +32,17 @@ tensorboard==2.18.0 ; python_version >= "3.13"
 #Description: This is used to generate PyTorch docs
 #Pinned versions: 2.13.0
 
-breathe==4.34.0
+breathe==4.36.0
 #Description: This is used to generate PyTorch C++ docs
-#Pinned versions: 4.34.0
+#Pinned versions: 4.36.0
 
-exhale==0.2.3
+exhale==0.3.7
 #Description: This is used to generate PyTorch C++ docs
-#Pinned versions: 0.2.3
+#Pinned versions: 0.3.7
 
-docutils==0.16
+docutils==0.20
 #Description: This is used to generate PyTorch C++ docs
-#Pinned versions: 0.16
+#Pinned versions: 0.20
 
 bs4==0.0.1
 #Description: This is used to generate PyTorch C++ docs
@@ -56,13 +52,13 @@ IPython==8.12.0
 #Description: This is used to generate PyTorch functorch docs
 #Pinned versions: 8.12.0
 
-myst-nb==0.17.2
+myst-nb==1.3.0
 #Description: This is used to generate PyTorch functorch and torch.compile docs.
-#Pinned versions: 0.17.2
+#Pinned versions: 1.3.0
 
 # The following are required to build torch.distributed.elastic.rendezvous.etcd* docs
 python-etcd==0.4.5
 sphinx-copybutton==0.5.0
-sphinx-design==0.4.0
+sphinx-design==0.6.1
 sphinxcontrib-mermaid==1.0.0
-myst-parser==0.18.1
+myst-parser==4.0.1
@@ -42,7 +42,7 @@ declare -f -t trap_add
 function assert_git_not_dirty() {
 # TODO: we should add an option to `build_amd.py` that reverts the repo to
 # an unmodified state.
-if [[ "$BUILD_ENVIRONMENT" != *rocm* ]] && [[ "$BUILD_ENVIRONMENT" != *xla* ]] && [[ "$BUILD_ENVIRONMENT" != *win* ]] ; then
+if [[ "$BUILD_ENVIRONMENT" != *rocm* ]] && [[ "$BUILD_ENVIRONMENT" != *xla* ]] ; then
 git_status=$(git status --porcelain | grep -v '?? third_party' || true)
 if [[ $git_status ]]; then
 echo "Build left local git repository checkout dirty"
@ -89,23 +89,41 @@ if [ "$is_main_doc" = true ]; then
|
|||||||
|
|
||||||
make coverage
|
make coverage
|
||||||
# Now we have the coverage report, we need to make sure it is empty.
|
# Now we have the coverage report, we need to make sure it is empty.
|
||||||
# Count the number of lines in the file and turn that number into a variable
|
# Sphinx 7.2.6+ format: python.txt contains a statistics table with a TOTAL row
|
||||||
# $lines. The `cut -f1 ...` is to only parse the number, not the filename
|
# showing the undocumented count in the third column.
|
||||||
# Skip the report header by subtracting 2: the header will be output even if
|
# Example: | TOTAL | 99.83% | 2 |
|
||||||
# there are no undocumented items.
|
|
||||||
#
|
#
|
||||||
# Also: see docs/source/conf.py for "coverage_ignore*" items, which should
|
# Also: see docs/source/conf.py for "coverage_ignore*" items, which should
|
||||||
# be documented then removed from there.
|
# be documented then removed from there.
|
||||||
lines=$(wc -l build/coverage/python.txt 2>/dev/null |cut -f1 -d' ')
|
|
||||||
undocumented=$((lines - 2))
|
# Extract undocumented count from TOTAL row in Sphinx 7.2.6 statistics table
|
||||||
if [ $undocumented -lt 0 ]; then
|
# The table format is: | Module | Coverage | Undocumented |
|
||||||
|
# Extract the third column (undocumented count) from the TOTAL row
|
||||||
|
undocumented=$(grep "| TOTAL" build/coverage/python.txt | awk -F'|' '{print $4}' | tr -d ' ')
|
||||||
|
|
||||||
|
if [ -z "$undocumented" ] || ! [[ "$undocumented" =~ ^[0-9]+$ ]]; then
|
||||||
echo coverage output not found
|
echo coverage output not found
|
||||||
exit 1
|
exit 1
|
||||||
elif [ $undocumented -gt 0 ]; then
|
elif [ "$undocumented" -gt 0 ]; then
|
||||||
echo undocumented objects found:
|
set +x # Disable command echoing for cleaner output
|
||||||
cat build/coverage/python.txt
|
echo ""
|
||||||
|
echo "====================="
|
||||||
|
echo "UNDOCUMENTED OBJECTS:"
|
||||||
|
echo "====================="
|
||||||
|
echo ""
|
||||||
|
# Find the line number of the TOTAL row and print only what comes after it
|
||||||
|
total_line=$(grep -n "| TOTAL" build/coverage/python.txt | cut -d: -f1)
|
||||||
|
if [ -n "$total_line" ]; then
|
||||||
|
# Print only the detailed list (skip the statistics table)
|
||||||
|
tail -n +$((total_line + 2)) build/coverage/python.txt
|
||||||
|
else
|
||||||
|
# Fallback to showing entire file if TOTAL line not found
|
||||||
|
cat build/coverage/python.txt
|
||||||
|
fi
|
||||||
|
echo ""
|
||||||
echo "Make sure you've updated relevant .rsts in docs/source!"
|
echo "Make sure you've updated relevant .rsts in docs/source!"
|
||||||
echo "You can reproduce locally by running 'cd docs && make coverage && cat build/coverage/python.txt'"
|
echo "You can reproduce locally by running 'cd docs && make coverage && tail -n +\$((grep -n \"| TOTAL\" build/coverage/python.txt | cut -d: -f1) + 2)) build/coverage/python.txt'"
|
||||||
|
set -x # Re-enable command echoing
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
else
|
else
|
||||||
|
|||||||
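For context on the hunk above, here is a minimal sketch of the Sphinx 7.2.6 coverage output that the new grep/awk extraction assumes. The module names and numbers below are made up for illustration; only the `| TOTAL` row format and the parsing commands are taken from the change itself.

```sh
# Hypothetical build/coverage/python.txt written by "make coverage" (Sphinx 7.2.6+).
cat > python.txt <<'EOF'
| Module        | Coverage | Undocumented |
|---------------+----------+--------------|
| torch.example |  100.00% |            0 |
| TOTAL         |   99.83% |            2 |
+---------------+----------+--------------+

torch.example
-------------
 * some_missing_function
EOF

# Same extraction as the script: third pipe-delimited column of the TOTAL row.
undocumented=$(grep "| TOTAL" python.txt | awk -F'|' '{print $4}' | tr -d ' ')
echo "$undocumented"   # prints 2

# Same reporting step: show only the per-object detail after the statistics table.
total_line=$(grep -n "| TOTAL" python.txt | cut -d: -f1)
tail -n +$((total_line + 2)) python.txt
```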
@@ -337,7 +337,7 @@ test_python() {
 
 test_python_smoke() {
 # Smoke tests for H100/B200
-time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 inductor/test_max_autotune $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
+time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 inductor/test_max_autotune inductor/test_cutedsl_grouped_mm $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
 assert_git_not_dirty
 }
 
@@ -1653,7 +1653,7 @@ test_operator_microbenchmark() {
 
 cd "${TEST_DIR}"/benchmarks/operator_benchmark
 
-for OP_BENCHMARK_TESTS in matmul mm addmm bmm; do
+for OP_BENCHMARK_TESTS in matmul mm addmm bmm conv; do
 $TASKSET python -m pt.${OP_BENCHMARK_TESTS}_test --tag-filter long \
 --output-json-for-dashboard "${TEST_REPORTS_DIR}/operator_microbenchmark_${OP_BENCHMARK_TESTS}_compile.json" \
 --benchmark-name "PyTorch operator microbenchmark" --use-compile
@@ -31,24 +31,23 @@ if "%USE_XPU%"=="1" (
 if errorlevel 1 exit /b 1
 )
 
+:: Miniconda has been installed as part of the Windows AMI with all the dependencies.
+:: We just need to activate it here
+call %INSTALLER_DIR%\activate_miniconda3.bat
+if errorlevel 1 goto fail
+if not errorlevel 0 goto fail
+
 :: Update CMake
+:: TODO: Investigate why this helps MKL detection, even when CMake from choco is not used
 call choco upgrade -y cmake --no-progress --installargs 'ADD_CMAKE_TO_PATH=System' --apply-install-arguments-to-dependencies --version=3.27.9
 if errorlevel 1 goto fail
 if not errorlevel 0 goto fail
 
-call pip install mkl==2024.2.0 mkl-static==2024.2.0 mkl-include==2024.2.0 ninja typing-extensions
-call pip install -r .ci/docker/requirements-ci.txt
-SET CMAKE_LIBRARY_PATH=%PYTHON_PATH%\Library\lib
-SET CMAKE_INCLUDE_PATH=%PYTHON_PATH%\Library\include
+:: TODO: Move to .ci/docker/requirements-ci.txt
+call pip install mkl==2024.2.0 mkl-static==2024.2.0 mkl-include==2024.2.0
 if errorlevel 1 goto fail
 if not errorlevel 0 goto fail
 
-:: Install libuv
-curl -k https://s3.amazonaws.com/ossci-windows/libuv-1.40.0-h8ffe710_0.tar.bz2 -o libuv-1.40.0-h8ffe710_0.tar.bz2
-7z x -aoa libuv-1.40.0-h8ffe710_0.tar.bz2
-tar -xvf libuv-1.40.0-h8ffe710_0.tar -C %PYTHON_PATH%
-set libuv_ROOT=%PYTHON_PATH%\Library
-
 :: Override VS env here
 pushd .
 if "%VC_VERSION%" == "" (
@@ -0,0 +1,30 @@
+if "%BUILD_ENVIRONMENT%"=="" (
+set CONDA_PARENT_DIR=%CD%
+) else (
+set CONDA_PARENT_DIR=C:\Jenkins
+)
+set CONDA_ROOT_DIR=%CONDA_PARENT_DIR%\Miniconda3
+
+:: Be conservative here when rolling out the new AMI with conda. This will try
+:: to install conda as before if it couldn't find the conda installation. This
+:: can be removed eventually after we gain enough confidence in the AMI
+if not exist %CONDA_ROOT_DIR% (
+set INSTALL_FRESH_CONDA=1
+)
+
+if "%INSTALL_FRESH_CONDA%"=="1" (
+curl --retry 3 --retry-all-errors -k https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe --output %TMP_DIR_WIN%\Miniconda3-latest-Windows-x86_64.exe
+if errorlevel 1 exit /b
+if not errorlevel 0 exit /b
+
+%TMP_DIR_WIN%\Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /RegisterPython=0 /S /AddToPath=0 /D=%CONDA_ROOT_DIR%
+if errorlevel 1 exit /b
+if not errorlevel 0 exit /b
+)
+
+:: Activate conda so that we can use its commands, i.e. conda, python, pip
+call %CONDA_ROOT_DIR%\Scripts\activate.bat %CONDA_ROOT_DIR%
+:: Activate conda so that we can use its commands, i.e. conda, python, pip
+call conda activate py_tmp
+
+call pip install -r .ci/docker/requirements-ci.txt
@@ -3,6 +3,19 @@ set PATH=C:\Program Files\CMake\bin;C:\Program Files\7-Zip;C:\ProgramData\chocol
 :: Install Miniconda3
 set INSTALLER_DIR=%SCRIPT_HELPERS_DIR%\installation-helpers
 
+:: Miniconda has been installed as part of the Windows AMI with all the dependencies.
+:: We just need to activate it here
+call %INSTALLER_DIR%\activate_miniconda3.bat
+if errorlevel 1 exit /b
+if not errorlevel 0 exit /b
+
+:: PyTorch is now installed using the standard wheel on Windows into the conda environment.
+:: However, the test scripts are still frequently referring to the workspace temp directory
+:: build\torch. Rather than changing all these references, making a copy of torch folder
+:: from conda to the current workspace is easier. The workspace will be cleaned up after
+:: the job anyway
+xcopy /s %CONDA_ROOT_DIR%\envs\py_tmp\Lib\site-packages\torch %TMP_DIR_WIN%\build\torch\
+
 pushd .
 if "%VC_VERSION%" == "" (
 call "C:\Program Files (x86)\Microsoft Visual Studio\%VC_YEAR%\%VC_PRODUCT%\VC\Auxiliary\Build\vcvarsall.bat" x64
@@ -40,11 +40,6 @@ fi
 # TODO: Move this to .ci/docker/requirements-ci.txt
 python -m pip install "psutil==5.9.1" nvidia-ml-py "pytest-shard==0.1.2"
 
-# Install expecttest to merge https://github.com/pytorch/pytorch/pull/155308
-python -m pip install expecttest==0.3.0
-
-python -m pip install -r $SCRIPT_PARENT_DIR/../docker/requirements-ci.txt
-
 run_tests() {
 # Run nvidia-smi if available
 for path in '/c/Program Files/NVIDIA Corporation/NVSMI/nvidia-smi.exe' /c/Windows/System32/nvidia-smi.exe; do
@@ -22,10 +22,8 @@ curl --retry 3 -kL "%PYTHON_INSTALLER_URL%" --output python-amd64.exe
 if errorlevel 1 exit /b 1
 
 start /wait "" python-amd64.exe /quiet InstallAllUsers=1 PrependPath=0 Include_test=0 %ADDITIONAL_OPTIONS% TargetDir=%CD%\Python
 
 if errorlevel 1 exit /b 1
-
-
 set "PATH=%CD%\Python\Scripts;%CD%\Python;%PATH%"
 %PYTHON_EXEC% -m pip install --upgrade pip setuptools packaging wheel build
 if errorlevel 1 exit /b 1
@@ -60,9 +60,11 @@ performance-*,
 readability-container-size-empty,
 readability-delete-null-pointer,
 readability-duplicate-include,
+readability-named-parameter,
 readability-misplaced-array-index,
 readability-redundant*,
 readability-simplify-subscript-expr,
+readability-static-definition-in-anonymous-namespace
 readability-string-compare,
 -readability-redundant-access-specifiers,
 -readability-redundant-control-flow,
319 .claude/skills/add-uint-support/SKILL.md (new file)
@@ -0,0 +1,319 @@
---
name: add-uint-support
description: Add unsigned integer (uint) type support to PyTorch operators by updating AT_DISPATCH macros. Use when adding support for uint16, uint32, uint64 types to operators, kernels, or when user mentions enabling unsigned types, barebones unsigned types, or uint support.
---

# Add Unsigned Integer (uint) Support to Operators

This skill helps add support for unsigned integer types (uint16, uint32, uint64) to PyTorch operators by updating their AT_DISPATCH macros.

## When to use this skill

Use this skill when:
- Adding uint16, uint32, or uint64 support to an operator
- User mentions "unsigned types", "uint support", "barebones unsigned types"
- Enabling support for kUInt16, kUInt32, kUInt64 in kernels
- Working with operator implementations that need expanded type coverage

## Quick reference

**Add unsigned types to existing dispatch:**
```cpp
// Before
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES));

// After (method 1: add unsigned types explicitly)
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));

// After (method 2: use V2 integral types if AT_INTEGRAL_TYPES present)
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_INTEGRAL_TYPES_V2), AT_EXPAND(AT_FLOATING_TYPES));
```

## Type group reference

**Unsigned type groups:**
- `AT_BAREBONES_UNSIGNED_TYPES`: kUInt16, kUInt32, kUInt64
- `AT_INTEGRAL_TYPES_V2`: AT_INTEGRAL_TYPES + AT_BAREBONES_UNSIGNED_TYPES

**Relationship:**
```cpp
AT_INTEGRAL_TYPES            // kByte, kChar, kInt, kLong, kShort
AT_BAREBONES_UNSIGNED_TYPES  // kUInt16, kUInt32, kUInt64
AT_INTEGRAL_TYPES_V2         // INTEGRAL_TYPES + BAREBONES_UNSIGNED_TYPES
```

## Instructions

### Step 1: Determine if conversion to V2 is needed

Check if the file uses AT_DISPATCH_V2:

**If using old AT_DISPATCH:**
- First convert to AT_DISPATCH_V2 using the at-dispatch-v2 skill
- Then proceed with adding uint support

**If already using AT_DISPATCH_V2:**
- Proceed directly to Step 2

### Step 2: Analyze the current dispatch macro

Identify what type groups are currently in use:

```cpp
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  // body
}), AT_EXPAND(AT_ALL_TYPES), kHalf, kBFloat16);
    ^^^^^^^^^^^^^^^^^^^^^^^^^
    Current type coverage
```

Common patterns:
- `AT_EXPAND(AT_ALL_TYPES)` → includes AT_INTEGRAL_TYPES + AT_FLOATING_TYPES
- `AT_EXPAND(AT_INTEGRAL_TYPES)` → signed integers only
- `AT_EXPAND(AT_FLOATING_TYPES)` → floating point types

### Step 3: Choose the uint addition method

Two approaches:

**Method 1: Add AT_BAREBONES_UNSIGNED_TYPES explicitly**
- Use when: You want to be explicit about adding uint support
- Add `AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES)` to the type list

**Method 2: Substitute AT_INTEGRAL_TYPES with AT_INTEGRAL_TYPES_V2**
- Use when: The dispatch already uses `AT_EXPAND(AT_INTEGRAL_TYPES)`
- More concise: replaces one type group with its superset
- Only applicable if AT_INTEGRAL_TYPES is present

### Step 4: Apply the transformation

**Method 1 example:**
```cpp
// Before
AT_DISPATCH_V2(
  dtype,
  "min_values_cuda",
  AT_WRAP([&]() {
    kernel_impl<scalar_t>(iter);
  }),
  AT_EXPAND(AT_ALL_TYPES),
  kBFloat16, kHalf, kBool
);

// After (add unsigned types)
AT_DISPATCH_V2(
  dtype,
  "min_values_cuda",
  AT_WRAP([&]() {
    kernel_impl<scalar_t>(iter);
  }),
  AT_EXPAND(AT_ALL_TYPES),
  AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES),
  kBFloat16, kHalf, kBool
);
```

**Method 2 example:**
```cpp
// Before
AT_DISPATCH_V2(
  dtype,
  "integral_op",
  AT_WRAP([&]() {
    kernel<scalar_t>();
  }),
  AT_EXPAND(AT_INTEGRAL_TYPES)
);

// After (substitute with V2)
AT_DISPATCH_V2(
  dtype,
  "integral_op",
  AT_WRAP([&]() {
    kernel<scalar_t>();
  }),
  AT_EXPAND(AT_INTEGRAL_TYPES_V2)
);
```

### Step 5: Handle AT_ALL_TYPES vs individual type groups

If the dispatch uses `AT_EXPAND(AT_ALL_TYPES)`:
- `AT_ALL_TYPES` = `AT_INTEGRAL_TYPES` + `AT_FLOATING_TYPES`
- To add uint: add `AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES)` to the list

If the dispatch separately lists INTEGRAL and FLOATING:
```cpp
// Before
AT_EXPAND(AT_INTEGRAL_TYPES), AT_EXPAND(AT_FLOATING_TYPES)

// After (Method 2 preferred)
AT_EXPAND(AT_INTEGRAL_TYPES_V2), AT_EXPAND(AT_FLOATING_TYPES)
```

### Step 6: Verify all dispatch sites

Check the file for ALL dispatch macros that need uint support:
- Some operators have multiple dispatch sites (CPU, CUDA, different functions)
- Apply the transformation consistently across all sites
- Ensure each gets the same type coverage updates

### Step 7: Validate the changes

Check that:
- [ ] AT_DISPATCH_V2 format is used (not old AT_DISPATCH)
- [ ] Unsigned types are added via one of the two methods
- [ ] All relevant dispatch sites in the file are updated
- [ ] Type groups use `AT_EXPAND()`
- [ ] Arguments are properly formatted and comma-separated

## Common patterns

### Pattern 1: AT_ALL_TYPES + extras

```cpp
// Before
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES), kHalf, kBFloat16);

// After
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kHalf, kBFloat16);
```

### Pattern 2: Separate INTEGRAL + FLOATING

```cpp
// Before
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_INTEGRAL_TYPES), AT_EXPAND(AT_FLOATING_TYPES));

// After
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_INTEGRAL_TYPES_V2), AT_EXPAND(AT_FLOATING_TYPES));
```

### Pattern 3: Old dispatch needs conversion first

```cpp
// Before (needs v2 conversion first)
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, dtype, "op", [&]() {
  kernel<scalar_t>();
});

// After v2 conversion
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES), kHalf, kBFloat16);

// After adding uint support
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kHalf, kBFloat16);
```

## Multiple dispatch sites example

For a file with multiple functions:

```cpp
void min_values_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_V2(iter.dtype(), "min_values_cuda", AT_WRAP([&]() {
    impl<scalar_t>(iter);
  }), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf);
  //                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  //                           Added uint support
}

void min_launch_kernel(TensorIterator &iter) {
  AT_DISPATCH_V2(iter.input_dtype(), "min_cuda", AT_WRAP([&]() {
    gpu_reduce_kernel<scalar_t>(iter);
  }), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf);
  //                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  //                           Added uint support here too
}
```

## Decision tree

Use this decision tree to determine the approach:

```
Is the file using AT_DISPATCH_V2?
├─ No → Use at-dispatch-v2 skill first, then continue
└─ Yes
   └─ Does it use AT_EXPAND(AT_INTEGRAL_TYPES)?
      ├─ Yes → Replace with AT_EXPAND(AT_INTEGRAL_TYPES_V2)
      └─ No → Add AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES) to type list
```

## Edge cases

### Case 1: Dispatch with only floating types

If the operator only supports floating point types, don't add uint support:

```cpp
// Leave as-is - floating point only operator
AT_DISPATCH_V2(dtype, "float_op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_FLOATING_TYPES), kHalf);
```

### Case 2: Complex types present

Unsigned types work alongside complex types:

```cpp
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES),
    AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES),
    AT_EXPAND(AT_COMPLEX_TYPES),
    kHalf, kBFloat16);
```

### Case 3: Already has uint support

Check if uint types are already present:
- If `AT_INTEGRAL_TYPES_V2` is used → already has uint support
- If `AT_BAREBONES_UNSIGNED_TYPES` is already in list → already has uint support
- Skip the file if uint support is already present

## Workflow

When asked to add uint support:

1. Read the target file
2. Check if using AT_DISPATCH_V2:
   - If not → use at-dispatch-v2 skill first
3. Identify all dispatch macro sites
4. For each dispatch:
   - Analyze current type groups
   - Choose method (add BAREBONES_UNSIGNED or upgrade to V2)
   - Apply transformation with Edit tool
5. Show the user the changes
6. Explain what was modified

## Important notes

- Always check if v2 conversion is needed first
- Apply changes consistently across all dispatch sites in the file
- Method 2 (AT_INTEGRAL_TYPES_V2) is cleaner when applicable
- Method 1 (explicit AT_BAREBONES_UNSIGNED_TYPES) is more explicit
- Unsigned types are: kUInt16, kUInt32, kUInt64 (not kByte which is uint8)
- Some operators may not semantically support unsigned types - use judgment

## Testing

After adding uint support, the operator should accept uint16, uint32, and uint64 tensors. The user is responsible for functional testing.
305 .claude/skills/at-dispatch-v2/SKILL.md (new file)
@@ -0,0 +1,305 @@
---
name: at-dispatch-v2
description: Convert PyTorch AT_DISPATCH macros to AT_DISPATCH_V2 format in ATen C++ code. Use when porting AT_DISPATCH_ALL_TYPES_AND*, AT_DISPATCH_FLOATING_TYPES*, or other dispatch macros to the new v2 API. For ATen kernel files, CUDA kernels, and native operator implementations.
---

# AT_DISPATCH to AT_DISPATCH_V2 Converter

This skill helps convert PyTorch's legacy AT_DISPATCH macros to the new AT_DISPATCH_V2 format, as defined in `aten/src/ATen/Dispatch_v2.h`.

## When to use this skill

Use this skill when:
- Converting AT_DISPATCH_* macros to AT_DISPATCH_V2
- Porting ATen kernels to use the new dispatch API
- Working with files in `aten/src/ATen/native/` that use dispatch macros
- User mentions "AT_DISPATCH", "dispatch v2", "Dispatch_v2.h", or macro conversion

## Quick reference

**Old format:**
```cpp
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, dtype, "kernel_name", [&]() {
  // lambda body
});
```

**New format:**
```cpp
AT_DISPATCH_V2(dtype, "kernel_name", AT_WRAP([&]() {
  // lambda body
}), AT_EXPAND(AT_ALL_TYPES), kBFloat16, kHalf, kBool);
```

## Key transformations

1. **Reorder arguments**: `scalar_type` and `name` come first, then lambda, then types
2. **Wrap the lambda**: Use `AT_WRAP(lambda)` to handle internal commas
3. **Expand type groups**: Use `AT_EXPAND(AT_ALL_TYPES)` instead of implicit expansion
4. **List individual types**: Add extra types (kHalf, kBFloat16, etc.) after expanded groups
5. **Add include**: `#include <ATen/Dispatch_v2.h>` near other Dispatch includes

## Instructions

### Step 1: Add the Dispatch_v2.h include

Add the v2 header near the existing `#include <ATen/Dispatch.h>`:

```cpp
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
```

Keep the old Dispatch.h include for now (other code may still need it).

### Step 2: Identify the old dispatch pattern

Common patterns to convert:

- `AT_DISPATCH_ALL_TYPES_AND{2,3,4}(type1, type2, ..., scalar_type, name, lambda)`
- `AT_DISPATCH_FLOATING_TYPES_AND{2,3}(type1, type2, ..., scalar_type, name, lambda)`
- `AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND{2,3}(type1, ..., scalar_type, name, lambda)`
- `AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND{2,3}(type1, ..., scalar_type, name, lambda)`

### Step 3: Map the old macro to type groups

Identify which type group macro corresponds to the base types:

| Old macro base | AT_DISPATCH_V2 type group |
|----------------|---------------------------|
| `ALL_TYPES` | `AT_EXPAND(AT_ALL_TYPES)` |
| `FLOATING_TYPES` | `AT_EXPAND(AT_FLOATING_TYPES)` |
| `INTEGRAL_TYPES` | `AT_EXPAND(AT_INTEGRAL_TYPES)` |
| `COMPLEX_TYPES` | `AT_EXPAND(AT_COMPLEX_TYPES)` |
| `ALL_TYPES_AND_COMPLEX` | `AT_EXPAND(AT_ALL_TYPES_AND_COMPLEX)` |

For combined patterns, use multiple `AT_EXPAND()` entries:
```cpp
// Old: AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(...)
// New: AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_COMPLEX_TYPES), type1, type2
```

### Step 4: Extract the individual types

From `AT_DISPATCH_*_AND2(type1, type2, ...)` or `AT_DISPATCH_*_AND3(type1, type2, type3, ...)`, extract the individual types (type1, type2, etc.).

These become the trailing arguments after the type group:
```cpp
AT_DISPATCH_V2(..., AT_EXPAND(AT_ALL_TYPES), kBFloat16, kHalf, kBool)
                                             ^^^^^^^^^^^^^^^^^^^^^^^^
                                             Individual types from AND3
```

### Step 5: Transform to AT_DISPATCH_V2

Apply the transformation:

**Pattern:**
```cpp
AT_DISPATCH_V2(
  scalar_type,      // 1st: The dtype expression
  "name",           // 2nd: The debug string
  AT_WRAP(lambda),  // 3rd: The lambda wrapped in AT_WRAP
  type_groups,      // 4th+: Type groups with AT_EXPAND()
  individual_types  // Last: Individual types
)
```

**Example transformation:**
```cpp
// BEFORE
AT_DISPATCH_ALL_TYPES_AND3(
  kBFloat16, kHalf, kBool,
  iter.dtype(),
  "min_values_cuda",
  [&]() {
    min_values_kernel_cuda_impl<scalar_t>(iter);
  }
);

// AFTER
AT_DISPATCH_V2(
  iter.dtype(),
  "min_values_cuda",
  AT_WRAP([&]() {
    min_values_kernel_cuda_impl<scalar_t>(iter);
  }),
  AT_EXPAND(AT_ALL_TYPES),
  kBFloat16, kHalf, kBool
);
```

### Step 6: Handle multi-line lambdas

For lambdas with internal commas or complex expressions, AT_WRAP is essential:

```cpp
AT_DISPATCH_V2(
  dtype,
  "complex_kernel",
  AT_WRAP([&]() {
    gpu_reduce_kernel<scalar_t, scalar_t>(
      iter,
      MinOps<scalar_t>{},
      thrust::pair<scalar_t, int64_t>(upper_bound(), 0)  // Commas inside!
    );
  }),
  AT_EXPAND(AT_ALL_TYPES)
);
```

### Step 7: Verify the conversion

Check that:
- [ ] `AT_WRAP()` wraps the entire lambda
- [ ] Type groups use `AT_EXPAND()`
- [ ] Individual types don't have `AT_EXPAND()` (just `kBFloat16`, not `AT_EXPAND(kBFloat16)`)
- [ ] Argument order is: scalar_type, name, lambda, types
- [ ] Include added: `#include <ATen/Dispatch_v2.h>`

## Type group reference

Available type group macros (use with `AT_EXPAND()`):

```cpp
AT_INTEGRAL_TYPES            // kByte, kChar, kInt, kLong, kShort
AT_FLOATING_TYPES            // kDouble, kFloat
AT_COMPLEX_TYPES             // kComplexDouble, kComplexFloat
AT_QINT_TYPES                // kQInt8, kQUInt8, kQInt32
AT_ALL_TYPES                 // INTEGRAL_TYPES + FLOATING_TYPES
AT_ALL_TYPES_AND_COMPLEX     // ALL_TYPES + COMPLEX_TYPES
AT_INTEGRAL_TYPES_V2         // INTEGRAL_TYPES + unsigned types
AT_BAREBONES_UNSIGNED_TYPES  // kUInt16, kUInt32, kUInt64
AT_FLOAT8_TYPES              // Float8 variants
```

## Common patterns

### Pattern: AT_DISPATCH_ALL_TYPES_AND2

```cpp
// Before
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, dtype, "op", [&]() {
  kernel<scalar_t>(data);
});

// After
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>(data);
}), AT_EXPAND(AT_ALL_TYPES), kHalf, kBFloat16);
```

### Pattern: AT_DISPATCH_FLOATING_TYPES_AND3

```cpp
// Before
AT_DISPATCH_FLOATING_TYPES_AND3(kHalf, kBFloat16, kFloat8_e4m3fn,
    tensor.scalar_type(), "float_op", [&] {
  process<scalar_t>(tensor);
});

// After
AT_DISPATCH_V2(tensor.scalar_type(), "float_op", AT_WRAP([&] {
  process<scalar_t>(tensor);
}), AT_EXPAND(AT_FLOATING_TYPES), kHalf, kBFloat16, kFloat8_e4m3fn);
```

### Pattern: AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2

```cpp
// Before
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
  kComplexHalf, kHalf,
  self.scalar_type(),
  "complex_op",
  [&] {
    result = compute<scalar_t>(self);
  }
);

// After
AT_DISPATCH_V2(
  self.scalar_type(),
  "complex_op",
  AT_WRAP([&] {
    result = compute<scalar_t>(self);
  }),
  AT_EXPAND(AT_ALL_TYPES),
  AT_EXPAND(AT_COMPLEX_TYPES),
  kComplexHalf,
  kHalf
);
```

## Edge cases

### Case 1: No extra types (rare)

```cpp
// Before
AT_DISPATCH_ALL_TYPES(dtype, "op", [&]() { kernel<scalar_t>(); });

// After
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES));
```

### Case 2: Many individual types (AND4, AND5, etc.)

```cpp
// Before
AT_DISPATCH_FLOATING_TYPES_AND4(kHalf, kBFloat16, kFloat8_e4m3fn, kFloat8_e5m2,
    dtype, "float8_op", [&]() { kernel<scalar_t>(); });

// After
AT_DISPATCH_V2(dtype, "float8_op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_FLOATING_TYPES), kHalf, kBFloat16, kFloat8_e4m3fn, kFloat8_e5m2);
```

### Case 3: Lambda with no captures

```cpp
// Before
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, dtype, "op", []() {
  static_kernel<scalar_t>();
});

// After
AT_DISPATCH_V2(dtype, "op", AT_WRAP([]() {
  static_kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES), kHalf, kBool);
```

## Benefits of AT_DISPATCH_V2

1. **No arity in macro name**: Don't need different macros for AND2, AND3, AND4
2. **Composable type sets**: Mix and match type groups with `AT_EXPAND()`
3. **Extensible**: Easy to add more types without hitting macro limits
4. **Clearer**: Type groups are explicit, not implicit in macro name

## Important notes

- Keep `#include <ATen/Dispatch.h>` - other code may need it
- The `AT_WRAP()` is mandatory - prevents comma parsing issues in the lambda
- Type groups need `AT_EXPAND()`, individual types don't
- The v2 API is in `aten/src/ATen/Dispatch_v2.h` - refer to it for full docs
- See the header file for the Python script to regenerate the macro implementation

## Workflow

When asked to convert AT_DISPATCH macros:

1. Read the file to identify all AT_DISPATCH uses
2. Add `#include <ATen/Dispatch_v2.h>` if not present
3. For each dispatch macro:
   - Identify the pattern and extract components
   - Map the base type group
   - Extract individual types
   - Construct the AT_DISPATCH_V2 call
   - Apply with Edit tool
4. Show the user the complete converted file
5. Explain what was changed

Do NOT compile or test the code - focus on accurate conversion only.
14 .github/actions/filter-test-configs/action.yml (vendored)
@@ -57,10 +57,16 @@ outputs:
 runs:
 using: composite
 steps:
-- name: Install Dependencies
-id: install-dependencies
-shell: bash
-run: |
+- uses: nick-fields/retry@v3.0.0
+name: Setup dependencies
+env:
+GITHUB_TOKEN: ${{ inputs.github-token }}
+with:
+shell: bash
+timeout_minutes: 10
+max_attempts: 5
+retry_wait_seconds: 30
+command: |
 set -eux
 # PyYAML 6.0 doesn't work with MacOS x86 anymore
 # This must run on Python-3.7 (AmazonLinux2) so can't use request=3.32.2
54 .github/actions/setup-win/action.yml (vendored)
@@ -37,6 +37,19 @@ runs:
 run: |
 Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
 
+- name: Setup conda
+shell: bash
+run: |
+# Windows conda is baked into the AMI at this location
+CONDA="C:\Jenkins\Miniconda3\condabin\conda.bat"
+
+{
+echo "CONDA=${CONDA}";
+echo "CONDA_RUN=${CONDA} run --no-capture-output";
+echo "CONDA_BUILD=${CONDA} run conda-build";
+echo "CONDA_INSTALL=${CONDA} install";
+} >> "${GITHUB_ENV}"
+
 - name: Setup Python3
 env:
 PYTHON_VERSION: ${{ inputs.python-version }}
@@ -44,12 +57,40 @@ runs:
 run: |
 set +e
 set -x
-export DESIRED_PYTHON=3.10
-echo "DESIRED_PYTHON=3.10" | tee -a "${GITHUB_ENV}"
-.ci/pytorch/windows/internal/install_python.bat
-echo "PATH=$(pwd)/Python/Scripts;$(pwd)/Python;$(pwd);/usr/bin/;${PATH}" | tee -a "${GITHUB_ENV}"
-echo "PYTHON_PATH=$(pwd)/Python" | tee -a "${GITHUB_ENV}"
-ln -s "$(pwd)/Python/python.exe" "$(pwd)/Python/python3.exe"
+
+# Create new py_tmp env with python-version
+${CONDA} create -y -n py_tmp python=${PYTHON_VERSION} intel-openmp libuv
+
+PYTHON3=$(${CONDA_RUN} -n py_tmp which python3)
+EXIT_CODE=$?
+
+if [[ "${EXIT_CODE}" == "0" ]]; then
+echo "Found Python3 at ${PYTHON3}, adding it into GITHUB_PATH"
+
+PYTHON_PATH=$(dirname "${PYTHON3}")
+echo "${PYTHON_PATH}" >> "${GITHUB_PATH}"
+else
+# According to https://docs.conda.io/en/latest/miniconda.html, we are using the Miniconda3
+# installation, which is Python 3 based. Its Python is default to Python 3. Further, there
+# is also the Miniconda installation that is Python 2 based, and both can be installed if
+# needed. In both cases, Python binary is just called python
+PYTHON=$(${CONDA_RUN} -n py_tmp which python)
+EXIT_CODE=$?
+
+if [[ "${EXIT_CODE}" == "0" ]]; then
+echo "Found Python at ${PYTHON}, set Python3 alias and add it into GITHUB_PATH"
+
+PYTHON3=$(echo "${PYTHON}" | sed "s/python/python3/")
+# It's difficult to setup alias across GitHub action steps, so I just add a softlink
+# here pointing to Python
+ln -s "${PYTHON}" "${PYTHON3}"
+
+PYTHON_PATH=$(dirname "${PYTHON}")
+echo "${PYTHON_PATH}" >> "${GITHUB_PATH}"
+else
+echo "Found no Python using ${CONDA_RUN}"
+fi
+fi
 
 - name: Get temporary directory used by Windows Python
 shell: bash
@@ -63,7 +104,6 @@ runs:
 continue-on-error: true
 shell: powershell
 run: |
-Set-Alias -Name python3 -Value python
 Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TMPDIR,"C:\Jenkins\Miniconda3" -ErrorAction Ignore
 # Let's both exclude the path and disable Windows Defender completely just to be sure
 # that it doesn't interfere
25 .github/actions/teardown-win/action.yml (vendored)
@@ -28,20 +28,25 @@ runs:
 # retry this step several time similar to how checkout-pytorch GHA does
 - name: Cleanup workspace
 if: always()
+uses: nick-fields/retry@v3.0.0
 env:
 EXTRA_DELETE_DIR: ${{ inputs.extra-delete-dir }}
-shell: bash
-run: |
-set +e
-set -x
+with:
+shell: bash
+timeout_minutes: 5
+max_attempts: 3
+retry_wait_seconds: 90
+command: |
+set +e
+set -x
 
 if [ -n "${EXTRA_DELETE_DIR}" ]; then
 # It's ok to fail to clean up the extra directory on Windows as it only contains
 # the build artifacts and doesn't take up much space, i.e. /c/5053411580/build-results
 rm -rf "${EXTRA_DELETE_DIR}" || true
 fi
 
 rm -rf ./*
 
 - name: Print all processes locking the runner workspace
 continue-on-error: true
2 .github/ci_commit_pins/vision.txt (vendored)
@@ -1 +1 @@
-218d2ab791d437309f91e0486eb9fa7f00badc17
+cfbc5c2f1c798991715a6b06bb3ce46478c4487c
2 .github/ci_commit_pins/xla.txt (vendored)
@@ -1 +1 @@
-df6798dfb931ce7c7fe5bed2447cd1092a5981af
+c8b09f5f77d6bf6fb7ed7a9aa83e5d8156b3a5e9
@@ -28,7 +28,7 @@ CUDA_ARCHES_FULL_VERSION = {
 "12.6": "12.6.3",
 "12.8": "12.8.1",
 "12.9": "12.9.1",
-"13.0": "13.0.2",
+"13.0": "13.0.0",
 }
 CUDA_ARCHES_CUDNN_VERSION = {
 "12.6": "9",
15 .github/workflows/_win-test.yml (vendored)
@@ -103,13 +103,6 @@ jobs:
 with:
 cuda-version: ${{ inputs.cuda-version }}
 
-# TODO: Move to a requirements.txt file for windows
-- name: Install pip dependencies
-shell: bash
-run: |
-set -eu
-python3 -m pip install 'xdoctest>=1.1.0'
-
 - name: Get workflow job id
 id: get-job-id
 uses: ./.github/actions/get-workflow-job-id
@@ -130,8 +123,9 @@ jobs:
 if: ${{ !inputs.disable-monitor }}
 continue-on-error: true
 run: |
-python3 -m pip install psutil==5.9.1 dataclasses_json==0.6.7 nvidia-ml-py==11.525.84
-python3 -m tools.stats.monitor --log-interval "$MONITOR_LOG_INTERVAL" --data-collect-interval "$MONITOR_DATA_COLLECT_INTERVAL" > usage_log.txt 2>&1 &
+# Windows conda doesn't have python3 binary, only python, but it's python3
+${CONDA_RUN} python -m pip install psutil==5.9.8 dataclasses_json==0.6.7 nvidia-ml-py==11.525.84
+${CONDA_RUN} python -m tools.stats.monitor --log-interval "$MONITOR_LOG_INTERVAL" --data-collect-interval "$MONITOR_DATA_COLLECT_INTERVAL" > usage_log.txt 2>&1 &
 echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"
 
 - name: Download PyTorch Build Artifacts
@@ -210,8 +204,7 @@ jobs:
 run: |
 pushd "${PYTORCH_FINAL_PACKAGE_DIR}"
 # shellcheck disable=SC2046,SC2102
-python3 -mpip install -vvv $(echo *.whl)[opt-einsum,optree] optree==0.13.0
-python3 -mpip install mkl==2024.2.0 mkl-static==2024.2.0 mkl-include==2024.2.0
+python3 -mpip install $(echo *.whl)[opt-einsum,optree] optree==0.13.0
 popd
 
 .ci/pytorch/win-test.sh
1 .github/workflows/docker-release.yml (vendored)
@@ -8,6 +8,7 @@ on:
 - docker.Makefile
 - .github/workflows/docker-release.yml
 - .github/scripts/generate_docker_release_matrix.py
+- .github/scripts/generate_binary_build_matrix.py
 push:
 branches:
 - nightly
3 .github/workflows/inductor-rocm.yml (vendored)
@@ -1,9 +1,10 @@
 name: inductor-rocm
 
 on:
+schedule:
+- cron: 0 * * * *
 push:
 branches:
-- main
 - release/*
 tags:
 - ciflow/inductor-rocm/*
8 .github/workflows/inductor-unittest.yml (vendored)
@@ -115,10 +115,10 @@ jobs:
 runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 test-matrix: |
 { include: [
-{ config: "inductor_amx", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
-{ config: "inductor_amx", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
-{ config: "inductor_avx2", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.10xlarge.avx2" },
-{ config: "inductor_avx2", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.10xlarge.avx2" },
+{ config: "inductor_amx", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge.amx" },
+{ config: "inductor_amx", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge.amx" },
+{ config: "inductor_avx2", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge.avx2" },
+{ config: "inductor_avx2", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge.avx2" },
 ]}
 secrets: inherit
 
.github/workflows/inductor.yml (vendored): 14 lines changed
@@ -84,13 +84,13 @@ jobs:
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
       test-matrix: |
         { include: [
-          { config: "cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
-          { config: "cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
-          { config: "dynamic_cpu_inductor_huggingface", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
-          { config: "dynamic_cpu_inductor_timm", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
-          { config: "dynamic_cpu_inductor_timm", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
-          { config: "dynamic_cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
-          { config: "dynamic_cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
+          { config: "cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge.amx" },
+          { config: "cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge.amx" },
+          { config: "dynamic_cpu_inductor_huggingface", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge.amx" },
+          { config: "dynamic_cpu_inductor_timm", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge.amx" },
+          { config: "dynamic_cpu_inductor_timm", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge.amx" },
+          { config: "dynamic_cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge.amx" },
+          { config: "dynamic_cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge.amx" },
           { config: "inductor_torchbench_cpu_smoketest_perf", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.24xl.spr-metal" },
         ]}
       build-additional-packages: "vision audio torchao"
.github/workflows/lint.yml (vendored): 15 lines changed
@@ -76,11 +76,12 @@ jobs:

   # NOTE: mypy needs its own job because it depends on --all-files, without assessing all files it sometimes
   # fails to find types when it should
-  lintrunner-mypy:
+  # NOTE: We should be able to disable this and consolidate with Pyrefly
+  lintrunner-pyrefly:
     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
-    name: lintrunner-mypy-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }}
+    name: lintrunner-pyrefly-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }}
     needs: [get-label-type, get-changed-files]
-    # Only run if there are changed files relevant to mypy
+    # Only run if there are changed files relevant to pyrefly
     if: |
       github.repository_owner == 'pytorch' && (
         needs.get-changed-files.outputs.changed-files == '*' ||
@@ -98,8 +99,8 @@ jobs:
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
       script: |
         CHANGED_FILES="${{ needs.get-changed-files.outputs.changed-files }}"
-        echo "Running mypy"
-        ADDITIONAL_LINTRUNNER_ARGS="--take MYPY,MYPYSTRICT --all-files" .github/scripts/lintrunner.sh
+        echo "Running pyrefly"
+        ADDITIONAL_LINTRUNNER_ARGS="--take PYREFLY --all-files" .github/scripts/lintrunner.sh

   lintrunner-noclang:
     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
@@ -118,9 +119,9 @@ jobs:
         CHANGED_FILES="${{ needs.get-changed-files.outputs.changed-files }}"
         echo "Running all other linters"
         if [ "$CHANGED_FILES" = '*' ]; then
-          ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT,PYREFLY --all-files" .github/scripts/lintrunner.sh
+          ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,PYREFLY --all-files" .github/scripts/lintrunner.sh
         else
-          ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT,PYREFLY ${CHANGED_FILES}" .github/scripts/lintrunner.sh
+          ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,PYREFLY ${CHANGED_FILES}" .github/scripts/lintrunner.sh
         fi

   quick-checks:
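Note (not part of the workflow diff): the Pyrefly-only pass above can be approximated on a local checkout. A minimal sketch, assuming `lintrunner` is installed and initialized in the repo root:

```python
# Rough local equivalent of the lintrunner-pyrefly job shown above (a sketch, not CI code).
import subprocess

# --take restricts lintrunner to the named linter code; --all-files mirrors the job's
# behaviour when every file counts as changed.
subprocess.run(["lintrunner", "--take", "PYREFLY", "--all-files"], check=True)
```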
.github/workflows/nightly.yml (vendored): 2 lines changed
@@ -41,7 +41,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     needs: get-label-type
     with:
-      runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
+      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
       build-environment: linux-jammy-py3.10-gcc11
       docker-image-name: ci-image:pytorch-linux-jammy-py3.10-gcc11
     secrets: inherit
.github/workflows/pull.yml (vendored): 8 lines changed
@@ -66,10 +66,10 @@ jobs:
           { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
           { config: "docs_test", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
           { config: "jit_legacy", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
-          { config: "backwards_compat", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+          { config: "backwards_compat", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.c7i.2xlarge" },
           { config: "distributed", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
           { config: "distributed", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
-          { config: "numpy_2_x", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+          { config: "numpy_2_x", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.c7i.2xlarge" },
         ]}
     secrets: inherit

@@ -167,8 +167,8 @@ jobs:
       docker-image-name: ci-image:pytorch-linux-jammy-py3-clang12-onnx
       test-matrix: |
         { include: [
-          { config: "default", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
-          { config: "default", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+          { config: "default", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.c7i.2xlarge" },
+          { config: "default", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.c7i.2xlarge" },
         ]}
     secrets: inherit
.github/workflows/rocm.yml (vendored): 2 lines changed
@@ -3,13 +3,13 @@ name: rocm
 on:
   push:
     branches:
-      - main
       - release/*
     tags:
       - ciflow/rocm/*
   workflow_dispatch:
   schedule:
     - cron: 29 8 * * * # about 1:29am PDT
+    - cron: 0 * * * *

 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
.github/workflows/trunk.yml (vendored): 3 lines changed
@@ -204,6 +204,7 @@ jobs:
         { include: [
           { config: "default", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
           { config: "default", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
+          { config: "distributed", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.gfx942.4" },
         ]}
     secrets: inherit

@@ -221,7 +222,7 @@ jobs:
       build-environment: linux-jammy-rocm-py3.10
       docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }}
       test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }}
-      tests-to-include: "test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor"
+      tests-to-include: "test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor distributed/test_c10d_common distributed/test_c10d_nccl"
     secrets: inherit

   inductor-build:
.gitignore (vendored): 2 lines changed
@@ -127,6 +127,7 @@ torch/test/
 torch/utils/benchmark/utils/valgrind_wrapper/callgrind.h
 torch/utils/benchmark/utils/valgrind_wrapper/valgrind.h
 torch/version.py
+torch/_inductor/kernel/vendored_templates/*
 minifier_launcher.py
 aten/src/ATen/native/transformers/hip/flash_attn/ck/fmha_fwd_d*
 aten/src/ATen/native/transformers/hip/flash_attn/ck/fmha_bwd_d*
@@ -398,3 +399,4 @@ CLAUDE.local.md
 /test_*.py
 /debug_*.py
 CLAUDE_CONTEXT/
+/.claude/settings.local.json
@@ -121,94 +121,6 @@ command = [
 ]
 is_formatter = true

-[[linter]]
-code = 'MYPY'
-include_patterns = [
-    'setup.py',
-    'functorch/dim/**/*.py',
-    'torch/**/*.py',
-    'torch/**/*.pyi',
-    'caffe2/**/*.py',
-    'caffe2/**/*.pyi',
-    'test/test_bundled_images.py',
-    'test/test_bundled_inputs.py',
-    'test/test_complex.py',
-    'test/test_datapipe.py',
-    'test/test_futures.py',
-    'test/test_numpy_interop.py',
-    'test/test_torch.py',
-    'test/test_type_hints.py',
-    'test/test_type_info.py',
-    'test/test_utils.py',
-]
-exclude_patterns = [
-    '**/fb/**',
-]
-command = [
-    'python3',
-    'tools/linter/adapters/mypy_linter.py',
-    '--config=mypy.ini',
-    '--',
-    '@{{PATHSFILE}}'
-]
-init_command = [
-    'python3',
-    'tools/linter/adapters/pip_init.py',
-    '--dry-run={{DRYRUN}}',
-    'numpy==1.26.4 ; python_version >= "3.10" and python_version <= "3.11"',
-    'numpy==2.1.0 ; python_version >= "3.12"',
-    'expecttest==0.3.0',
-    'mypy==1.16.0',
-    'sympy==1.13.3',
-    'types-requests==2.27.25',
-    'types-pyyaml==6.0.2',
-    'types-tabulate==0.8.8',
-    'types-protobuf==5.29.1.20250403',
-    'types-setuptools==79.0.0.20250422',
-    'types-jinja2==2.11.9',
-    'types-colorama==0.4.6',
-    'filelock==3.18.0',
-    'junitparser==2.1.1',
-    'rich==14.1.0',
-    'pyyaml==6.0.2',
-    'optree==0.13.0',
-    'dataclasses-json==0.6.7',
-    'pandas==2.2.3',
-]
-
-[[linter]]
-code = 'MYPYSTRICT'
-include_patterns = [
-    '.github/**/*.py',
-    'benchmarks/instruction_counts/**/*.py',
-    'tools/**/*.py',
-    'torchgen/**/*.py',
-    'torch/utils/_pytree.py',
-    'torch/utils/_cxx_pytree.py',
-    'torch/utils/benchmark/utils/common.py',
-    'torch/utils/benchmark/utils/timer.py',
-    'torch/utils/benchmark/utils/valgrind_wrapper/**/*.py',
-]
-exclude_patterns = [
-    # (linbinyu) copied from internal repo
-    '**/fb/**',
-    'tools/code_analyzer/gen_operators_yaml.py',
-    'tools/dynamo/verify_dynamo.py',
-    'tools/gen_vulkan_spv.py',
-    'tools/test/gen_operators_yaml_test.py',
-    'tools/test/gen_oplist_test.py',
-    'tools/test/test_selective_build.py',
-    'tools/experimental/torchfuzz/**',
-]
-command = [
-    'python3',
-    'tools/linter/adapters/mypy_linter.py',
-    '--config=mypy-strict.ini',
-    '--code=MYPYSTRICT',
-    '--',
-    '@{{PATHSFILE}}'
-]
-

 [[linter]]
 code = 'PYREFLY'
@@ -230,6 +142,7 @@ init_command = [
     'python3',
     'tools/linter/adapters/pip_init.py',
     '--dry-run={{DRYRUN}}',
+    'numpy==1.26.4 ; python_version >= "3.10" and python_version <= "3.11"',
     'numpy==2.1.0 ; python_version >= "3.12"',
     'expecttest==0.3.0',
     'pyrefly==0.36.2',
@@ -298,7 +211,6 @@ exclude_patterns = [
     '**/*pb.h',
     '**/*inl.h',
     'aten/src/ATen/cpu/FlushDenormal.cpp',
-    'aten/src/ATen/cpu/Utils.cpp',
     'aten/src/ATen/cpu/vml.h',
     'aten/src/ATen/CPUFixedAllocator.h',
     'aten/src/ATen/Parallel*.h',
@@ -317,8 +229,6 @@ exclude_patterns = [
     'c10/util/win32-headers.h',
     'c10/test/**/*.h',
     'third_party/**/*',
-    'torch/csrc/api/include/torch/nn/modules/common.h',
-    'torch/csrc/api/include/torch/linalg.h',
     'torch/csrc/autograd/generated/**',
     'torch/csrc/distributed/**/*.cu',
     'torch/csrc/distributed/c10d/WinSockUtils.hpp',
@@ -330,7 +240,6 @@ exclude_patterns = [
     'torch/csrc/utils/generated_serialization_types.h',
     'torch/csrc/utils/pythoncapi_compat.h',
     'torch/csrc/inductor/aoti_runtime/sycl_runtime_wrappers.h',
-    'aten/src/ATen/ExpandBase.h',
 ]
 init_command = [
     'python3',
@@ -11,7 +11,6 @@ aspects of contributing to PyTorch.
 <!-- toc -->

 - [Developing PyTorch](#developing-pytorch)
-  - [Setup the development environment](#setup-the-development-environment)
   - [Tips and Debugging](#tips-and-debugging)
 - [Nightly Checkout & Pull](#nightly-checkout--pull)
 - [Codebase structure](#codebase-structure)
@@ -67,23 +66,6 @@ aspects of contributing to PyTorch.

 Follow the instructions for [installing PyTorch from source](https://github.com/pytorch/pytorch#from-source). If you get stuck when developing PyTorch on your machine, check out the [tips and debugging](#tips-and-debugging) section below for common solutions.

-### Setup the development environment
-
-First, you need to [fork the PyTorch project on GitHub](https://github.com/pytorch/pytorch/fork) and follow the instructions at [Connecting to GitHub with SSH](https://docs.github.com/en/authentication/connecting-to-github-with-ssh) to setup your SSH authentication credentials.
-
-Then clone the PyTorch project and setup the development environment:
-
-```bash
-git clone git@github.com:<USERNAME>/pytorch.git
-cd pytorch
-git remote add upstream git@github.com:pytorch/pytorch.git
-
-make setup-env
-# Or run `make setup-env-cuda` for pre-built CUDA binaries
-# Or run `make setup-env-rocm` for pre-built ROCm binaries
-source venv/bin/activate  # or `. .\venv\Scripts\activate` on Windows
-```
-
 ### Tips and Debugging

 * If you want to have no-op incremental rebuilds (which are fast), see [Make no-op build fast](#make-no-op-build-fast) below.
SECURITY.md: 20 lines changed
@@ -1,7 +1,7 @@
 # Security Policy

 - [**Reporting a Vulnerability**](#reporting-a-vulnerability)
-- [**Using Pytorch Securely**](#using-pytorch-securely)
+- [**Using PyTorch Securely**](#using-pytorch-securely)
   - [Untrusted models](#untrusted-models)
   - [TorchScript models](#torchscript-models)
   - [Untrusted inputs](#untrusted-inputs)
@@ -10,28 +10,28 @@
 - [**CI/CD security principles**](#cicd-security-principles)
 ## Reporting Security Issues

-Beware that none of the topics under [Using Pytorch Securely](#using-pytorch-securely) are considered vulnerabilities of Pytorch.
+Beware that none of the topics under [Using PyTorch Securely](#using-pytorch-securely) are considered vulnerabilities of PyTorch.

 However, if you believe you have found a security vulnerability in PyTorch, we encourage you to let us know right away. We will investigate all legitimate reports and do our best to quickly fix the problem.

 Please report security issues using https://github.com/pytorch/pytorch/security/advisories/new

-All reports submitted thru the security advisories mechanism would **either be made public or dismissed by the team within 90 days of the submission**. If advisory has been closed on the grounds that it is not a security issue, please do not hesitate to create an [new issue](https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml) as it is still likely a valid issue within the framework.
+All reports submitted through the security advisories mechanism would **either be made public or dismissed by the team within 90 days of the submission**. If advisory has been closed on the grounds that it is not a security issue, please do not hesitate to create an [new issue](https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml) as it is still likely a valid issue within the framework.

 Please refer to the following page for our responsible disclosure policy, reward guidelines, and those things that should not be reported:

 https://www.facebook.com/whitehat


-## Using Pytorch Securely
-**Pytorch models are programs**, so treat its security seriously -- running untrusted models is equivalent to running untrusted code. In general we recommend that model weights and the python code for the model are distributed independently. That said, be careful about where you get the python code from and who wrote it (preferentially check for a provenance or checksums, do not run any pip installed package).
+## Using PyTorch Securely
+**PyTorch models are programs**, so treat its security seriously -- running untrusted models is equivalent to running untrusted code. In general we recommend that model weights and the python code for the model are distributed independently. That said, be careful about where you get the python code from and who wrote it (preferentially check for a provenance or checksums, do not run any pip installed package).

 ### Untrusted models
 Be careful when running untrusted models. This classification includes models created by unknown developers or utilizing data obtained from unknown sources[^data-poisoning-sources].

 **Prefer to execute untrusted models within a secure, isolated environment such as a sandbox** (e.g., containers, virtual machines). This helps protect your system from potentially malicious code. You can find further details and instructions in [this page](https://developers.google.com/code-sandboxing).

-**Be mindful of risky model formats**. Give preference to share and load weights with the appropriate format for your use case. [safetensors](https://huggingface.co/docs/safetensors/en/index) gives the most safety but is the most restricted in what it supports. [`torch.load`](https://pytorch.org/docs/stable/generated/torch.load.html#torch.load) has a significantly larger surface of attack but is more flexible in what it can serialize. See the documentation for more details.
+**Be mindful of risky model formats**. Give preference to share and load weights with the appropriate format for your use case. [Safetensors](https://huggingface.co/docs/safetensors/en/index) gives the most safety but is the most restricted in what it supports. [`torch.load`](https://pytorch.org/docs/stable/generated/torch.load.html#torch.load) has a significantly larger surface of attack but is more flexible in what it can serialize. See the documentation for more details.

 Even for more secure serialization formats, unexpected inputs to the downstream system can cause diverse security threats (e.g. denial of service, out of bound reads/writes) and thus we recommend extensive validation of any untrusted inputs.
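As an illustration of the guidance in that hunk (not part of the SECURITY.md diff itself), a cautious loading path might look like the sketch below; the file names are hypothetical, and `weights_only=True` restricts `torch.load` to plain tensor data rather than arbitrary pickled objects.

```python
# Minimal sketch: prefer safetensors for untrusted weights, fall back to a
# restricted torch.load. The file names here are made-up examples.
import torch

try:
    from safetensors.torch import load_file
    state_dict = load_file("untrusted_model.safetensors")
except (ImportError, FileNotFoundError):
    # weights_only=True refuses to unpickle arbitrary Python objects, which
    # shrinks the attack surface compared to a default torch.load call.
    state_dict = torch.load("untrusted_model.pt", map_location="cpu", weights_only=True)
```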
@@ -43,7 +43,7 @@ Important Note: The trustworthiness of a model is not binary. You must always de

 ### TorchScript models

-TorchScript models should treated the same way as locally executable code from an unknown source. Only run TorchScript models if you trust the provider. Please note, that tools for introspecting TorchScript models (such as `torch.utils.model_dump`) may also execute partial or full code stored in those models, therefore they should be used only if you trust the provider of the binary you are about to load.
+TorchScript models should be treated the same way as locally executable code from an unknown source. Only run TorchScript models if you trust the provider. Please note, that tools for introspecting TorchScript models (such as `torch.utils.model_dump`) may also execute partial or full code stored in those models, therefore they should be used only if you trust the provider of the binary you are about to load.

 ### Untrusted inputs during training and prediction

@@ -59,9 +59,9 @@ If applicable, prepare your model against bad inputs and prompt injections. Some

 ### Data privacy

-**Take special security measures if your model if you train models with sensitive data**. Prioritize [sandboxing](https://developers.google.com/code-sandboxing) your models and:
-- Do not feed sensitive data to untrusted model (even if runs in a sandboxed environment)
-- If you consider publishing a model that was partially trained with sensitive data, be aware that data can potentially be recovered from the trained weights (especially if model overfits).
+**Take special security measures if you train your models with sensitive data**. Prioritize [sandboxing](https://developers.google.com/code-sandboxing) your models and:
+- Do not feed sensitive data to an untrusted model (even if runs in a sandboxed environment)
+- If you consider publishing a model that was partially trained with sensitive data, be aware that data can potentially be recovered from the trained weights (especially if the model overfits).

 ### Using distributed features
@@ -260,7 +260,7 @@ IF(USE_FBGEMM_GENAI)
   if(USE_CUDA)
     # To avoid increasing the build time/binary size unnecessarily, use an allow-list of kernels to build.
     # If you want to integrate a kernel from FBGEMM into torch, you have to add it here.
-    set(FBGEMM_CUTLASS_KERNELS_REGEX ".*(mx8mx8bf16_grouped|f4f4bf16_grouped).*")
+    set(FBGEMM_CUTLASS_KERNELS_REGEX ".*(mx8mx8bf16_grouped|f4f4bf16_grouped|f4f4bf16).*")
     file(GLOB_RECURSE fbgemm_genai_native_cuda_cu
       "${FBGEMM_GENAI_SRCS}/cutlass_extensions/*.cu"
       "${FBGEMM_GENAI_SRCS}/cutlass_extensions/**/*.cu")
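A quick way to see what the widened allow-list matches (an illustrative sketch; the kernel file names below are hypothetical, not actual FBGEMM sources):

```python
import re

old = re.compile(r".*(mx8mx8bf16_grouped|f4f4bf16_grouped).*")
new = re.compile(r".*(mx8mx8bf16_grouped|f4f4bf16_grouped|f4f4bf16).*")

for name in ["f4f4bf16_grouped_gemm.cu",  # matched before and after
             "f4f4bf16_gemm.cu",          # only the widened regex matches
             "bf16bf16_gemm.cu"]:         # matched by neither
    print(name, bool(old.match(name)), bool(new.match(name)))
```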
@@ -181,7 +181,7 @@ c10::intrusive_ptr<c10::TensorImpl> CPUGeneratorImpl::get_state() const {
   static const size_t size = sizeof(CPUGeneratorImplState);
   static_assert(std::is_standard_layout_v<CPUGeneratorImplState>, "CPUGeneratorImplState is not a PODType");

-  auto state_tensor = at::detail::empty_cpu({(int64_t)size}, ScalarType::Byte, std::nullopt, std::nullopt, std::nullopt, std::nullopt);
+  auto state_tensor = at::detail::empty_cpu({static_cast<int64_t>(size)}, ScalarType::Byte, std::nullopt, std::nullopt, std::nullopt, std::nullopt);
   auto rng_state = state_tensor.data_ptr();

   // accumulate generator data to be copied into byte tensor
@@ -23,8 +23,6 @@ C10_DIAGNOSTIC_POP()
 #endif
 namespace at {

-namespace {
-
 /*
   These const variables defined the fp32 precisions for different backend
   We have "generic", "cuda", "mkldnn" backend now and we can choose fp32
@@ -41,16 +39,6 @@ namespace {
     ->rnn
 */

-C10_ALWAYS_INLINE void warn_deprecated_fp32_precision_api(){
-  TORCH_WARN_ONCE(
-    "Please use the new API settings to control TF32 behavior, such as torch.backends.cudnn.conv.fp32_precision = 'tf32' "
-    "or torch.backends.cuda.matmul.fp32_precision = 'ieee'. Old settings, e.g, torch.backends.cuda.matmul.allow_tf32 = True, "
-    "torch.backends.cudnn.allow_tf32 = True, allowTF32CuDNN() and allowTF32CuBLAS() will be deprecated after Pytorch 2.9. Please see "
-    "https://pytorch.org/docs/main/notes/cuda.html#tensorfloat-32-tf32-on-ampere-and-later-devices"
-  );
-}
-} // namespace
-
 Float32Backend str2backend(const std::string& name) {
   if (name == "generic")
     return Float32Backend::GENERIC;
@@ -206,7 +194,6 @@ bool Context::allowTF32CuDNN(std::optional<Float32Op> op) const {
   } else {
     return float32Precision(Float32Backend::CUDA, op.value()) == Float32Precision::TF32;
   }
-  warn_deprecated_fp32_precision_api();
   return allow_tf32_cudnn;
 }

@@ -214,7 +201,6 @@ void Context::setAllowTF32CuDNN(bool b) {
   setFloat32Precision(Float32Backend::CUDA, Float32Op::RNN, b ? Float32Precision::TF32 : Float32Precision::NONE);
   setFloat32Precision(Float32Backend::CUDA, Float32Op::CONV, b ? Float32Precision::TF32 : Float32Precision::NONE);
   allow_tf32_cudnn = b;
-  warn_deprecated_fp32_precision_api();
 }

 void Context::setSDPPriorityOrder(const std::vector<int64_t>& order) {
@@ -223,7 +209,7 @@ void Context::setSDPPriorityOrder(const std::vector<int64_t>& order) {
     "setSDPPriority order expected ", sdp_priority_order.size() - 1, " but got ",
     at::num_sdp_backends, " unique backends specified in priority order.");
   for (uint32_t i = 0; i < order.size(); i++) {
-    sdp_priority_order[i] = (at::SDPBackend) order[i];
+    sdp_priority_order[i] = static_cast<at::SDPBackend>(order[i]);
   }
 }

@@ -325,7 +311,6 @@ bool Context::allowTF32CuBLAS() const {
       "Current status indicate that you have used mix of the legacy and new APIs to set the TF32 status for cublas matmul. ",
       "We suggest only using the new API to set the TF32 flag. See also: ",
       "https://pytorch.org/docs/main/notes/cuda.html#tensorfloat-32-tf32-on-ampere-and-later-devices");
-  warn_deprecated_fp32_precision_api();
   return allow_tf32_new;
 }

@@ -349,7 +334,6 @@ Float32MatmulPrecision Context::float32MatmulPrecision() const {
       "Current status indicate that you have used mix of the legacy and new APIs to set the matmul precision. ",
       "We suggest only using the new API for matmul precision. See also: ",
       "https://pytorch.org/docs/main/notes/cuda.html#tensorfloat-32-tf32-on-ampere-and-later-devices");
-  warn_deprecated_fp32_precision_api();
   return float32_matmul_precision;
 }

@@ -377,7 +361,6 @@ Float32Precision Context::float32Precision(Float32Backend backend, Float32Op op)

 void Context::setFloat32MatmulPrecision(const std::string &s) {
   auto match = [this](const std::string & s_) {
-    warn_deprecated_fp32_precision_api();
     // TODO: consider if CuDNN field needs to also be set for potential future CuDNN ops like multi-headed attention
     if (s_ == "highest") {
       float32_matmul_precision = at::Float32MatmulPrecision::HIGHEST;
@@ -825,6 +808,14 @@ void Context::setDisplayVmapFallbackWarnings(bool enabled) {
   display_vmap_fallback_warnings_ = enabled;
 }

+bool Context::warnOnAccumulateGradStreamMismatch() const {
+  return warn_on_accumulate_grad_stream_mismatch_;
+}
+
+void Context::setWarnOnAccumulateGradStreamMismatch(bool enabled) {
+  warn_on_accumulate_grad_stream_mismatch_ = enabled;
+}
+
 bool Context::isDefaultMobileCPUAllocatorSet() {
   return prev_allocator_ptr_ != nullptr;
 }
@@ -404,6 +404,9 @@ class TORCH_API Context {
   void setDisplayVmapFallbackWarnings(bool enabled);
   bool areVmapFallbackWarningsEnabled() const;

+  void setWarnOnAccumulateGradStreamMismatch(bool enabled);
+  bool warnOnAccumulateGradStreamMismatch() const;
+
   bool isDefaultMobileCPUAllocatorSet();
   void setDefaultMobileCPUAllocator();
   void unsetDefaultMobileCPUAllocator();
@@ -494,6 +497,7 @@ class TORCH_API Context {
   bool release_original_weights = false;
 #endif
   bool display_vmap_fallback_warnings_ = false;
+  bool warn_on_accumulate_grad_stream_mismatch_ = true;
   std::atomic<at::QEngine> quantized_engine = at::QEngine::NoQEngine;
   bool enable_sparse_tensor_invariant_checks = false;
   bool allow_fp16_reduction_cpu = false;
@@ -197,6 +197,7 @@ inline at::ScalarType scalar_type(at::ScalarType s) {
     /* don't use TYPE again in case it is an expensive or side-effect op */ \
     at::ScalarType _st = ::detail::scalar_type(the_type); \
     RECORD_KERNEL_FUNCTION_DTYPE(at_dispatch_name, _st); \
+    C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-enum") \
     switch (_st) { \
       __VA_ARGS__ \
       default: \
@@ -208,6 +209,7 @@ inline at::ScalarType scalar_type(at::ScalarType s) {
             toString(_st), \
             "'"); \
     } \
+    C10_DIAGNOSTIC_POP() \
   }()

 #define AT_DISPATCH_CASE_FLOATING_TYPES(...) \
@@ -252,13 +252,13 @@ MapAllocator::MapAllocator(WithFd /*unused*/, std::string_view filename, int fd,
   if (!(flags_ & ALLOCATOR_MAPPED_FROMFD)) {
     if (flags_ & ALLOCATOR_MAPPED_SHARED) {
       // NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
-      if ((fd = open(filename_.c_str(), flags, (mode_t)0600)) == -1) {
+      if ((fd = open(filename_.c_str(), flags, static_cast<mode_t>(0600))) == -1) {
         TORCH_CHECK(false, "unable to open file <", filename_, "> in read-write mode: ", c10::utils::str_error(errno), " (", errno, ")");
       }
     } else if (flags_ & ALLOCATOR_MAPPED_SHAREDMEM) {
 #ifdef HAVE_SHM_OPEN
       // NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
-      if((fd = shm_open(filename_.c_str(), flags, (mode_t)0600)) == -1) {
+      if((fd = shm_open(filename_.c_str(), flags, static_cast<mode_t>(0600))) == -1) {
         TORCH_CHECK(false, "unable to open shared memory object <", filename_, "> in read-write mode: ", c10::utils::str_error(errno), " (", errno, ")");
       }
 #else
@@ -503,7 +503,7 @@ RefcountedMapAllocator::RefcountedMapAllocator(WithFd /*unused*/, const char *fi

 void RefcountedMapAllocator::initializeAlloc() {
   TORCH_CHECK(base_ptr_, "base_ptr_ is null");
-  MapInfo *map_info = (MapInfo*)base_ptr_;
+  MapInfo *map_info = static_cast<MapInfo*>(base_ptr_);

 #ifdef _WIN32
   ReleaseContext* r_ctx = new ReleaseContext;
@@ -539,7 +539,7 @@ void RefcountedMapAllocator::close() {
   }
 #else /* _WIN32 */

-  MapInfo *info = (MapInfo*)(data);
+  MapInfo *info = static_cast<MapInfo*>(data);
   if (--info->refcount == 0) {
 #ifdef HAVE_SHM_UNLINK
     if (shm_unlink(filename_.c_str()) == -1) {
@@ -862,7 +862,7 @@ void TensorIteratorBase::narrow(int dim, int64_t start, int64_t size) {
   shape_[dim] = size;
   view_offsets_[dim] += start;
   for (auto& op : operands_) {
-    op.data = ((char*)op.data) + op.stride_bytes[dim] * start;
+    op.data = (static_cast<char*>(op.data)) + op.stride_bytes[dim] * start;
   }
   if (size == 1 && !is_reduction_) {
     coalesce_dimensions();
@@ -873,7 +873,7 @@ void TensorIteratorBase::select_all_keeping_dim(int start_dim, IntArrayRef indic
   TORCH_INTERNAL_ASSERT(start_dim <= ndim());
   for (const auto i : c10::irange(start_dim, ndim())) {
     for (auto& op : operands_) {
-      op.data = ((char*)op.data) + op.stride_bytes[i] * indices[i - start_dim];
+      op.data = (static_cast<char*>(op.data)) + op.stride_bytes[i] * indices[i - start_dim];
     }
     shape_[i] = 1;
   }
@@ -41,7 +41,7 @@ inline void serial_for_each(
     IntArrayRef strides,
     char** base_ptrs,
     size_t ntensors,
-    typename TensorIteratorBase::loop2d_t loop,
+    TensorIteratorBase::loop2d_t loop,
     Range range) {
   const auto ndim = shape.size();
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
@@ -72,10 +72,16 @@ TORCH_LIBRARY_IMPL(aten, VmapMode, m) {
   m.impl("random_", unsupportedRandomOp_<Tensor&, std::optional<Generator>>);

   m.impl("rand_like", unsupportedRandomOp<const Tensor&, TENSOROPTIONS, std::optional<MemoryFormat>>);
+  m.impl("rand_like.generator", unsupportedRandomOp<const Tensor&, std::optional<Generator>, TENSOROPTIONS, std::optional<MemoryFormat>>);
   m.impl("randn_like", unsupportedRandomOp<const Tensor&, TENSOROPTIONS, std::optional<MemoryFormat>>);
+  m.impl("randn_like.generator", unsupportedRandomOp<const Tensor&, std::optional<Generator>, TENSOROPTIONS, std::optional<MemoryFormat>>);

   m.impl("randint_like", unsupportedRandomOp<const Tensor&, int64_t, TENSOROPTIONS, std::optional<MemoryFormat>>);
+  m.impl("randint_like.Tensor", unsupportedRandomOp<const Tensor&, const Tensor&, TENSOROPTIONS, std::optional<MemoryFormat>>);
   m.impl("randint_like.low_dtype", unsupportedRandomOp<const Tensor&, int64_t, int64_t, TENSOROPTIONS, std::optional<MemoryFormat>>);
+  m.impl("randint_like.generator", unsupportedRandomOp<const Tensor&, int64_t, std::optional<Generator>, TENSOROPTIONS, std::optional<MemoryFormat>>);
+  m.impl("randint_like.Tensor_generator", unsupportedRandomOp<const Tensor&, const Tensor&, std::optional<Generator>, TENSOROPTIONS, std::optional<MemoryFormat>>);
+  m.impl("randint_like.low_generator_dtype", unsupportedRandomOp<const Tensor&, int64_t, int64_t, std::optional<Generator>, TENSOROPTIONS, std::optional<MemoryFormat>>);

   m.impl("rand", unsupportedRandomOp<IntArrayRef, TENSOROPTIONS>);
   m.impl("rand.generator", unsupportedRandomOp<IntArrayRef, std::optional<Generator>, TENSOROPTIONS>);
@@ -190,12 +190,14 @@ class IListRef;
  * it to a function (e.g. `ImplT::<dispatch-function>(this_)`).
  */
 #define TORCH_ILISTREF_UNWRAP(TAG, BODY) \
+  C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-enum") \
   switch (TAG) { \
     TORCH_ILISTREF_FORALL_TAGS(TORCH_ILISTREF_UNWRAP_CASE, BODY) \
     break; \
     default: \
       TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); \
-  }
+  } \
+  C10_DIAGNOSTIC_POP()

 enum class IListRefTag {
 #define DEFINE_TAG(tag, ...) tag,
@@ -56,7 +56,7 @@ C10_HOST_DEVICE inline T uniform_int_full_range(V val) {
  * in this overloaded version
  */
 template <typename T, typename V>
-C10_HOST_DEVICE inline std::enable_if_t<!(std::is_floating_point_v<T>), T>uniform_int(V val) {
+C10_HOST_DEVICE inline std::enable_if_t<!std::is_floating_point_v<T>, T>uniform_int(V val) {
   if constexpr (std::is_same_v<T, bool>) {
     return static_cast<bool>(val & 1);
   } else if constexpr (std::is_same_v<T, int64_t>) {
@@ -114,25 +114,25 @@ inline typename remove_symint<T>::type unpackSymInt(T x) {
 }

 template <>
-inline typename remove_symint<c10::SymInt>::type unpackSymInt(c10::SymInt x) {
+inline remove_symint<c10::SymInt>::type unpackSymInt(c10::SymInt x) {
   return x.guard_int(__FILE__, __LINE__);
 }

 template <>
-inline typename remove_symint<c10::SymIntArrayRef>::type unpackSymInt(
+inline remove_symint<c10::SymIntArrayRef>::type unpackSymInt(
     c10::SymIntArrayRef x) {
   return C10_AS_INTARRAYREF_SLOW(x);
 }

 template <>
-inline typename remove_symint<std::optional<c10::SymInt>>::type unpackSymInt(
+inline remove_symint<std::optional<c10::SymInt>>::type unpackSymInt(
     std::optional<c10::SymInt> x) {
   return x.has_value() ? std::make_optional(x->guard_int(__FILE__, __LINE__))
                        : std::nullopt;
 }

 template <>
-inline typename remove_symint<at::OptionalSymIntArrayRef>::type unpackSymInt(
+inline remove_symint<at::OptionalSymIntArrayRef>::type unpackSymInt(
     at::OptionalSymIntArrayRef x) {
   return x.has_value() ? std::make_optional(C10_AS_INTARRAYREF_SLOW(*x))
                        : std::nullopt;
@@ -631,8 +631,8 @@ call_functor_with_args_from_stack_(
     Stack* stack,
     std::index_sequence<ivalue_arg_indices...> /*unused*/,
     guts::typelist::typelist<ArgTypes...>* /*unused*/) {
-  (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would
+  (void)stack; // when sizeof...(ivalue_arg_indices) == 0, this argument would
                // be unused and we have to silence the compiler warning.

   // We're explicitly filtering out DispatchKeySet from the argument list.
   // Some kernels take a DispatchKeySet as their first argument in order to
@@ -18,6 +18,7 @@ struct TORCH_API EnumType : public NamedType {
       TypePtr value,
       std::vector<EnumNameValue> enum_names_values,
       std::weak_ptr<::torch::jit::CompilationUnit> cu) {
+    C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-enum")
     switch (value->kind()) {
       case TypeKind::IntType:
       case TypeKind::FloatType:
@@ -34,6 +35,7 @@ struct TORCH_API EnumType : public NamedType {
             value->str(),
             "', only int, float and string are supported");
     }
+    C10_DIAGNOSTIC_POP()
   }

   std::string str() const override {
@@ -601,8 +601,8 @@ std::ostream& IValue::repr(
       double d = v.toDouble();
       int c = std::fpclassify(d);
       if ((c == FP_NORMAL || c == FP_ZERO ) && std::abs(d) < 1e10) {
-        int64_t i = int64_t(d);
-        if (double(i) == d) {
+        int64_t i = static_cast<int64_t>(d);
+        if (static_cast<double>(i) == d) {
           // -0.0 (signed zero) needs to be parsed as -0.
           if (i == 0 && std::signbit(d)) {
             return out << "-" << i << ".";
@@ -799,8 +799,8 @@ std::ostream& operator<<(std::ostream & out, const IValue & v) {
       double d = v.toDouble();
       int c = std::fpclassify(d);
       if (c == FP_NORMAL || c == FP_ZERO) {
-        int64_t i = int64_t(d);
-        if (static_cast<double>(i) == d) {
+        int64_t i = static_cast<int64_t>(d);
+        if (static_cast<double>(i) == d) {
           return out << i << ".";
         }
       }
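The formatting rule these two hunks touch is easier to see outside C++. The following is a pure-Python sketch of the same logic (illustrative only, not the actual IValue API): finite doubles with an exact integer value print with a trailing "." so they round-trip as floats, and signed zero keeps its minus sign.

```python
import math

def repr_like_double(d: float) -> str:
    # Exact-integer finite doubles get a trailing "."; -0.0 stays negative.
    if math.isfinite(d) and abs(d) < 1e10 and float(int(d)) == d:
        i = int(d)
        if i == 0 and math.copysign(1.0, d) < 0:
            return "-0."
        return f"{i}."
    return repr(d)

print(repr_like_double(3.0))   # 3.
print(repr_like_double(-0.0))  # -0.
print(repr_like_double(2.5))   # 2.5
```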
@@ -41,7 +41,7 @@ void standardizeVectorForUnion(std::vector<TypePtr>* to_flatten);
 inline bool is_contiguous_strides(
     const IntArrayRef sizes,
     const IntArrayRef strides) {
-  int n_dim = static_cast<int>(sizes.size());
+  size_t n_dim = sizes.size();
   if (n_dim == 0) {
     return true;
   }
@@ -50,7 +50,7 @@ inline bool is_contiguous_strides(
     return false;
   }

-  for (int i = n_dim - 2; i >= 0; i--) {
+  for (int i = static_cast<int>(n_dim) - 2; i >= 0; i--) {
     if (strides[i] != strides[i + 1] * sizes[i + 1]) {
       return false;
     }
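For context, the check being retyped here is the usual row-major contiguity test: each stride must equal the product of the sizes to its right. A Python sketch of the whole function follows; the innermost-stride condition between the two hunks is not shown in the diff, so the `strides[-1] != 1` line below is an assumption.

```python
def is_contiguous_strides(sizes, strides):
    if not sizes:
        return True
    if strides[-1] != 1:  # assumed innermost-dimension condition (elided in the diff)
        return False
    for i in range(len(sizes) - 2, -1, -1):
        if strides[i] != strides[i + 1] * sizes[i + 1]:
            return False
    return True

assert is_contiguous_strides([2, 3, 4], [12, 4, 1])      # row-major layout
assert not is_contiguous_strides([2, 3, 4], [1, 2, 6])   # column-major layout
```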
@ -922,6 +922,7 @@ struct TORCH_API DictType : public SharedType {
|
|||||||
if (auto dyn = key->castRaw<DynamicType>()) {
|
if (auto dyn = key->castRaw<DynamicType>()) {
|
||||||
kind = dyn->dynamicKind();
|
kind = dyn->dynamicKind();
|
||||||
}
|
}
|
||||||
|
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-enum")
|
||||||
switch (kind) {
|
switch (kind) {
|
||||||
case TypeKind::AnyType:
|
case TypeKind::AnyType:
|
||||||
case TypeKind::IntType:
|
case TypeKind::IntType:
|
||||||
@ -938,6 +939,7 @@ struct TORCH_API DictType : public SharedType {
|
|||||||
key->str(),
|
key->str(),
|
||||||
"', only int, float, complex, Tensor, device and string keys are supported");
|
"', only int, float, complex, Tensor, device and string keys are supported");
|
||||||
}
|
}
|
||||||
|
C10_DIAGNOSTIC_POP()
|
||||||
}
|
}
|
||||||
|
|
||||||
// aligned with the format in FunctionSchema
|
// aligned with the format in FunctionSchema
|
||||||
@ -2371,7 +2373,7 @@ private:
|
|||||||
};
|
};
|
||||||
|
|
||||||
template<>
|
template<>
|
||||||
inline typename detail::CastReturnType<NamedType>::type Type::cast<NamedType>() {
|
inline detail::CastReturnType<NamedType>::type Type::cast<NamedType>() {
|
||||||
if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType ||
|
if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType ||
|
||||||
kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) {
|
kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) {
|
||||||
return std::static_pointer_cast<NamedType>(static_cast<NamedType *>(this)->shared_from_this());
|
return std::static_pointer_cast<NamedType>(static_cast<NamedType *>(this)->shared_from_this());
|
||||||
@ -2380,7 +2382,7 @@ inline typename detail::CastReturnType<NamedType>::type Type::cast<NamedType>()
|
|||||||
}
|
}
|
||||||
|
|
||||||
template<>
|
template<>
|
||||||
inline typename detail::CastConstReturnType<NamedType>::type Type::cast<NamedType>() const {
|
inline detail::CastConstReturnType<NamedType>::type Type::cast<NamedType>() const {
|
||||||
if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType ||
|
if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType ||
|
||||||
kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) {
|
kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) {
|
||||||
return std::static_pointer_cast<const NamedType>(static_cast<const NamedType *>(this)->shared_from_this());
|
return std::static_pointer_cast<const NamedType>(static_cast<const NamedType *>(this)->shared_from_this());
|
||||||
|
|||||||
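The last two hunks drop a `typename` that is redundant in these full specializations: `detail::CastReturnType<NamedType>::type` names a concrete type, so no disambiguation is needed. As a related aside, since C++20 `typename` may also be omitted in several dependent contexts, such as the return type of a function declared at namespace scope. A small sketch, assuming a C++20 compiler (my own illustration, not code from this patch):

```cpp
#include <vector>

// Both function declarations below are equivalent; C++20 lets the second one
// omit `typename` because the return-type position can only name a type.
template <typename T>
struct Traits {
  using type = typename std::vector<T>::value_type;
};

template <typename T>
typename Traits<T>::type first_pre20(const std::vector<T>& v) { return v.front(); }

template <typename T>
Traits<T>::type first_cpp20(const std::vector<T>& v) { return v.front(); }  // OK since C++20

int main() {
  std::vector<int> v{1, 2, 3};
  return first_pre20(v) - first_cpp20(v);  // 0
}
```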
@@ -191,22 +191,37 @@ inline void convert(const at::Half* src, bool* dst, int64_t n) {
}

#endif
-#ifdef __ARM_FEATURE_BF16
-CONVERT_TEMPLATE(bfloat16_t, uint8_t)
-CONVERT_TEMPLATE(bfloat16_t, int8_t)
-CONVERT_TEMPLATE(bfloat16_t, int16_t)
-CONVERT_TEMPLATE(bfloat16_t, int32_t)
-CONVERT_TEMPLATE(bfloat16_t, int64_t)
-CONVERT_TEMPLATE(bfloat16_t, bfloat16_t)
-CONVERT_TEMPLATE(bfloat16_t, float)
-CONVERT_TEMPLATE(bfloat16_t, double)
-CONVERT_TEMPLATE(uint8_t, bfloat16_t)
-CONVERT_TEMPLATE(int8_t, bfloat16_t)
-CONVERT_TEMPLATE(int16_t, bfloat16_t)
-CONVERT_TEMPLATE(int32_t, bfloat16_t)
-CONVERT_TEMPLATE(int64_t, bfloat16_t)
-CONVERT_TEMPLATE(float, bfloat16_t)
-CONVERT_TEMPLATE(double, bfloat16_t)
+template <typename to_type>
+inline void convertFromBf16Impl(
+const c10::BFloat16* __restrict src,
+to_type* __restrict dst,
+int64_t n) {
+const uint16_t* srcPtr = reinterpret_cast<const uint16_t*>(src);
+uint64_t len = static_cast<uint64_t>(n);
+for (uint64_t i = 0; i < len; i++) {
+uint32_t tmp = static_cast<uint32_t>(srcPtr[i]) << 16;
+float tmpF;
+__builtin_memcpy(&tmpF, &tmp, sizeof(float));
+dst[i] = static_cast<to_type>(tmpF);
+}
+}
+#define CONVERT_FROM_BF16_TEMPLATE(to_type) \
+template <> \
+inline void convert(const c10::BFloat16* src, to_type* dst, int64_t n) { \
+return convertFromBf16Impl<to_type>(src, dst, n); \
+}
+
+CONVERT_FROM_BF16_TEMPLATE(uint8_t)
+CONVERT_FROM_BF16_TEMPLATE(int8_t)
+CONVERT_FROM_BF16_TEMPLATE(int16_t)
+CONVERT_FROM_BF16_TEMPLATE(int32_t)
+CONVERT_FROM_BF16_TEMPLATE(int64_t)
+CONVERT_FROM_BF16_TEMPLATE(float)
+CONVERT_FROM_BF16_TEMPLATE(double)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+CONVERT_FROM_BF16_TEMPLATE(float16_t)
+#endif

inline void convertBoolToBfloat16Impl(
const bool* __restrict src,

@@ -247,8 +262,6 @@ inline void convert(const c10::BFloat16* src, bool* dst, int64_t n) {

#endif

-#endif
-
template <typename src_t>
struct VecConvert<
float,
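The new `convertFromBf16Impl` widens each bfloat16 element to float by placing its 16 raw bits in the upper half of a 32-bit word, which is exact because bfloat16 is simply the top half of an IEEE-754 binary32. A standalone sketch of that bit trick, independent of the ATen types above:

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

// bf16 -> float widening: shift the 16 raw bits into the top of a uint32_t and
// reinterpret as float; no rounding is involved.
float bf16_bits_to_float(uint16_t bits) {
  uint32_t widened = static_cast<uint32_t>(bits) << 16;
  float out;
  std::memcpy(&out, &widened, sizeof(out));  // well-defined type punning
  return out;
}

int main() {
  // 0x3F80 is the bfloat16 encoding of 1.0f (sign 0, exponent 127, mantissa 0).
  std::printf("%f\n", bf16_bits_to_float(0x3F80));  // 1.000000
  // 0xC040 encodes -3.0f.
  std::printf("%f\n", bf16_bits_to_float(0xC040));  // -3.000000
}
```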
@@ -514,7 +514,7 @@ struct Vectorized<c10::qint8> : public Vectorizedqi {

using float_vec_return_type = std::array<Vectorized<float>, kFloatNumVecs>;
using int_vec_return_type = std::array<Vectorized<c10::qint32>, kIntNumVecs>;
-using value_type = typename c10::qint8::underlying;
+using value_type = c10::qint8::underlying;

public:
using Vectorizedqi::Vectorizedqi;

@@ -727,7 +727,7 @@ struct Vectorized<c10::quint8> : public Vectorizedqi {

using float_vec_return_type = std::array<Vectorized<float>, kFloatNumVecs>;
using int_vec_return_type = std::array<Vectorized<c10::qint32>, kIntNumVecs>;
-using value_type = typename c10::quint8::underlying;
+using value_type = c10::quint8::underlying;

public:
using Vectorizedqi::Vectorizedqi;

@@ -567,7 +567,7 @@ struct Vectorized<c10::qint8> : public Vectorizedqi {

using float_vec_return_type = std::array<Vectorized<float>, 4>;
using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
-using value_type = typename c10::qint8::underlying;
+using value_type = c10::qint8::underlying;

public:
using Vectorizedqi::Vectorizedqi;

@@ -804,7 +804,7 @@ struct Vectorized<c10::quint8> : public Vectorizedqi {

using float_vec_return_type = std::array<Vectorized<float>, 4>;
using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
-using value_type = typename c10::quint8::underlying;
+using value_type = c10::quint8::underlying;

public:
using Vectorizedqi::Vectorizedqi;

@@ -672,7 +672,7 @@ struct Vectorized {
return map(std::sqrt);
}
Vectorized<T> reciprocal() const {
-return map([](T x) { return (T)(1) / x; });
+return map([](T x) { return (T)1 / x; });
}
Vectorized<T> rsqrt() const {
return map([](T x) { return (T)1 / std::sqrt(x); });

@@ -46,7 +46,7 @@ inline void vrsqrt(scalar_t* out, scalar_t* in, int64_t size) {
parallel_for(0, size, 2048, [out, in](int64_t begin, int64_t end) {
map(
[](const Vectorized<scalar_t>& x) {
-return Vectorized<scalar_t>((scalar_t)(1)) / x.sqrt();
+return Vectorized<scalar_t>((scalar_t)1) / x.sqrt();
},
out + begin,
in + begin,

@@ -194,8 +194,8 @@ void CUDAGeneratorState::unregister_graph(cuda::CUDAGraph* graph) {
void CUDAGeneratorState::capture_prologue() {
capturing_ = true;
offset_intragraph_ = 0;
-seed_extragraph_.fill_(int64_t(seed_));
-offset_extragraph_.fill_(int64_t(0));
+seed_extragraph_.fill_(static_cast<int64_t>(seed_));
+offset_extragraph_.fill_(0);
}

/**

@@ -216,8 +216,8 @@ void CUDAGeneratorState::replay_prologue(uint64_t wholegraph_increment) {
at::cuda::assertNotCapturing(
"Cannot prepare for replay during capturing stage.");
if (wholegraph_increment) {
-seed_extragraph_.fill_(int64_t(seed_));
-offset_extragraph_.fill_(int64_t(philox_offset_per_thread_));
+seed_extragraph_.fill_(static_cast<int64_t>(seed_));
+offset_extragraph_.fill_(static_cast<int64_t>(philox_offset_per_thread_));
// Applies the total increment achieved during previous captures to update the
// offset.
increase(wholegraph_increment);

@@ -329,7 +329,7 @@ c10::intrusive_ptr<c10::TensorImpl> CUDAGeneratorImpl::get_state() const {
constexpr size_t offset_size = sizeof(int64_t);
constexpr size_t total_size = seed_size + offset_size;

-auto state_tensor = at::detail::empty_cpu({(int64_t)total_size}, ScalarType::Byte, std::nullopt, std::nullopt, std::nullopt, std::nullopt);
+auto state_tensor = at::detail::empty_cpu({static_cast<int64_t>(total_size)}, ScalarType::Byte, std::nullopt, std::nullopt, std::nullopt, std::nullopt);
auto rng_state = state_tensor.data_ptr<uint8_t>();
auto current_seed = this->current_seed();
auto offset = static_cast<int64_t>(this->philox_offset_per_thread()); // Note that old THCGeneratorState had offset as std::atomic<int64_t>
@@ -1,6 +1,6 @@
#include <ATen/cuda/CUDAGreenContext.h>

-#if defined(CUDA_VERSION) && !defined(USE_ROCM) && defined(PYTORCH_C10_DRIVER_API_SUPPORTED)
+#if defined(CUDA_VERSION) && (CUDA_VERSION >= 12030) && !defined(USE_ROCM) && defined(PYTORCH_C10_DRIVER_API_SUPPORTED)
#include <c10/cuda/driver_api.h>
#include <stdexcept>
#include <vector>

@@ -155,8 +155,8 @@ size_t parseChosenWorkspaceSize() {
while (next != end) {
std::smatch match = *next;
TORCH_CHECK(match.size() == 3, "Expected CUBLAS_WORKSPACE_SPACE_CONFIG match of size 3 (Format :SIZE:COUNT)");
-size_t curr_size = (size_t) std::stoi(match.str(1));
-size_t count = (size_t) std::stoi(match.str(2));
+size_t curr_size = std::stoull(match.str(1));
+size_t count = std::stoull(match.str(2));
total_size += curr_size * 1024 * count;
next++;
}
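The switch from `std::stoi` to `std::stoull` matters because `stoi` parses into an `int` and throws `std::out_of_range` for values above roughly 2.1e9 even though the result is stored in a `size_t`, while `stoull` covers the full unsigned 64-bit range. A minimal sketch; `parse_workspace_entry` is a hypothetical helper, not part of the real cuBLAS workspace parser:

```cpp
#include <cstdio>
#include <string>

// Parse one ":SIZE:COUNT"-style entry (size in KiB) into a byte count using
// std::stoull, which does not overflow for large size_t-range inputs.
size_t parse_workspace_entry(const std::string& size_kib, const std::string& count) {
  size_t curr_size = std::stoull(size_kib);  // e.g. "4294967296" parses fine
  size_t n = std::stoull(count);
  return curr_size * 1024 * n;               // bytes contributed by this entry
}

int main() {
  std::printf("%zu\n", parse_workspace_entry("4096", "2"));  // 8388608
}
```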
@@ -2,8 +2,6 @@
#include <ATen/Tensor.h>
#include <ATen/cuda/Exceptions.h>

-#include <mutex>
-
namespace at {
namespace cuda {
namespace detail {

@@ -12,39 +10,36 @@ __device__ __constant__ float cublas_one_device;
__device__ __constant__ float cublas_zero_device;

float *get_cublas_device_one() {
-static c10::once_flag init_flag;
+static float *ptr = nullptr;
+static auto init_flag = [&]() {
-c10::call_once(init_flag, []() {
const float one = 1.f;
AT_CUDA_CHECK(cudaMemcpyToSymbol(cublas_one_device, &one, sizeof(float)));
-});
+AT_CUDA_CHECK(cudaGetSymbolAddress(reinterpret_cast<void**>(&ptr), cublas_one_device));
+return true;
+}();

-float *ptr;
-AT_CUDA_CHECK(cudaGetSymbolAddress(reinterpret_cast<void**>(&ptr), cublas_one_device));
return ptr;
}

float *get_cublas_device_zero() {
-static c10::once_flag init_flag;
+static float *ptr = nullptr;
+static auto init_flag = [&]() {
-c10::call_once(init_flag, []() {
const float zero = 0.f;
AT_CUDA_CHECK(cudaMemcpyToSymbol(cublas_zero_device, &zero, sizeof(float)));
-});
+AT_CUDA_CHECK(cudaGetSymbolAddress(reinterpret_cast<void**>(&ptr), cublas_zero_device));
+return true;
+}();

-float *ptr;
-AT_CUDA_CHECK(cudaGetSymbolAddress(reinterpret_cast<void**>(&ptr), cublas_zero_device));
return ptr;
}

float *get_user_alpha_ptr() {
static float *alpha_ptr;

-static c10::once_flag init_flag;
+static bool init_flag [[maybe_unused]] = []() {

-c10::call_once(init_flag, []() {
AT_CUDA_CHECK(cudaMalloc(&alpha_ptr, sizeof(float)));
-});
+return true;
+}();

return alpha_ptr;
}
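This hunk trades `c10::once_flag`/`call_once` for a function-local static initialized by an immediately-invoked lambda. Since C++11 the initialization of a local static is guaranteed to be thread-safe and to happen exactly once, so the lambda body runs once on first call. A generic sketch of the two equivalent idioms (standard-library names only, not the ATen/CUDA code above):

```cpp
#include <cstdio>
#include <mutex>

// "Run this exactly once" two ways. The refactor above moves from the explicit
// once_flag form to the static-lambda form.
int& counter_with_once_flag() {
  static int value = 0;
  static std::once_flag flag;
  std::call_once(flag, [] { value = 42; });   // explicit flag + call_once
  return value;
}

int& counter_with_static_lambda() {
  static int value = 0;
  static bool initialized [[maybe_unused]] = [] {
    value = 42;   // runs once, on first call, with the same thread-safety guarantee
    return true;
  }();
  return value;
}

int main() {
  std::printf("%d %d\n", counter_with_once_flag(), counter_with_static_lambda());
}
```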
@@ -3,6 +3,7 @@
#include <ATen/ATen.h>
#include <c10/util/irange.h>

+#include <array>
#include <iostream>
#include <sstream>


@@ -136,9 +137,9 @@ void FilterDescriptor::set(const at::Tensor &t, const at::MemoryFormat memory_fo
"Weight strides: ", t.strides(), "\n",
"cuDNN suggested memory_format: ", memory_format);

-int size[CUDNN_DIM_MAX];
+std::array<int, CUDNN_DIM_MAX> size;
for (const auto i : c10::irange(dim)) {
-size[i] = (int) t.size(i);
+size[i] = static_cast<int>(t.size(i));
}
for (const auto i : c10::irange(dim, pad)) {
size[i] = 1;

@@ -156,7 +157,7 @@ void FilterDescriptor::set(const at::Tensor &t, const at::MemoryFormat memory_fo
default:
TORCH_INTERNAL_ASSERT(false, "unsupported memory_format for cuDNN filters");
}
-set(getDataType(t), static_cast<int>(dim), size, filter_format);
+set(getDataType(t), static_cast<int>(dim), size.data(), filter_format);
}

std::string cudnnMemoryFormatToString(cudnnTensorFormat_t tformat) {
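The `std::array` change keeps value semantics and a known size while still handing a raw `int*` to the C-style descriptor call via `.data()`. A small sketch under stated assumptions; `fill_descriptor` and `kMaxDims` are stand-ins for the cuDNN call and `CUDNN_DIM_MAX`, not real API names:

```cpp
#include <array>
#include <cstdio>

constexpr int kMaxDims = 8;  // assumption; CUDNN_DIM_MAX plays this role above

// Stand-in for a C API that expects a raw int pointer plus a length.
void fill_descriptor(int ndim, const int* sizes) {
  for (int i = 0; i < ndim; ++i) {
    std::printf("dim %d -> %d\n", i, sizes[i]);
  }
}

int main() {
  std::array<int, kMaxDims> size{};   // value-initialized, unlike `int size[kMaxDims];`
  const int dims[] = {2, 3, 5};
  const int ndim = 3;
  for (int i = 0; i < ndim; ++i) {
    size[i] = dims[i];
  }
  for (int i = ndim; i < kMaxDims; ++i) {
    size[i] = 1;                      // pad trailing dims, mirroring the code above
  }
  fill_descriptor(kMaxDims, size.data());
}
```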
@@ -9,8 +9,8 @@

#include <c10/core/Allocator.h>

-#include <c10/util/python_stub.h>
#include <ATen/detail/AcceleratorHooksInterface.h>
+#include <c10/util/python_stub.h>

#include <string>
namespace at {

@@ -26,8 +26,7 @@ constexpr const char* MTIA_HELP =
struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
// this fails the implementation if MTIAHooks functions are called, but
// MTIA backend is not present.
-#define FAIL_MTIAHOOKS_FUNC(func) \
-TORCH_CHECK(false, "Cannot execute ", func, "() without MTIA backend.");
+#define FAIL_MTIAHOOKS_FUNC(func) TORCH_CHECK(false, "Cannot execute ", func, "() without MTIA backend.");

~MTIAHooksInterface() override = default;


@@ -92,7 +91,7 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
return c10::Stream::unpack3(-1, 0, c10::DeviceType::MTIA);
}

-virtual void setCurrentStream(const c10::Stream& /*stream*/ ) const {
+virtual void setCurrentStream(const c10::Stream& /*stream*/) const {
FAIL_MTIAHOOKS_FUNC(__func__);
}


@@ -124,11 +123,9 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
FAIL_MTIAHOOKS_FUNC(__func__);
}

-virtual void recordMemoryHistory(
-const std::optional<std::string>& /*enabled*/,
-const std::string& /*stacks*/,
-size_t /*max_entries*/) const {
+virtual void recordMemoryHistory(const std::optional<std::string>& /*enabled*/,
+const std::string& /*stacks*/,
+size_t /*max_entries*/) const {
FAIL_MTIAHOOKS_FUNC(__func__);
}


@@ -159,6 +156,10 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
return -1;
}

+virtual void mtiagraphDestroy(int64_t handle) const {
+FAIL_MTIAHOOKS_FUNC(__func__);
+}
+
virtual void mtiagraphCaptureBegin(int64_t handle, MempoolId_t pool) const {
FAIL_MTIAHOOKS_FUNC(__func__);
}

@@ -187,8 +188,7 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
struct TORCH_API MTIAHooksArgs {};

TORCH_DECLARE_REGISTRY(MTIAHooksRegistry, MTIAHooksInterface, MTIAHooksArgs);
-#define REGISTER_MTIA_HOOKS(clsname) \
-C10_REGISTER_CLASS(MTIAHooksRegistry, clsname, clsname)
+#define REGISTER_MTIA_HOOKS(clsname) C10_REGISTER_CLASS(MTIAHooksRegistry, clsname, clsname)

namespace detail {
TORCH_API const MTIAHooksInterface& getMTIAHooks();
@@ -198,7 +198,7 @@ static void autogradBasedTransformSendToNext(
}

// Step 6
-stack->erase(stack->end() - std::ptrdiff_t(args_size + ret_size), stack->end() - std::ptrdiff_t(ret_size));
+stack->erase(stack->end() - static_cast<std::ptrdiff_t>(args_size + ret_size), stack->end() - static_cast<std::ptrdiff_t>(ret_size));
}

void GradInterpreterPtr::processImpl(

@@ -443,14 +443,14 @@ static bool has_same_shape(
if (!tensor.defined()) {
return true;
}
-if (rankWithoutBatchDim(tensor, tensor_bdim) != (int64_t) normalized_shape.size()) {
+if (rankWithoutBatchDim(tensor, tensor_bdim) != static_cast<int64_t>(normalized_shape.size())) {
return false;
}
const auto tensor_shape = tensor.sizes();
for (const auto i : c10::irange(normalized_shape.size())) {
auto j = i;
// (0, 1, 2), 1 -> (0, 2, 3)
-if (tensor_bdim.has_value() && (int64_t)i >= tensor_bdim.value()) {
+if (tensor_bdim.has_value() && static_cast<int64_t>(i) >= tensor_bdim.value()) {
j = j + 1;
}
if (normalized_shape[i] != tensor_shape[j]) {

@@ -135,7 +135,7 @@ static void boxed_reduction_batch_rule(const c10::OperatorHandle& op, torch::jit
reduction_case = ReductionCase::DimArray;
dims = arguments[dim_arg_pos].toIntList().vec();
if (dims.empty()) {
-auto all_dims = range(0, std::max((int64_t)1, logical_dim));
+auto all_dims = range(0, std::max(static_cast<int64_t>(1), logical_dim));
dims = std::vector<int64_t>(all_dims.begin(), all_dims.end());
}
} else if (arguments[dim_arg_pos].isInt()) {

@@ -432,7 +432,7 @@ namespace {
// Eg. Given `indexed_shape.size()` is 5 and
// shape of `values` is (N, 2, 3), then following block
// will reshape `values` to (N, 1, 1, 2, 3).
-if ( (int64_t) indexed_shape.size() > values_.dim()) {
+if ( static_cast<int64_t>(indexed_shape.size()) > values_.dim()) {
auto values_sizes = values_.sym_sizes();

// number of unit dims (for broadcasting value to indexed_shape)

@@ -109,7 +109,7 @@ std::tuple<Tensor, std::optional<int64_t>> repeat_batch_rule(
SymDimVector sizes_with_bdim = { sizes.begin(), sizes.end() };
sizes_with_bdim.insert(sizes_with_bdim.begin(), 1);
auto self_ = moveBatchDimToFront(self, self_bdim);
-while (self_.dim() < (int64_t)sizes_with_bdim.size()) {
+while (self_.dim() < static_cast<int64_t>(sizes_with_bdim.size())) {
self_ = self_.unsqueeze(1);
}
return std::make_tuple(self_.repeat_symint(sizes_with_bdim), 0);

@@ -191,7 +191,7 @@ static void batchedTensorInplaceForLoopFallback(const c10::OperatorHandle& op, t
// simplicity. When that is not the case, this code should be updated.
const auto& argument = (*stack)[arguments_begin + arg_idx];
if (batched_tensor_inputs_pos_iter == batched_tensor_inputs_position.end()
-|| (int64_t)arg_idx != *batched_tensor_inputs_pos_iter) {
+|| static_cast<int64_t>(arg_idx) != *batched_tensor_inputs_pos_iter) {
// argument isn't a BatchedTensor
torch::jit::push(stack, argument);
continue;

@@ -345,7 +345,7 @@ void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Sta
// simplicity. When that is not the case, this code should be updated.
const auto& argument = (*stack)[arguments_begin + arg_idx];
if (batched_tensor_inputs_pos_iter == batched_tensor_inputs_position.end()
-|| (int64_t)arg_idx != *batched_tensor_inputs_pos_iter) {
+|| static_cast<int64_t>(arg_idx) != *batched_tensor_inputs_pos_iter) {
// argument isn't a BatchedTensor
torch::jit::push(stack, argument);
continue;

@@ -473,7 +473,7 @@ void batchedNestedTensorForLoopFallback(const c10::OperatorHandle& op, torch::ji
// simplicity. When that is not the case, this code should be updated.
const auto& argument = (*stack)[arguments_begin + arg_idx];
if (batched_tensor_inputs_pos_iter == batched_tensor_inputs_position.end()
-|| (int64_t)arg_idx != *batched_tensor_inputs_pos_iter) {
+|| static_cast<int64_t>(arg_idx) != *batched_tensor_inputs_pos_iter) {
// argument isn't a BatchedTensor
torch::jit::push(stack, argument);
continue;

@@ -157,7 +157,7 @@ Tensor& squeeze__batching_rule(Tensor& self) {
const auto physical_shape = batched->value().sizes();
auto how_many_dims_of_size_1_before_bdim = 0;
for (const auto i : c10::irange(0, physical_shape.size())) {
-if ((int64_t)i == bdim) {
+if (static_cast<int64_t>(i) == bdim) {
break;
}
if (physical_shape[i] == 1) {

@@ -573,7 +573,7 @@ Tensor cat_batching_rule(const ITensorListRef& tensors, int64_t dim) {
}

auto new_dim = bdim_size.has_value() ? dim + 1 : dim;
-std::optional<int64_t> new_bdim = bdim_size.has_value() ? std::make_optional((int64_t)0) : std::nullopt;
+std::optional<int64_t> new_bdim = bdim_size.has_value() ? std::make_optional(static_cast<int64_t>(0)) : std::nullopt;
auto result = at::cat(tensors_to_cat, new_dim);
return makeBatched(result, new_bdim, get_current_level());
}
@@ -1,7 +1,5 @@
// Copyright © 2022 Apple Inc.

-#include <c10/util/CallOnce.h>
-
#include <ATen/mps/IndexKernels.h>
#include <ATen/mps/MPSAllocatorInterface.h>
#include <ATen/mps/MPSDevice.h>

@@ -10,9 +8,6 @@

namespace at::mps {

-static std::unique_ptr<MPSDevice> mps_device;
-static c10::once_flag mpsdev_init;
-
static inline MTLLanguageVersion getMetalLanguageVersion(const id<MTLDevice>& device) {
// MPS Advanced Indexing needs at least Metal 2.0 (support for Argument Buffers and function constants)
// host_name attribute needs at least Metal 2.2 and ulong needs Metal 2.3 (supported on MacOS 11+

@@ -21,8 +16,8 @@ static inline MTLLanguageVersion getMetalLanguageVersion(const id<MTLDevice>& de
}

MPSDevice* MPSDevice::getInstance() {
-c10::call_once(mpsdev_init, [] { mps_device = std::unique_ptr<MPSDevice>(new MPSDevice()); });
-return mps_device.get();
+static MPSDevice mps_device;
+return &mps_device;
}

MPSDevice::~MPSDevice() {
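Here the file-scope `unique_ptr` plus `once_flag` pair is replaced by a function-local static, the classic Meyers singleton: construction is lazy, thread-safe since C++11, and the object is destroyed at program exit. A generic sketch of the idiom with a stand-in `Device` class (not the actual MPSDevice code):

```cpp
#include <cstdio>

// Meyers singleton: the local static is constructed exactly once, on first
// call, with thread-safe initialization guaranteed by the language.
class Device {
 public:
  static Device* getInstance() {
    static Device instance;   // no unique_ptr or once_flag bookkeeping needed
    return &instance;
  }
  Device(const Device&) = delete;
  Device& operator=(const Device&) = delete;

 private:
  Device() { std::printf("Device constructed\n"); }
  ~Device() { std::printf("Device destroyed\n"); }
};

int main() {
  Device* a = Device::getInstance();
  Device* b = Device::getInstance();  // same object, no second construction
  std::printf("same instance: %d\n", a == b);
}
```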
@@ -198,9 +198,9 @@ void avg_pool3d_out_frame(
int64_t hend = std::min(hstart + kH, iheight + padH);
int64_t wend = std::min(wstart + kW, iwidth + padW);
int64_t pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
-tstart = std::max(tstart, (int64_t) 0);
-hstart = std::max(hstart, (int64_t) 0);
-wstart = std::max(wstart, (int64_t) 0);
+tstart = std::max(tstart, static_cast<int64_t>(0));
+hstart = std::max(hstart, static_cast<int64_t>(0));
+wstart = std::max(wstart, static_cast<int64_t>(0));
tend = std::min(tend, itime);
hend = std::min(hend, iheight);
wend = std::min(wend, iwidth);

@@ -377,9 +377,9 @@ void avg_pool3d_backward_out_frame(
int64_t hend = std::min(hstart + kH, iheight + padH);
int64_t wend = std::min(wstart + kW, iwidth + padW);
int64_t pool_size = (tend -tstart) * (hend - hstart) * (wend - wstart);
-tstart = std::max(tstart, (int64_t) 0);
-hstart = std::max(hstart, (int64_t) 0);
-wstart = std::max(wstart, (int64_t) 0);
+tstart = std::max(tstart, static_cast<int64_t>(0));
+hstart = std::max(hstart, static_cast<int64_t>(0));
+wstart = std::max(wstart, static_cast<int64_t>(0));
tend = std::min(tend, itime);
hend = std::min(hend, iheight);
wend = std::min(wend, iwidth);
@@ -2917,9 +2917,7 @@ static Tensor& linalg_eig_make_complex_eigenvectors(Tensor& complex_vectors, con
DEFINE_DISPATCH(linalg_eig_stub);

static std::tuple<Tensor&, Tensor&> linalg_eig_out_info(const Tensor& input, Tensor& values, Tensor& vectors, Tensor& infos, bool compute_eigenvectors) {
-// MAGMA doesn't have GPU interface for GEEV routine, it requires inputs to be on CPU
-// therefore we create all intermediate tensors on CPU
-auto options = input.options().device(at::kCPU);
+auto options = input.options();

// These internal asserts make explicit the assumptions in the implementation
// Error check with the actual error messages are done on the higher level of the hierarchy of calls

@@ -2928,16 +2926,13 @@ static std::tuple<Tensor&, Tensor&> linalg_eig_out_info(const Tensor& input, Ten

// for real-valued 'input', eigenvalues can be real-valued or complex-valued
TORCH_INTERNAL_ASSERT_DEBUG_ONLY((toComplexType(input.scalar_type()) == values.scalar_type()) || (input.scalar_type() == values.scalar_type()));
-TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU);

// for real-valued 'input', eigenvectors can be real-valued or complex-valued
if (compute_eigenvectors) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY((toComplexType(input.scalar_type()) == vectors.scalar_type()) || (input.scalar_type() == vectors.scalar_type()));
-TORCH_INTERNAL_ASSERT_DEBUG_ONLY(vectors.device() == at::kCPU);
}

TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.scalar_type() == at::kInt);
-TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.numel() == std::max<int64_t>(1, batchCount(input)));
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.is_contiguous());


@@ -2986,15 +2981,7 @@ static std::tuple<Tensor&, Tensor&> linalg_eig_out_info(const Tensor& input, Ten
}
}

-// MAGMA uses a hybrid CPU-GPU algorithm that performs well only for large matrices
-// See: https://github.com/pytorch/pytorch/pull/52491#issuecomment-795685687
-// Here we call CPU path for matrices smaller than 2048x2048
-// that should be in general significantly faster than calling MAGMA
-if (input.size(-1) <= 2048) {
-linalg_eig_stub(at::kCPU, real_imag_values, maybe_complex_vectors, infos, input.to(kCPU), compute_eigenvectors);
-} else {
-linalg_eig_stub(input.device().type(), real_imag_values, maybe_complex_vectors, infos, input, compute_eigenvectors);
-}
+linalg_eig_stub(input.device().type(), real_imag_values, maybe_complex_vectors, infos, input, compute_eigenvectors);

// if input is not complex we need to do some post-processing
if (!input.is_complex()) {

@@ -3019,7 +3006,14 @@ static std::tuple<Tensor&, Tensor&> linalg_eig_out_info(const Tensor& input, Ten
}
if (compute_eigenvectors) {
if (vectors.is_complex()) {
-vectors = linalg_eig_make_complex_eigenvectors(vectors, values, maybe_complex_vectors);
+// We move to the CPU because linalg_eig_make_complex_eigenvectors requires it.
+// Performance note: this function could be implemented via a TensorIterator,
+// which would avoid an explicit host-device synchronization.
+auto vectors_cpu = vectors.cpu();
+auto values_cpu = values.cpu();
+auto maybe_complex_vectors_cpu = maybe_complex_vectors.cpu();
+vectors_cpu = linalg_eig_make_complex_eigenvectors(vectors_cpu, values_cpu, maybe_complex_vectors_cpu);
+vectors.copy_(vectors_cpu);
} else {
TORCH_CHECK(false, "torch.linalg.eig: imaginary part of eigenvectors is non-zero, can't safely cast eigenvectors to non-complex dtype.")
}

@@ -3039,8 +3033,7 @@ std::tuple<Tensor&, Tensor&> linalg_eig_out(const Tensor& input, Tensor& values,
checkSameDevice("torch.linalg.eig", values, input, "eigenvalues");
checkSameDevice("torch.linalg.eig", vectors, input, "eigenvectors");

-// MAGMA doesn't have GPU interface for GEEV routine, it requires inputs to be on CPU
-auto options = input.options().device(at::kCPU);
+auto options = input.options();
auto infos = at::zeros({std::max<int64_t>(1, batchCount(input))}, options.dtype(kInt));

// if result is not empty and not in batched column major format we have to allocate a temporary tensor

@@ -3129,8 +3122,7 @@ Tensor& linalg_eigvals_out(const Tensor& input, Tensor& values) {
checkLinalgCompatibleDtype("torch.linalg.eigvals", values.scalar_type(), toComplexType(input.scalar_type()), "eigenvalues");
checkSameDevice("torch.linalg.eigvals", values, input, "eigenvalues");

-// MAGMA doesn't have GPU interface for GEEV routine, it requires inputs to be on CPU
-auto options = input.options().device(at::kCPU);
+auto options = input.options();
auto infos = at::zeros({std::max<int64_t>(1, batchCount(input))}, options.dtype(kInt));

bool values_expected_type = (values.scalar_type() == toComplexType(input.scalar_type()));

@@ -3159,6 +3151,7 @@ Tensor& linalg_eigvals_out(const Tensor& input, Tensor& values) {
}

Tensor vectors;
+vectors = at::empty({0}, input.options());
if (values_tmp_needed) {
Tensor values_tmp = at::empty({0}, options.dtype(values_type));
std::tie(values_tmp, std::ignore) = linalg_eig_out_info(input, values_tmp, vectors, infos, /*compute_eigenvectors=*/false);
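These hunks keep the intermediate tensors on the input's own device and only round-trip through the CPU at the one point where the complex-eigenvector helper requires CPU memory. A hedged ATen sketch of that copy-to-CPU / compute / copy-back pattern; `process_on_cpu` is a hypothetical stand-in for a CPU-only helper, not a PyTorch function:

```cpp
#include <ATen/ATen.h>

// Placeholder for a routine that only supports CPU tensors.
at::Tensor process_on_cpu(const at::Tensor& t) {
  return t * 2;  // imagine a CPU-only kernel here
}

// Keep `result` on its original device; move a copy to the CPU for the
// CPU-only step, then write the processed data back in place.
void apply_cpu_only_helper(at::Tensor& result) {
  auto result_cpu = result.cpu();           // no copy if already on CPU
  result_cpu = process_on_cpu(result_cpu);  // run the CPU-only step
  result.copy_(result_cpu);                 // copy back to the original device
}
```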
@@ -946,10 +946,10 @@ void apply_lu_factor(const Tensor& input, const Tensor& pivots, const Tensor& in
}
};
// avoid overflow
-float matrix_rank = float(std::min(m, n));
+auto matrix_rank = std::min(m, n);
// A heuristic tested on a 32 core/socket ICX system
// https://github.com/pytorch/pytorch/pull/93037#discussion_r1090112948
-int64_t chunk_size_per_thread = int64_t(
+int64_t chunk_size_per_thread = static_cast<int64_t>(
std::min(1.0, 3200.0 / (matrix_rank * matrix_rank * matrix_rank)));
int64_t grain_size = chunk_size_per_thread * at::get_num_threads();
at::parallel_for(0, batch_size, grain_size, loop);

@@ -267,7 +267,7 @@ _scaled_mm_out_cpu_emulated(const Tensor& mat1, const Tensor& mat2,

float input_scale = scale_a.item<float>();
float weight_scale = scale_b.item<float>();
-float output_scale = float(1.0);
+float output_scale = 1.0f;
if (scale_result.has_value() &&
(*out_dtype == ScalarType::Float8_e4m3fn ||
*out_dtype == ScalarType::Float8_e5m2)) {

@@ -331,7 +331,7 @@ bool gemv_use_fast_path<double>(
[[maybe_unused]] double beta,
int64_t incy) {
return gemv_use_fast_path<float>(
-trans, m, n, (float)alpha, lda, incx, (float)beta, incy);
+trans, m, n, static_cast<float>(alpha), lda, incx, static_cast<float>(beta), incy);
}

template <>

@@ -523,8 +523,8 @@ static inline void scal(int64_t n, scalar_t a, scalar_t *x, int64_t incx)
if (n == 1) incx = 1;
#if AT_BUILD_WITH_BLAS()
if (blas_impl::scal_use_fast_path<scalar_t>(n, incx)) {
-int i_n = (int)n;
-int i_incx = (int)incx;
+int i_n = static_cast<int>(n);
+int i_incx = static_cast<int>(incx);
blas_impl::scal_fast_path<scalar_t>(&i_n, &a, x, &i_incx);
return;
}

@@ -545,11 +545,11 @@ void gemv(char trans, int64_t m, int64_t n, scalar_t alpha, const scalar_t *a, i
#if AT_BUILD_WITH_BLAS()
if (blas_impl::gemv_use_fast_path<scalar_t>(trans, m, n, alpha, lda, incx, beta, incy)) {
TORCH_CHECK(lda >= std::max<int64_t>(1L, m), "lda should be at least max(1,", m, "), but have ", lda);
-int i_m = (int)m;
-int i_n = (int)n;
-int i_lda = (int)lda;
-int i_incx = (int)incx;
-int i_incy = (int)incy;
+int i_m = static_cast<int>(m);
+int i_n = static_cast<int>(n);
+int i_lda = static_cast<int>(lda);
+int i_incx = static_cast<int>(incx);
+int i_incy = static_cast<int>(incy);
blas_impl::gemv_fast_path<scalar_t>(&trans, &i_m, &i_n, &alpha, a, &i_lda, x, &i_incx, &beta, y, &i_incy);
return;
}

@@ -680,9 +680,9 @@ void axpy(int64_t n, double a, const double *x, int64_t incx, double *y, int64_t
#if AT_BUILD_WITH_BLAS()
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
-int i_n = (int)n;
-int i_incx = (int)incx;
-int i_incy = (int)incy;
+int i_n = static_cast<int>(n);
+int i_incx = static_cast<int>(incx);
+int i_incy = static_cast<int>(incy);
#if C10_IOS
cblas_daxpy(i_n, a, x, i_incx, y, i_incy);
#else

@@ -705,9 +705,9 @@ void axpy(int64_t n, float a, const float *x, int64_t incx, float *y, int64_t in
#if AT_BUILD_WITH_BLAS()
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
-int i_n = (int)n;
-int i_incx = (int)incx;
-int i_incy = (int)incy;
+int i_n = static_cast<int>(n);
+int i_incx = static_cast<int>(incx);
+int i_incy = static_cast<int>(incy);
#if C10_IOS
cblas_saxpy(i_n, a, x, i_incx, y, i_incy);
#else

@@ -730,9 +730,9 @@ void axpy(int64_t n, c10::complex<double> a, const c10::complex<double> *x, int6
#if AT_BUILD_WITH_BLAS()
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
-int i_n = (int)n;
-int i_incx = (int)incx;
-int i_incy = (int)incy;
+int i_n = static_cast<int>(n);
+int i_incx = static_cast<int>(incx);
+int i_incy = static_cast<int>(incy);
#if C10_IOS
cblas_zaxpy(i_n, &a, x, i_incx, y, i_incy);
#else

@@ -755,9 +755,9 @@ void axpy(int64_t n, c10::complex<float> a, const c10::complex<float> *x, int64_
#if AT_BUILD_WITH_BLAS()
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
-int i_n = (int)n;
-int i_incx = (int)incx;
-int i_incy = (int)incy;
+int i_n = static_cast<int>(n);
+int i_incx = static_cast<int>(incx);
+int i_incy = static_cast<int>(incy);
#if C10_IOS
cblas_caxpy(i_n, &a, x, i_incx, y, i_incy);
#else

@@ -781,9 +781,9 @@ void copy(int64_t n, const double *x, int64_t incx, double *y, int64_t incy) {
}
#if AT_BUILD_WITH_BLAS()
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) {
-int i_n = (int)n;
-int i_incx = (int)incx;
-int i_incy = (int)incy;
+int i_n = static_cast<int>(n);
+int i_incx = static_cast<int>(incx);
+int i_incy = static_cast<int>(incy);
#if C10_IOS
cblas_dcopy(i_n, x, i_incx, y, i_incy);
#else

@@ -805,9 +805,9 @@ void copy(int64_t n, const float *x, int64_t incx, float *y, int64_t incy) {
}
#if AT_BUILD_WITH_BLAS()
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) {
-int i_n = (int)n;
-int i_incx = (int)incx;
-int i_incy = (int)incy;
+int i_n = static_cast<int>(n);
+int i_incx = static_cast<int>(incx);
+int i_incy = static_cast<int>(incy);
#if C10_IOS
cblas_scopy(i_n, x, i_incx, y, i_incy);
#else

@@ -829,9 +829,9 @@ void copy(int64_t n, const c10::complex<double> *x, int64_t incx, c10::complex<d
}
#if AT_BUILD_WITH_BLAS()
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) {
-int i_n = (int)n;
-int i_incx = (int)incx;
-int i_incy = (int)incy;
+int i_n = static_cast<int>(n);
+int i_incx = static_cast<int>(incx);
+int i_incy = static_cast<int>(incy);
#if C10_IOS
cblas_zcopy(i_n, x, i_incx, y, i_incy);
#else

@@ -853,9 +853,9 @@ void copy(int64_t n, const c10::complex<float> *x, int64_t incx, c10::complex<fl
}
#if AT_BUILD_WITH_BLAS()
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) {
-int i_n = (int)n;
-int i_incx = (int)incx;
-int i_incy = (int)incy;
+int i_n = static_cast<int>(n);
+int i_incx = static_cast<int>(incx);
+int i_incy = static_cast<int>(incy);
#if C10_IOS
cblas_ccopy(i_n, &x, i_incx, y, i_incy);
#else

@@ -1082,7 +1082,7 @@ struct Brgemm : public KernelCache <BrgemmKey, GemmHelper> {
M,
N,
K,
-int64_t(1),
+1,
ld_a,
ld_b,
ld_c,

@@ -1096,7 +1096,7 @@ struct Brgemm : public KernelCache <BrgemmKey, GemmHelper> {
M,
N,
K,
-int64_t(1),
+1,
ld_a,
ld_b,
ld_c,
@@ -487,17 +487,17 @@ static Tensor _grid_sampler_2d_cpu_quantized(
int64_t out_sC = output.stride(1);
int64_t out_sH = output.stride(2);
int64_t out_sW = output.stride(3);
-uint8_t* inp_ptr = (uint8_t*)input.data_ptr<quint8>();
-uint8_t* out_ptr = (uint8_t*)output.data_ptr<quint8>();
-float* grid_ptr = grid.data_ptr<float>();
+const uint8_t* inp_ptr = input.const_data_ptr<uint8_t>();
+uint8_t* out_ptr = output.data_ptr<uint8_t>();
+const float* grid_ptr = grid.const_data_ptr<float>();
at::parallel_for(0, N, 0, [&](int64_t start, int64_t end) {
for (const auto n : c10::irange(start, end)) {
-float* grid_ptr_N = grid_ptr + n * grid_sN;
-uint8_t* inp_ptr_N = inp_ptr + n * inp_sN;
+const float* grid_ptr_N = grid_ptr + n * grid_sN;
+const uint8_t* inp_ptr_N = inp_ptr + n * inp_sN;
for (const auto h : c10::irange(out_H)) {
for (const auto w : c10::irange(out_W)) {
// get the corresponding input x, y, z coordinates from grid
-float* grid_ptr_NHW = grid_ptr_N + h * grid_sH + w * grid_sW;
+const float* grid_ptr_NHW = grid_ptr_N + h * grid_sH + w * grid_sW;
float x = *grid_ptr_NHW;
float y = grid_ptr_NHW[grid_sCoor];


@@ -527,7 +527,7 @@ static Tensor _grid_sampler_2d_cpu_quantized(
float se = (ix - ix_nw) * (iy - iy_nw);

// calculate bilinear weighted pixel value and set output pixel
-uint8_t* inp_ptr_NC = inp_ptr_N;
+const uint8_t* inp_ptr_NC = inp_ptr_N;
uint8_t* out_ptr_NCHW =
out_ptr + n * out_sN + h * out_sH + w * out_sW;
for (int64_t c = 0; c < C;
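The grid-sampler hunks switch read-only tensors over to `const_data_ptr<T>()`, which returns a `const T*` and removes both the C-style cast and the accidental write path; only the output keeps a mutable `data_ptr<T>()`. A hedged ATen sketch of the same pattern (`scale_into` is my own example, not code from this patch):

```cpp
#include <ATen/ATen.h>

// Read the input through a const pointer, write only through the output.
// Assumes contiguous float tensors of matching size.
void scale_into(const at::Tensor& src, at::Tensor& dst, float factor) {
  TORCH_CHECK(src.is_contiguous() && dst.is_contiguous(), "expected contiguous tensors");
  TORCH_CHECK(src.numel() == dst.numel(), "size mismatch");
  const float* in = src.const_data_ptr<float>();  // read-only view of the input
  float* out = dst.data_ptr<float>();             // mutable view of the output
  for (int64_t i = 0; i < src.numel(); ++i) {
    out[i] = in[i] * factor;
  }
}
```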
@@ -318,7 +318,7 @@ static std::vector<Tensor>& histogramdd_bin_edges_out(const Tensor& self, IntArr

const int64_t N = self.size(-1);
const int64_t M = std::accumulate(self.sizes().begin(), self.sizes().end() - 1,
-(int64_t)1, std::multiplies<int64_t>());
+static_cast<int64_t>(1), std::multiplies<int64_t>());
Tensor reshaped_self = self.reshape({ M, N });

auto outer_bin_edges = select_outer_bin_edges(reshaped_self, range);

@@ -40,7 +40,7 @@ Tensor do_trapezoid(const Tensor& y, const Tensor& dx, int64_t dim) {
// When dx is constant, the above formula simplifies
// to dx * [(\sum_{i=1}^n y_i) - (y_1 + y_n)/2]
Tensor do_trapezoid(const Tensor& y, double dx, int64_t dim) {
-return (y.sum(dim) - (y.select(dim, 0) + y.select(dim, -1)) * (0.5)) * dx;
+return (y.sum(dim) - (y.select(dim, 0) + y.select(dim, -1)) * 0.5) * dx;
}

Tensor zeros_like_except(const Tensor& y, int64_t dim) {
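The comment above states the constant-spacing simplification of the trapezoidal rule; the one-line derivation behind it (my own restatement, not part of the patch) is:

```latex
% Uniform spacing dx over samples y_1, ..., y_n: each interior sample appears
% in two trapezoids (weight 1), the endpoints in only one (weight 1/2).
\int y \, dx \;\approx\; \sum_{i=1}^{n-1} \frac{y_i + y_{i+1}}{2}\, dx
  \;=\; dx \left[ \sum_{i=1}^{n} y_i \;-\; \frac{y_1 + y_n}{2} \right]
```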
@ -201,7 +201,7 @@ static Tensor sumproduct_pair(const Tensor& left_, const Tensor& right_, IntArra
|
|||||||
out_size.reserve(out_num_dim);
|
out_size.reserve(out_num_dim);
|
||||||
for (auto& d : lro) out_size.push_back(left.sym_size(d));
|
for (auto& d : lro) out_size.push_back(left.sym_size(d));
|
||||||
for (auto& d : lo) out_size.push_back(left.sym_size(d));
|
for (auto& d : lo) out_size.push_back(left.sym_size(d));
|
||||||
for (auto& d : sum_dims_) { out_size.emplace_back(1); (void)(d); }; // avoid warning about not using d
|
for (auto& d : sum_dims_) { out_size.emplace_back(1); (void)d; }; // avoid warning about not using d
|
||||||
for (auto& d : ro) out_size.push_back(right.sym_size(d));
|
for (auto& d : ro) out_size.push_back(right.sym_size(d));
|
||||||
|
|
||||||
std::vector<int64_t> lpermutation(lro);
|
std::vector<int64_t> lpermutation(lro);
|
||||||
@ -640,7 +640,7 @@ Tensor einsum(std::string_view equation, TensorList operands, at::OptionalIntArr
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return ops[0];
|
return std::move(ops[0]);
|
||||||
}
|
}
|
||||||
|
|
||||||
// _trilinear computes a trilinear einstein sum with an unrolled dimension
|
// _trilinear computes a trilinear einstein sum with an unrolled dimension
|
||||||
@ -805,7 +805,7 @@ Tensor tensordot(const Tensor& input1, const Tensor& input2, IntArrayRef dims1,
|
|||||||
std::vector<SymInt> rsizes; // rsizes: sizes of the result
|
std::vector<SymInt> rsizes; // rsizes: sizes of the result
|
||||||
p1.reserve(input1.dim());
|
p1.reserve(input1.dim());
|
||||||
p2.reserve(input2.dim());
|
p2.reserve(input2.dim());
|
||||||
rsizes.reserve(input1.dim() + input2.dim() - (int64_t) dims1.size());
|
rsizes.reserve(input1.dim() + input2.dim() - static_cast<int64_t>(dims1.size()));
|
||||||
SymInt size1 = 1; // number of non-contracted elements in input1
|
SymInt size1 = 1; // number of non-contracted elements in input1
|
||||||
SymInt size2 = 1; // number of non-contracted elements in input2
|
SymInt size2 = 1; // number of non-contracted elements in input2
|
||||||
|
|
||||||
|
|||||||
@ -1655,7 +1655,7 @@ static inline void baddbmm_cpu_kernel(const Tensor& result, const Tensor& self,
|
|||||||
auto s0 = self.accessor<const scalar_t, 3>();
|
auto s0 = self.accessor<const scalar_t, 3>();
|
||||||
auto m0 = mat2.accessor<const scalar_t, 3>();
|
auto m0 = mat2.accessor<const scalar_t, 3>();
|
||||||
|
|
||||||
int64_t grain_size = std::max(internal::GRAIN_SIZE / (is * js * ks), (int64_t)1);
|
int64_t grain_size = std::max(internal::GRAIN_SIZE / (is * js * ks), static_cast<int64_t>(1));
|
||||||
using opmath_t = at::opmath_type<scalar_t>;
|
using opmath_t = at::opmath_type<scalar_t>;
|
||||||
parallel_for(0, bs, grain_size, [&](int64_t b_begin, int64_t b_end) {
|
parallel_for(0, bs, grain_size, [&](int64_t b_begin, int64_t b_end) {
|
||||||
for (const auto b : c10::irange(b_begin, b_end)) {
|
for (const auto b : c10::irange(b_begin, b_end)) {
|
||||||
|
|||||||
@@ -235,7 +235,7 @@ void nll_loss_out_frame(

 constexpr int64_t cascade_sum_num_levels = 8;
 const int64_t level_power =
-std::max(int64_t(4), utils::CeilLog2(batch_size) / cascade_sum_num_levels);
+std::max(static_cast<int64_t>(4), utils::CeilLog2(batch_size) / cascade_sum_num_levels);
 const int64_t level_step = (1 << level_power);
 const int64_t level_mask = level_step - 1;

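(Aside: the cascade_sum_num_levels / level_step machinery above belongs to a cascaded partial-sum accumulation, which reduces floating-point error by only adding numbers of comparable magnitude. A minimal sketch of the general technique, assuming nothing about the ATen kernel beyond that idea and using illustrative constants:)

#include <array>
#include <cstddef>
#include <vector>

// Cascaded summation: accumulate into level 0 and, like a base-kLevelStep
// counter, carry a partial sum one level up every kLevelStep additions.
float cascade_sum(const std::vector<float>& values) {
  constexpr std::size_t kLevels = 8;      // illustrative, mirrors the 8 levels above
  constexpr std::size_t kLevelStep = 16;  // illustrative carry period
  std::array<float, kLevels> partial{};   // zero-initialized accumulators
  for (std::size_t i = 0; i < values.size(); ++i) {
    partial[0] += values[i];
    std::size_t n = i + 1;
    for (std::size_t lvl = 0; lvl + 1 < kLevels && n % kLevelStep == 0;
         ++lvl, n /= kLevelStep) {
      partial[lvl + 1] += partial[lvl];  // carry the lower level upward
      partial[lvl] = 0.0f;
    }
  }
  float total = 0.0f;
  for (float p : partial) total += p;  // fold the remaining levels
  return total;
}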
@@ -129,7 +129,7 @@ void nll_loss2d_forward_out_frame(
 for (const auto b : c10::irange(start, end)) {
 for (const auto h : c10::irange(H)) {
 for (const auto w : c10::irange(W)) {
-const int64_t cur_target = (int64_t)target_acc[b][h][w];
+const int64_t cur_target = target_acc[b][h][w];

 if (cur_target == ignore_index) {
 output_acc[b][h][w] = static_cast<scalar_t>(0);
@@ -188,7 +188,7 @@ void nll_loss2d_forward_out_frame(
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
 scalar_t loss_partial_sums[cascade_sum_num_levels] = {0};
 const int64_t level_power =
-std::max(int64_t(4), utils::CeilLog2(numiter) / cascade_sum_num_levels);
+std::max(static_cast<int64_t>(4), utils::CeilLog2(numiter) / cascade_sum_num_levels);
 const int64_t level_step = (1 << level_power);
 const int64_t level_mask = level_step - 1;

@@ -192,7 +192,7 @@ Date: February 1996
 x = x - (std::erf(x) - y) / ((static_cast<T>(2.0)/static_cast<T>(std::sqrt(c10::pi<double>)))*std::exp(-x*x));
 x = x - (std::erf(x) - y) / ((static_cast<T>(2.0)/static_cast<T>(std::sqrt(c10::pi<double>)))*std::exp(-x*x));

-return(x);
+return x;
 }

 #undef CENTRAL_RANGE
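(Aside: the two repeated lines above are Newton refinement steps for the inverse error function: with f(x) = erf(x) - y and f'(x) = 2/sqrt(pi) * exp(-x^2), each step applies x <- x - f(x)/f'(x). A self-contained sketch of that refinement, with an illustrative helper name and a caller-supplied initial guess:)

#include <cmath>

// Newton refinement for erfinv: given y in (-1, 1) and an initial guess x0,
// apply x <- x - (erf(x) - y) / ((2 / sqrt(pi)) * exp(-x * x)) a few times.
double erfinv_newton(double y, double x0, int steps) {
  const double pi = 3.14159265358979323846;
  const double two_over_sqrt_pi = 2.0 / std::sqrt(pi);
  double x = x0;
  for (int i = 0; i < steps; ++i) {
    x -= (std::erf(x) - y) / (two_over_sqrt_pi * std::exp(-x * x));
  }
  return x;
}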
@@ -3819,7 +3819,7 @@ inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_v_forward(T x, int64_t n)

 if ((n > 6) && (std::abs(x + x - T(1.0)) < T(1.0))) {
 if (std::sin(std::acos(x + x - T(1.0)) / T(2.0)) != T(1.0)) {
-return std::cos(((n) + T(0.5)) * std::acos(x + x - T(1.0))) / std::cos(std::acos(x + x - T(1.0)) / T(2.0));
+return std::cos((n + T(0.5)) * std::acos(x + x - T(1.0))) / std::cos(std::acos(x + x - T(1.0)) / T(2.0));
 }

 if (n % 2 == 0) {
@@ -193,22 +193,22 @@ Tensor _nnpack_spatial_convolution(
 const size_t input_channels = input.size(1);
 const size_t output_channels = weight.size(0);
 const struct nnp_size input_size = {
-.width = (size_t)input.size(3),
-.height = (size_t)input.size(2),
+.width = static_cast<size_t>(input.size(3)),
+.height = static_cast<size_t>(input.size(2)),
 };
 const struct nnp_padding input_padding = {
-.top = (size_t)padding[0],
-.right = (size_t)padding[1],
-.bottom = (size_t)padding[0],
-.left = (size_t)padding[1],
+.top = static_cast<size_t>(padding[0]),
+.right = static_cast<size_t>(padding[1]),
+.bottom = static_cast<size_t>(padding[0]),
+.left = static_cast<size_t>(padding[1]),
 };
 const struct nnp_size kernel_size = {
-.width = (size_t)weight.size(3),
-.height = (size_t)weight.size(2),
+.width = static_cast<size_t>(weight.size(3)),
+.height = static_cast<size_t>(weight.size(2)),
 };
 const struct nnp_size output_size = {
-.width = (size_t)output.size(3),
-.height = (size_t)output.size(2),
+.width = static_cast<size_t>(output.size(3)),
+.height = static_cast<size_t>(output.size(2)),
 };
 const nnp_size output_subsample = {
 .width = static_cast<std::size_t>(stride[1]),
@@ -248,8 +248,8 @@ void slow_conv_transpose3d_out_cpu_template(
 Tensor weight = weight_.contiguous();
 Tensor bias = bias_.defined() ? bias_.contiguous() : bias_;

-const int n_input_plane = (int)weight.size(0);
-const int n_output_plane = (int)weight.size(1);
+const auto n_input_plane = weight.size(0);
+const auto n_output_plane = weight.size(1);

 bool is_batch = false;
 if (input.dim() == 4) {
@@ -84,8 +84,8 @@ static std::vector<int64_t> aligned_size(
 DimnameList aligned_names,
 bool is_aligning_two_tensors) {
 std::vector<int64_t> expanded_sizes(aligned_names.size(), 1);
-ptrdiff_t dim = (ptrdiff_t)tensor_sizes.size() - 1;
-ptrdiff_t idx = (ptrdiff_t)aligned_names.size() - 1;
+ptrdiff_t dim = static_cast<ptrdiff_t>(tensor_sizes.size()) - 1;
+ptrdiff_t idx = static_cast<ptrdiff_t>(aligned_names.size()) - 1;
 for (; idx >= 0 && dim >= 0; --idx) {
 if (tensor_names[dim] != aligned_names[idx]) {
 continue;
@@ -25,7 +25,7 @@ std::tuple<Tensor, Tensor> _rowwise_prune_helper(
 auto mask_contig = mask.contiguous();
 auto mask_data = mask_contig.data_ptr<bool>();
 for (const auto i : c10::irange(mask.numel())) {
-num_non_masked_rows += (((mask_data[i] == true)) ? 1 : 0);
+num_non_masked_rows += ((mask_data[i] == true) ? 1 : 0);
 }
 int num_cols = weights.size(1);
 auto pruned_2d_tensor = at::empty({num_non_masked_rows, num_cols},
@@ -176,7 +176,7 @@ void host_softmax(
 scalar_t* input_data_base = input.data_ptr<scalar_t>();
 scalar_t* output_data_base = output.data_ptr<scalar_t>();
 bool* mask_data_base = mask;
-int64_t grain_size = std::min(internal::GRAIN_SIZE / dim_size, (int64_t)1);
+int64_t grain_size = std::min(internal::GRAIN_SIZE / dim_size, static_cast<int64_t>(1));
 parallel_for(
 0, outer_size * inner_size, grain_size,
 [&](int64_t begin, int64_t end) {
@@ -265,7 +265,7 @@ void host_softmax_backward(
 scalar_t* output_data_base = output.data_ptr<scalar_t>();
 scalar_t* gradOutput_data_base = grad.data_ptr<scalar_t>();
 bool* mask_data_base = mask;
-int64_t grain_size = std::min(internal::GRAIN_SIZE / dim_size, (int64_t)1);
+int64_t grain_size = std::min(internal::GRAIN_SIZE / dim_size, static_cast<int64_t>(1));
 parallel_for(
 0, outer_size * inner_size, grain_size, [&](int64_t begin, int64_t end) {
 for (const auto i : c10::irange(begin, end)) {
@@ -1701,13 +1701,13 @@ Tensor& index_select_out_cpu_(
 TORCH_CHECK_INDEX(
 (self_i >= 0) && (self_i < self_dim_size),
 "index out of range in self");
-auto self_data = static_cast<const char*>(selfSlice_data) +
+auto self_data = const_cast<char*>(static_cast<const char*>(
+selfSlice_data)) +
 self_i * self_stride_bytes;
 auto result_data = static_cast<char*>(resultSlice_data) +
 i * result_stride_bytes;
 sub_iter.unsafe_replace_operand(0, result_data);
-sub_iter.unsafe_replace_operand(
-1, const_cast<char*>(self_data));
+sub_iter.unsafe_replace_operand(1, self_data);
 copy_stub(sub_iter.device_type(), sub_iter, false);
 };
 });
@@ -11,6 +11,7 @@
 #include <ATen/SparseCsrTensorUtils.h>
 #include <ATen/TensorOperators.h>
 #include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
 #include <ATen/core/Tensor.h>
 #include <ATen/native/UnaryOps.h>
 #include <c10/core/ScalarType.h>
@@ -1089,6 +1090,7 @@ Tensor& rand_out(

 Tensor rand_like(
 const Tensor& self,
+std::optional<Generator> generator,
 std::optional<ScalarType> dtype,
 std::optional<Layout> layout,
 std::optional<Device> device,
@@ -1100,7 +1102,24 @@ Tensor rand_like(
 pin_memory);

 auto result = at::empty_like(self, options, optional_memory_format);
-return result.uniform_(0, 1, std::nullopt);
+return result.uniform_(0, 1, std::move(generator));
+}
+
+Tensor rand_like(
+const Tensor& self,
+std::optional<ScalarType> dtype,
+std::optional<Layout> layout,
+std::optional<Device> device,
+std::optional<bool> pin_memory,
+std::optional<c10::MemoryFormat> optional_memory_format) {
+return native::rand_like(
+self,
+static_cast<std::optional<Generator>>(std::nullopt),
+dtype,
+layout,
+device,
+pin_memory,
+optional_memory_format);
 }

 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ randint ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1197,7 +1216,9 @@ Tensor& randint_out(

 Tensor randint_like(
 const Tensor& self,
+int64_t low,
 int64_t high,
+std::optional<Generator> generator,
 std::optional<ScalarType> dtype,
 std::optional<Layout> layout,
 std::optional<Device> device,
@@ -1209,7 +1230,71 @@ Tensor randint_like(
 pin_memory);

 auto result = at::empty_like(self, options, optional_memory_format);
-return result.random_(0, high, std::nullopt);
+return result.random_(low, high, std::move(generator));
+}
+
+Tensor randint_like(
+const Tensor& self,
+int64_t low,
+int64_t high,
+std::optional<ScalarType> dtype,
+std::optional<Layout> layout,
+std::optional<Device> device,
+std::optional<bool> pin_memory,
+std::optional<c10::MemoryFormat> optional_memory_format) {
+return native::randint_like(
+self,
+low,
+high,
+static_cast<std::optional<Generator>>(std::nullopt),
+dtype,
+layout,
+device,
+pin_memory,
+optional_memory_format);
+}
+
+Tensor randint_like(
+const Tensor& self,
+int64_t high,
+std::optional<ScalarType> dtype,
+std::optional<Layout> layout,
+std::optional<Device> device,
+std::optional<bool> pin_memory,
+std::optional<c10::MemoryFormat> optional_memory_format) {
+// See [Note: hacky wrapper removal for TensorOptions]
+return native::randint_like(
+self,
+0,
+high,
+static_cast<std::optional<Generator>>(std::nullopt),
+dtype,
+layout,
+device,
+pin_memory,
+optional_memory_format);
+}
+
+Tensor randint_like(
+const Tensor& self,
+int64_t high,
+std::optional<Generator> generator,
+std::optional<ScalarType> dtype,
+std::optional<Layout> layout,
+std::optional<Device> device,
+std::optional<bool> pin_memory,
+std::optional<c10::MemoryFormat> optional_memory_format) {
+// See [Note: hacky wrapper removal for TensorOptions]
+return native::randint_like(
+self,
+0,
+high,
+generator,
+dtype,
+layout,
+device,
+pin_memory,
+optional_memory_format);
 }

 Tensor randint_like(
@@ -1226,7 +1311,9 @@ Tensor randint_like(
 int64_t high_scalar = high.item<int64_t>();
 return at::native::randint_like(
 self,
+0,
 high_scalar,
+static_cast<std::optional<Generator>>(std::nullopt),
 dtype,
 layout,
 device,
@@ -1236,20 +1323,27 @@ Tensor randint_like(

 Tensor randint_like(
 const Tensor& self,
-int64_t low,
-int64_t high,
+const Tensor& high,
+std::optional<Generator> generator,
 std::optional<ScalarType> dtype,
 std::optional<Layout> layout,
 std::optional<Device> device,
 std::optional<bool> pin_memory,
 std::optional<c10::MemoryFormat> optional_memory_format) {
-// See [Note: hacky wrapper removal for TensorOptions]
-TensorOptions options =
-TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(
-pin_memory);
-
-auto result = at::empty_like(self, options, optional_memory_format);
-return result.random_(low, high, std::nullopt);
+TORCH_CHECK(
+high.numel() == 1 && high.ndimension() == 0 && high.device().is_cpu(),
+"high must be a scalar tensor and on CPU");
+int64_t high_scalar = high.item<int64_t>();
+return at::native::randint_like(
+self,
+0,
+high_scalar,
+generator,
+dtype,
+layout,
+device,
+pin_memory,
+optional_memory_format);
 }

 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ randn ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1327,6 +1421,7 @@ Tensor& normal_out(

 Tensor randn_like(
 const Tensor& self,
+std::optional<Generator> generator,
 std::optional<ScalarType> dtype,
 std::optional<Layout> layout,
 std::optional<Device> device,
@@ -1338,7 +1433,24 @@ Tensor randn_like(
 pin_memory);

 auto result = at::empty_like(self, options, optional_memory_format);
-return result.normal_(0, 1, std::nullopt);
+return result.normal_(0, 1, std::move(generator));
+}
+
+Tensor randn_like(
+const Tensor& self,
+std::optional<ScalarType> dtype,
+std::optional<Layout> layout,
+std::optional<Device> device,
+std::optional<bool> pin_memory,
+std::optional<c10::MemoryFormat> optional_memory_format) {
+return native::randn_like(
+self,
+static_cast<std::optional<Generator>>(std::nullopt),
+dtype,
+layout,
+device,
+pin_memory,
+optional_memory_format);
 }

 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ randperm ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1382,7 +1494,7 @@ void randperm_cpu(Tensor& result, int64_t n, CPUGeneratorImpl* generator) {
 // use no-initialization Fischer-Yates variant
 // https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle#The_.22inside-out.22_algorithm
 for (int64_t i = 0; i < n; i++) {
-int64_t z = (int64_t)(generator->random64() % (i + 1));
+int64_t z = static_cast<int64_t>(generator->random64() % (i + 1));
 r__data[i * r__stride_0] = i;
 r__data[i * r__stride_0] = r__data[z * r__stride_0];
 r__data[z * r__stride_0] = i;
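(Aside: the hunk above is the "inside-out" Fisher-Yates variant linked in the comment, which builds a uniformly random permutation of 0..n-1 without a separate initialization pass. A minimal standalone sketch of the same algorithm using the standard <random> facilities rather than ATen's generator:)

#include <cstdint>
#include <random>
#include <vector>

// Inside-out Fisher-Yates: for each i, pick z uniformly in [0, i], move the
// element at z to position i, and write the new value i at position z.
std::vector<int64_t> randperm(int64_t n, std::mt19937_64& rng) {
  std::vector<int64_t> r(n);
  for (int64_t i = 0; i < n; ++i) {
    std::uniform_int_distribution<int64_t> dist(0, i);  // inclusive range [0, i]
    const int64_t z = dist(rng);
    r[i] = r[z];
    r[z] = i;
  }
  return r;
}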
@@ -40,7 +40,7 @@ at::Tensor PackedLinearWeightQnnp::apply_dynamic_impl<false>(
 "quantized_sparse_linear(): Input tensor rank should be >= 2");

 const auto rows_input = c10::multiply_integers(input.sizes().begin(), input.sizes().end() - 1);
-const auto cols_input = static_cast<int64_t>(input.size(input.dim() - 1));
+const auto cols_input = input.size(input.dim() - 1);
 TORCH_CHECK(
 cols_input == input_channels_,
 "quantized_sparse_linear: Input tensor's last and weight tensor's"
@@ -65,8 +65,8 @@ LinearPackedSerializationType PackedLinearWeight::unpack() {
 #ifdef USE_PYTORCH_QNNPACK

 LinearPackedSerializationType PackedLinearWeightQnnp::unpack() {
-const int64_t N = static_cast<int64_t>(output_channels_);
-const int64_t K = static_cast<int64_t>(input_channels_);
+const int64_t N = output_channels_;
+const int64_t K = input_channels_;

 float* w_scales_ptr = w_scales_.data_ptr<float>();

@@ -998,7 +998,7 @@ void softplus_backward_kernel(TensorIteratorBase& iter, const Scalar& beta_, con
 auto threshold = threshold_.to<float>();
 const Vec beta_vec(beta);
 const Vec threshold_vec(threshold);
-const Vec one_vec(static_cast<float>(1.0));
+const Vec one_vec(1.0f);
 cpu_kernel_vec(
 iter,
 [beta, threshold](scalar_t a, scalar_t b) -> scalar_t {
@@ -17,7 +17,7 @@ static inline void cpu_atomic_add_float(float* dst, float fvalue)
 } uf32_t;

 uf32_t new_value, old_value;
-std::atomic<unsigned>* dst_intV = (std::atomic<unsigned>*)(dst);
+std::atomic<unsigned>* dst_intV = (std::atomic<unsigned>*)dst;

 old_value.floatV = *dst;
 new_value.floatV = old_value.floatV + fvalue;
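(Aside: cpu_atomic_add_float above emulates an atomic float add by type-punning the float through a std::atomic<unsigned> compare-and-swap loop. The same retry-until-CAS-succeeds idea can be sketched directly on std::atomic<float>, which avoids the union; this is an illustration of the technique, not the ATen helper:)

#include <atomic>

// Atomic float add via a compare-and-swap retry loop: read the current value,
// compute the new sum, and retry until no other thread raced us.
void atomic_add_float(std::atomic<float>* dst, float value) {
  float old_value = dst->load(std::memory_order_relaxed);
  float new_value = old_value + value;
  // On failure, compare_exchange_weak refreshes old_value with the current
  // contents of *dst, so the loop recomputes the sum and tries again.
  while (!dst->compare_exchange_weak(old_value, new_value)) {
    new_value = old_value + value;
  }
}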
@@ -851,7 +851,7 @@ void sigmoid_backward_kernel(TensorIteratorBase& iter) {
 });
 });
 } else if (iter.dtype() == kBFloat16) {
-auto one_vec = Vectorized<float>((float)(1));
+auto one_vec = Vectorized<float>((float)1);
 cpu_kernel_vec(
 iter,
 [=](BFloat16 a, BFloat16 b) -> BFloat16 {
@@ -77,9 +77,7 @@ static void reduced_float_copy_kernel(TensorIteratorBase &iter, bool requires_ne

 int64_t grain_size = at::internal::GRAIN_SIZE;

-auto loop = [strides_in, requires_neg](char** base, const int64_t* strides, int64_t size0, int64_t size1) {
-std::array<char*, 2> data;
-std::copy_n(base, 2, data.data());
+auto loop = [strides_in, requires_neg](char** data, const int64_t* strides, int64_t size0, int64_t size1) {
 const int64_t *outer_strides = &strides[2];

 for ([[maybe_unused]] const auto it : c10::irange(size1)) {
@@ -146,9 +144,7 @@ static void reduced_float_copy_kernel(TensorIteratorBase &iter, bool requires_ne

 int64_t grain_size = at::internal::GRAIN_SIZE;

-auto loop = [strides_in, requires_neg](char** base, const int64_t* strides, int64_t size0, int64_t size1) {
-std::array<char*, 2> data;
-std::copy_n(base, 2, data.data());
+auto loop = [strides_in, requires_neg](char** data, const int64_t* strides, int64_t size0, int64_t size1) {
 const int64_t *outer_strides = &strides[2];

 for ([[maybe_unused]] const auto it : c10::irange(size1)) {
|||||||
@ -493,40 +493,33 @@ void cpu_hflip_vec(at::TensorIterator& iter) {
|
|||||||
|
|
||||||
for ([[maybe_unused]] const auto j : c10::irange(size1)) {
|
for ([[maybe_unused]] const auto j : c10::irange(size1)) {
|
||||||
// vectorized loop with negative stride for output
|
// vectorized loop with negative stride for output
|
||||||
char** C10_RESTRICT data_ = data_arr.data();
|
|
||||||
int64_t n = size0;
|
int64_t n = size0;
|
||||||
|
|
||||||
char* C10_RESTRICT data[ntensors];
|
|
||||||
for (const auto arg : c10::irange(ntensors)) {
|
|
||||||
data[arg] = data_[arg];
|
|
||||||
}
|
|
||||||
|
|
||||||
int64_t i = 0;
|
int64_t i = 0;
|
||||||
|
|
||||||
// data[0] unaligned pre-pass
|
// data_arr[0] unaligned pre-pass
|
||||||
int64_t offset = (j * n + (n - i - Vec::size())) % 32;
|
int64_t offset = (j * n + (n - i - Vec::size())) % 32;
|
||||||
offset = (offset >= n) ? n : offset;
|
offset = (offset >= n) ? n : offset;
|
||||||
for (; i < offset; i++) {
|
for (; i < offset; i++) {
|
||||||
scalar_t* out_ptr = (scalar_t*)(data[0] - i * stride);
|
scalar_t* out_ptr = (scalar_t*)(data_arr[0] - i * stride);
|
||||||
*out_ptr = c10::load((scalar_t *)(data[1] + i * stride));
|
*out_ptr = c10::load((scalar_t *)(data_arr[1] + i * stride));
|
||||||
}
|
}
|
||||||
// Empirically found that it is faster to process 3 data items together vs 2 or 4
|
// Empirically found that it is faster to process 3 data items together vs 2 or 4
|
||||||
for (; i <= n - 3 * Vec::size(); i += 3 * Vec::size()) {
|
for (; i <= n - 3 * Vec::size(); i += 3 * Vec::size()) {
|
||||||
auto out1 = Vec::loadu(data[1] + i * stride);
|
auto out1 = Vec::loadu(data_arr[1] + i * stride);
|
||||||
auto out2 = Vec::loadu(data[1] + (i + Vec::size()) * stride);
|
auto out2 = Vec::loadu(data_arr[1] + (i + Vec::size()) * stride);
|
||||||
auto out3 = Vec::loadu(data[1] + (i + 2 * Vec::size()) * stride);
|
auto out3 = Vec::loadu(data_arr[1] + (i + 2 * Vec::size()) * stride);
|
||||||
// flip the vector: 1234 -> 4321
|
// flip the vector: 1234 -> 4321
|
||||||
out1 = flip(out1);
|
out1 = flip(out1);
|
||||||
out2 = flip(out2);
|
out2 = flip(out2);
|
||||||
out3 = flip(out3);
|
out3 = flip(out3);
|
||||||
out1.store(data[0] - (i + Vec::size() - 1) * stride);
|
out1.store(data_arr[0] - (i + Vec::size() - 1) * stride);
|
||||||
out2.store(data[0] - (i + 2 * Vec::size() - 1) * stride);
|
out2.store(data_arr[0] - (i + 2 * Vec::size() - 1) * stride);
|
||||||
out3.store(data[0] - (i + 3 * Vec::size() - 1) * stride);
|
out3.store(data_arr[0] - (i + 3 * Vec::size() - 1) * stride);
|
||||||
}
|
}
|
||||||
if (i < n) {
|
if (i < n) {
|
||||||
for (; i < n; i++) {
|
for (; i < n; i++) {
|
||||||
scalar_t* out_ptr = (scalar_t*)(data[0] - i * stride);
|
scalar_t* out_ptr = (scalar_t*)(data_arr[0] - i * stride);
|
||||||
*out_ptr = c10::load((scalar_t *)(data[1] + i * stride));
|
*out_ptr = c10::load((scalar_t *)(data_arr[1] + i * stride));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
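(Aside: the vectorized hflip loop above reads data_arr[1] forward and stores into data_arr[0] with a negative stride, i.e. each row is written out reversed. A scalar equivalent for one contiguous row, just to show the intended effect, is a plain reverse copy:)

#include <algorithm>
#include <cstdint>

// Scalar horizontal flip of one contiguous row: dst[k] = src[n - 1 - k].
void hflip_row(const float* src, float* dst, int64_t n) {
  std::reverse_copy(src, src + n, dst);
}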
@@ -560,15 +553,8 @@ void cpu_vflip_memcpy(at::TensorIterator& iter) {
 const int64_t stride = strides[0];

 for ([[maybe_unused]] const auto j : c10::irange(size1)) {
-char** C10_RESTRICT data_ = data_arr.data();
 int64_t n = size0;
-
-char* C10_RESTRICT data[ntensors];
-for (const auto arg : c10::irange(ntensors)) {
-data[arg] = data_[arg];
-}
-
-memcpy(data[0], data[1], n * stride);
+memcpy(data_arr[0], data_arr[1], n * stride);

 // advance:
 for (const auto arg : c10::irange(data_arr.size())) {
|||||||
@ -92,7 +92,8 @@ void addcdiv_cpu_kernel(TensorIteratorBase& iter, const Scalar& value) {
|
|||||||
|
|
||||||
void smooth_l1_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, double beta) {
|
void smooth_l1_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, double beta) {
|
||||||
ScalarType dtype = iter.dtype(0);
|
ScalarType dtype = iter.dtype(0);
|
||||||
if (dtype == kBFloat16) {
|
if (at::isReducedFloatingType(dtype)) {
|
||||||
|
AT_DISPATCH_REDUCED_FLOATING_TYPES(dtype, "smooth_l1_backward_cpu_out", [&]() {
|
||||||
auto norm_val = norm.to<float>();
|
auto norm_val = norm.to<float>();
|
||||||
float beta_val(beta);
|
float beta_val(beta);
|
||||||
auto norm_val_vec = Vectorized<float>(norm_val);
|
auto norm_val_vec = Vectorized<float>(norm_val);
|
||||||
@ -101,9 +102,9 @@ void smooth_l1_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, dou
|
|||||||
const auto zero_vec = Vectorized<float>(0);
|
const auto zero_vec = Vectorized<float>(0);
|
||||||
const auto pos_1_vec = Vectorized<float>(1);
|
const auto pos_1_vec = Vectorized<float>(1);
|
||||||
cpu_kernel_vec(iter,
|
cpu_kernel_vec(iter,
|
||||||
[=](BFloat16 input, BFloat16 target, BFloat16 grad_output) -> BFloat16 {
|
[=](scalar_t input, scalar_t target, scalar_t grad_output) -> scalar_t {
|
||||||
const auto x = float(input) - float(target);
|
const auto x = float(input) - float(target);
|
||||||
if (x <= -beta){
|
if (x <= -beta) {
|
||||||
return -norm_val * float(grad_output);
|
return -norm_val * float(grad_output);
|
||||||
}else if (x >= beta){
|
}else if (x >= beta){
|
||||||
return norm_val * float(grad_output);
|
return norm_val * float(grad_output);
|
||||||
@ -112,14 +113,14 @@ void smooth_l1_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, dou
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
[norm_val_vec, beta_val_vec, neg_1_vec, zero_vec, pos_1_vec](
|
[norm_val_vec, beta_val_vec, neg_1_vec, zero_vec, pos_1_vec](
|
||||||
Vectorized<BFloat16> input, Vectorized<BFloat16> target, Vectorized<BFloat16> grad_output) -> Vectorized<BFloat16> {
|
Vectorized<scalar_t> input, Vectorized<scalar_t> target, Vectorized<scalar_t> grad_output) -> Vectorized<scalar_t> {
|
||||||
// using two blendv calls to simulate the 3 cases
|
// using two blendv calls to simulate the 3 cases
|
||||||
// 1 if x >= beta
|
// 1 if x >= beta
|
||||||
// -1 if x <= -beta
|
// -1 if x <= -beta
|
||||||
// x / beta if |x| < beta
|
// x / beta if |x| < beta
|
||||||
auto [input0, input1] = convert_bfloat16_float(input);
|
auto [input0, input1] = convert_to_float(input);
|
||||||
auto [target0, target1] = convert_bfloat16_float(target);
|
auto [target0, target1] = convert_to_float(target);
|
||||||
auto [grad_output0, grad_output1] = convert_bfloat16_float(grad_output);
|
auto [grad_output0, grad_output1] = convert_to_float(grad_output);
|
||||||
auto x = input0 - target0;
|
auto x = input0 - target0;
|
||||||
auto pos_or_neg_1_vec = Vectorized<float>::blendv(
|
auto pos_or_neg_1_vec = Vectorized<float>::blendv(
|
||||||
neg_1_vec, pos_1_vec, x > zero_vec);
|
neg_1_vec, pos_1_vec, x > zero_vec);
|
||||||
@ -135,11 +136,12 @@ void smooth_l1_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, dou
|
|||||||
output = Vectorized<float>::blendv(
|
output = Vectorized<float>::blendv(
|
||||||
x / beta_val_vec, pos_or_neg_1_vec, x_abs >= beta_val_vec);
|
x / beta_val_vec, pos_or_neg_1_vec, x_abs >= beta_val_vec);
|
||||||
input1 = norm_val_vec * output * grad_output1;
|
input1 = norm_val_vec * output * grad_output1;
|
||||||
return convert_float_bfloat16(input0, input1);
|
return convert_from_float<scalar_t>(input0, input1);
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
});
|
||||||
} else {
|
} else {
|
||||||
AT_DISPATCH_ALL_TYPES_AND(kHalf, dtype, "smooth_l1_backward_cpu_out", [&] {
|
AT_DISPATCH_ALL_TYPES(dtype, "smooth_l1_backward_cpu_out", [&] {
|
||||||
auto norm_val = norm.to<scalar_t>();
|
auto norm_val = norm.to<scalar_t>();
|
||||||
scalar_t beta_val(beta);
|
scalar_t beta_val(beta);
|
||||||
auto norm_val_vec = Vectorized<scalar_t>(norm_val);
|
auto norm_val_vec = Vectorized<scalar_t>(norm_val);
|
||||||
|
|||||||
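(Aside: the scalar lambda and the blendv-based vector path above both compute the smooth L1 (Huber) gradient. With x = input - target, the per-element derivative is -norm for x <= -beta, +norm for x >= beta, and norm * x / beta in between, each scaled by grad_output. A standalone scalar sketch of that formula:)

// Smooth L1 backward, scalar form: with x = input - target,
//   -norm * grad_output             if x <= -beta
//   +norm * grad_output             if x >=  beta
//   norm * (x / beta) * grad_output otherwise
float smooth_l1_backward_scalar(float input, float target, float grad_output,
                                float norm, float beta) {
  const float x = input - target;
  if (x <= -beta) {
    return -norm * grad_output;
  } else if (x >= beta) {
    return norm * grad_output;
  }
  return norm * (x / beta) * grad_output;
}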
@@ -298,7 +298,7 @@ void unfolded2d_copy(
 memcpy(
 dst + (size_t)y * output_width + x,
 src + (size_t)iy * input_width + ix,
-sizeof(scalar_t) * (1));
+sizeof(scalar_t) * 1);
 }
 }
 }
@@ -317,7 +317,7 @@ void unfolded2d_copy(
 memcpy(
 dst + (size_t)y * output_width + x,
 src + (size_t)iy * input_width + ix + x * dW,
-sizeof(scalar_t) * (1));
+sizeof(scalar_t) * 1);
 }
 }
 }
@@ -342,7 +342,7 @@ void upsample_avx_bilinear_bicubic_uint8(

 if (need_horizontal) {
 int interp_dim = 3;
-auto stride = (skip_unpacking) ? num_channels : 4;
+auto stride = skip_unpacking ? num_channels : 4;
 std::tie(horiz_indices_weights, ksize_horiz, horiz_weights_precision) =
 F::compute_index_ranges_int16_weights(
 /*input_size=*/xin,
@@ -358,7 +358,7 @@ void upsample_avx_bilinear_bicubic_uint8(

 if (need_vertical) {
 int interp_dim = 2;
-auto stride = (skip_unpacking) ? num_channels * xout : 4 * xout;
+auto stride = skip_unpacking ? num_channels * xout : 4 * xout;
 std::tie(vert_indices_weights, ksize_vert, vert_weights_precision) =
 F::compute_index_ranges_int16_weights(
 /*input_size=*/yin,
@@ -377,17 +377,17 @@ void upsample_avx_bilinear_bicubic_uint8(
 // horizontal-only or vertical-only interpolation, and if the tensor doesn't
 // need repacking
 if (need_horizontal && (need_vertical || !skip_packing)) {
-auto c = (skip_unpacking) ? num_channels : 4;
+auto c = skip_unpacking ? num_channels : 4;
 buffer_horiz = at::empty({c, yin, xout}, input.options());
 }
 if (need_vertical && !skip_packing) {
-auto c = (skip_unpacking) ? num_channels : 4;
+auto c = skip_unpacking ? num_channels : 4;
 buffer_vert = at::empty({c, yout, xout}, input.options());
 }

 for (const auto i : c10::irange(batch_size)) {

-at::Tensor unpacked_input = (skip_unpacking) ? input[i] : unpack_rgb(input[i]);
+at::Tensor unpacked_input = skip_unpacking ? input[i] : unpack_rgb(input[i]);
 at::Tensor unpacked_output;

 if (need_horizontal) {
@@ -411,7 +411,7 @@ void upsample_avx_bilinear_bicubic_uint8(
 unpacked_output = unpacked_input = unpacked_output_temp;
 }
 if (need_vertical) {
-unpacked_output = (skip_packing) ? output[i] : buffer_vert;
+unpacked_output = skip_packing ? output[i] : buffer_vert;

 ImagingResampleVertical(
 unpacked_output,
@@ -502,7 +502,7 @@ void ImagingResampleHorizontalConvolution8u4x(
 // RGBA: b4_delta = b4_delta_soft = 3
 // RGB : b4_delta = 5
 // RGB : b4_delta_soft = 4
-const auto b4_delta = (stride == 4) ? 3 : ((is_last_line) ? 5 : 4);
+const auto b4_delta = (stride == 4) ? 3 : (is_last_line ? 5 : 4);

 // In block 2 (2 means we process 2 weights values together), we read input data
 // with _mm_loadl_epi64, i.e. 8 bytes, per one line:
@@ -515,7 +515,7 @@ void ImagingResampleHorizontalConvolution8u4x(
 // RGBA: b2_delta = b2_delta_soft = 1
 // RGB : b2_delta = 2
 // RGB : b2_delta_soft = 1
-const auto b2_delta = (stride == 4) ? 1 : ((is_last_line) ? 2 : 1);
+const auto b2_delta = (stride == 4) ? 1 : (is_last_line ? 2 : 1);

 const auto max_out_x_strided = out_xsize * stride;
 const auto max_in_x_strided = in_xsize * stride;
@@ -819,7 +819,7 @@ void ImagingResampleHorizontalConvolution8u(
 // RGBA: b8_delta = b8_delta_soft = 7
 // RGB : b8_delta = 10
 // RGB : b8_delta_soft = 9
-const auto b8_delta = (stride == 4) ? 7 : ((is_last_line) ? 10 : 9);
+const auto b8_delta = (stride == 4) ? 7 : (is_last_line ? 10 : 9);

 // In block 4 (4 means we process 4 weight values together), we read
 // 16 bytes of input data.
@@ -832,7 +832,7 @@ void ImagingResampleHorizontalConvolution8u(
 // RGBA: b4_delta = b4_delta_soft = 3
 // RGB : b4_delta = 5
 // RGB : b4_delta_soft = 4
-const auto b4_delta = (stride == 4) ? 3 : ((is_last_line) ? 5 : 4);
+const auto b4_delta = (stride == 4) ? 3 : (is_last_line ? 5 : 4);

 // In block 2 (2 means we process 2 weight values together), we read
 // 8 bytes of input data.
@@ -845,7 +845,7 @@ void ImagingResampleHorizontalConvolution8u(
 // RGBA: b2_delta = b2_delta_soft = 1
 // RGB : b2_delta = 2
 // RGB : b2_delta_soft = 1
-const auto b2_delta = (stride == 4) ? 1 : ((is_last_line) ? 2 : 1);
+const auto b2_delta = (stride == 4) ? 1 : (is_last_line ? 2 : 1);

 const auto max_out_x_strided = out_xsize * stride;
 const auto max_in_x_strided = in_xsize * stride;
Some files were not shown because too many files have changed in this diff.