Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Migrate smoke tests to pytorch/pytorch (#142482)
Related to https://github.com/pytorch/builder/issues/2054
This should fix the nightly XPU failure: https://github.com/pytorch/pytorch/actions/runs/12251477588/job/34180135207 and the ROCm failure: https://github.com/pytorch/pytorch/actions/runs/12251477588/job/34182185374, both caused by the missing `/builder/check_binary.sh`.
Builder Scripts revision: 3468139e81
Pull Request resolved: https://github.com/pytorch/pytorch/pull/142482
Approved by: https://github.com/chuanqi129, https://github.com/kit1980, https://github.com/malfet, https://github.com/jeffdaily, https://github.com/huydhn
Committed by: PyTorch MergeBot
Parent: 117b6c3e2c
Commit: f57606ab85
@@ -490,9 +490,9 @@ if [[ -z "$BUILD_PYTHONLESS" ]]; then
echo "$(date) :: Running tests"
pushd "$PYTORCH_ROOT"
#TODO: run_tests.sh and check_binary.sh should be moved to pytorch/pytorch project
LD_LIBRARY_PATH=/usr/local/nvidia/lib64 \
-    "/builder/run_tests.sh" manywheel "${py_majmin}" "$DESIRED_CUDA"
+    "${PYTORCH_ROOT}/.ci/pytorch/run_tests.sh" manywheel "${py_majmin}" "$DESIRED_CUDA"
popd
echo "$(date) :: Finished tests"
fi
.ci/pytorch/check_binary.sh (new executable file, 394 lines)
@@ -0,0 +1,394 @@
|
||||
#!/bin/bash
|
||||
|
||||
# shellcheck disable=SC2086,SC2006,SC2207,SC2076,SC2155,SC2046,SC1091,SC2143
|
||||
# TODO: Re-enable shellchecks above
|
||||
|
||||
set -eux -o pipefail
|
||||
|
||||
# This script checks the following things on binaries
|
||||
# 1. The gcc abi matches DESIRED_DEVTOOLSET
|
||||
# 2. MacOS binaries do not link against OpenBLAS
|
||||
# 3. There are no protobuf symbols of any sort anywhere (turned off, because
|
||||
# this is currently not true)
|
||||
# 4. Standard Python imports work
|
||||
# 5. MKL is available everywhere except for MacOS wheels
|
||||
# 6. XNNPACK is available everywhere except for MacOS wheels
|
||||
# 7. CUDA is setup correctly and does not hang
|
||||
# 8. Magma is available for CUDA builds
|
||||
# 9. CuDNN is available for CUDA builds
|
||||
#
|
||||
# This script needs the env variables DESIRED_PYTHON, DESIRED_CUDA,
|
||||
# DESIRED_DEVTOOLSET and PACKAGE_TYPE
|
||||
#
|
||||
# This script expects PyTorch to be installed into the active Python (the
|
||||
# Python returned by `which python`). Or, if this is testing a libtorch
|
||||
# Pythonless binary, then it expects to be in the root folder of the unzipped
|
||||
# libtorch package.
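#
# For illustration only, with hypothetical values (not taken from any specific
# CI job), a manywheel check might be driven as:
#   DESIRED_PYTHON=3.11 DESIRED_CUDA=cu121 DESIRED_DEVTOOLSET=cxx11-abi \
#     PACKAGE_TYPE=manywheel ./check_binary.sh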
|
||||
|
||||
|
||||
if [[ -z ${DESIRED_PYTHON:-} ]]; then
|
||||
export DESIRED_PYTHON=${MATRIX_PYTHON_VERSION:-}
|
||||
fi
|
||||
if [[ -z ${DESIRED_CUDA:-} ]]; then
|
||||
export DESIRED_CUDA=${MATRIX_DESIRED_CUDA:-}
|
||||
fi
|
||||
if [[ -z ${DESIRED_DEVTOOLSET:-} ]]; then
|
||||
export DESIRED_DEVTOOLSET=${MATRIX_DESIRED_DEVTOOLSET:-}
|
||||
fi
|
||||
if [[ -z ${PACKAGE_TYPE:-} ]]; then
|
||||
export PACKAGE_TYPE=${MATRIX_PACKAGE_TYPE:-}
|
||||
fi
|
||||
|
||||
# The install root depends on both the package type and the os
|
||||
# All MacOS packages use conda, even for the wheel packages.
|
||||
if [[ "$PACKAGE_TYPE" == libtorch ]]; then
|
||||
# NOTE: Only $PWD works on both CentOS and Ubuntu
|
||||
export install_root="$PWD"
|
||||
else
|
||||
|
||||
if [[ $DESIRED_PYTHON =~ ([0-9].[0-9]+)t ]]; then
|
||||
# For Python versions of the form major.minor + 't' (e.g. '3.13t'), keep the original version
|
||||
py_dot="$DESIRED_PYTHON"
|
||||
elif [[ $DESIRED_PYTHON =~ ([0-9].[0-9]+) ]]; then
|
||||
# Strip everything but major.minor from DESIRED_PYTHON version
|
||||
py_dot="${BASH_REMATCH[0]}"
|
||||
else
|
||||
echo "Unexpected ${DESIRED_PYTHON} format"
|
||||
exit 1
|
||||
fi
|
||||
export install_root="$(dirname $(which python))/../lib/python${py_dot}/site-packages/torch/"
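# e.g. for a hypothetical manylinux interpreter at /opt/python/cp311-cp311/bin/python
# with DESIRED_PYTHON=3.11, this is effectively
# /opt/python/cp311-cp311/lib/python3.11/site-packages/torch/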
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Setup XPU ENV
|
||||
###############################################################################
|
||||
if [[ "$DESIRED_CUDA" == 'xpu' ]]; then
|
||||
set +u
|
||||
# Refer https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpus.html
|
||||
source /opt/intel/oneapi/compiler/latest/env/vars.sh
|
||||
source /opt/intel/oneapi/pti/latest/env/vars.sh
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Check GCC ABI
|
||||
###############################################################################
|
||||
|
||||
# NOTE [ Building libtorch with old vs. new gcc ABI ]
|
||||
#
|
||||
# Packages built with one version of ABI could not be linked against by client
|
||||
# C++ libraries that were compiled using the other version of ABI. Since both
|
||||
# gcc ABIs are still common in the wild, we need to support both ABIs. Currently:
|
||||
#
|
||||
# - All the nightlies built on CentOS 7 + devtoolset7 use the old gcc ABI.
|
||||
# - All the nightlies built on Ubuntu 16.04 + gcc 5.4 use the new gcc ABI.
|
||||
|
||||
echo "Checking that the gcc ABI is what we expect"
|
||||
if [[ "$(uname)" != 'Darwin' ]]; then
|
||||
function is_expected() {
|
||||
if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* || "$DESIRED_CUDA" == *"rocm"* ]]; then
|
||||
if [[ "$1" -gt 0 || "$1" == "ON " ]]; then
|
||||
echo 1
|
||||
fi
|
||||
else
|
||||
if [[ -z "$1" || "$1" == 0 || "$1" == "OFF" ]]; then
|
||||
echo 1
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# First we check that the env var in TorchConfig.cmake is correct
|
||||
|
||||
# We search for D_GLIBCXX_USE_CXX11_ABI=1 in torch/TorchConfig.cmake
|
||||
torch_config="${install_root}/share/cmake/Torch/TorchConfig.cmake"
|
||||
if [[ ! -f "$torch_config" ]]; then
|
||||
echo "No TorchConfig.cmake found!"
|
||||
ls -lah "$install_root/share/cmake/Torch"
|
||||
exit 1
|
||||
fi
|
||||
echo "Checking the TorchConfig.cmake"
|
||||
cat "$torch_config"
|
||||
|
||||
# The sed call below is
|
||||
# don't print lines by default (only print the line we want)
|
||||
# -n
|
||||
# execute the following expression
|
||||
# e
|
||||
# replace lines that match with the first capture group and print
|
||||
# s/.*D_GLIBCXX_USE_CXX11_ABI=\(.\)".*/\1/p
|
||||
# any characters, D_GLIBCXX_USE_CXX11_ABI=, exactly one any character, a
|
||||
# quote, any characters
|
||||
# Note the exactly one single character after the '='. In the case that the
|
||||
# variable is not set the '=' will be followed by a '"' immediately and the
|
||||
# line will fail the match and nothing will be printed; this is what we
|
||||
# want. Otherwise it will capture the 0 or 1 after the '='.
|
||||
# /.*D_GLIBCXX_USE_CXX11_ABI=\(.\)".*/
|
||||
# replace the matched line with the capture group and print
|
||||
# /\1/p
|
||||
actual_gcc_abi="$(sed -ne 's/.*D_GLIBCXX_USE_CXX11_ABI=\(.\)".*/\1/p' < "$torch_config")"
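# Illustration with a hypothetical TorchConfig.cmake line: for
#   set(TORCH_CXX_FLAGS "-D_GLIBCXX_USE_CXX11_ABI=1")
# the sed call prints "1"; if the flag carries no value ("-D_GLIBCXX_USE_CXX11_ABI=")
# nothing matches and nothing is printed, which is_expected above treats as the
# pre-cxx11 ABI.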
|
||||
if [[ "$(is_expected "$actual_gcc_abi")" != 1 ]]; then
|
||||
echo "gcc ABI $actual_gcc_abi not as expected."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# We also check that there are [not] cxx11 symbols in libtorch
|
||||
#
|
||||
echo "Checking that symbols in libtorch.so have the right gcc abi"
|
||||
python3 "$(dirname ${BASH_SOURCE[0]})/smoke_test/check_binary_symbols.py"
|
||||
|
||||
echo "cxx11 symbols seem to be in order"
|
||||
fi # if on Darwin
|
||||
|
||||
###############################################################################
|
||||
# Check for no OpenBLAS
|
||||
# TODO Check for no Protobuf symbols (not finished)
|
||||
# Print *all* runtime dependencies
|
||||
###############################################################################
|
||||
# We have to loop through all shared libraries for this
|
||||
if [[ "$(uname)" == 'Darwin' ]]; then
|
||||
all_dylibs=($(find "$install_root" -name '*.dylib'))
|
||||
for dylib in "${all_dylibs[@]}"; do
|
||||
echo "All dependencies of $dylib are $(otool -L $dylib) with rpath $(otool -l $dylib | grep LC_RPATH -A2)"
|
||||
|
||||
# Check that OpenBlas is not linked to on Macs
|
||||
echo "Checking the OpenBLAS is not linked to"
|
||||
if [[ -n "$(otool -L $dylib | grep -i openblas)" ]]; then
|
||||
echo "ERROR: Found openblas as a dependency of $dylib"
|
||||
echo "Full dependencies is: $(otool -L $dylib)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check for protobuf symbols
|
||||
#proto_symbols="$(nm $dylib | grep protobuf)" || true
|
||||
#if [[ -n "$proto_symbols" ]]; then
|
||||
# echo "ERROR: Detected protobuf symbols in $dylib"
|
||||
# echo "Symbols are $proto_symbols"
|
||||
# exit 1
|
||||
#fi
|
||||
done
|
||||
else
|
||||
all_libs=($(find "$install_root" -name '*.so'))
|
||||
for lib in "${all_libs[@]}"; do
|
||||
echo "All dependencies of $lib are $(ldd $lib) with runpath $(objdump -p $lib | grep RUNPATH)"
|
||||
|
||||
# Check for protobuf symbols
|
||||
#proto_symbols=$(nm $lib | grep protobuf) || true
|
||||
#if [[ -n "$proto_symbols" ]]; then
|
||||
# echo "ERROR: Detected protobuf symbols in $lib"
|
||||
# echo "Symbols are $proto_symbols"
|
||||
# exit 1
|
||||
#fi
|
||||
done
|
||||
fi
|
||||
|
||||
setup_link_flags () {
|
||||
REF_LIB="-Wl,-R${install_root}/lib"
|
||||
if [[ "$(uname)" == 'Darwin' ]]; then
|
||||
REF_LIB="-Wl,-rpath ${install_root}/lib"
|
||||
fi
|
||||
ADDITIONAL_LINKER_FLAGS=""
|
||||
if [[ "$(uname)" == 'Linux' ]]; then
|
||||
ADDITIONAL_LINKER_FLAGS="-Wl,--no-as-needed"
|
||||
fi
|
||||
C10_LINK_FLAGS=""
|
||||
if [ -f "${install_root}/lib/libc10.so" ] || [ -f "${install_root}/lib/libc10.dylib" ]; then
|
||||
C10_LINK_FLAGS="-lc10"
|
||||
fi
|
||||
TORCH_CPU_LINK_FLAGS=""
|
||||
if [ -f "${install_root}/lib/libtorch_cpu.so" ] || [ -f "${install_root}/lib/libtorch_cpu.dylib" ]; then
|
||||
TORCH_CPU_LINK_FLAGS="-ltorch_cpu"
|
||||
fi
|
||||
TORCH_CUDA_LINK_FLAGS=""
|
||||
if [ -f "${install_root}/lib/libtorch_cuda.so" ] || [ -f "${install_root}/lib/libtorch_cuda.dylib" ]; then
|
||||
TORCH_CUDA_LINK_FLAGS="-ltorch_cuda"
|
||||
elif [ -f "${install_root}/lib/libtorch_cuda_cpp.so" ] && [ -f "${install_root}/lib/libtorch_cuda_cpp.so" ] || \
|
||||
[ -f "${install_root}/lib/libtorch_cuda_cu.dylib" ] && [ -f "${install_root}/lib/libtorch_cuda_cu.dylib" ]; then
|
||||
TORCH_CUDA_LINK_FLAGS="-ltorch_cuda_cpp -ltorch_cuda_cu"
|
||||
fi
|
||||
}
|
||||
|
||||
TEST_CODE_DIR="$(dirname $(realpath ${BASH_SOURCE[0]}))/test_example_code"
|
||||
build_and_run_example_cpp () {
|
||||
if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then
|
||||
GLIBCXX_USE_CXX11_ABI=1
|
||||
else
|
||||
GLIBCXX_USE_CXX11_ABI=0
|
||||
fi
|
||||
setup_link_flags
|
||||
g++ ${TEST_CODE_DIR}/$1.cpp -I${install_root}/include -I${install_root}/include/torch/csrc/api/include -D_GLIBCXX_USE_CXX11_ABI=$GLIBCXX_USE_CXX11_ABI -std=gnu++17 -L${install_root}/lib ${REF_LIB} ${ADDITIONAL_LINKER_FLAGS} -ltorch $TORCH_CPU_LINK_FLAGS $TORCH_CUDA_LINK_FLAGS $C10_LINK_FLAGS -o $1
|
||||
./$1
|
||||
}
|
||||
|
||||
build_example_cpp_with_incorrect_abi () {
|
||||
if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then
|
||||
GLIBCXX_USE_CXX11_ABI=0
|
||||
else
|
||||
GLIBCXX_USE_CXX11_ABI=1
|
||||
fi
|
||||
set +e
|
||||
setup_link_flags
|
||||
g++ ${TEST_CODE_DIR}/$1.cpp -I${install_root}/include -I${install_root}/include/torch/csrc/api/include -D_GLIBCXX_USE_CXX11_ABI=$GLIBCXX_USE_CXX11_ABI -std=gnu++17 -L${install_root}/lib ${REF_LIB} ${ADDITIONAL_LINKER_FLAGS} -ltorch $TORCH_CPU_LINK_FLAGS $TORCH_CUDA_LINK_FLAGS $C10_LINK_FLAGS -o $1
|
||||
ERRCODE=$?
|
||||
set -e
|
||||
if [ "$ERRCODE" -eq "0" ]; then
|
||||
echo "Building example with incorrect ABI didn't throw error. Aborting."
|
||||
exit 1
|
||||
else
|
||||
echo "Building example with incorrect ABI throws expected error. Proceeding."
|
||||
fi
|
||||
}
|
||||
|
||||
###############################################################################
|
||||
# Check simple Python/C++ calls
|
||||
###############################################################################
|
||||
if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
|
||||
# NS: Set LD_LIBRARY_PATH for CUDA builds, but perhaps it should be removed
|
||||
if [[ "$DESIRED_CUDA" == "cu"* ]]; then
|
||||
export LD_LIBRARY_PATH=/usr/local/cuda/lib64
|
||||
fi
|
||||
build_and_run_example_cpp simple-torch-test
|
||||
# `_GLIBCXX_USE_CXX11_ABI` is always ignored by gcc in devtoolset7, so we test
|
||||
# the expected failure case for Ubuntu 16.04 + gcc 5.4 only.
|
||||
if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then
|
||||
build_example_cpp_with_incorrect_abi simple-torch-test
|
||||
fi
|
||||
else
|
||||
pushd /tmp
|
||||
python -c 'import torch'
|
||||
popd
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Check torch.git_version
|
||||
###############################################################################
|
||||
if [[ "$PACKAGE_TYPE" != 'libtorch' ]]; then
|
||||
pushd /tmp
|
||||
python -c 'import torch; assert torch.version.git_version != "Unknown"'
|
||||
python -c 'import torch; assert torch.version.git_version != None'
|
||||
popd
|
||||
fi
|
||||
|
||||
|
||||
###############################################################################
|
||||
# Check for MKL
|
||||
###############################################################################
|
||||
|
||||
if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
|
||||
echo "Checking that MKL is available"
|
||||
build_and_run_example_cpp check-torch-mkl
|
||||
elif [[ "$(uname -m)" != "arm64" && "$(uname -m)" != "s390x" ]]; then
|
||||
if [[ "$(uname)" != 'Darwin' || "$PACKAGE_TYPE" != *wheel ]]; then
|
||||
if [[ "$(uname -m)" == "aarch64" ]]; then
|
||||
echo "Checking that MKLDNN is available on aarch64"
|
||||
pushd /tmp
|
||||
python -c 'import torch; exit(0 if torch.backends.mkldnn.is_available() else 1)'
|
||||
popd
|
||||
else
|
||||
echo "Checking that MKL is available"
|
||||
pushd /tmp
|
||||
python -c 'import torch; exit(0 if torch.backends.mkl.is_available() else 1)'
|
||||
popd
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Check for XNNPACK
|
||||
###############################################################################
|
||||
|
||||
if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
|
||||
echo "Checking that XNNPACK is available"
|
||||
build_and_run_example_cpp check-torch-xnnpack
|
||||
else
|
||||
if [[ "$(uname)" != 'Darwin' || "$PACKAGE_TYPE" != *wheel ]] && [[ "$(uname -m)" != "s390x" ]]; then
|
||||
echo "Checking that XNNPACK is available"
|
||||
pushd /tmp
|
||||
python -c 'import torch.backends.xnnpack; exit(0 if torch.backends.xnnpack.enabled else 1)'
|
||||
popd
|
||||
fi
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Check CUDA configured correctly
|
||||
###############################################################################
|
||||
# Skip these for Windows machines without GPUs
|
||||
if [[ "$OSTYPE" == "msys" ]]; then
|
||||
GPUS=$(wmic path win32_VideoController get name)
|
||||
if [[ ! "$GPUS" == *NVIDIA* ]]; then
|
||||
echo "Skip CUDA tests for machines without a Nvidia GPU card"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
# Test that CUDA builds are setup correctly
|
||||
if [[ "$DESIRED_CUDA" != 'cpu' && "$DESIRED_CUDA" != 'xpu' && "$DESIRED_CUDA" != 'cpu-cxx11-abi' && "$DESIRED_CUDA" != *"rocm"* && "$(uname -m)" != "s390x" ]]; then
|
||||
if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
|
||||
build_and_run_example_cpp check-torch-cuda
|
||||
else
|
||||
pushd /tmp
|
||||
echo "Checking that CUDA archs are setup correctly"
|
||||
timeout 20 python -c 'import torch; torch.randn([3,5]).cuda()'
|
||||
|
||||
# These have to run after CUDA is initialized
|
||||
|
||||
echo "Checking that magma is available"
|
||||
python -c 'import torch; torch.rand(1).cuda(); exit(0 if torch.cuda.has_magma else 1)'
|
||||
|
||||
echo "Checking that CuDNN is available"
|
||||
python -c 'import torch; exit(0 if torch.backends.cudnn.is_available() else 1)'
|
||||
|
||||
# Validates that the build is free of the linker regressions reported in https://github.com/pytorch/pytorch/issues/57744
|
||||
echo "Checking that exception handling works"
|
||||
python -c "import torch; from unittest import TestCase;TestCase().assertRaises(RuntimeError, lambda:torch.eye(7, 7, device='cuda:7'))"
|
||||
|
||||
echo "Checking that basic RNN works"
|
||||
python ${TEST_CODE_DIR}/rnn_smoke.py
|
||||
|
||||
echo "Checking that basic CNN works"
|
||||
python "${TEST_CODE_DIR}/cnn_smoke.py"
|
||||
|
||||
echo "Test that linalg works"
|
||||
python -c "import torch;x=torch.rand(3,3,device='cuda');print(torch.linalg.svd(torch.mm(x.t(), x)))"
|
||||
|
||||
popd
|
||||
fi # if libtorch
|
||||
fi # if cuda
|
||||
|
||||
##########################
|
||||
# Run parts of smoke tests
|
||||
##########################
|
||||
if [[ "$PACKAGE_TYPE" != 'libtorch' ]]; then
|
||||
pushd "$(dirname ${BASH_SOURCE[0]})/smoke_test"
|
||||
python -c "from smoke_test import test_linalg; test_linalg()"
|
||||
if [[ "$DESIRED_CUDA" == *cuda* ]]; then
|
||||
python -c "from smoke_test import test_linalg; test_linalg('cuda')"
|
||||
fi
|
||||
popd
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Check PyTorch supports TCP_TLS gloo transport
|
||||
###############################################################################
|
||||
|
||||
if [[ "$(uname)" == 'Linux' && "$PACKAGE_TYPE" != 'libtorch' ]]; then
|
||||
GLOO_CHECK="import torch.distributed as dist
|
||||
try:
|
||||
dist.init_process_group('gloo', rank=0, world_size=1)
|
||||
except RuntimeError as e:
|
||||
print(e)
|
||||
"
|
||||
RESULT=`GLOO_DEVICE_TRANSPORT=TCP_TLS MASTER_ADDR=localhost MASTER_PORT=63945 python -c "$GLOO_CHECK"`
|
||||
GLOO_TRANSPORT_IS_NOT_SUPPORTED='gloo transport is not supported'
|
||||
if [[ "$RESULT" =~ "$GLOO_TRANSPORT_IS_NOT_SUPPORTED" ]]; then
|
||||
echo "PyTorch doesn't support TLS_TCP transport, please build with USE_GLOO_WITH_OPENSSL=1"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Check for C++ ABI compatibility between gcc7 and gcc9 compiled binaries
|
||||
###############################################################################
|
||||
if [[ "$(uname)" == 'Linux' && ("$PACKAGE_TYPE" == 'conda' || "$PACKAGE_TYPE" == 'manywheel')]]; then
|
||||
pushd /tmp
|
||||
python -c "import torch; exit(0 if torch.compiled_with_cxx11_abi() else (0 if torch._C._PYBIND11_BUILD_ABI == '_cxxabi1011' else 1))"
|
||||
popd
|
||||
fi
|
.ci/pytorch/run_tests.sh (new executable file, 436 lines)
@@ -0,0 +1,436 @@
#!/bin/bash
|
||||
# shellcheck disable=SC2086,SC2048,SC2068,SC2145,SC2034,SC2207,SC2143
|
||||
# TODO: Re-enable shellchecks above
|
||||
|
||||
set -eux -o pipefail
|
||||
|
||||
# Essentially runs pytorch/test/run_test.py, but keeps track of which tests to
|
||||
# skip in a centralized place.
|
||||
#
|
||||
# TODO Except for a few tests, this entire file is a giant TODO. Why are these
|
||||
# tests failing?
|
||||
# TODO deal with Windows
|
||||
|
||||
# This script expects to be in the pytorch root folder
|
||||
if [[ ! -d 'test' || ! -f 'test/run_test.py' ]]; then
|
||||
echo "builder/test.sh expects to be run from the Pytorch root directory " \
|
||||
"but I'm actually in $(pwd)"
|
||||
exit 2
|
||||
fi
|
||||
|
||||
# Allow master skip of all tests
|
||||
if [[ -n "${SKIP_ALL_TESTS:-}" ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# If given specific test params then just run those
|
||||
if [[ -n "${RUN_TEST_PARAMS:-}" ]]; then
|
||||
echo "$(date) :: Calling user-command $(pwd)/test/run_test.py ${RUN_TEST_PARAMS[@]}"
|
||||
python test/run_test.py ${RUN_TEST_PARAMS[@]}
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Function to retry functions that sometimes timeout or have flaky failures
|
||||
retry () {
|
||||
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
|
||||
}
|
||||
|
||||
# Parameters
|
||||
##############################################################################
|
||||
if [[ "$#" != 3 ]]; then
|
||||
if [[ -z "${DESIRED_PYTHON:-}" || -z "${DESIRED_CUDA:-}" || -z "${PACKAGE_TYPE:-}" ]]; then
|
||||
echo "USAGE: run_tests.sh PACKAGE_TYPE DESIRED_PYTHON DESIRED_CUDA"
|
||||
echo "The env variable PACKAGE_TYPE must be set to 'conda' or 'manywheel' or 'libtorch'"
|
||||
echo "The env variable DESIRED_PYTHON must be set like '2.7mu' or '3.6m' etc"
|
||||
echo "The env variable DESIRED_CUDA must be set like 'cpu' or 'cu80' etc"
|
||||
exit 1
|
||||
fi
|
||||
package_type="$PACKAGE_TYPE"
|
||||
py_ver="$DESIRED_PYTHON"
|
||||
cuda_ver="$DESIRED_CUDA"
|
||||
else
|
||||
package_type="$1"
|
||||
py_ver="$2"
|
||||
cuda_ver="$3"
|
||||
fi
|
||||
|
||||
if [[ "$cuda_ver" == 'cpu-cxx11-abi' ]]; then
|
||||
cuda_ver="cpu"
|
||||
fi
|
||||
|
||||
# cu80, cu90, cu100, cpu
|
||||
if [[ ${#cuda_ver} -eq 4 ]]; then
|
||||
cuda_ver_majmin="${cuda_ver:2:1}.${cuda_ver:3:1}"
|
||||
elif [[ ${#cuda_ver} -eq 5 ]]; then
|
||||
cuda_ver_majmin="${cuda_ver:2:2}.${cuda_ver:4:1}"
|
||||
fi
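# Illustration: "cu90" -> "9.0" (four characters), "cu118" -> "11.8" (five characters).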
|
||||
|
||||
NUMPY_PACKAGE=""
|
||||
if [[ ${py_ver} == "3.10" ]]; then
|
||||
PROTOBUF_PACKAGE="protobuf>=3.17.2"
|
||||
NUMPY_PACKAGE="numpy>=1.21.2"
|
||||
else
|
||||
PROTOBUF_PACKAGE="protobuf=3.14.0"
|
||||
fi
|
||||
|
||||
# Environment initialization
|
||||
if [[ "$(uname)" == Darwin ]]; then
|
||||
# Install the testing dependencies
|
||||
retry conda install -yq future hypothesis ${NUMPY_PACKAGE} ${PROTOBUF_PACKAGE} pytest setuptools six typing_extensions pyyaml
|
||||
else
|
||||
retry pip install -qr requirements.txt || true
|
||||
retry pip install -q hypothesis protobuf pytest setuptools || true
|
||||
numpy_ver=1.15
|
||||
case "$(python --version 2>&1)" in
|
||||
*2* | *3.5* | *3.6*)
|
||||
numpy_ver=1.11
|
||||
;;
|
||||
esac
|
||||
retry pip install -q "numpy==${numpy_ver}" || true
|
||||
fi
|
||||
|
||||
echo "Testing with:"
|
||||
pip freeze
|
||||
conda list || true
|
||||
|
||||
##############################################################################
|
||||
# Smoke tests
|
||||
##############################################################################
|
||||
# TODO use check_binary.sh, which requires making sure it runs on Windows
|
||||
pushd /
|
||||
echo "Smoke testing imports"
|
||||
python -c 'import torch'
|
||||
|
||||
# Test that MKL is there
|
||||
if [[ "$(uname)" == 'Darwin' && "$package_type" == *wheel ]]; then
|
||||
echo 'Not checking for MKL on Darwin wheel packages'
|
||||
else
|
||||
echo "Checking that MKL is available"
|
||||
python -c 'import torch; exit(0 if torch.backends.mkl.is_available() else 1)'
|
||||
fi
|
||||
|
||||
if [[ "$OSTYPE" == "msys" ]]; then
|
||||
GPUS=$(wmic path win32_VideoController get name)
|
||||
if [[ ! "$GPUS" == *NVIDIA* ]]; then
|
||||
echo "Skip CUDA tests for machines without a Nvidia GPU card"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
# Test that the version number is consistent during building and testing
|
||||
if [[ "$PYTORCH_BUILD_NUMBER" -gt 1 ]]; then
|
||||
expected_version="${PYTORCH_BUILD_VERSION}.post${PYTORCH_BUILD_NUMBER}"
|
||||
else
|
||||
expected_version="${PYTORCH_BUILD_VERSION}"
|
||||
fi
|
||||
echo "Checking that we are testing the package that is just built"
|
||||
python -c "import torch; exit(0 if torch.__version__ == '$expected_version' else 1)"
|
||||
|
||||
# Test that CUDA builds are setup correctly
|
||||
if [[ "$cuda_ver" != 'cpu' ]]; then
|
||||
cuda_installed=1
|
||||
nvidia-smi || cuda_installed=0
|
||||
if [[ "$cuda_installed" == 0 ]]; then
|
||||
echo "Skip CUDA tests for machines without a Nvidia GPU card"
|
||||
else
|
||||
# Test CUDA archs
|
||||
echo "Checking that CUDA archs are setup correctly"
|
||||
timeout 20 python -c 'import torch; torch.randn([3,5]).cuda()'
|
||||
|
||||
# These have to run after CUDA is initialized
|
||||
echo "Checking that magma is available"
|
||||
python -c 'import torch; torch.rand(1).cuda(); exit(0 if torch.cuda.has_magma else 1)'
|
||||
echo "Checking that CuDNN is available"
|
||||
python -c 'import torch; exit(0 if torch.backends.cudnn.is_available() else 1)'
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check that OpenBlas is not linked to on MacOS
|
||||
if [[ "$(uname)" == 'Darwin' ]]; then
|
||||
echo "Checking the OpenBLAS is not linked to"
|
||||
all_dylibs=($(find "$(python -c "import site; print(site.getsitepackages()[0])")"/torch -name '*.dylib'))
|
||||
for dylib in "${all_dylibs[@]}"; do
|
||||
if [[ -n "$(otool -L $dylib | grep -i openblas)" ]]; then
|
||||
echo "Found openblas as a dependency of $dylib"
|
||||
echo "Full dependencies is: $(otool -L $dylib)"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Checking that OpenMP is available"
|
||||
python -c "import torch; exit(0 if torch.backends.openmp.is_available() else 1)"
|
||||
fi
|
||||
|
||||
popd
|
||||
|
||||
# TODO re-enable the other tests after the nightlies are moved to CI. This is
|
||||
# because the binaries keep breaking, often from additional tests, that aren't
|
||||
# real problems. Once these are on circleci and a smoke-binary-build is added
|
||||
# to PRs then this should stop happening and these can be re-enabled.
|
||||
echo "Not running unit tests. Hopefully these problems are caught by CI"
|
||||
exit 0
|
||||
|
||||
|
||||
##############################################################################
|
||||
# Running unit tests (except not right now)
|
||||
##############################################################################
|
||||
echo "$(date) :: Starting tests for $package_type package for python$py_ver and $cuda_ver"
|
||||
|
||||
# We keep track of the exact tests to skip, as otherwise we would hardly be
# running any tests. But because of issues with pytest vs. plain python test
# runs, and because of special snowflake tests in test/run_test.py, we also
# take special care of those.
|
||||
tests_to_skip=()
|
||||
|
||||
#
|
||||
# Entire file exclusions
|
||||
##############################################################################
|
||||
entire_file_exclusions=("-x")
|
||||
|
||||
# cpp_extensions doesn't work with pytest, so we exclude it from the pytest run
|
||||
# here and then manually run it later. Note that this is only because this
|
||||
# entire_file_exclusions flag is only passed to the pytest run
|
||||
entire_file_exclusions+=("cpp_extensions")
|
||||
|
||||
# TODO temporary line to fix next days nightlies, but should be removed when
|
||||
# issue is fixed
|
||||
entire_file_exclusions+=('type_info')
|
||||
|
||||
if [[ "$cuda_ver" == 'cpu' ]]; then
|
||||
# test/test_cuda.py exits early if the installed torch is not built with
|
||||
# CUDA, but the exit doesn't work when running with pytest, so pytest will
|
||||
# still try to run all the CUDA tests and then fail
|
||||
entire_file_exclusions+=("cuda")
|
||||
entire_file_exclusions+=("nccl")
|
||||
fi
|
||||
|
||||
if [[ "$(uname)" == 'Darwin' || "$OSTYPE" == "msys" ]]; then
|
||||
# pytest on Mac doesn't like the exits in these files
|
||||
entire_file_exclusions+=('c10d')
|
||||
entire_file_exclusions+=('distributed')
|
||||
|
||||
# pytest doesn't mind the exit but fails the tests. On Mac we run this
|
||||
# later without pytest
|
||||
entire_file_exclusions+=('thd_distributed')
|
||||
fi
|
||||
|
||||
|
||||
#
|
||||
# Universal flaky tests
|
||||
##############################################################################
|
||||
|
||||
# RendezvousEnvTest sometimes hangs forever
|
||||
# Otherwise it will fail on CUDA with
|
||||
# Traceback (most recent call last):
|
||||
# File "test_c10d.py", line 179, in test_common_errors
|
||||
# next(gen)
|
||||
# AssertionError: ValueError not raised
|
||||
tests_to_skip+=('RendezvousEnvTest and test_common_errors')
|
||||
|
||||
# This hung forever once on conda_3.5_cu92
|
||||
tests_to_skip+=('TestTorch and test_sum_dim')
|
||||
|
||||
# test_trace_warn isn't actually flaky, but it doesn't work with pytest so we
|
||||
# just skip it
|
||||
tests_to_skip+=('TestJit and test_trace_warn')
|
||||
#
|
||||
# Python specific flaky tests
|
||||
##############################################################################
|
||||
|
||||
# test_dataloader.py:721: AssertionError
|
||||
# looks like a timeout, but interestingly only appears on python 3
|
||||
if [[ "$py_ver" == 3* ]]; then
|
||||
tests_to_skip+=('TestDataLoader and test_proper_exit')
|
||||
fi
|
||||
|
||||
#
|
||||
# CUDA flaky tests, all package types
|
||||
##############################################################################
|
||||
if [[ "$cuda_ver" != 'cpu' ]]; then
|
||||
|
||||
#
|
||||
# DistributedDataParallelTest
|
||||
# All of these seem to fail
|
||||
tests_to_skip+=('DistributedDataParallelTest')
|
||||
|
||||
#
|
||||
# RendezvousEnvTest
|
||||
# Traceback (most recent call last):
|
||||
# File "test_c10d.py", line 201, in test_nominal
|
||||
# store0, rank0, size0 = next(gen0)
|
||||
# File "/opt/python/cp36-cp36m/lib/python3.6/site-packages/torch/distributed/rendezvous.py", line 131, in _env_rendezvous_handler
|
||||
# store = TCPStore(master_addr, master_port, start_daemon)
|
||||
# RuntimeError: Address already in use
|
||||
tests_to_skip+=('RendezvousEnvTest and test_nominal')
|
||||
|
||||
#
|
||||
# TestCppExtension
|
||||
#
|
||||
# Traceback (most recent call last):
|
||||
# File "test_cpp_extensions.py", line 134, in test_jit_cudnn_extension
|
||||
# with_cuda=True)
|
||||
# File "/opt/python/cp35-cp35m/lib/python3.5/site-packages/torch/utils/cpp_extension.py", line 552, in load
|
||||
# with_cuda)
|
||||
# File "/opt/python/cp35-cp35m/lib/python3.5/site-packages/torch/utils/cpp_extension.py", line 729, in _jit_compile
|
||||
# return _import_module_from_library(name, build_directory)
|
||||
# File "/opt/python/cp35-cp35m/lib/python3.5/site-packages/torch/utils/cpp_extension.py", line 867, in _import_module_from_library
|
||||
# return imp.load_module(module_name, file, path, description)
|
||||
# File "/opt/python/cp35-cp35m/lib/python3.5/imp.py", line 243, in load_module
|
||||
# return load_dynamic(name, filename, file)
|
||||
# File "/opt/python/cp35-cp35m/lib/python3.5/imp.py", line 343, in load_dynamic
|
||||
# return _load(spec)
|
||||
# File "<frozen importlib._bootstrap>", line 693, in _load
|
||||
# File "<frozen importlib._bootstrap>", line 666, in _load_unlocked
|
||||
# File "<frozen importlib._bootstrap>", line 577, in module_from_spec
|
||||
# File "<frozen importlib._bootstrap_external>", line 938, in create_module
|
||||
# File "<frozen importlib._bootstrap>", line 222, in _call_with_frames_removed
|
||||
# ImportError: libcudnn.so.7: cannot open shared object file: No such file or directory
|
||||
tests_to_skip+=('TestCppExtension and test_jit_cudnn_extension')
|
||||
|
||||
#
|
||||
# TestCuda
|
||||
#
|
||||
|
||||
# 3.7_cu80
|
||||
# RuntimeError: CUDA error: out of memory
|
||||
tests_to_skip+=('TestCuda and test_arithmetic_large_tensor')
|
||||
|
||||
# 3.7_cu80
|
||||
# RuntimeError: cuda runtime error (2) : out of memory at /opt/conda/conda-bld/pytorch-nightly_1538097262541/work/aten/src/THC/THCTensorCopy.cu:205
|
||||
tests_to_skip+=('TestCuda and test_autogpu')
|
||||
|
||||
#
|
||||
# TestDistBackend
|
||||
#
|
||||
|
||||
# Traceback (most recent call last):
|
||||
# File "test_thd_distributed.py", line 1046, in wrapper
|
||||
# self._join_and_reduce(fn)
|
||||
# File "test_thd_distributed.py", line 1108, in _join_and_reduce
|
||||
# self.assertEqual(p.exitcode, first_process.exitcode)
|
||||
# File "/pytorch/test/common.py", line 399, in assertEqual
|
||||
# super(TestCase, self).assertEqual(x, y, message)
|
||||
# AssertionError: None != 77 :
|
||||
tests_to_skip+=('TestDistBackend and test_all_gather_group')
|
||||
tests_to_skip+=('TestDistBackend and test_all_reduce_group_max')
|
||||
tests_to_skip+=('TestDistBackend and test_all_reduce_group_min')
|
||||
tests_to_skip+=('TestDistBackend and test_all_reduce_group_sum')
|
||||
tests_to_skip+=('TestDistBackend and test_all_reduce_group_product')
|
||||
tests_to_skip+=('TestDistBackend and test_barrier_group')
|
||||
tests_to_skip+=('TestDistBackend and test_broadcast_group')
|
||||
|
||||
# Traceback (most recent call last):
|
||||
# File "test_thd_distributed.py", line 1046, in wrapper
|
||||
# self._join_and_reduce(fn)
|
||||
# File "test_thd_distributed.py", line 1108, in _join_and_reduce
|
||||
# self.assertEqual(p.exitcode, first_process.exitcode)
|
||||
# File "/pytorch/test/common.py", line 397, in assertEqual
|
||||
# super(TestCase, self).assertLessEqual(abs(x - y), prec, message)
|
||||
# AssertionError: 12 not less than or equal to 1e-05
|
||||
tests_to_skip+=('TestDistBackend and test_barrier')
|
||||
|
||||
# Traceback (most recent call last):
|
||||
# File "test_distributed.py", line 1267, in wrapper
|
||||
# self._join_and_reduce(fn)
|
||||
# File "test_distributed.py", line 1350, in _join_and_reduce
|
||||
# self.assertEqual(p.exitcode, first_process.exitcode)
|
||||
# File "/pytorch/test/common.py", line 399, in assertEqual
|
||||
# super(TestCase, self).assertEqual(x, y, message)
|
||||
# AssertionError: None != 1
|
||||
tests_to_skip+=('TestDistBackend and test_broadcast')
|
||||
|
||||
# Memory leak very similar to all the conda ones below, but appears on manywheel
|
||||
# 3.6m_cu80
|
||||
# AssertionError: 1605632 not less than or equal to 1e-05 : __main__.TestEndToEndHybridFrontendModels.test_vae_cuda leaked 1605632 bytes CUDA memory on device 0
|
||||
tests_to_skip+=('TestEndToEndHybridFrontendModels and test_vae_cuda')
|
||||
|
||||
# ________________________ TestNN.test_embedding_bag_cuda ________________________
|
||||
#
|
||||
# self = <test_nn.TestNN testMethod=test_embedding_bag_cuda>
|
||||
# dtype = torch.float32
|
||||
#
|
||||
# @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
|
||||
# @repeat_test_for_types(ALL_TENSORTYPES)
|
||||
# @skipIfRocm
|
||||
# def test_embedding_bag_cuda(self, dtype=torch.float):
|
||||
# self._test_EmbeddingBag(True, 'sum', False, dtype)
|
||||
# self._test_EmbeddingBag(True, 'mean', False, dtype)
|
||||
# self._test_EmbeddingBag(True, 'max', False, dtype)
|
||||
# if dtype != torch.half:
|
||||
# # torch.cuda.sparse.HalfTensor is not enabled.
|
||||
# self._test_EmbeddingBag(True, 'sum', True, dtype)
|
||||
# > self._test_EmbeddingBag(True, 'mean', True, dtype)
|
||||
#
|
||||
# test_nn.py:2144:
|
||||
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
|
||||
# test_nn.py:2062: in _test_EmbeddingBag
|
||||
# _test_vs_Embedding(N, D, B, L)
|
||||
# test_nn.py:2059: in _test_vs_Embedding
|
||||
# self.assertEqual(es_weight_grad, e.weight.grad, needed_prec)
|
||||
# common.py:373: in assertEqual
|
||||
# assertTensorsEqual(x, y)
|
||||
# common.py:365: in assertTensorsEqual
|
||||
# self.assertLessEqual(max_err, prec, message)
|
||||
# E AssertionError: tensor(0.0000, device='cuda:0', dtype=torch.float32) not less than or equal to 2e-05 :
|
||||
# 1 failed, 1202 passed, 19 skipped, 2 xfailed, 796 warnings in 1166.73 seconds =
|
||||
# Traceback (most recent call last):
|
||||
# File "test/run_test.py", line 391, in <module>
|
||||
# main()
|
||||
# File "test/run_test.py", line 383, in main
|
||||
# raise RuntimeError(message)
|
||||
tests_to_skip+=('TestNN and test_embedding_bag_cuda')
|
||||
fi
|
||||
|
||||
##############################################################################
|
||||
# MacOS specific flaky tests
|
||||
##############################################################################
|
||||
|
||||
if [[ "$(uname)" == 'Darwin' ]]; then
|
||||
# TestCppExtensions by default uses a temp folder in /tmp. This doesn't
|
||||
# work for this Mac machine cause there is only one machine and /tmp is
|
||||
# shared. (All the linux builds are on docker so have their own /tmp).
|
||||
tests_to_skip+=('TestCppExtension')
|
||||
fi
|
||||
|
||||
# Turn the set of tests to skip into an invocation that pytest understands
|
||||
excluded_tests_logic=''
|
||||
for exclusion in "${tests_to_skip[@]}"; do
|
||||
if [[ -z "$excluded_tests_logic" ]]; then
|
||||
# Only true for i==0
|
||||
excluded_tests_logic="not ($exclusion)"
|
||||
else
|
||||
excluded_tests_logic="$excluded_tests_logic and not ($exclusion)"
|
||||
fi
|
||||
done
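# Sketch: if tests_to_skip held only 'RendezvousEnvTest and test_common_errors'
# and 'TestTorch and test_sum_dim', the resulting expression would be
#   not (RendezvousEnvTest and test_common_errors) and not (TestTorch and test_sum_dim)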
|
||||
|
||||
|
||||
##############################################################################
|
||||
# Run the tests
|
||||
##############################################################################
|
||||
echo
|
||||
echo "$(date) :: Calling 'python test/run_test.py -v -p pytest ${entire_file_exclusions[@]} -- --disable-pytest-warnings -k '$excluded_tests_logic'"
|
||||
|
||||
python test/run_test.py -v -p pytest ${entire_file_exclusions[@]} -- --disable-pytest-warnings -k "'" "$excluded_tests_logic" "'"
|
||||
|
||||
echo
|
||||
echo "$(date) :: Finished 'python test/run_test.py -v -p pytest ${entire_file_exclusions[@]} -- --disable-pytest-warnings -k '$excluded_tests_logic'"
|
||||
|
||||
# cpp_extensions don't work with pytest, so we run them without pytest here,
|
||||
# except there's a failure on CUDA builds (documented above), and
|
||||
# cpp_extensions doesn't work on a shared mac machine (also documented above)
|
||||
if [[ "$cuda_ver" == 'cpu' && "$(uname)" != 'Darwin' ]]; then
|
||||
echo
|
||||
echo "$(date) :: Calling 'python test/run_test.py -v -i cpp_extensions'"
|
||||
python test/run_test.py -v -i cpp_extensions
|
||||
echo
|
||||
echo "$(date) :: Finished 'python test/run_test.py -v -i cpp_extensions'"
|
||||
fi
|
||||
|
||||
# thd_distributed can run on Mac but not in pytest
|
||||
if [[ "$(uname)" == 'Darwin' ]]; then
|
||||
echo
|
||||
echo "$(date) :: Calling 'python test/run_test.py -v -i thd_distributed'"
|
||||
python test/run_test.py -v -i thd_distributed
|
||||
echo
|
||||
echo "$(date) :: Finished 'python test/run_test.py -v -i thd_distributed'"
|
||||
fi
|
.ci/pytorch/smoke_test/check_binary_symbols.py (new executable file, 130 lines)
@@ -0,0 +1,130 @@
#!/usr/bin/env python3
|
||||
import concurrent.futures
|
||||
import distutils.sysconfig
|
||||
import functools
|
||||
import itertools
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Any, List, Tuple
|
||||
|
||||
|
||||
# We also check that there are [not] cxx11 symbols in libtorch
|
||||
#
|
||||
# To check whether it is using cxx11 ABI, check non-existence of symbol:
|
||||
PRE_CXX11_SYMBOLS = (
|
||||
"std::basic_string<",
|
||||
"std::list",
|
||||
)
|
||||
# To check whether it is using pre-cxx11 ABI, check non-existence of symbol:
|
||||
CXX11_SYMBOLS = (
|
||||
"std::__cxx11::basic_string",
|
||||
"std::__cxx11::list",
|
||||
)
|
||||
# NOTE: Checking the above symbols in all namespaces doesn't work, because
|
||||
# devtoolset7 always produces some cxx11 symbols even if we build with old ABI,
|
||||
# and CuDNN always has pre-cxx11 symbols even if we build with new ABI using gcc 5.4.
|
||||
# Instead, we *only* check the above symbols in the following namespaces:
|
||||
LIBTORCH_NAMESPACE_LIST = (
|
||||
"c10::",
|
||||
"at::",
|
||||
"caffe2::",
|
||||
"torch::",
|
||||
)
|
||||
|
||||
|
||||
def _apply_libtorch_symbols(symbols):
|
||||
return [
|
||||
re.compile(f"{x}.*{y}")
|
||||
for (x, y) in itertools.product(LIBTORCH_NAMESPACE_LIST, symbols)
|
||||
]
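# For example, the "c10::" namespace combined with "std::__cxx11::basic_string"
# yields the pattern re.compile("c10::.*std::__cxx11::basic_string").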
|
||||
|
||||
|
||||
LIBTORCH_CXX11_PATTERNS = _apply_libtorch_symbols(CXX11_SYMBOLS)
|
||||
|
||||
LIBTORCH_PRE_CXX11_PATTERNS = _apply_libtorch_symbols(PRE_CXX11_SYMBOLS)
|
||||
|
||||
|
||||
@functools.lru_cache(100)
|
||||
def get_symbols(lib: str) -> List[Tuple[str, str, str]]:
|
||||
from subprocess import check_output
|
||||
|
||||
lines = check_output(f'nm "{lib}"|c++filt', shell=True)
|
||||
return [x.split(" ", 2) for x in lines.decode("latin1").split("\n")[:-1]]
|
||||
|
||||
|
||||
def grep_symbols(lib: str, patterns: List[Any]) -> List[str]:
|
||||
def _grep_symbols(
|
||||
symbols: List[Tuple[str, str, str]], patterns: List[Any]
|
||||
) -> List[str]:
|
||||
rc = []
|
||||
for _s_addr, _s_type, s_name in symbols:
|
||||
for pattern in patterns:
|
||||
if pattern.match(s_name):
|
||||
rc.append(s_name)
|
||||
continue
|
||||
return rc
|
||||
|
||||
all_symbols = get_symbols(lib)
|
||||
num_workers = 32
|
||||
chunk_size = (len(all_symbols) + num_workers - 1) // num_workers
|
||||
|
||||
def _get_symbols_chunk(i):
|
||||
return all_symbols[i * chunk_size : (i + 1) * chunk_size]
|
||||
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
|
||||
tasks = [
|
||||
executor.submit(_grep_symbols, _get_symbols_chunk(i), patterns)
|
||||
for i in range(num_workers)
|
||||
]
|
||||
return functools.reduce(list.__add__, (x.result() for x in tasks), [])
|
||||
|
||||
|
||||
def check_lib_symbols_for_abi_correctness(lib: str, pre_cxx11_abi: bool = True) -> None:
|
||||
print(f"lib: {lib}")
|
||||
cxx11_symbols = grep_symbols(lib, LIBTORCH_CXX11_PATTERNS)
|
||||
pre_cxx11_symbols = grep_symbols(lib, LIBTORCH_PRE_CXX11_PATTERNS)
|
||||
num_cxx11_symbols = len(cxx11_symbols)
|
||||
num_pre_cxx11_symbols = len(pre_cxx11_symbols)
|
||||
print(f"num_cxx11_symbols: {num_cxx11_symbols}")
|
||||
print(f"num_pre_cxx11_symbols: {num_pre_cxx11_symbols}")
|
||||
if pre_cxx11_abi:
|
||||
if num_cxx11_symbols > 0:
|
||||
raise RuntimeError(
|
||||
f"Found cxx11 symbols, but there shouldn't be any, see: {cxx11_symbols[:100]}"
|
||||
)
|
||||
if num_pre_cxx11_symbols < 1000:
|
||||
raise RuntimeError("Didn't find enough pre-cxx11 symbols.")
|
||||
# Check for no recursive iterators, regression test for https://github.com/pytorch/pytorch/issues/133437
|
||||
rec_iter_symbols = grep_symbols(
|
||||
lib, [re.compile("std::filesystem::recursive_directory_iterator.*")]
|
||||
)
|
||||
if len(rec_iter_symbols) > 0:
|
||||
raise RuntimeError(
|
||||
f"recursive_directory_iterator in used pre-CXX11 binaries, see; {rec_iter_symbols}"
|
||||
)
|
||||
else:
|
||||
if num_pre_cxx11_symbols > 0:
|
||||
raise RuntimeError(
|
||||
f"Found pre-cxx11 symbols, but there shouldn't be any, see: {pre_cxx11_symbols[:100]}"
|
||||
)
|
||||
if num_cxx11_symbols < 100:
|
||||
raise RuntimeError("Didn't find enought cxx11 symbols")
|
||||
|
||||
|
||||
def main() -> None:
|
||||
if "install_root" in os.environ:
|
||||
install_root = Path(os.getenv("install_root")) # noqa: SIM112
|
||||
else:
|
||||
if os.getenv("PACKAGE_TYPE") == "libtorch":
|
||||
install_root = Path(os.getcwd())
|
||||
else:
|
||||
install_root = Path(distutils.sysconfig.get_python_lib()) / "torch"
|
||||
|
||||
libtorch_cpu_path = install_root / "lib" / "libtorch_cpu.so"
|
||||
pre_cxx11_abi = "cxx11-abi" not in os.getenv("DESIRED_DEVTOOLSET", "")
|
||||
check_lib_symbols_for_abi_correctness(libtorch_cpu_path, pre_cxx11_abi)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
.ci/pytorch/smoke_test/max_autotune.py (new file, 205 lines)
@@ -0,0 +1,205 @@
import argparse
|
||||
|
||||
from torchvision import datasets, transforms
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
import torch.optim as optim
|
||||
from torch.optim.lr_scheduler import StepLR
|
||||
|
||||
|
||||
class Net(nn.Module):
|
||||
def __init__(self):
|
||||
super(Net, self).__init__() # noqa: UP008
|
||||
self.conv1 = nn.Conv2d(1, 32, 3, 1)
|
||||
self.conv2 = nn.Conv2d(32, 64, 3, 1)
|
||||
self.dropout1 = nn.Dropout(0.25)
|
||||
self.dropout2 = nn.Dropout(0.5)
|
||||
self.fc1 = nn.Linear(9216, 128)
|
||||
self.fc2 = nn.Linear(128, 10)
|
||||
|
||||
def forward(self, x):
|
||||
x = self.conv1(x)
|
||||
x = F.relu(x)
|
||||
x = self.conv2(x)
|
||||
x = F.relu(x)
|
||||
x = F.max_pool2d(x, 2)
|
||||
x = self.dropout1(x)
|
||||
x = torch.flatten(x, 1)
|
||||
x = self.fc1(x)
|
||||
x = F.relu(x)
|
||||
x = self.dropout2(x)
|
||||
x = self.fc2(x)
|
||||
output = F.log_softmax(x, dim=1)
|
||||
return output
|
||||
|
||||
|
||||
def train(args, model, device, train_loader, optimizer, epoch):
|
||||
model.train()
|
||||
for batch_idx, (data, target) in enumerate(train_loader):
|
||||
data, target = data.to(device), target.to(device)
|
||||
optimizer.zero_grad()
|
||||
output = model(data)
|
||||
loss = F.nll_loss(output, target)
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
if batch_idx % args.log_interval == 0:
|
||||
print(
|
||||
f"Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} ({100. * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}" # noqa: B950
|
||||
)
|
||||
if args.dry_run:
|
||||
break
|
||||
|
||||
|
||||
def test(model, device, test_loader):
|
||||
model.eval()
|
||||
test_loss = 0
|
||||
correct = 0
|
||||
with torch.no_grad():
|
||||
for data, target in test_loader:
|
||||
data, target = data.to(device), target.to(device)
|
||||
output = model(data)
|
||||
test_loss += F.nll_loss(
|
||||
output, target, reduction="sum"
|
||||
).item() # sum up batch loss
|
||||
pred = output.argmax(
|
||||
dim=1, keepdim=True
|
||||
) # get the index of the max log-probability
|
||||
correct += pred.eq(target.view_as(pred)).sum().item()
|
||||
|
||||
test_loss /= len(test_loader.dataset)
|
||||
|
||||
print(
|
||||
f"\nTest set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({100. * correct / len(test_loader.dataset):.0f}%)\n" # noqa: B950
|
||||
)
|
||||
|
||||
|
||||
def timed(fn):
|
||||
start = torch.cuda.Event(enable_timing=True)
|
||||
end = torch.cuda.Event(enable_timing=True)
|
||||
start.record()
|
||||
result = fn()
|
||||
end.record()
|
||||
torch.cuda.synchronize()
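# Event.elapsed_time() reports milliseconds, hence the division by 1000 below to return seconds.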
|
||||
return result, start.elapsed_time(end) / 1000
|
||||
|
||||
|
||||
def main():
|
||||
# Training settings
|
||||
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
|
||||
parser.add_argument(
|
||||
"--batch-size",
|
||||
type=int,
|
||||
default=64,
|
||||
metavar="N",
|
||||
help="input batch size for training (default: 64)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--test-batch-size",
|
||||
type=int,
|
||||
default=1000,
|
||||
metavar="N",
|
||||
help="input batch size for testing (default: 1000)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--epochs",
|
||||
type=int,
|
||||
default=4,
|
||||
metavar="N",
|
||||
help="number of epochs to train (default: 14)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--lr",
|
||||
type=float,
|
||||
default=1.0,
|
||||
metavar="LR",
|
||||
help="learning rate (default: 1.0)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--gamma",
|
||||
type=float,
|
||||
default=0.7,
|
||||
metavar="M",
|
||||
help="Learning rate step gamma (default: 0.7)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--no-cuda", action="store_true", default=False, help="disables CUDA training"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--no-mps",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="disables macOS GPU training",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--dry-run",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="quickly check a single pass",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--log-interval",
|
||||
type=int,
|
||||
default=100,
|
||||
metavar="N",
|
||||
help="how many batches to wait before logging training status",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--save-model",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="For Saving the current Model",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
use_cuda = not args.no_cuda and torch.cuda.is_available()
|
||||
use_mps = not args.no_mps and torch.backends.mps.is_available()
|
||||
|
||||
torch.manual_seed(args.seed)
|
||||
torch.backends.cuda.matmul.allow_tf32 = True
|
||||
|
||||
if use_cuda:
|
||||
device = torch.device("cuda")
|
||||
elif use_mps:
|
||||
device = torch.device("mps")
|
||||
else:
|
||||
device = torch.device("cpu")
|
||||
|
||||
train_kwargs = {"batch_size": args.batch_size}
|
||||
test_kwargs = {"batch_size": args.test_batch_size}
|
||||
if use_cuda:
|
||||
cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
|
||||
train_kwargs.update(cuda_kwargs)
|
||||
test_kwargs.update(cuda_kwargs)
|
||||
|
||||
transform = transforms.Compose(
|
||||
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
|
||||
)
|
||||
dataset1 = datasets.MNIST("../data", train=True, download=True, transform=transform)
|
||||
dataset2 = datasets.MNIST("../data", train=False, transform=transform)
|
||||
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
|
||||
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
|
||||
|
||||
model = Net().to(device)
|
||||
opt_model = torch.compile(model, mode="max-autotune")
|
||||
optimizer = optim.Adadelta(opt_model.parameters(), lr=args.lr)
|
||||
|
||||
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
|
||||
for epoch in range(1, args.epochs + 1):
|
||||
print(
|
||||
f"Training Time: {timed(lambda: train(args, opt_model, device, train_loader, optimizer, epoch))[1]}"
|
||||
)
|
||||
print(
|
||||
f"Evaluation Time: {timed(lambda: test(opt_model, device, test_loader))[1]}"
|
||||
)
|
||||
scheduler.step()
|
||||
|
||||
if args.save_model:
|
||||
torch.save(opt_model.state_dict(), "mnist_cnn.pt")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
.ci/pytorch/smoke_test/smoke_test.py (new file, 385 lines)
@@ -0,0 +1,385 @@
import argparse
|
||||
import importlib
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
import torch._dynamo
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
|
||||
if "MATRIX_GPU_ARCH_VERSION" in os.environ:
|
||||
gpu_arch_ver = os.getenv("MATRIX_GPU_ARCH_VERSION")
|
||||
else:
|
||||
gpu_arch_ver = os.getenv("GPU_ARCH_VERSION") # Use fallback if available
|
||||
gpu_arch_type = os.getenv("MATRIX_GPU_ARCH_TYPE")
|
||||
channel = os.getenv("MATRIX_CHANNEL")
|
||||
package_type = os.getenv("MATRIX_PACKAGE_TYPE")
|
||||
target_os = os.getenv("TARGET_OS", sys.platform)
|
||||
BASE_DIR = Path(__file__).parent.parent.parent
|
||||
|
||||
is_cuda_system = gpu_arch_type == "cuda"
|
||||
NIGHTLY_ALLOWED_DELTA = 3
|
||||
|
||||
MODULES = [
|
||||
{
|
||||
"name": "torchvision",
|
||||
"repo": "https://github.com/pytorch/vision.git",
|
||||
"smoke_test": "./vision/test/smoke_test.py",
|
||||
"extension": "extension",
|
||||
"repo_name": "vision",
|
||||
},
|
||||
{
|
||||
"name": "torchaudio",
|
||||
"repo": "https://github.com/pytorch/audio.git",
|
||||
"smoke_test": "./audio/test/smoke_test/smoke_test.py --no-ffmpeg",
|
||||
"extension": "_extension",
|
||||
"repo_name": "audio",
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
class Net(nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.conv1 = nn.Conv2d(1, 32, 3, 1)
|
||||
self.conv2 = nn.Conv2d(32, 64, 3, 1)
|
||||
self.fc1 = nn.Linear(9216, 1)
|
||||
|
||||
def forward(self, x):
|
||||
x = self.conv1(x)
|
||||
x = self.conv2(x)
|
||||
x = F.max_pool2d(x, 2)
|
||||
x = torch.flatten(x, 1)
|
||||
output = self.fc1(x)
|
||||
return output
|
||||
|
||||
|
||||
def load_json_from_basedir(filename: str):
|
||||
try:
|
||||
with open(BASE_DIR / filename) as fptr:
|
||||
return json.load(fptr)
|
||||
except FileNotFoundError as exc:
|
||||
raise ImportError(f"File {filename} not found error: {exc.strerror}") from exc
|
||||
except json.JSONDecodeError as exc:
|
||||
raise ImportError(f"Invalid JSON {filename}") from exc
|
||||
|
||||
|
||||
def read_release_matrix():
|
||||
return load_json_from_basedir("release_matrix.json")
|
||||
|
||||
|
||||
def test_numpy():
|
||||
import numpy as np
|
||||
|
||||
x = np.arange(5)
|
||||
torch.tensor(x)
|
||||
|
||||
|
||||
def check_version(package: str) -> None:
|
||||
release_version = os.getenv("RELEASE_VERSION")
|
||||
# if release_version is specified, use it to validate the packages
|
||||
if release_version:
|
||||
release_matrix = read_release_matrix()
|
||||
stable_version = release_matrix["torch"]
|
||||
else:
|
||||
stable_version = os.getenv("MATRIX_STABLE_VERSION")
|
||||
|
||||
# only makes sense to check nightly package where dates are known
|
||||
if channel == "nightly":
|
||||
check_nightly_binaries_date(package)
|
||||
elif stable_version is not None:
|
||||
if not torch.__version__.startswith(stable_version):
|
||||
raise RuntimeError(
|
||||
f"Torch version mismatch, expected {stable_version} for channel {channel}. But its {torch.__version__}"
|
||||
)
|
||||
|
||||
if release_version and package == "all":
|
||||
for module in MODULES:
|
||||
imported_module = importlib.import_module(module["name"])
|
||||
module_version = imported_module.__version__
|
||||
if not module_version.startswith(release_matrix[module["name"]]):
|
||||
raise RuntimeError(
|
||||
f"{module['name']} version mismatch, expected: \
|
||||
{release_matrix[module['name']]} for channel {channel}, but it is {module_version}"
|
||||
)
|
||||
else:
|
||||
print(f"{module['name']} version actual: {module_version} expected: \
|
||||
{release_matrix[module['name']]} for channel {channel}.")
|
||||
|
||||
else:
|
||||
print(f"Skip version check for channel {channel} as stable version is None")
|
||||
|
||||
|
||||
def check_nightly_binaries_date(package: str) -> None:
|
||||
from datetime import datetime
|
||||
|
||||
format_dt = "%Y%m%d"
|
||||
|
||||
date_t_str = re.findall("dev\\d+", torch.__version__)
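# Illustration with a hypothetical nightly version "2.6.0.dev20241201+cu124":
# re.findall gives ["dev20241201"], and [0][3:] below extracts "20241201".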
|
||||
date_t_delta = datetime.now() - datetime.strptime(date_t_str[0][3:], format_dt)
|
||||
if date_t_delta.days >= NIGHTLY_ALLOWED_DELTA:
|
||||
raise RuntimeError(
|
||||
f"the binaries are from {date_t_str} and are more than {NIGHTLY_ALLOWED_DELTA} days old!"
|
||||
)
|
||||
|
||||
if package == "all":
|
||||
for module in MODULES:
|
||||
imported_module = importlib.import_module(module["name"])
|
||||
module_version = imported_module.__version__
|
||||
date_m_str = re.findall("dev\\d+", module_version)
|
||||
date_m_delta = datetime.now() - datetime.strptime(
|
||||
date_m_str[0][3:], format_dt
|
||||
)
|
||||
print(f"Nightly date check for {module['name']} version {module_version}")
|
||||
if date_m_delta.days > NIGHTLY_ALLOWED_DELTA:
|
||||
raise RuntimeError(
|
||||
f"Expected {module['name']} to be less then {NIGHTLY_ALLOWED_DELTA} days. But its {date_m_delta}"
|
||||
)
|
||||
|
||||
|
||||
def test_cuda_runtime_errors_captured() -> None:
|
||||
cuda_exception_missed = True
|
||||
try:
|
||||
print("Testing test_cuda_runtime_errors_captured")
|
||||
torch._assert_async(torch.tensor(0, device="cuda"))
|
||||
torch._assert_async(torch.tensor(0 + 0j, device="cuda"))
|
||||
except RuntimeError as e:
|
||||
if re.search("CUDA", f"{e}"):
|
||||
print(f"Caught CUDA exception with success: {e}")
|
||||
cuda_exception_missed = False
|
||||
else:
|
||||
raise e
|
||||
if cuda_exception_missed:
|
||||
raise RuntimeError("Expected CUDA RuntimeError but have not received!")
|
||||
|
||||
|
||||
def smoke_test_cuda(
|
||||
package: str, runtime_error_check: str, torch_compile_check: str
|
||||
) -> None:
|
||||
if not torch.cuda.is_available() and is_cuda_system:
|
||||
raise RuntimeError(f"Expected CUDA {gpu_arch_ver}. However CUDA is not loaded.")
|
||||
|
||||
if package == "all" and is_cuda_system:
|
||||
for module in MODULES:
|
||||
imported_module = importlib.import_module(module["name"])
|
||||
# TBD for vision move extension module to private so it will
|
||||
# be _extension.
|
||||
version = "N/A"
|
||||
if module["extension"] == "extension":
|
||||
version = imported_module.extension._check_cuda_version()
|
||||
else:
|
||||
version = imported_module._extension._check_cuda_version()
|
||||
print(f"{module['name']} CUDA: {version}")
|
||||
|
||||
# torch.compile is available on macos-arm64 and Linux for python 3.8-3.13
|
||||
if (
|
||||
torch_compile_check == "enabled"
|
||||
and sys.version_info < (3, 13, 0)
|
||||
and target_os in ["linux", "linux-aarch64", "macos-arm64", "darwin"]
|
||||
):
|
||||
smoke_test_compile("cuda" if torch.cuda.is_available() else "cpu")
|
||||
|
||||
if torch.cuda.is_available():
|
||||
if torch.version.cuda != gpu_arch_ver:
|
||||
raise RuntimeError(
|
||||
f"Wrong CUDA version. Loaded: {torch.version.cuda} Expected: {gpu_arch_ver}"
|
||||
)
|
||||
print(f"torch cuda: {torch.version.cuda}")
|
||||
# todo add cudnn version validation
|
||||
print(f"torch cudnn: {torch.backends.cudnn.version()}")
|
||||
print(f"cuDNN enabled? {torch.backends.cudnn.enabled}")
|
||||
|
||||
torch.cuda.init()
|
||||
print("CUDA initialized successfully")
|
||||
print(f"Number of CUDA devices: {torch.cuda.device_count()}")
|
||||
for i in range(torch.cuda.device_count()):
|
||||
print(f"Device {i}: {torch.cuda.get_device_name(i)}")
|
||||
|
||||
# nccl is availbale only on Linux
|
||||
if sys.platform in ["linux", "linux2"]:
|
||||
print(f"torch nccl version: {torch.cuda.nccl.version()}")
|
||||
|
||||
if runtime_error_check == "enabled":
|
||||
test_cuda_runtime_errors_captured()
|
||||
|
||||
|
||||
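# A minimal sketch of the CUDA-version comparison performed in smoke_test_cuda above,
# with a hypothetical expected version string (the real gpu_arch_ver is presumably set
# earlier in this script from the CI test matrix environment):
#   expected = "12.4"
#   if torch.cuda.is_available() and torch.version.cuda != expected:
#       raise RuntimeError(f"Wrong CUDA version. Loaded: {torch.version.cuda}")

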
def smoke_test_conv2d() -> None:
    import torch.nn as nn

    print("Testing smoke_test_conv2d")
    # With square kernels and equal stride
    m = nn.Conv2d(16, 33, 3, stride=2)
    # non-square kernels and unequal stride and with padding
    m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
    assert m is not None
    # non-square kernels and unequal stride and with padding and dilation
    basic_conv = nn.Conv2d(
        16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)
    )
    input = torch.randn(20, 16, 50, 100)
    output = basic_conv(input)

    if is_cuda_system:
        print("Testing smoke_test_conv2d with cuda")
        conv = nn.Conv2d(3, 3, 3).cuda()
        x = torch.randn(1, 3, 24, 24, device="cuda")
        with torch.cuda.amp.autocast():
            out = conv(x)
        assert out is not None

        supported_dtypes = [torch.float16, torch.float32, torch.float64]
        for dtype in supported_dtypes:
            print(f"Testing smoke_test_conv2d with cuda for {dtype}")
            conv = basic_conv.to(dtype).cuda()
            input = torch.randn(20, 16, 50, 100, device="cuda").type(dtype)
            output = conv(input)
            assert output is not None


def test_linalg(device="cpu") -> None:
    print(f"Testing smoke_test_linalg on {device}")
    A = torch.randn(5, 3, device=device)
    U, S, Vh = torch.linalg.svd(A, full_matrices=False)
    assert (
        U.shape == A.shape
        and S.shape == torch.Size([3])
        and Vh.shape == torch.Size([3, 3])
    )
    torch.dist(A, U @ torch.diag(S) @ Vh)

    U, S, Vh = torch.linalg.svd(A)
    assert (
        U.shape == torch.Size([5, 5])
        and S.shape == torch.Size([3])
        and Vh.shape == torch.Size([3, 3])
    )
    torch.dist(A, U[:, :3] @ torch.diag(S) @ Vh)

    A = torch.randn(7, 5, 3, device=device)
    U, S, Vh = torch.linalg.svd(A, full_matrices=False)
    torch.dist(A, U @ torch.diag_embed(S) @ Vh)

    if device == "cuda":
        supported_dtypes = [torch.float32, torch.float64]
        for dtype in supported_dtypes:
            print(f"Testing smoke_test_linalg with cuda for {dtype}")
            A = torch.randn(20, 16, 50, 100, device=device, dtype=dtype)
            torch.linalg.svd(A)


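# The SVD checks in test_linalg above rely on the reconstruction identity
# A ~= U @ diag(S) @ Vh; a minimal standalone sketch of that check (the 1e-5
# tolerance here is an assumption, not a value used by this script):
#   A = torch.randn(5, 3)
#   U, S, Vh = torch.linalg.svd(A, full_matrices=False)
#   assert torch.dist(A, U @ torch.diag(S) @ Vh) < 1e-5

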
def smoke_test_compile(device: str = "cpu") -> None:
    supported_dtypes = [torch.float16, torch.float32, torch.float64]

    def foo(x: torch.Tensor) -> torch.Tensor:
        return torch.sin(x) + torch.cos(x)

    for dtype in supported_dtypes:
        print(f"Testing smoke_test_compile for {device} and {dtype}")
        x = torch.rand(3, 3, device=device).type(dtype)
        x_eager = foo(x)
        x_pt2 = torch.compile(foo)(x)
        torch.testing.assert_close(x_eager, x_pt2)

    # Check that a vectorized SIMD ISA was detected for the CPU architecture
    if device == "cpu":
        from torch._inductor.codecache import pick_vec_isa

        isa = pick_vec_isa()
        if not isa:
            raise RuntimeError("Can't detect vectorized ISA for CPU")
        print(f"Picked CPU ISA {type(isa).__name__} bit width {isa.bit_width()}")

    # Reset torch dynamo since we are changing mode
    torch._dynamo.reset()
    dtype = torch.float32
    torch.set_float32_matmul_precision("high")
    print(f"Testing smoke_test_compile with mode 'max-autotune' for {dtype}")
    x = torch.rand(64, 1, 28, 28, device=device).type(torch.float32)
    model = Net().to(device=device)
    x_pt2 = torch.compile(model, mode="max-autotune")(x)


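# smoke_test_compile above assumes `Net` is the small example model defined earlier in
# this script; the eager-vs-compiled comparison pattern it uses boils down to:
#   compiled = torch.compile(fn)
#   torch.testing.assert_close(fn(x), compiled(x))

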
def smoke_test_modules():
    cwd = os.getcwd()
    for module in MODULES:
        if module["repo"]:
            if not os.path.exists(f"{cwd}/{module['repo_name']}"):
                print(f"Path does not exist: {cwd}/{module['repo_name']}")
                try:
                    subprocess.check_output(
                        f"git clone --depth 1 {module['repo']}",
                        stderr=subprocess.STDOUT,
                        shell=True,
                    )
                except subprocess.CalledProcessError as exc:
                    raise RuntimeError(
                        f"Cloning {module['repo']} FAIL: {exc.returncode} Output: {exc.output}"
                    ) from exc
            try:
                smoke_test_command = f"python3 {module['smoke_test']}"
                if target_os == "windows":
                    smoke_test_command = f"python {module['smoke_test']}"
                output = subprocess.check_output(
                    smoke_test_command,
                    stderr=subprocess.STDOUT,
                    shell=True,
                    universal_newlines=True,
                )
            except subprocess.CalledProcessError as exc:
                raise RuntimeError(
                    f"Module {module['name']} FAIL: {exc.returncode} Output: {exc.output}"
                ) from exc
            else:
                print(f"Output: \n{output}\n")


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--package",
        help="Package to include in smoke testing",
        type=str,
        choices=["all", "torchonly"],
        default="all",
    )
    parser.add_argument(
        "--runtime-error-check",
        help="No Runtime Error check",
        type=str,
        choices=["enabled", "disabled"],
        default="enabled",
    )
    parser.add_argument(
        "--torch-compile-check",
        help="Check torch compile",
        type=str,
        choices=["enabled", "disabled"],
        default="enabled",
    )
    options = parser.parse_args()
    print(f"torch: {torch.__version__}")
    print(torch.__config__.parallel_info())

    check_version(options.package)
    smoke_test_conv2d()
    test_linalg()
    test_numpy()
    if is_cuda_system:
        test_linalg("cuda")

    if options.package == "all":
        smoke_test_modules()

    smoke_test_cuda(
        options.package, options.runtime_error_check, options.torch_compile_check
    )


if __name__ == "__main__":
    main()

@ -55,9 +55,6 @@ mv /final_pkgs/debug-*.zip /tmp/debug_final_pkgs || echo "no debug packages to m
# Install the package
# These network calls should not have 'retry's because they are installing
# locally and aren't actually network calls
# TODO there is duplicated and inconsistent test-python-env setup across this
# file, builder/smoke_test.sh, and builder/run_tests.sh, and also in the
# conda build scripts themselves. These should really be consolidated
# Pick only one package of multiple available (which happens as result of workflow re-runs)
pkg="/final_pkgs/\$(ls -1 /final_pkgs|sort|tail -1)"
if [[ "\$PYTORCH_BUILD_VERSION" == *dev* ]]; then
@ -90,15 +87,13 @@ if [[ "$PACKAGE_TYPE" == libtorch ]]; then
fi

# Test the package
/builder/check_binary.sh
/pytorch/.ci/pytorch/check_binary.sh

if [[ "\$GPU_ARCH_TYPE" != *s390x* && "\$GPU_ARCH_TYPE" != *xpu* && "\$GPU_ARCH_TYPE" != *rocm* && "$PACKAGE_TYPE" != libtorch ]]; then
# Exclude s390, xpu, rocm and libtorch builds from smoke testing
python /builder/test/smoke_test/smoke_test.py --package=torchonly --torch-compile-check disabled
python /pytorch/.ci/pytorch/smoke_test/smoke_test.py --package=torchonly --torch-compile-check disabled
fi

# Clean temp files
cd /builder && git clean -ffdx

# =================== The above code will be executed inside Docker container ===================
EOL
@ -30,7 +30,6 @@ runs:
--tty \
--detach \
-v "${GITHUB_WORKSPACE}/pytorch:/pytorch" \
-v "${GITHUB_WORKSPACE}/builder:/builder" \
-v "${RUNNER_TEMP}/artifacts:/final_pkgs" \
-w / \
"${DOCKER_IMAGE}"
14
.github/workflows/_binary-test-linux.yml
vendored
@ -182,20 +182,6 @@ jobs:
          git clean -fxd
        working-directory: pytorch

      - name: Checkout pytorch/builder to builder dir
        uses: malfet/checkout@silent-checkout
        with:
          ref: main
          submodules: recursive
          repository: pytorch/builder
          path: builder

      - name: Clean pytorch/builder checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: builder

      - name: Check if the job is disabled
        id: filter
        uses: ./pytorch/.github/actions/filter-test-configs
52
.github/workflows/generated-linux-binary-manywheel-nightly.yml
generated
vendored
@ -3172,19 +3172,6 @@ jobs:
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      - name: Checkout pytorch/builder
        uses: malfet/checkout@silent-checkout
        with:
          ref: main
          submodules: recursive
          repository: pytorch/builder
          path: builder
          quiet-checkout: true
      - name: Clean pytorch/builder checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: builder
      - name: ROCm set GPU_FLAG
        run: |
          echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
@ -3285,19 +3272,6 @@ jobs:
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      - name: Checkout pytorch/builder
        uses: malfet/checkout@silent-checkout
        with:
          ref: main
          submodules: recursive
          repository: pytorch/builder
          path: builder
          quiet-checkout: true
      - name: Clean pytorch/builder checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: builder
      - name: ROCm set GPU_FLAG
        run: |
          echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
@ -3858,19 +3832,6 @@ jobs:
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      - name: Checkout pytorch/builder
        uses: malfet/checkout@silent-checkout
        with:
          ref: main
          submodules: recursive
          repository: pytorch/builder
          path: builder
          quiet-checkout: true
      - name: Clean pytorch/builder checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: builder
      - name: ROCm set GPU_FLAG
        run: |
          echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
@ -3971,19 +3932,6 @@ jobs:
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      - name: Checkout pytorch/builder
        uses: malfet/checkout@silent-checkout
        with:
          ref: main
          submodules: recursive
          repository: pytorch/builder
          path: builder
          quiet-checkout: true
      - name: Clean pytorch/builder checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: builder
      - name: ROCm set GPU_FLAG
        run: |
          echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"