[Build] Remove pre-CXX11 ABI logic from build script (#149888)

Only keep one check, in check_binary_symbols, to make sure there are no pre-CXX11 ABI symbols in the library
Pull Request resolved: https://github.com/pytorch/pytorch/pull/149888
Approved by: https://github.com/atalman, https://github.com/seemethere
ghstack dependencies: #149887
This commit is contained in:
Nikita Shulga
2025-03-24 16:23:28 -07:00
committed by PyTorch MergeBot
parent 280e48739a
commit 5a7588f183
12 changed files with 11 additions and 150 deletions

View File

@ -78,47 +78,7 @@ fi
echo "Checking that the gcc ABI is what we expect"
if [[ "$(uname)" != 'Darwin' ]]; then
# Echoes "1" when the extracted ABI flag value is "truthy": either a number
# greater than zero, or the literal string "ON " (note the trailing space —
# presumably matching padded cmake output; NOTE(review): verify against the
# actual TorchConfig.cmake contents). Echoes nothing otherwise, so callers
# compare the captured output against "1".
function is_expected() {
if [[ "$1" -gt 0 || "$1" == "ON " ]]; then
echo 1
fi
}
# First we check that the env var in TorchConfig.cmake is correct
# We search for D_GLIBCXX_USE_CXX11_ABI=1 in torch/TorchConfig.cmake
torch_config="${install_root}/share/cmake/Torch/TorchConfig.cmake"
if [[ ! -f "$torch_config" ]]; then
echo "No TorchConfig.cmake found!"
ls -lah "$install_root/share/cmake/Torch"
exit 1
fi
echo "Checking the TorchConfig.cmake"
cat "$torch_config"
# The sed call below is
# don't print lines by default (only print the line we want)
# -n
# execute the following expression
# e
# replace lines that match with the first capture group and print
# s/.*D_GLIBCXX_USE_CXX11_ABI=\(.\)".*/\1/p
# any characters, D_GLIBCXX_USE_CXX11_ABI=, exactly one any character, a
# quote, any characters
# Note the exactly one single character after the '='. In the case that the
# variable is not set the '=' will be followed by a '"' immediately and the
# line will fail the match and nothing will be printed; this is what we
# want. Otherwise it will capture the 0 or 1 after the '='.
# /.*D_GLIBCXX_USE_CXX11_ABI=\(.\)".*/
# replace the matched line with the capture group and print
# /\1/p
actual_gcc_abi="$(sed -ne 's/.*D_GLIBCXX_USE_CXX11_ABI=\(.\)".*/\1/p' < "$torch_config")"
if [[ "$(is_expected "$actual_gcc_abi")" != 1 ]]; then
echo "gcc ABI $actual_gcc_abi not as expected."
exit 1
fi
# We also check that there are [not] cxx11 symbols in libtorch
# We also check that there are cxx11 symbols in libtorch
#
echo "Checking that symbols in libtorch.so have the right gcc abi"
python3 "$(dirname ${BASH_SOURCE[0]})/smoke_test/check_binary_symbols.py"

View File

@ -80,7 +80,7 @@ def grep_symbols(lib: str, patterns: list[Any]) -> list[str]:
return functools.reduce(list.__add__, (x.result() for x in tasks), [])
def check_lib_symbols_for_abi_correctness(lib: str, pre_cxx11_abi: bool = True) -> None:
def check_lib_symbols_for_abi_correctness(lib: str) -> None:
print(f"lib: {lib}")
cxx11_symbols = grep_symbols(lib, LIBTORCH_CXX11_PATTERNS)
pre_cxx11_symbols = grep_symbols(lib, LIBTORCH_PRE_CXX11_PATTERNS)
@ -88,28 +88,12 @@ def check_lib_symbols_for_abi_correctness(lib: str, pre_cxx11_abi: bool = True)
num_pre_cxx11_symbols = len(pre_cxx11_symbols)
print(f"num_cxx11_symbols: {num_cxx11_symbols}")
print(f"num_pre_cxx11_symbols: {num_pre_cxx11_symbols}")
if pre_cxx11_abi:
if num_cxx11_symbols > 0:
raise RuntimeError(
f"Found cxx11 symbols, but there shouldn't be any, see: {cxx11_symbols[:100]}"
)
if num_pre_cxx11_symbols < 1000:
raise RuntimeError("Didn't find enough pre-cxx11 symbols.")
# Check for no recursive iterators, regression test for https://github.com/pytorch/pytorch/issues/133437
rec_iter_symbols = grep_symbols(
lib, [re.compile("std::filesystem::recursive_directory_iterator.*")]
if num_pre_cxx11_symbols > 0:
raise RuntimeError(
f"Found pre-cxx11 symbols, but there shouldn't be any, see: {pre_cxx11_symbols[:100]}"
)
if len(rec_iter_symbols) > 0:
raise RuntimeError(
f"recursive_directory_iterator in used pre-CXX11 binaries, see; {rec_iter_symbols}"
)
else:
if num_pre_cxx11_symbols > 0:
raise RuntimeError(
f"Found pre-cxx11 symbols, but there shouldn't be any, see: {pre_cxx11_symbols[:100]}"
)
if num_cxx11_symbols < 100:
raise RuntimeError("Didn't find enought cxx11 symbols")
if num_cxx11_symbols < 100:
raise RuntimeError("Didn't find enough cxx11 symbols")
def main() -> None:
@ -122,8 +106,7 @@ def main() -> None:
install_root = Path(distutils.sysconfig.get_python_lib()) / "torch"
libtorch_cpu_path = str(install_root / "lib" / "libtorch_cpu.so")
# NOTE: All binaries are built with cxx11abi now
check_lib_symbols_for_abi_correctness(libtorch_cpu_path, False)
check_lib_symbols_for_abi_correctness(libtorch_cpu_path)
if __name__ == "__main__":

View File

@ -69,19 +69,7 @@ string(APPEND CMAKE_CUDA_FLAGS
" -DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS")
if(LINUX)
include(cmake/CheckAbi.cmake)
string(APPEND CMAKE_CXX_FLAGS
" -D_GLIBCXX_USE_CXX11_ABI=${GLIBCXX_USE_CXX11_ABI}")
string(APPEND CMAKE_CUDA_FLAGS
" -D_GLIBCXX_USE_CXX11_ABI=${GLIBCXX_USE_CXX11_ABI}")
if(${GLIBCXX_USE_CXX11_ABI} EQUAL 1)
set(CXX_STANDARD_REQUIRED ON)
else()
# Please note this is required in order to ensure compatibility between gcc
# 9 and gcc 7 This could be removed when all Linux PyTorch binary builds are
# compiled by the same toolchain again
append_cxx_flag_if_supported("-fabi-version=11" CMAKE_CXX_FLAGS)
endif()
set(CXX_STANDARD_REQUIRED ON)
endif()
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

View File

@ -274,13 +274,6 @@ conda install -c conda-forge libuv=1.39
#### Install PyTorch
**On Linux**
If you would like to compile PyTorch with [new C++ ABI](https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html) enabled, then first run this command:
```bash
export _GLIBCXX_USE_CXX11_ABI=1
```
Please **note** that starting from PyTorch 2.5, the PyTorch build with XPU supports both new and old C++ ABIs. Previously, XPU only supported the new C++ ABI. If you want to compile with Intel GPU support, please follow [Intel GPU Support](#intel-gpu-support).
If you're compiling for AMD ROCm then first run this command:
```bash
# Only run this if you're compiling for ROCm

View File

@ -1317,10 +1317,6 @@ if(BUILD_TEST)
endif()
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
include(../cmake/CheckAbi.cmake)
endif()
# CMake config for external projects.
configure_file(
${PROJECT_SOURCE_DIR}/cmake/TorchConfigVersion.cmake.in

View File

@ -1,27 +0,0 @@
# Determine the effective value of _GLIBCXX_USE_CXX11_ABI for the current
# toolchain by compiling and running torch/abi-check.cpp, unless the caller
# already defined GLIBCXX_USE_CXX11_ABI as a cmake variable.
# On failure to *run* the probe (e.g. cross-compiling), falls back to 0.
if(DEFINED GLIBCXX_USE_CXX11_ABI)
  message(STATUS "_GLIBCXX_USE_CXX11_ABI=${GLIBCXX_USE_CXX11_ABI} is already defined as a cmake variable")
  return()
endif()

# XXX This ABI check cannot be run with arm-linux-androideabi-g++
message(STATUS "${CMAKE_CXX_COMPILER} ${PROJECT_SOURCE_DIR}/torch/abi-check.cpp -o ${CMAKE_BINARY_DIR}/abi-check")
execute_process(
  COMMAND
    "${CMAKE_CXX_COMPILER}"
    "${PROJECT_SOURCE_DIR}/torch/abi-check.cpp"
    "-o"
    "${CMAKE_BINARY_DIR}/abi-check"
  RESULT_VARIABLE ABI_CHECK_COMPILE_RESULT)
if(ABI_CHECK_COMPILE_RESULT)
  # FATAL_ERROR aborts configuration immediately, so no fallback assignment is
  # needed here; the former `set(GLIBCXX_USE_CXX11_ABI 0)` after this message
  # was unreachable dead code and has been removed.
  message(FATAL_ERROR "Could not compile ABI Check: ${ABI_CHECK_COMPILE_RESULT}")
endif()
# The probe prints the ABI value on stdout; capture it directly into the
# result variable.
execute_process(
  COMMAND "${CMAKE_BINARY_DIR}/abi-check"
  RESULT_VARIABLE ABI_CHECK_RESULT
  OUTPUT_VARIABLE GLIBCXX_USE_CXX11_ABI)
if(ABI_CHECK_RESULT)
  # Could compile but not run (non-zero exit / not executable on the host):
  # warn and conservatively assume the old ABI.
  message(WARNING "Could not run ABI Check: ${ABI_CHECK_RESULT}")
  set(GLIBCXX_USE_CXX11_ABI 0)
endif()
message(STATUS "Determined _GLIBCXX_USE_CXX11_ABI=${GLIBCXX_USE_CXX11_ABI}")

View File

@ -147,11 +147,6 @@ if(@USE_XPU@ AND @BUILD_SHARED_LIBS@)
append_torchlib_if_found(c10_xpu torch_xpu)
endif()
# When we build libtorch with the old libstdc++ ABI, dependent libraries must too.
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
set(TORCH_CXX_FLAGS "-D_GLIBCXX_USE_CXX11_ABI=@GLIBCXX_USE_CXX11_ABI@")
endif()
find_library(TORCH_LIBRARY torch PATHS "${TORCH_INSTALL_PREFIX}/lib")
# the statements below changes target properties on
# - the imported target from Caffe2Targets.cmake in shared library mode (see the find_package above)

View File

@ -189,7 +189,6 @@ class CMake:
# Key: environment variable name. Value: Corresponding variable name to be passed to CMake. If you are
# adding a new build option to this block: Consider making these two names identical and adding this option
# in the block below.
"_GLIBCXX_USE_CXX11_ABI": "GLIBCXX_USE_CXX11_ABI",
"CUDNN_LIB_DIR": "CUDNN_LIBRARY",
"USE_CUDA_STATIC_LINK": "CAFFE2_STATIC_LINK_CUDA",
}

View File

@ -2232,7 +2232,7 @@ del _torch_docs, _tensor_docs, _storage_docs, _size_docs
def compiled_with_cxx11_abi() -> builtins.bool:
r"""Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1"""
return _C._GLIBCXX_USE_CXX11_ABI
return True
from torch import _library as _library, _ops as _ops

View File

@ -1,9 +0,0 @@
#include <iostream>

// Probe program: prints the toolchain's effective _GLIBCXX_USE_CXX11_ABI
// value to stdout (prints 0 when the macro is not defined at all, e.g. on
// non-GNU standard libraries).
int main() {
#ifdef _GLIBCXX_USE_CXX11_ABI
  const int abi_value = _GLIBCXX_USE_CXX11_ABI;
#else
  const int abi_value = 0;
#endif
  std::cout << abi_value;
  return 0;
}

View File

@ -2399,12 +2399,7 @@ Call this whenever a new thread is created in order to propagate values from
ASSERT_TRUE(
set_module_attr("_has_mkldnn", at::hasMKLDNN() ? Py_True : Py_False));
#ifdef _GLIBCXX_USE_CXX11_ABI
ASSERT_TRUE(set_module_attr(
"_GLIBCXX_USE_CXX11_ABI", _GLIBCXX_USE_CXX11_ABI ? Py_True : Py_False));
#else
ASSERT_TRUE(set_module_attr("_GLIBCXX_USE_CXX11_ABI", Py_False));
#endif
ASSERT_TRUE(set_module_attr("_GLIBCXX_USE_CXX11_ABI", Py_True));
// See note [Pybind11 ABI constants]
#define SET_STR_DEFINE(name) \

View File

@ -655,7 +655,6 @@ class BuildExtension(build_ext):
if val is not None and not IS_WINDOWS:
self._add_compile_flag(extension, f'-DPYBIND11_{name}="{val}"')
self._define_torch_extension_name(extension)
self._add_gnu_cpp_abi_flag(extension)
if 'nvcc_dlink' in extension.extra_compile_args:
assert self.use_ninja, f"With dlink=True, ninja is required to build cuda extension {extension.name}."
@ -1113,10 +1112,6 @@ class BuildExtension(build_ext):
define = f'-DTORCH_EXTENSION_NAME={name}'
self._add_compile_flag(extension, define)
def _add_gnu_cpp_abi_flag(self, extension):
# use the same CXX ABI as what PyTorch was compiled with
self._add_compile_flag(extension, '-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI)))
def CppExtension(name, sources, *args, **kwargs):
"""
@ -1688,10 +1683,6 @@ def _get_pybind11_abi_build_flags():
abi_cflags.append(f'-DPYBIND11_{pname}=\\"{pval}\\"')
return abi_cflags
def _get_glibcxx_abi_build_flags():
glibcxx_abi_cflags = ['-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))]
return glibcxx_abi_cflags
def check_compiler_is_gcc(compiler):
if not IS_LINUX:
return False
@ -1822,7 +1813,6 @@ def _check_and_build_extension_h_precompiler_headers(
common_cflags += ['-std=c++17', '-fPIC']
common_cflags += [f"{x}" for x in _get_pybind11_abi_build_flags()]
common_cflags += [f"{x}" for x in _get_glibcxx_abi_build_flags()]
common_cflags_str = listToString(common_cflags)
pch_cmd = format_precompiler_header_cmd(compiler, head_file, head_file_pch, common_cflags_str, torch_include_dirs_str, extra_cflags_str, extra_include_paths_str)
@ -2644,8 +2634,6 @@ def _write_ninja_file_to_build_library(path,
common_cflags += [f'-I{shlex.quote(include)}' for include in user_includes]
common_cflags += [f'-isystem {shlex.quote(include)}' for include in system_includes]
common_cflags += [f"{x}" for x in _get_glibcxx_abi_build_flags()]
if IS_WINDOWS:
cflags = common_cflags + ['/std:c++17'] + extra_cflags
cflags += COMMON_HIP_FLAGS if IS_HIP_EXTENSION else COMMON_MSVC_FLAGS