Uniformly apply Windows logic in cpp_extensions everywhere (#31161)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/31161

Previously, it wasn't necessary to specify `DT_NEEDED` entries in C++ extensions on Linux (i.e., to pass `-l` flags), because all of the symbols would already have been loaded with `RTLD_GLOBAL`, so there were no undefined symbols. But once we switch to loading `_C` with `RTLD_LOCAL`, every C++ extension needs to know which libraries to link against. The resulting code is clearer and more uniform, so it's a win all around.
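For illustration only (not part of this patch), a minimal sketch of the dlopen-flag mechanism the old behaviour relied on; `sys.setdlopenflags` and `os.RTLD_GLOBAL` are standard CPython/POSIX APIs, and the `import torch` here stands in for whatever loads `_C`:

    import os
    import sys

    # With RTLD_GLOBAL in effect when torch._C is imported, every libtorch
    # symbol becomes globally visible, so a later extension .so with no
    # DT_NEEDED entry for libtorch still resolves.  Under the default
    # RTLD_LOCAL those symbols stay private to torch, and the extension must
    # itself be linked with -lc10/-ltorch/... (which this patch arranges).
    old_flags = sys.getdlopenflags()
    sys.setdlopenflags(old_flags | os.RTLD_GLOBAL | os.RTLD_LAZY)
    import torch  # noqa: F401  -- _C's symbols now leak into the global namespace
    sys.setdlopenflags(old_flags)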

Signed-off-by: Edward Z. Yang <ezyang@fb.com>

Test Plan: Imported from OSS

Differential Revision: D19262578

Pulled By: ezyang

fbshipit-source-id: a893cc96f2e9aad1c064a6de4f7ccf79257dec3f
Author: Edward Yang
Date: 2020-01-09 07:26:25 -08:00
Committed by: Facebook GitHub Bot
Parent: 0dbd5c0bfe
Commit: 8614860210


@@ -446,18 +446,16 @@ def CppExtension(name, sources, *args, **kwargs):
     include_dirs += include_paths()
     kwargs['include_dirs'] = include_dirs
 
-    if IS_WINDOWS:
-        library_dirs = kwargs.get('library_dirs', [])
-        library_dirs += library_paths()
-        kwargs['library_dirs'] = library_dirs
+    library_dirs = kwargs.get('library_dirs', [])
+    library_dirs += library_paths()
+    kwargs['library_dirs'] = library_dirs
 
-        libraries = kwargs.get('libraries', [])
-        libraries.append('c10')
-        libraries.append('torch')
-        libraries.append('torch_cpu')
-        libraries.append('torch_python')
-        libraries.append('_C')
-        kwargs['libraries'] = libraries
+    libraries = kwargs.get('libraries', [])
+    libraries.append('c10')
+    libraries.append('torch')
+    libraries.append('torch_cpu')
+    libraries.append('torch_python')
+    kwargs['libraries'] = libraries
 
     kwargs['language'] = 'c++'
     return setuptools.Extension(name, sources, *args, **kwargs)
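With the hunk above, CppExtension supplies the torch library directories and library names on every platform, not only Windows, so an extension built this way carries the right DT_NEEDED entries even when `_C` is loaded with `RTLD_LOCAL`. A minimal caller-side sketch using the standard setup.py pattern (the package and file names are hypothetical):

    from setuptools import setup
    from torch.utils.cpp_extension import BuildExtension, CppExtension

    setup(
        name='my_ext',  # hypothetical package name
        ext_modules=[
            # No explicit libraries/library_dirs needed: CppExtension now adds
            # library_paths() and c10/torch/torch_cpu/torch_python itself.
            CppExtension('my_ext', ['my_ext.cpp']),
        ],
        cmdclass={'build_ext': BuildExtension},
    )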
@@ -497,14 +495,12 @@ def CUDAExtension(name, sources, *args, **kwargs):
 
     libraries = kwargs.get('libraries', [])
     libraries.append('cudart')
-    if IS_WINDOWS:
-        libraries.append('c10')
-        libraries.append('c10_cuda')
-        libraries.append('torch_cpu')
-        libraries.append('torch_cuda')
-        libraries.append('torch')
-        libraries.append('torch_python')
-        libraries.append('_C')
+    libraries.append('c10')
+    libraries.append('c10_cuda')
+    libraries.append('torch')
+    libraries.append('torch_cpu')
+    libraries.append('torch_cuda')
+    libraries.append('torch_python')
     kwargs['libraries'] = libraries
 
     include_dirs = kwargs.get('include_dirs', [])
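The CUDA path follows the same pattern. A hedged sketch of the corresponding setup.py (again with hypothetical names):

    from setuptools import setup
    from torch.utils.cpp_extension import BuildExtension, CUDAExtension

    setup(
        name='my_cuda_ext',  # hypothetical package name
        ext_modules=[
            # cudart, c10, c10_cuda, torch, torch_cpu, torch_cuda and
            # torch_python are now appended unconditionally, so there is no
            # per-platform special casing in the extension's own setup.py.
            CUDAExtension('my_cuda_ext', ['my_cuda_ext.cpp', 'my_kernels.cu']),
        ],
        cmdclass={'build_ext': BuildExtension},
    )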
@@ -561,12 +557,11 @@ def library_paths(cuda=False):
     '''
     paths = []
 
-    if IS_WINDOWS:
-        here = os.path.abspath(__file__)
-        torch_path = os.path.dirname(os.path.dirname(here))
-        lib_path = os.path.join(torch_path, 'lib')
-
-        paths.append(lib_path)
+    # We need to link against libtorch.so
+    here = os.path.abspath(__file__)
+    torch_path = os.path.dirname(os.path.dirname(here))
+    lib_path = os.path.join(torch_path, 'lib')
+    paths.append(lib_path)
 
     if cuda:
         if IS_WINDOWS:
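After this hunk, `library_paths()` returns the installed torch/lib directory on every platform (previously only on Windows). A quick sketch of inspecting it; the printed paths are examples, not guaranteed output:

    # Print the link search paths a C++ extension build will use.
    from torch.utils.cpp_extension import library_paths

    print(library_paths())           # e.g. ['/.../site-packages/torch/lib']
    print(library_paths(cuda=True))  # additionally includes the CUDA library dir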
@@ -933,14 +928,14 @@ def verify_ninja_availability():
 
 
 def _prepare_ldflags(extra_ldflags, with_cuda, verbose):
+    here = os.path.abspath(__file__)
+    torch_path = os.path.dirname(os.path.dirname(here))
+    lib_path = os.path.join(torch_path, 'lib')
+
     if IS_WINDOWS:
         python_path = os.path.dirname(sys.executable)
         python_lib_path = os.path.join(python_path, 'libs')
 
-        here = os.path.abspath(__file__)
-        torch_path = os.path.dirname(os.path.dirname(here))
-        lib_path = os.path.join(torch_path, 'lib')
-
         extra_ldflags.append('c10.lib')
         if with_cuda:
             extra_ldflags.append('c10_cuda.lib')
@@ -949,9 +944,18 @@ def _prepare_ldflags(extra_ldflags, with_cuda, verbose):
             extra_ldflags.append('torch_cuda.lib')
         extra_ldflags.append('torch.lib')
         extra_ldflags.append('torch_python.lib')
-        extra_ldflags.append('_C.lib')
         extra_ldflags.append('/LIBPATH:{}'.format(python_lib_path))
         extra_ldflags.append('/LIBPATH:{}'.format(lib_path))
+    else:
+        extra_ldflags.append('-L{}'.format(lib_path))
+        extra_ldflags.append('-lc10')
+        if with_cuda:
+            extra_ldflags.append('-lc10_cuda')
+        extra_ldflags.append('-ltorch_cpu')
+        if with_cuda:
+            extra_ldflags.append('-ltorch_cuda')
+        extra_ldflags.append('-ltorch')
+        extra_ldflags.append('-ltorch_python')
 
     if with_cuda:
         if verbose:
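`_prepare_ldflags` feeds the JIT compilation path, so `torch.utils.cpp_extension.load()` now emits the explicit `-L`/`-l` flags added above on Linux as well. A hedged usage sketch; the extension name and source file are hypothetical:

    from torch.utils.cpp_extension import load

    # verbose=True prints the generated build commands, where the new
    # -L<torch>/lib -lc10 -ltorch_cpu -ltorch -ltorch_python flags appear.
    my_op = load(
        name='my_op',            # hypothetical extension name
        sources=['my_op.cpp'],   # hypothetical source file
        verbose=True,
    )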