Delete THP_CORE macro; partially replace with THP_BUILD_MAIN_LIB (#29143)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/29143

The THP_CORE macro is a very old macro that appears to have served
two purposes:

1. The torch-python equivalent of CAFFE2_BUILD_MAIN_LIB, used to toggle
   symbol visibility in headers

2. An ad hoc way of hiding certain definitions from headers so that
   external clients can't get at them (see the sketch after this list).
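
A minimal sketch of what use (2) looked like in practice (modeled on the
THPGenerator hunk further down; the specific declarations vary per header):

    // Before this PR: only code compiled with _THP_CORE defined (intended to
    // be torch-python itself) could see the init declaration below.
    THP_API PyObject *THPGeneratorClass;       // always visible to clients

    #ifdef _THP_CORE
    bool THPGenerator_init(PyObject *module);  // hidden from external clients
    #endif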

It did (2) in a very confusing manner, because we set THP_CORE in both
torch and torch-python (it shouldn't do anything in torch).  In this
PR I just get rid of use case (2) entirely (so everything shows up in
headers all the time), and then redo (1) using a new THP_BUILD_MAIN_LIB
macro.  This cleans up some of the macro definitions and makes my life
easier for working on #27215.
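
For (1), the new macro follows the usual Windows DLL export/import pattern:
torch_python is built with THP_BUILD_MAIN_LIB defined (now a PRIVATE compile
definition, per the CMake hunk below), so its own build exports symbols and
consumers import them.  A rough sketch, based on the THP_EXPORT_H hunk below;
the dllimport and non-Windows branches are the conventional counterpart and
are shown here only as an assumption for illustration:

    #ifdef _WIN32
    # ifdef THP_BUILD_MAIN_LIB    // defined only while building torch-python
    #  define THP_API extern __declspec(dllexport)
    # else                        // assumed: consumers import instead
    #  define THP_API extern __declspec(dllimport)
    # endif
    #else
    # define THP_API extern       // assumed: plain extern elsewhere
    #endif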

Signed-off-by: Edward Z. Yang <ezyang@fb.com>

Test Plan: Imported from OSS

Differential Revision: D18309594

Pulled By: ezyang

fbshipit-source-id: adcb6d7cb387cd818480137e2b94e5e761dbfefc
Author: Edward Yang
Date: 2019-11-06 15:00:18 -08:00
Committed by: Facebook Github Bot
Parent: f227530c88
Commit: a5d356cb39
15 changed files with 2 additions and 30 deletions

View File

@@ -659,8 +659,6 @@ if (NOT INTERN_BUILD_MOBILE)
 endif()
 if (NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
-target_compile_definitions(torch PUBLIC _THP_CORE)
 # until they can be unified, keep these lists synced with setup.py
 if(MSVC)

View File

@@ -357,7 +357,6 @@ def add_torch_libs():
 ]
 compiler_flags_cpu = [
-"-D_THP_CORE",
 "-DUSE_C10D",
 "-DUSE_DISTRIBUTED",
 "-DUSE_NUMPY",

View File

@@ -364,7 +364,7 @@ set_source_files_properties(
 PROPERTIES GENERATED TRUE
 )
-target_compile_definitions(torch_python PUBLIC _THP_CORE)
+target_compile_definitions(torch_python PRIVATE "-DTHP_BUILD_MAIN_LIB")
 target_link_libraries(torch_python ${TORCH_PYTHON_LINK_LIBRARIES})

View File

@@ -147,10 +147,7 @@ struct python_error : public std::exception {
 std::string message;
 };
-#ifdef _THP_CORE
 bool THPException_init(PyObject *module);
-#endif
 namespace torch {

View File

@@ -21,6 +21,4 @@ THP_API PyObject * THPGenerator_initDefaultGenerator(at::Generator* cdata);
 THP_API PyObject *THPGeneratorClass;
-#ifdef _THP_CORE
 bool THPGenerator_init(PyObject *module);
-#endif

View File

@@ -11,6 +11,4 @@ extern PyTypeObject THPSizeType;
 PyObject * THPSize_New(const torch::autograd::Variable& t);
 PyObject * THPSize_NewFromSizes(int dim, const int64_t *sizes);
-#ifdef _THP_CORE
 void THPSize_init(PyObject *module);
-#endif

View File

@@ -47,10 +47,8 @@
 #define THPQInt32Storage_CData(obj) (obj)->cdata
 #define THPBFloat16Storage_CData(obj) (obj)->cdata
-#ifdef _THP_CORE
 #define THPStorageType TH_CONCAT_3(THP,Real,StorageType)
 #define THPStorageBaseStr TH_CONCAT_STRING_2(Real,StorageBase)
-#endif
 #include <torch/csrc/generic/Storage.h>
 #include <TH/THGenerateAllTypes.h>

View File

@@ -38,10 +38,8 @@
 #include <torch/csrc/utils.h> // This requires defined Storage and Tensor types
 #include <torch/csrc/utils/byte_order.h>
-#ifdef _THP_CORE
 #include <torch/csrc/serialization.h>
 #include <torch/csrc/autograd/python_autograd.h>
-#endif
 #endif

View File

@@ -2,7 +2,7 @@
 #define THP_EXPORT_H
 #ifdef _WIN32
-# ifdef _THP_CORE
+# ifdef THP_BUILD_MAIN_LIB
 # define THP_API extern __declspec(dllexport)
 # define THP_CLASS __declspec(dllexport)
 # else

View File

@@ -3,7 +3,6 @@
 extern THCState *state;
-#ifdef _THP_CORE
 void THCPModule_setDevice(int idx);
 PyObject * THCPModule_getDevice_wrap(PyObject *self);
 PyObject * THCPModule_setDevice_wrap(PyObject *self, PyObject *arg);
@@ -11,6 +10,5 @@ PyObject * THCPModule_getDeviceName_wrap(PyObject *self, PyObject *arg);
 PyObject * THCPModule_getDriverVersion(PyObject *self);
 PyObject * THCPModule_isDriverSufficient(PyObject *self);
 PyObject * THCPModule_getCurrentBlasHandle_wrap(PyObject *self);
-#endif
 #endif

View File

@@ -36,10 +36,8 @@
 #define THCPBoolStorage_CData(obj) (obj)->cdata
 #define THCPBFloat16Storage_CData(obj) (obj)->cdata
-#ifdef _THP_CORE
 #define THCPStorageType TH_CONCAT_3(THCP,Real,StorageType)
 #define THCPStorageBaseStr TH_CONCAT_STRING_3(Cuda,Real,StorageBase)
-#endif
 #include <torch/csrc/cuda/override_macros.h>

View File

@@ -13,8 +13,6 @@
 #include <torch/csrc/cuda/Storage.h>
 #include <torch/csrc/cuda/Stream.h>
 #include <torch/csrc/cuda/Event.h>
-#ifdef _THP_CORE
 #include <torch/csrc/cuda/utils.h>
-#endif
 #endif

View File

@@ -11,9 +11,7 @@
 #define THPStorageClass TH_CONCAT_3(THP,Real,StorageClass)
 #define THPStorage_(NAME) TH_CONCAT_4(THP,Real,Storage_,NAME)
-#ifdef _THP_CORE
 #define THWStoragePtr TH_CONCAT_3(TH,Real,StoragePtr)
 #define THWTensorPtr TH_CONCAT_3(TH,Real,TensorPtr)
 #define THPStoragePtr TH_CONCAT_3(THP,Real,StoragePtr)
 #define THPTensorPtr TH_CONCAT_3(THP,Real,TensorPtr)
-#endif

View File

@@ -7,13 +7,11 @@
 THP_API PyObject * THPStorage_(New)(THWStorage *ptr);
 extern PyObject *THPStorageClass;
-#ifdef _THP_CORE
 #include <torch/csrc/Types.h>
 bool THPStorage_(init)(PyObject *module);
 void THPStorage_(postInit)(PyObject *module);
 extern PyTypeObject THPStorageType;
-#endif
 #endif

View File

@@ -127,8 +127,6 @@ THP_API void THPUtils_invalidArguments(
 PyObject *given_args, PyObject *given_kwargs,
 const char *function_name, size_t num_options, ...);
-#ifdef _THP_CORE
 bool THPUtils_checkIntTuple(PyObject *arg);
 std::vector<int> THPUtils_unpackIntTuple(PyObject *arg);
@@ -193,6 +191,4 @@ bool maybeThrowBackCompatKeepdimWarn(char *func);
 std::vector<c10::optional<at::cuda::CUDAStream>> THPUtils_PySequence_to_CUDAStreamList(PyObject *obj);
 #endif
-#endif /* _THP_CORE */
 #endif