Hide torch_python symbols (#142214)

Make torch_python symbols hidden by default on platforms other than Apple.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/142214
Approved by: https://github.com/ezyang
cyy
2024-12-16 00:59:26 +00:00
committed by PyTorch MergeBot
parent 744a303dee
commit af8789c056
12 changed files with 27 additions and 19 deletions
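With symbols hidden by default, anything that extension modules or other shared objects still need to reach has to be annotated explicitly, which is why the headers below gain TORCH_PYTHON_API on their extern declarations. A minimal sketch of the usual pattern behind such export macros (the real torch/csrc/Export.h is not shown in this diff, so the names here are illustrative only):

```cpp
// Illustrative only -- not the contents of torch/csrc/Export.h.
#pragma once

#if defined(_WIN32)
  #if defined(BUILDING_MY_PYTHON_LIB)            // hypothetical build-time define
    #define MY_PYTHON_API __declspec(dllexport)  // building the DLL: export
  #else
    #define MY_PYTHON_API __declspec(dllimport)  // consuming the DLL: import
  #endif
#else
  // With -fvisibility=hidden as the compiler default, only symbols carrying
  // this attribute remain visible outside the shared object.
  #define MY_PYTHON_API __attribute__((visibility("default")))
#endif

// Declarations marked with the macro stay reachable from other DSOs even
// when everything else in the library is hidden by default.
MY_PYTHON_API extern int my_exported_counter;
```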

View File

@@ -310,7 +310,7 @@ endif()
add_library(torch_python SHARED ${TORCH_PYTHON_SRCS})
torch_compile_options(torch_python) # see cmake/public/utils.cmake
if(NOT WIN32)
if(APPLE)
target_compile_options(torch_python PRIVATE
$<$<COMPILE_LANGUAGE:CXX>: -fvisibility=default>)
endif()

View File

@@ -1,5 +1,5 @@
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
#include <ATen/Layout.h>
@@ -15,7 +15,7 @@ struct THPLayout {
char name[LAYOUT_NAME_LEN + 1];
};
extern PyTypeObject THPLayoutType;
TORCH_PYTHON_API extern PyTypeObject THPLayoutType;
inline bool THPLayout_Check(PyObject* obj) {
return Py_TYPE(obj) == &THPLayoutType;

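The type objects need the export macro because the Check helpers are defined inline in the headers: every shared object that includes the header and calls the helper takes the address of a type object that lives inside libtorch_python. A small self-contained sketch of that dependency (hypothetical names, not PyTorch's):

```cpp
// Hypothetical stand-in for the PyTypeObject pattern above.
struct FakeTypeObject {};

// Header part: the variable lives in the shared library, but the inline
// function that takes its address is compiled into every includer, so the
// symbol must keep default visibility once the library hides symbols.
__attribute__((visibility("default"))) extern FakeTypeObject FakeLayoutType;

inline bool FakeLayout_Check(const FakeTypeObject* tp) {
  return tp == &FakeLayoutType;  // address of a symbol from another DSO
}

// Library part (compiled into the shared object):
FakeTypeObject FakeLayoutType;
```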
View File

@@ -1,5 +1,6 @@
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
#include <c10/core/MemoryFormat.h>
@@ -15,7 +16,7 @@ struct THPMemoryFormat {
char name[MEMORY_FORMAT_NAME_LEN + 1];
};
extern PyTypeObject THPMemoryFormatType;
TORCH_PYTHON_API extern PyTypeObject THPMemoryFormatType;
inline bool THPMemoryFormat_Check(PyObject* obj) {
return Py_TYPE(obj) == &THPMemoryFormatType;

View File

@@ -46,6 +46,7 @@
#include <torch/csrc/Dtype.h>
#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/Event.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/Generator.h>
#include <torch/csrc/Layout.h>
#include <torch/csrc/MemoryFormat.h>
@@ -1717,7 +1718,7 @@ class WeakTensorRef {
}
};
extern "C" C10_EXPORT PyObject* initModule();
extern "C" TORCH_PYTHON_API PyObject* initModule();
// separate decl and defn for msvc error C2491
PyObject* initModule() {
HANDLE_TH_ERRORS

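initModule() keeps an explicit export because it has to stay callable from outside libtorch_python (it is the entry point used when torch._C is imported); with hidden-by-default symbols, an un-annotated entry point would no longer be resolvable. For an ordinary CPython extension the same job is done by PyMODINIT_FUNC, as in this stand-alone sketch (hypothetical module, not PyTorch code):

```cpp
// Minimal extension module: PyMODINIT_FUNC already carries the
// default-visibility annotation, so PyInit_example stays exported even if
// the rest of the library is built with -fvisibility=hidden.
#include <Python.h>

static struct PyModuleDef example_def = {
    PyModuleDef_HEAD_INIT, "example", nullptr, -1, nullptr};

PyMODINIT_FUNC PyInit_example(void) {
  return PyModule_Create(&example_def);
}
```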
View File

@@ -1,5 +1,6 @@
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
#include <c10/core/QScheme.h>
@@ -15,7 +16,7 @@ struct THPQScheme {
char name[QSCHEME_NAME_LEN + 1];
};
extern PyTypeObject THPQSchemeType;
TORCH_PYTHON_API extern PyTypeObject THPQSchemeType;
inline bool THPQScheme_Check(PyObject* obj) {
return Py_TYPE(obj) == &THPQSchemeType;

View File

@@ -1,10 +1,11 @@
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/python_headers.h>
#include <cstdint>
extern PyTypeObject THPSizeType;
TORCH_PYTHON_API extern PyTypeObject THPSizeType;
#define THPSize_Check(obj) (Py_TYPE(obj) == &THPSizeType)

View File

@@ -21,7 +21,7 @@ TORCH_PYTHON_API PyObject* THPStorage_NewWithStorage(
c10::Storage _storage,
c10::impl::PyInterpreterStatus status,
bool allow_preexisting_pyobj = false);
extern PyTypeObject* THPStorageClass;
TORCH_PYTHON_API extern PyTypeObject* THPStorageClass;
inline bool THPStorage_CheckTypeExact(PyTypeObject* tp) {
return tp == THPStorageClass;
@@ -47,7 +47,7 @@ void THPStorage_postInit(PyObject* module);
void THPStorage_assertNotNull(THPStorage* storage);
void THPStorage_assertNotNull(PyObject* obj);
extern PyTypeObject THPStorageType;
TORCH_PYTHON_API extern PyTypeObject THPStorageType;
inline const c10::Storage& THPStorage_Unpack(THPStorage* storage) {
return *storage->cdata;

View File

@@ -1,5 +1,6 @@
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
#include <ATen/ATen.h>
@@ -13,8 +14,8 @@ struct THPFInfo : THPDTypeInfo {};
struct THPIInfo : THPDTypeInfo {};
extern PyTypeObject THPFInfoType;
extern PyTypeObject THPIInfoType;
TORCH_PYTHON_API extern PyTypeObject THPFInfoType;
TORCH_PYTHON_API extern PyTypeObject THPIInfoType;
inline bool THPFInfo_Check(PyObject* obj) {
return Py_TYPE(obj) == &THPFInfoType;

View File

@@ -3,6 +3,7 @@
#include <torch/csrc/python_headers.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/custom_function.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/saved_variable.h>
@@ -150,9 +151,9 @@ struct THPFunction {
};
bool THPFunction_initModule(PyObject* module);
extern PyTypeObject THPFunctionType;
extern PyObject* THPFunctionClass;
extern PyObject* THPGradientEdgeClass;
TORCH_PYTHON_API extern PyTypeObject THPFunctionType;
TORCH_PYTHON_API extern PyObject* THPFunctionClass;
TORCH_PYTHON_API extern PyObject* THPGradientEdgeClass;
inline bool THPFunction_Check(PyObject* obj) {
return PyObject_IsInstance(obj, (PyObject*)&THPFunctionType);

View File

@@ -47,6 +47,6 @@ class TORCH_API TSData : public torch::lazy::BackendData {
TORCH_API torch::lazy::BackendImplInterface* GetTSBackendImpl();
TORCH_API void InitTorchScriptBackend();
TORCH_PYTHON_API void InitTorchScriptBackend();
} // namespace torch::lazy

View File

@@ -3,6 +3,7 @@
#include <c10/core/Device.h>
#include <c10/core/DispatchKey.h>
#include <c10/core/ScalarType.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
namespace at {
@@ -13,13 +14,13 @@ namespace torch::tensors {
// Initializes the Python tensor type objects: torch.FloatTensor,
// torch.DoubleTensor, etc. and binds them in their containing modules.
void initialize_python_bindings();
TORCH_PYTHON_API void initialize_python_bindings();
// Same as set_default_tensor_type() but takes a PyObject*
void py_set_default_tensor_type(PyObject* type_obj);
TORCH_PYTHON_API void py_set_default_tensor_type(PyObject* type_obj);
// Same as py_set_default_tensor_type, but only changes the dtype (ScalarType).
void py_set_default_dtype(PyObject* dtype_obj);
TORCH_PYTHON_API void py_set_default_dtype(PyObject* dtype_obj);
// Gets the DispatchKey for the default tensor type.
//

View File

@@ -1,6 +1,7 @@
#pragma once
#include <c10/core/TensorOptions.h>
#include <torch/csrc/Export.h>
// device_lazy_init() is always compiled, even for CPU-only builds.
@@ -23,7 +24,7 @@ namespace torch::utils {
* try to use CUDA or XPU functionality from a CPU-only build, which is not good
* UX.
*/
void device_lazy_init(at::DeviceType device_type);
TORCH_PYTHON_API void device_lazy_init(at::DeviceType device_type);
void set_requires_device_init(at::DeviceType device_type, bool value);
inline void maybe_initialize_device(at::Device& device) {