Revert "Add magic TORCH_MAKE_PYBIND_ENUM_FASTER macro (#163527)"

This reverts commit 50c0550f5a5b1e35885d892081a7d5115d8b4489.

Reverted https://github.com/pytorch/pytorch/pull/163527 on behalf of https://github.com/swolchok due to breaking import torch in debug builds, see #164297 ([comment](https://github.com/pytorch/pytorch/pull/163527#issuecomment-3361919142))
PyTorch MergeBot
2025-10-02 15:42:40 +00:00
parent b0985144b5
commit c6329524d8
15 changed files with 0 additions and 79 deletions

View File

@@ -4,9 +4,6 @@
 #include <ATen/core/enum_tag.h>
 namespace py = pybind11;
-TORCH_MAKE_PYBIND_ENUM_FASTER(at::Tag)
 namespace torch {
 namespace autograd {
 void initEnumTag(PyObject* module) {

View File

@@ -145,14 +145,6 @@
 namespace py = pybind11;
-TORCH_MAKE_PYBIND_ENUM_FASTER(at::native::ConvBackend)
-TORCH_MAKE_PYBIND_ENUM_FASTER(sdp::SDPBackend)
-TORCH_MAKE_PYBIND_ENUM_FASTER(at::LinalgBackend)
-TORCH_MAKE_PYBIND_ENUM_FASTER(at::BlasBackend)
-TORCH_MAKE_PYBIND_ENUM_FASTER(at::ROCmFABackend)
-TORCH_MAKE_PYBIND_ENUM_FASTER(at::native::BatchNormBackend)
-TORCH_MAKE_PYBIND_ENUM_FASTER(at::impl::TorchFunctionDisabledState)
 static PyObject* module;
 static THPGenerator* THPDefaultCPUGenerator = nullptr;

View File

@@ -44,9 +44,6 @@
 using torch::impl::py_context_manager;
 using torch::impl::py_context_manager_DEPRECATED;
-TORCH_MAKE_PYBIND_ENUM_FASTER(torch::autograd::CreationMeta)
-TORCH_MAKE_PYBIND_ENUM_FASTER(c10::DeviceType)
 namespace {
 struct DisableFuncTorch {

View File

@@ -10,11 +10,6 @@
 #include <c10/cuda/CUDAException.h>
 #include <c10/cuda/CUDAGuard.h>
-TORCH_MAKE_PYBIND_ENUM_FASTER(cudaError_t)
-#if !defined(USE_ROCM) && defined(CUDA_VERSION) && CUDA_VERSION < 12000
-TORCH_MAKE_PYBIND_ENUM_FASTER(cudaOutputMode_t)
-#endif
 namespace torch::cuda::shared {
 #ifdef USE_ROCM

View File

@@ -13,8 +13,6 @@ using version_tuple = std::tuple<size_t, size_t, size_t>;
 #ifdef USE_CUDNN
 #include <cudnn.h>
-TORCH_MAKE_PYBIND_ENUM_FASTER(cudnnRNNMode_t)
 namespace {
 version_tuple getCompileVersion() {

View File

@@ -66,14 +66,6 @@
 #include <torch/custom_class.h>
-TORCH_MAKE_PYBIND_ENUM_FASTER(c10d::BuiltinCommHookType)
-TORCH_MAKE_PYBIND_ENUM_FASTER(c10d::DebugLevel)
-TORCH_MAKE_PYBIND_ENUM_FASTER(c10d::ReduceOp::RedOpType)
-TORCH_MAKE_PYBIND_ENUM_FASTER(c10d::ProcessGroup::BackendType)
-TORCH_MAKE_PYBIND_ENUM_FASTER(c10d::OpType)
-TORCH_MAKE_PYBIND_ENUM_FASTER(c10d::WorkResult)
-TORCH_MAKE_PYBIND_ENUM_FASTER(c10d::ErrorType)
 namespace {
 #ifdef USE_C10D_NCCL

View File

@@ -19,8 +19,6 @@ static struct PyModuleDef _module =
 PYBIND11_MAKE_OPAQUE(std::vector<uint8_t>)
-TORCH_MAKE_PYBIND_ENUM_FASTER(FrameAction)
 namespace torch::dynamo {
 #if IS_PYTHON_3_11_PLUS

View File

@@ -7,7 +7,6 @@
 #include <ATen/FunctionalTensorWrapper.h>
 #include <ATen/WrapDimUtils.h>
 #include <torch/csrc/functorch/init.h>
-#include <torch/csrc/utils/pybind.h>
 #include <torch/csrc/utils/python_raii.h>
 #include <torch/python.h>
@@ -25,9 +24,6 @@
 // This file contains functorch's Python bindings.
-TORCH_MAKE_PYBIND_ENUM_FASTER(at::functorch::TransformType)
-TORCH_MAKE_PYBIND_ENUM_FASTER(at::functorch::RandomnessType)
 namespace torch::functorch::impl {
 using namespace at::functorch;

View File

@@ -121,9 +121,6 @@
 #include <tuple>
 #include <utility>
-TORCH_MAKE_PYBIND_ENUM_FASTER(MobileOptimizerType)
-TORCH_MAKE_PYBIND_ENUM_FASTER(c10::SchemaArgType)
 namespace torch::jit {
 using c10::AliasInfo;

View File

@@ -75,9 +75,6 @@
 #include <fmt/format.h>
-TORCH_MAKE_PYBIND_ENUM_FASTER(
-    torch::jit::logging::LockingLogger::AggregationType)
 namespace torch::jit {
 using ::c10::Argument;

View File

@@ -17,8 +17,6 @@
 #include <torch/csrc/monitor/events.h>
 #include <torch/csrc/monitor/python_init.h>
-TORCH_MAKE_PYBIND_ENUM_FASTER(torch::monitor::Aggregation)
 namespace pybind11::detail {
 template <>
 struct type_caster<torch::monitor::data_value_t> {

View File

@@ -28,11 +28,6 @@
 #include <torch/csrc/jit/passes/onnx/shape_type_inference.h>
 #include <torch/csrc/jit/passes/onnx/unpack_quantized_weights.h>
 #include <torch/csrc/jit/serialization/export.h>
-#include <torch/csrc/utils/pybind.h>
-TORCH_MAKE_PYBIND_ENUM_FASTER(ONNX_NAMESPACE::TensorProto_DataType)
-TORCH_MAKE_PYBIND_ENUM_FASTER(torch::onnx::OperatorExportTypes)
-TORCH_MAKE_PYBIND_ENUM_FASTER(torch::onnx::TrainingMode)
 namespace torch::onnx {

View File

@@ -12,12 +12,6 @@
 #include <torch/csrc/profiler/standalone/execution_trace_observer.h>
 #include <torch/csrc/utils/pybind.h>
-TORCH_MAKE_PYBIND_ENUM_FASTER(at::RecordScope)
-TORCH_MAKE_PYBIND_ENUM_FASTER(torch::profiler::impl::ProfilerState)
-TORCH_MAKE_PYBIND_ENUM_FASTER(torch::profiler::impl::ActiveProfilerType)
-TORCH_MAKE_PYBIND_ENUM_FASTER(torch::profiler::impl::ActivityType)
-TORCH_MAKE_PYBIND_ENUM_FASTER(torch::profiler::impl::EventType)
 struct THPCapturedTraceback {
   PyObject_HEAD
   std::shared_ptr<torch::CapturedTraceback> data;

View File

@@ -29,28 +29,6 @@ PYBIND11_DECLARE_HOLDER_TYPE(T, c10::intrusive_ptr<T>, true)
 PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonOrSharedTypePtr<T>)
 PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonTypePtr<T>, true)
-// pybind11 3.x's type_caster_enum_type handles both py::native_enum
-// and py::enum_. py::native_enum is preferred, so it gets checked
-// first. We still use lots of py::enum_ because we don't (yet?)
-// require pybind11 3.x, and possibly because the difference is
-// user-visible. Putting TORCH_MAKE_PYBIND_ENUM_FASTER(T) at global
-// scope before using py::enum_<T> will cause pybind function calls
-// that pass arguments of type T to go faster (16% at time of writing,
-// but they are quite slow currently and the savings is a fixed cost,
-// so the percentage may be higher after other optimizations for
-// py::enum_ happen).
-#ifdef PYBIND11_HAS_NATIVE_ENUM
-#define TORCH_MAKE_PYBIND_ENUM_FASTER(T)                                   \
-  namespace pybind11::detail {                                             \
-  template <>                                                              \
-  struct type_caster_enum_type_enabled<T, void> : std::false_type {};      \
-  template <>                                                              \
-  struct type_caster_enum_type_enabled<const T, void> : std::false_type {}; \
-  } // namespace pybind11::detail
-#else // PYBIND11_HAS_NATIVE_ENUM
-#define TORCH_MAKE_PYBIND_ENUM_FASTER(T)
-#endif // PYBIND11_HAS_NATIVE_ENUM
 namespace pybind11::detail {
 // torch.Tensor <-> at::Tensor conversions (without unwrapping)

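For context on what is being reverted: per the deleted comment above, invoking TORCH_MAKE_PYBIND_ENUM_FASTER(T) at global scope before registering py::enum_<T> opts T out of pybind11 3.x's native-enum type caster, so argument conversion takes the cheaper py::enum_ path. A minimal sketch of that usage pattern follows; the enum and function names here are hypothetical, chosen only to mirror the call sites removed in this commit.

#include <torch/csrc/utils/pybind.h> // defined TORCH_MAKE_PYBIND_ENUM_FASTER before this revert

namespace py = pybind11;

// Hypothetical enum, standing in for at::Tag, c10d::OpType, etc.
enum class ExampleBackend { Foo, Bar };

// At global scope, before the py::enum_<ExampleBackend> registration.
// Under pybind11 3.x (PYBIND11_HAS_NATIVE_ENUM) this specializes
// type_caster_enum_type_enabled to std::false_type for ExampleBackend,
// bypassing the generic enum-type caster; otherwise it expands to nothing.
TORCH_MAKE_PYBIND_ENUM_FASTER(ExampleBackend)

void initExampleBindings(py::module& m) {
  py::enum_<ExampleBackend>(m, "ExampleBackend")
      .value("Foo", ExampleBackend::Foo)
      .value("Bar", ExampleBackend::Bar);
}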
View File

@@ -34,9 +34,6 @@
 namespace py = pybind11;
-TORCH_MAKE_PYBIND_ENUM_FASTER(c10::DispatchKey)
-TORCH_MAKE_PYBIND_ENUM_FASTER(c10::impl::TorchDispatchModeKey)
 namespace torch::impl::dispatch {
 // Global storage for leaked Python filenames to ensure they remain valid