diff --git a/aten/src/ATen/DLConvertor.h b/aten/src/ATen/DLConvertor.h
index 41a432601a2c..b35c9657527d 100644
--- a/aten/src/ATen/DLConvertor.h
+++ b/aten/src/ATen/DLConvertor.h
@@ -13,12 +13,10 @@ namespace at {
 TORCH_API ScalarType toScalarType(const DLDataType& dtype);
 TORCH_API DLManagedTensor* toDLPack(const Tensor& src);
 TORCH_API Tensor fromDLPack(DLManagedTensor* src);
-
-[[deprecated("Please migrate to a non-const variant")]] inline Tensor fromDLPack(
-    const DLManagedTensor* src) {
+C10_DEPRECATED_MESSAGE("Please migrate to a non-const variant")
+inline Tensor fromDLPack(const DLManagedTensor* src) {
   return fromDLPack(const_cast<DLManagedTensor*>(src));
 }
-
 TORCH_API Tensor fromDLPack(DLManagedTensor* src, std::function<void(void*)> deleter);
 TORCH_API DLDataType getDLDataType(const Tensor& t);
diff --git a/aten/src/ATen/Dispatch.h b/aten/src/ATen/Dispatch.h
index df82de08d2d6..9f5752eb97ed 100644
--- a/aten/src/ATen/Dispatch.h
+++ b/aten/src/ATen/Dispatch.h
@@ -104,23 +104,23 @@ inline at::ScalarType scalar_type(at::ScalarType s) {
   return s;
 }
 
-[[deprecated(
+C10_DEPRECATED_MESSAGE(
     "passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, "
-    "pass an at::ScalarType instead")]] inline at::ScalarType
-scalar_type(const at::DeprecatedTypeProperties& t) {
+    "pass an at::ScalarType instead")
+inline at::ScalarType scalar_type(const at::DeprecatedTypeProperties& t) {
   return t.scalarType();
 }
 
-[[deprecated(
+C10_DEPRECATED_MESSAGE(
     "AT_DISPATCH_ALL_TYPES_AND_HALF is deprecated, "
-    "use AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, ...) instead")]] inline void
-deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF() {}
+    "use AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, ...) instead")
+inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF() {}
 
-[[deprecated(
+C10_DEPRECATED_MESSAGE(
     "AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX is deprecated, "
     "use AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(at::ScalarType::Half, ...) "
-    "instead")]] inline void
-deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX() {}
+    "instead")
+inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX() {}
 
 } // namespace detail
diff --git a/aten/src/ATen/autocast_mode.h b/aten/src/ATen/autocast_mode.h
index 8577ae06762e..fbd9121d3851 100644
--- a/aten/src/ATen/autocast_mode.h
+++ b/aten/src/ATen/autocast_mode.h
@@ -23,37 +23,36 @@ TORCH_API bool is_autocast_cache_enabled();
 TORCH_API void set_autocast_cache_enabled(bool enabled);
 
 // deprecated CUDA-specific autocast APIs
-[[deprecated(
-    "at::autocast::is_enabled() is deprecated. Please use at::autocast::is_autocast_enabled(at::kCUDA) instead.")]] TORCH_API inline bool
-is_enabled() {
+C10_DEPRECATED_MESSAGE(
+    "at::autocast::is_enabled() is deprecated. Please use at::autocast::is_autocast_enabled(at::kCUDA) instead.")
+TORCH_API inline bool is_enabled() {
   TORCH_WARN_DEPRECATION(
       "at::autocast::",
       __func__,
       "() is deprecated. Please use at::autocast::is_autocast_enabled(at::kCUDA) instead.")
   return is_autocast_enabled(at::kCUDA);
 }
 
-[[deprecated(
-    "at::autocast::set_enabled(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(at::kCUDA, enabled) instead.")]] TORCH_API inline void
-set_enabled(bool enabled) {
+C10_DEPRECATED_MESSAGE(
+    "at::autocast::set_enabled(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(at::kCUDA, enabled) instead.")
+TORCH_API inline void set_enabled(bool enabled) {
   TORCH_WARN_DEPRECATION(
      "at::autocast::",
      __func__,
      "(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(at::kCUDA, enabled) instead.")
   set_autocast_enabled(at::kCUDA, enabled);
 }
 
-[[deprecated(
-    "at::autocast::get_autocast_gpu_dtype() is deprecated. Please use at::autocast::get_autocast_dtype(at::kCUDA) instead.")]] TORCH_API inline at::
-    ScalarType
-    get_autocast_gpu_dtype() {
+C10_DEPRECATED_MESSAGE(
+    "at::autocast::get_autocast_gpu_dtype() is deprecated. Please use at::autocast::get_autocast_dtype(at::kCUDA) instead.")
+TORCH_API inline at::ScalarType get_autocast_gpu_dtype() {
   TORCH_WARN_DEPRECATION(
      "at::autocast::",
      __func__,
      "() is deprecated. Please use at::autocast::get_autocast_dtype(at::kCUDA) instead.")
   return get_autocast_dtype(at::kCUDA);
 }
 
-[[deprecated(
-    "at::autocast::set_autocast_gpu_dtype(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(at::kCUDA, dtype) instead.")]] TORCH_API inline void
-set_autocast_gpu_dtype(at::ScalarType dtype) {
+C10_DEPRECATED_MESSAGE(
+    "at::autocast::set_autocast_gpu_dtype(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(at::kCUDA, dtype) instead.")
+TORCH_API inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
   TORCH_WARN_DEPRECATION(
      "at::autocast::",
      __func__,
@@ -62,10 +61,11 @@ set_autocast_gpu_dtype(at::ScalarType dtype) {
 }
 
 #define DECLARE_DEPRECATED_AUTOCAST_APIS(name, device_type) \
-  [[deprecated( \
+  C10_DEPRECATED_MESSAGE( \
      "at::autocast::is_" #name \
      "_enabled() is deprecated. Please use at::autocast::is_autocast_enabled(" #device_type \
-      ") instead.")]] TORCH_API inline bool is_##name##_enabled() { \
+      ") instead.") \
+  TORCH_API inline bool is_##name##_enabled() { \
    TORCH_WARN_DEPRECATION( \
        "at::autocast::", \
        __func__, \
@@ -74,11 +74,11 @@ set_autocast_gpu_dtype(at::ScalarType dtype) {
     return is_autocast_enabled(device_type); \
   } \
 \
-  [[deprecated( \
+  C10_DEPRECATED_MESSAGE( \
      "at::autocast::set_" #name \
      "_enabled(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(" #device_type \
-      ", enabled) instead.")]] TORCH_API inline void \
-      set_##name##_enabled(bool enabled) { \
+      ", enabled) instead.") \
+  TORCH_API inline void set_##name##_enabled(bool enabled) { \
    TORCH_WARN_DEPRECATION( \
        "at::autocast::", \
        __func__, \
@@ -87,11 +87,11 @@ set_autocast_gpu_dtype(at::ScalarType dtype) {
     set_autocast_enabled(device_type, enabled); \
   } \
 \
-  [[deprecated( \
+  C10_DEPRECATED_MESSAGE( \
      "at::autocast::get_autocast_" #name \
      "_dtype() is deprecated. Please use at::autocast::get_autocast_dtype(" #device_type \
-      ") instead.")]] TORCH_API inline at::ScalarType \
-      get_autocast_##name##_dtype() { \
+      ") instead.") \
+  TORCH_API inline at::ScalarType get_autocast_##name##_dtype() { \
    TORCH_WARN_DEPRECATION( \
        "at::autocast::", \
        __func__, \
@@ -100,11 +100,11 @@ set_autocast_gpu_dtype(at::ScalarType dtype) {
     return get_autocast_dtype(device_type); \
   } \
 \
-  [[deprecated( \
+  C10_DEPRECATED_MESSAGE( \
      "at::autocast::set_autocast_" #name \
      "_dtype(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(" #device_type \
-      ", dtype) instead.")]] TORCH_API inline void \
-      set_autocast_##name##_dtype(at::ScalarType dtype) { \
+      ", dtype) instead.") \
+  TORCH_API inline void set_autocast_##name##_dtype(at::ScalarType dtype) { \
    TORCH_WARN_DEPRECATION( \
        "at::autocast::", \
        __func__, \
diff --git a/aten/src/ATen/core/TensorAccessor.h b/aten/src/ATen/core/TensorAccessor.h
index 9b51b3d782a6..a1a4e0972d3a 100644
--- a/aten/src/ATen/core/TensorAccessor.h
+++ b/aten/src/ATen/core/TensorAccessor.h
@@ -263,8 +263,9 @@ public:
 // Can't put this directly into the macro function args because of commas
 #define AT_X GenericPackedTensorAccessor<T, N, PtrTraits, index_t>
 
+// Old name for `GenericPackedTensorAccessor`
 template <
     typename T,
     size_t N,
    template <typename U> class PtrTraits = DefaultPtrTraits,
    typename index_t = int64_t>
-using PackedTensorAccessor [[deprecated("Old name for `GenericPackedTensorAccessor`")]] = AT_X;
+C10_DEFINE_DEPRECATED_USING(PackedTensorAccessor, AT_X)
 
 #undef AT_X
diff --git a/aten/src/ATen/core/ivalue.h b/aten/src/ATen/core/ivalue.h
index b789ddb7b244..42a03ea94602 100644
--- a/aten/src/ATen/core/ivalue.h
+++ b/aten/src/ATen/core/ivalue.h
@@ -809,9 +809,12 @@ struct TORCH_API IValue final {
   IValue(c10::Dict<Key, Value> v);
 
   template <class Key, class Value>
-  [[deprecated(
-      "IValues based on std::unordered_map are slow and deprecated. Please use c10::Dict<K, V> instead.")]]
-  IValue(std::unordered_map<Key, Value> v);
+  /// \cond
+  /// DOXYGEN_CANNOT_HANDLE_CONSTRUCTORS_WITH_MACROS_SO_EXCLUDE_THIS_LINE_FROM_DOXYGEN
+  C10_DEPRECATED_MESSAGE(
+      "IValues based on std::unordered_map are slow and deprecated. Please use c10::Dict<K, V> instead.")
+  /// \endcond
+  IValue(std::unordered_map<Key, Value> v);
 
   template <class T, enable_if_ivalue_constructible<T> = nullptr>
   IValue(std::optional<T> v);
diff --git a/aten/src/ATen/core/ivalue_inl.h b/aten/src/ATen/core/ivalue_inl.h
index 5fab4e6e7afc..96aef86d6686 100644
--- a/aten/src/ATen/core/ivalue_inl.h
+++ b/aten/src/ATen/core/ivalue_inl.h
@@ -1771,8 +1771,8 @@ struct _fake_type {};
 template <typename Elem>
 // TODO this is deprecated but we don't throw a warning because a lot of ops in
 // native_functions.yaml still return std::vector.
-// [[deprecated("IValues based on std::vector<T> are potentially slow
-// and deprecated. Please use torch::List<T> instead.")]]
+// C10_DEPRECATED_MESSAGE("IValues based on std::vector<T> are potentially slow
+// and deprecated. Please use torch::List<T> instead.")
 std::vector<Elem> generic_to(IValue ivalue, _fake_type<std::vector<Elem>>) {
   // We need to do a deep copy of the vector because there might be other
   // references to this same IValue that also use the list. We can't just
@@ -1908,8 +1908,8 @@ c10::Dict<Key, Value> generic_to(
 }
 
 template <typename K, typename V>
-[[deprecated(
-    "IValues based on std::unordered_map are slow and deprecated. Please use c10::Dict<K, V> instead.")]]
+C10_DEPRECATED_MESSAGE(
+    "IValues based on std::unordered_map are slow and deprecated. Please use c10::Dict<K, V> instead.")
 std::unordered_map<K, V> generic_to(
     IValue ivalue,
     _fake_type<std::unordered_map<K, V>>) {
diff --git a/aten/src/ATen/core/op_registration/op_registration.h b/aten/src/ATen/core/op_registration/op_registration.h
index 2df43db5bb5c..f309ee2f277b 100644
--- a/aten/src/ATen/core/op_registration/op_registration.h
+++ b/aten/src/ATen/core/op_registration/op_registration.h
@@ -562,7 +562,7 @@ public:
   }
 
   template<class Lambda>
-  [[deprecated("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.")]]
+  C10_DEPRECATED_MESSAGE("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.")
   // enable_if: only enable it if Lambda is actually a functor but not a stateless lambda
   std::enable_if_t<guts::is_functor<Lambda>::value && !guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
   op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
diff --git a/aten/src/ATen/core/stack.h b/aten/src/ATen/core/stack.h
index 91b724b6b93e..4fd4c2659790 100644
--- a/aten/src/ATen/core/stack.h
+++ b/aten/src/ATen/core/stack.h
@@ -21,7 +21,7 @@ class Operation {
  public:
   template <typename F,
             std::enable_if_t<accepts<F, Stack*>::value, int> = 0>
-  [[deprecated("Please use void(Stack&) to register operator instead.")]]
+  C10_DEPRECATED_MESSAGE("Please use void(Stack&) to register operator instead.")
   // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
   Operation(F&& raw): op_([raw = std::forward<F>(raw)](Stack& stack) {
     raw(&stack);
diff --git a/aten/src/ATen/native/cuda/FractionalMaxPool2d.cu b/aten/src/ATen/native/cuda/FractionalMaxPool2d.cu
index 5479cd634f9b..3bc3b6f4cb51 100644
--- a/aten/src/ATen/native/cuda/FractionalMaxPool2d.cu
+++ b/aten/src/ATen/native/cuda/FractionalMaxPool2d.cu
@@ -45,10 +45,10 @@ __device__ inline int get_interval(accscalar_t sample,
 template <typename scalar_t>
 __global__ void fractional_max_pool2d_out_cuda_frame(
-    GenericPackedTensorAccessor<scalar_t, 4> output,
-    GenericPackedTensorAccessor<int64_t, 4> indices,
-    GenericPackedTensorAccessor<const scalar_t, 4> input,
-    GenericPackedTensorAccessor<const scalar_t, 3> samples,
+    PackedTensorAccessor<scalar_t, 4> output,
+    PackedTensorAccessor<int64_t, 4> indices,
+    PackedTensorAccessor<const scalar_t, 4> input,
+    PackedTensorAccessor<const scalar_t, 3> samples,
     int poolSizeH,
     int poolSizeW) {
   using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
@@ -102,9 +102,9 @@ __global__ void fractional_max_pool2d_out_cuda_frame(
 template <typename scalar_t>
 __global__ void fractional_max_pool2d_backward_out_cuda_frame(
-    GenericPackedTensorAccessor<scalar_t, 4> gradInput,
-    GenericPackedTensorAccessor<const scalar_t, 4> gradOutput,
-    GenericPackedTensorAccessor<const int64_t, 4> indices) {
+    PackedTensorAccessor<scalar_t, 4> gradInput,
+    PackedTensorAccessor<const scalar_t, 4> gradOutput,
+    PackedTensorAccessor<const int64_t, 4> indices) {
   // Output (h, w) point that this thread is responsible for
   int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
   int plane = blockIdx.y;
diff --git a/aten/src/ATen/native/cuda/UpSampleBilinear2d.cu b/aten/src/ATen/native/cuda/UpSampleBilinear2d.cu
index f1e9907dfee0..b891750891d5 100644
--- a/aten/src/ATen/native/cuda/UpSampleBilinear2d.cu
+++ b/aten/src/ATen/native/cuda/UpSampleBilinear2d.cu
@@ -37,8 +37,8 @@ __global__ void upsample_bilinear2d_out_frame(
     const accscalar_t rheight,
     const accscalar_t rwidth,
     const bool align_corners,
-    const GenericPackedTensorAccessor<const scalar_t, 4> idata,
-    GenericPackedTensorAccessor<scalar_t, 4> odata) {
+    const PackedTensorAccessor<const scalar_t, 4> idata,
+    PackedTensorAccessor<scalar_t, 4> odata) {
   int index = threadIdx.x + blockIdx.x * blockDim.x;
 
   const int batchsize = idata.size(0);
diff --git a/aten/src/ATen/native/sparse/cuda/SoftMax.cu b/aten/src/ATen/native/sparse/cuda/SoftMax.cu
index 69b72e417c22..d39e41c53255 100644
--- a/aten/src/ATen/native/sparse/cuda/SoftMax.cu
+++ b/aten/src/ATen/native/sparse/cuda/SoftMax.cu
@@ -95,8 +95,8 @@ __global__ void cuda_sparse_coo_softmax_kernel(
     int64_t* pool_offsets,
     int64_t nvalues,
     scalar_t* mx_rows,
-    GenericPackedTensorAccessor<scalar_t, 2> input_values_acc,
-    GenericPackedTensorAccessor<scalar_t, 2> output_values_acc) {
+    PackedTensorAccessor<scalar_t, 2> input_values_acc,
+    PackedTensorAccessor<scalar_t, 2> output_values_acc) {
   /*
     See ATen/native/sparse/SoftMax.cpp:cpu_sparse_coo_softmax for the CPU
     implementation of the sparse softmax algorithm that this implementation is
@@ -156,9 +156,9 @@ __global__ void cuda_sparse_coo_softmax_backward_kernel(
     int64_t* grad_offsets,
     int64_t* out_offsets,
     int64_t* lower_bound_values,
-    GenericPackedTensorAccessor<scalar_t, 2> values_accessor,
-    GenericPackedTensorAccessor<scalar_t, 2> out_values_accessor,
-    GenericPackedTensorAccessor<scalar_t, 2> grad_values_accessor) {
+    PackedTensorAccessor<scalar_t, 2> values_accessor,
+    PackedTensorAccessor<scalar_t, 2> out_values_accessor,
+    PackedTensorAccessor<scalar_t, 2> grad_values_accessor) {
   /*
     See ATen/native/sparse/SoftMax.cpp:cpu_sparse_coo_softmax_backward for
     the CPU implementation of the sparse softmax backward algorithm that this
diff --git a/aten/src/ATen/templates/TensorBody.h b/aten/src/ATen/templates/TensorBody.h
index 78fd5b8fce87..2e1520392ef9 100644
--- a/aten/src/ATen/templates/TensorBody.h
+++ b/aten/src/ATen/templates/TensorBody.h
@@ -221,7 +221,7 @@ class TORCH_API Tensor: public TensorBase {
     return copy_(rhs);
   }
 
-  [[deprecated("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")]]
+  C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
   DeprecatedTypeProperties & type() const {
     return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
         dispatchKeyToBackend(legacyExtractDispatchKey(key_set())),
@@ -237,13 +237,13 @@ class TORCH_API Tensor: public TensorBase {
     return to(options().device(backendToDeviceType(b)).layout(layout_from_backend(b)), /*non_blocking*/ false, /*copy*/ false);
   }
 
-  [[deprecated("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())")]]
+  C10_DEPRECATED_MESSAGE("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())")
   bool is_variable() const noexcept {
     return !at::impl::variable_excluded_from_dispatch();
   }
 
   template<typename T>
-  [[deprecated("Tensor.data<T>() is deprecated. Please use Tensor.data_ptr<T>() instead.")]]
+  C10_DEPRECATED_MESSAGE("Tensor.data<T>() is deprecated. Please use Tensor.data_ptr<T>() instead.")
   T * data() const {
     return data_ptr<T>();
   }
@@ -252,12 +252,12 @@ class TORCH_API Tensor: public TensorBase {
   T item() const;
 
   template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
-  [[deprecated("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")]]
+  C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
   GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() const & {
     return generic_packed_accessor<T,N,PtrTraits,index_t>();
   }
   template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
-  [[deprecated("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")]]
+  C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
   GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() && = delete;
 
   Tensor operator~() const {
diff --git a/c10/core/Scalar.h b/c10/core/Scalar.h
index 0a7113d7f851..efbe3b65adcc 100644
--- a/c10/core/Scalar.h
+++ b/c10/core/Scalar.h
@@ -191,9 +191,9 @@ class C10_API Scalar {
     return Tag::HAS_d == tag || Tag::HAS_sd == tag;
   }
 
-  [[deprecated(
-      "isIntegral is deprecated. Please use the overload with 'includeBool' parameter instead.")]] bool
-  isIntegral() const {
+  C10_DEPRECATED_MESSAGE(
+      "isIntegral is deprecated. Please use the overload with 'includeBool' parameter instead.")
+  bool isIntegral() const {
     return Tag::HAS_i == tag || Tag::HAS_si == tag || Tag::HAS_u == tag;
   }
   bool isIntegral(bool includeBool) const {
diff --git a/c10/core/ScalarType.h b/c10/core/ScalarType.h
index d9bf9313dd47..fa0ef9be8412 100644
--- a/c10/core/ScalarType.h
+++ b/c10/core/ScalarType.h
@@ -364,9 +364,9 @@ inline bool isIntegralType(ScalarType t, bool includeBool) {
   return isIntegral || (includeBool && t == ScalarType::Bool);
 }
 
-[[deprecated(
-    "isIntegralType is deprecated. Please use the overload with 'includeBool' parameter instead.")]] inline bool
-isIntegralType(ScalarType t) {
+C10_DEPRECATED_MESSAGE(
+    "isIntegralType is deprecated. Please use the overload with 'includeBool' parameter instead.")
+inline bool isIntegralType(ScalarType t) {
   return isIntegralType(t, /*includeBool=*/false);
 }
diff --git a/c10/util/ArrayRef.h b/c10/util/ArrayRef.h
index 7d280507ea72..db9dbdc47812 100644
--- a/c10/util/ArrayRef.h
+++ b/c10/util/ArrayRef.h
@@ -378,8 +378,8 @@ bool operator!=(c10::ArrayRef<T> a1, const std::vector<T>& a2) {
 
 using IntArrayRef = ArrayRef<int64_t>;
 
-using IntList [[deprecated(
-    "This alias is deprecated because it doesn't make ownership semantics obvious. Use IntArrayRef instead!")]] =
-    ArrayRef<int64_t>;
+// This alias is deprecated because it doesn't make ownership
+// semantics obvious. Use IntArrayRef instead!
+C10_DEFINE_DEPRECATED_USING(IntList, ArrayRef<int64_t>)
 
 } // namespace c10
diff --git a/c10/util/Deprecated.h b/c10/util/Deprecated.h
index ab1278f5153b..88440a0242eb 100644
--- a/c10/util/Deprecated.h
+++ b/c10/util/Deprecated.h
@@ -1,10 +1,102 @@
 #pragma once
 
-#if !defined(FBCODE_CAFFE2) && !defined(C10_NO_DEPRECATED)
+/**
+ * This file provides portable macros for marking declarations
+ * as deprecated. You should generally use C10_DEPRECATED,
+ * except when marking 'using' declarations as deprecated,
+ * in which case you should use C10_DEFINE_DEPRECATED_USING
+ * (due to portability concerns).
+ */
+
+// Sample usage:
+//
+//    C10_DEPRECATED void bad_func();
+//    struct C10_DEPRECATED BadStruct {
+//      ...
+//    };
+
+// NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses
+// the "__declspec(deprecated)" implementation and not the C++14
+// "[[deprecated]]" attribute. We tried enabling "[[deprecated]]" for C++14 on
+// MSVC, but ran into issues with some older MSVC versions.
+#if (defined(__cplusplus) && __cplusplus >= 201402L)
 #define C10_DEPRECATED [[deprecated]]
 #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
+#elif defined(__GNUC__)
+#define C10_DEPRECATED __attribute__((deprecated))
+// TODO Is there some way to implement this?
+#define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated))
+
+#elif defined(_MSC_VER)
+#define C10_DEPRECATED __declspec(deprecated)
+#define C10_DEPRECATED_MESSAGE(message) __declspec(deprecated(message))
+#else
+#warning "You need to implement C10_DEPRECATED for this compiler"
+#define C10_DEPRECATED
+#endif
+
+// Sample usage:
+//
+//    C10_DEFINE_DEPRECATED_USING(BadType, int)
+//
+// which is the portable version of
+//
+//    using BadType [[deprecated]] = int;
+
+// technically [[deprecated]] syntax is from c++14 standard, but it works in
+// many compilers.
+#if defined(__has_cpp_attribute)
+#if __has_cpp_attribute(deprecated) && !defined(__CUDACC__)
 #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
   using TypeName [[deprecated]] = TypeThingy;
-
+#endif
+#endif
+
+#if defined(_MSC_VER)
+#if defined(__CUDACC__)
+// neither [[deprecated]] nor __declspec(deprecated) work on nvcc on Windows;
+// you get the error:
+//
+//    error: attribute does not apply to any entity
+//
+// So we just turn the macro off in this case.
+#if defined(C10_DEFINE_DEPRECATED_USING)
+#undef C10_DEFINE_DEPRECATED_USING
+#endif
+#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
+  using TypeName = TypeThingy;
+#else
+// [[deprecated]] does work in windows without nvcc, though msc doesn't support
+// `__has_cpp_attribute` when c++14 is supported, otherwise
+// __declspec(deprecated) is used as the alternative.
+#ifndef C10_DEFINE_DEPRECATED_USING
+#if defined(_MSVC_LANG) && _MSVC_LANG >= 201402L
+#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
+  using TypeName [[deprecated]] = TypeThingy;
+#else
+#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
+  using TypeName = __declspec(deprecated) TypeThingy;
+#endif
+#endif
+#endif
+#endif
+
+#if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__)
+// nvcc has a bug where it doesn't understand __attribute__((deprecated))
+// declarations even when the host compiler supports it. We'll only use this gcc
+// attribute when not cuda, and when using a GCC compiler that doesn't support
+// the c++14 syntax we checked for above (available in __GNUC__ >= 5)
+#if !defined(__CUDACC__)
+#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
+  using TypeName __attribute__((deprecated)) = TypeThingy;
+#else
+// using cuda + gcc < 5, neither deprecated syntax is available so turning off.
+#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
+  using TypeName = TypeThingy;
+#endif
+#endif
+
+#if !defined(C10_DEFINE_DEPRECATED_USING)
+#warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler"
+#define C10_DEFINE_DEPRECATED_USING
 #endif
diff --git a/c10/util/Exception.h b/c10/util/Exception.h
index f764ff1e1086..275526cf4008 100644
--- a/c10/util/Exception.h
+++ b/c10/util/Exception.h
@@ -689,28 +689,28 @@ namespace c10::detail {
 
 /*
 // Deprecation disabled until we fix sites in our codebase
-[[deprecated("AT_ERROR(msg) is deprecated, use TORCH_CHECK(false, msg)
-instead.")]]
+C10_DEPRECATED_MESSAGE("AT_ERROR(msg) is deprecated, use TORCH_CHECK(false, msg)
+instead.")
 */
 inline void deprecated_AT_ERROR() {}
 
 /*
 // Deprecation disabled until we fix sites in our codebase
-[[deprecated("AT_ASSERT is deprecated, if you mean to indicate an
-internal invariant failure, use " \
-"TORCH_INTERNAL_ASSERT instead; if you mean to do user error checking, use " \
-"TORCH_CHECK. See
-https://github.com/pytorch/pytorch/issues/20287 for more details.")]]
+C10_DEPRECATED_MESSAGE("AT_ASSERT is deprecated, if you mean to indicate an
+internal invariant failure, use " \
+"TORCH_INTERNAL_ASSERT instead; if you mean to do user error checking, use " \
+"TORCH_CHECK. See
+https://github.com/pytorch/pytorch/issues/20287 for more details.")
 */
 inline void deprecated_AT_ASSERT() {}
 
 /*
 // Deprecation disabled until we fix sites in our codebase
-[[deprecated("AT_ASSERTM is deprecated, if you mean to indicate an
-internal invariant failure, use " \
-"TORCH_INTERNAL_ASSERT instead; if you mean to do user error checking, use " \
-"TORCH_CHECK. See
-https://github.com/pytorch/pytorch/issues/20287 for more details.")]]
+C10_DEPRECATED_MESSAGE("AT_ASSERTM is deprecated, if you mean to indicate an
+internal invariant failure, use " \
+"TORCH_INTERNAL_ASSERT instead; if you mean to do user error checking, use " \
+"TORCH_CHECK. See
+https://github.com/pytorch/pytorch/issues/20287 for more details.")
 */
 inline void deprecated_AT_ASSERTM() {}
diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp
index f9ce186cbe19..9a4dd3f46a68 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp
@@ -540,7 +540,7 @@ class TORCH_API ProcessGroupNCCL : public Backend {
   // This constructor includes the deprecated `groupName` argument.
   // If you have existing code that uses the `groupName`, you can replace
   // it by specifying a `c10d::PrefixStore(groupName, store)` for store.
-  [[deprecated]] ProcessGroupNCCL(
+  C10_DEPRECATED ProcessGroupNCCL(
       const c10::intrusive_ptr<Store>& store,
       int rank,
      int size,
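Usage note (a minimal sketch, not part of the patch above): the two macros this patch restores are consumed as shown below, assuming a C++14 host compiler. The names `legacy_resize` and `LegacyIndexList` are hypothetical, chosen only to illustrate the pattern; real call sites are the ones edited in the diff (e.g. `IntList` in c10/util/ArrayRef.h).

#include <cstdint>
#include <vector>
#include <c10/util/Deprecated.h>

// Function deprecation with a message: expands to [[deprecated("...")]] on
// conforming compilers, with __attribute__((deprecated)) /
// __declspec(deprecated) fallbacks as selected in c10/util/Deprecated.h.
C10_DEPRECATED_MESSAGE("legacy_resize is deprecated; use resize_ instead.")
inline void legacy_resize(int64_t n) { (void)n; }

// Type-alias deprecation: the dedicated macro exists because
// `using Alias [[deprecated]] = T;` does not compile under nvcc and some
// MSVC versions, so the header picks a working spelling per toolchain.
// No trailing semicolon; the macro expansion supplies it. The aliased type
// must not contain a top-level comma, or it would split the macro arguments.
C10_DEFINE_DEPRECATED_USING(LegacyIndexList, std::vector<int64_t>)

inline void caller() {
  legacy_resize(8);     // warning: 'legacy_resize' is deprecated: ...
  LegacyIndexList idx;  // warning: 'LegacyIndexList' is deprecated
  idx.push_back(3);
}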