Revert "Remove C10_DEPRECATED (#138406)"
This reverts commit 70ec86d7542d461ff6f01ba1a1c9a4f38637af8e. Reverted https://github.com/pytorch/pytorch/pull/138406 on behalf of https://github.com/wdvr due to failing internal tests - see D64714374 ([comment](https://github.com/pytorch/pytorch/pull/138406#issuecomment-2429912896))
@@ -13,12 +13,10 @@ namespace at {
 TORCH_API ScalarType toScalarType(const DLDataType& dtype);
 TORCH_API DLManagedTensor* toDLPack(const Tensor& src);
 TORCH_API Tensor fromDLPack(DLManagedTensor* src);
-
-[[deprecated("Please migrate to a non-const variant")]] inline Tensor fromDLPack(
-    const DLManagedTensor* src) {
+C10_DEPRECATED_MESSAGE("Please migrate to a non-const variant")
+inline Tensor fromDLPack(const DLManagedTensor* src) {
   return fromDLPack(const_cast<DLManagedTensor*>(src));
 }
-
 TORCH_API Tensor
 fromDLPack(DLManagedTensor* src, std::function<void(void*)> deleter);
 TORCH_API DLDataType getDLDataType(const Tensor& t);
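Both spellings expand to the same standard attribute on modern compilers; the difference is where the attribute may appear, which is why the revert also re-flows the declaration. A minimal standalone sketch of the const-shim pattern in this hunk (hypothetical names, not the PyTorch source):

#include <cstdio>

// Stand-in for C10_DEPRECATED_MESSAGE on a C++14 compiler.
#define MY_DEPRECATED_MESSAGE(msg) [[deprecated(msg)]]

struct ManagedTensor { int payload; };

// The non-const variant callers should migrate to.
inline ManagedTensor* consume(ManagedTensor* src) { return src; }

MY_DEPRECATED_MESSAGE("Please migrate to a non-const variant")
inline ManagedTensor* consume(const ManagedTensor* src) {
  // The shim only strips const and delegates, mirroring fromDLPack above.
  return consume(const_cast<ManagedTensor*>(src));
}

int main() {
  ManagedTensor t{42};
  const ManagedTensor* p = &t;
  std::printf("%d\n", consume(p)->payload);  // warns at the call site
}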
@@ -104,23 +104,23 @@ inline at::ScalarType scalar_type(at::ScalarType s) {
   return s;
 }
 
-[[deprecated(
+C10_DEPRECATED_MESSAGE(
     "passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, "
-    "pass an at::ScalarType instead")]] inline at::ScalarType
-scalar_type(const at::DeprecatedTypeProperties& t) {
+    "pass an at::ScalarType instead")
+inline at::ScalarType scalar_type(const at::DeprecatedTypeProperties& t) {
   return t.scalarType();
 }
 
-[[deprecated(
+C10_DEPRECATED_MESSAGE(
     "AT_DISPATCH_ALL_TYPES_AND_HALF is deprecated, "
-    "use AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, ...) instead")]] inline void
-deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF() {}
+    "use AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, ...) instead")
+inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF() {}
 
-[[deprecated(
+C10_DEPRECATED_MESSAGE(
     "AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX is deprecated, "
     "use AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(at::ScalarType::Half, ...) "
-    "instead")]] inline void
-deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX() {}
+    "instead")
+inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX() {}
 
 } // namespace detail
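The deprecation stubs above exist only so that stale call sites get a readable message. The migration they point to looks roughly like this sketch, assuming a libtorch build (sum_all is a hypothetical wrapper):

#include <ATen/ATen.h>
#include <ATen/Dispatch.h>

// Pass an at::ScalarType to the dispatch macro, and spell Half support via
// AT_DISPATCH_ALL_TYPES_AND instead of the deprecated ..._AND_HALF macro.
double sum_all(const at::Tensor& input) {
  at::Tensor t = input.contiguous();
  double acc = 0;
  AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, t.scalar_type(), "sum_all", [&] {
    const scalar_t* data = t.data_ptr<scalar_t>();
    for (int64_t i = 0; i < t.numel(); ++i) {
      acc += static_cast<double>(data[i]);
    }
  });
  return acc;
}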
@@ -23,37 +23,36 @@ TORCH_API bool is_autocast_cache_enabled();
 TORCH_API void set_autocast_cache_enabled(bool enabled);
 
 // deprecated CUDA-specific autocast APIs
-[[deprecated(
-    "at::autocast::is_enabled() is deprecated. Please use at::autocast::is_autocast_enabled(at::kCUDA) instead.")]] TORCH_API inline bool
-is_enabled() {
+C10_DEPRECATED_MESSAGE(
+    "at::autocast::is_enabled() is deprecated. Please use at::autocast::is_autocast_enabled(at::kCUDA) instead.")
+TORCH_API inline bool is_enabled() {
   TORCH_WARN_DEPRECATION(
       "at::autocast::",
       __func__,
       "() is deprecated. Please use at::autocast::is_autocast_enabled(at::kCUDA) instead.")
   return is_autocast_enabled(at::kCUDA);
 }
-[[deprecated(
-    "at::autocast::set_enabled(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(at::kCUDA, enabled) instead.")]] TORCH_API inline void
-set_enabled(bool enabled) {
+C10_DEPRECATED_MESSAGE(
+    "at::autocast::set_enabled(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(at::kCUDA, enabled) instead.")
+TORCH_API inline void set_enabled(bool enabled) {
   TORCH_WARN_DEPRECATION(
       "at::autocast::",
       __func__,
       "(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(at::kCUDA, enabled) instead.")
   set_autocast_enabled(at::kCUDA, enabled);
 }
-[[deprecated(
-    "at::autocast::get_autocast_gpu_dtype() is deprecated. Please use at::autocast::get_autocast_dtype(at::kCUDA) instead.")]] TORCH_API inline at::
-    ScalarType
-get_autocast_gpu_dtype() {
+C10_DEPRECATED_MESSAGE(
+    "at::autocast::get_autocast_gpu_dtype() is deprecated. Please use at::autocast::get_autocast_dtype(at::kCUDA) instead.")
+TORCH_API inline at::ScalarType get_autocast_gpu_dtype() {
   TORCH_WARN_DEPRECATION(
       "at::autocast::",
       __func__,
       "() is deprecated. Please use at::autocast::get_autocast_dtype(at::kCUDA) instead.")
   return get_autocast_dtype(at::kCUDA);
 }
-[[deprecated(
-    "at::autocast::set_autocast_gpu_dtype(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(at::kCUDA, dtype) instead.")]] TORCH_API inline void
-set_autocast_gpu_dtype(at::ScalarType dtype) {
+C10_DEPRECATED_MESSAGE(
+    "at::autocast::set_autocast_gpu_dtype(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(at::kCUDA, dtype) instead.")
+TORCH_API inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
   TORCH_WARN_DEPRECATION(
       "at::autocast::",
       __func__,
@@ -62,10 +61,11 @@ set_autocast_gpu_dtype(at::ScalarType dtype) {
 }
 
 #define DECLARE_DEPRECATED_AUTOCAST_APIS(name, device_type)                  \
-  [[deprecated(                                                              \
+  C10_DEPRECATED_MESSAGE(                                                    \
       "at::autocast::is_" #name                                              \
       "_enabled() is deprecated. Please use at::autocast::is_autocast_enabled(" #device_type \
-      ") instead.")]] TORCH_API inline bool is_##name##_enabled() {          \
+      ") instead.")                                                          \
+  TORCH_API inline bool is_##name##_enabled() {                              \
     TORCH_WARN_DEPRECATION(                                                  \
         "at::autocast::",                                                    \
        __func__,                                                            \
@@ -74,11 +74,11 @@ set_autocast_gpu_dtype(at::ScalarType dtype) {
     return is_autocast_enabled(device_type);                                 \
   }                                                                          \
                                                                              \
-  [[deprecated(                                                              \
+  C10_DEPRECATED_MESSAGE(                                                    \
       "at::autocast::set_" #name                                             \
       "_enabled(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(" #device_type \
-      ", enabled) instead.")]] TORCH_API inline void                         \
-  set_##name##_enabled(bool enabled) {                                       \
+      ", enabled) instead.")                                                 \
+  TORCH_API inline void set_##name##_enabled(bool enabled) {                 \
     TORCH_WARN_DEPRECATION(                                                  \
         "at::autocast::",                                                    \
         __func__,                                                            \
@@ -87,11 +87,11 @@ set_autocast_gpu_dtype(at::ScalarType dtype) {
     set_autocast_enabled(device_type, enabled);                              \
   }                                                                          \
                                                                              \
-  [[deprecated(                                                              \
+  C10_DEPRECATED_MESSAGE(                                                    \
       "at::autocast::get_autocast_" #name                                    \
       "_dtype() is deprecated. Please use at::autocast::get_autocast_dtype(" #device_type \
-      ") instead.")]] TORCH_API inline at::ScalarType                        \
-  get_autocast_##name##_dtype() {                                            \
+      ") instead.")                                                          \
+  TORCH_API inline at::ScalarType get_autocast_##name##_dtype() {            \
     TORCH_WARN_DEPRECATION(                                                  \
         "at::autocast::",                                                    \
         __func__,                                                            \
@@ -100,11 +100,11 @@ set_autocast_gpu_dtype(at::ScalarType dtype) {
     return get_autocast_dtype(device_type);                                  \
   }                                                                          \
                                                                              \
-  [[deprecated(                                                              \
+  C10_DEPRECATED_MESSAGE(                                                    \
       "at::autocast::set_autocast_" #name                                    \
       "_dtype(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(" #device_type \
-      ", dtype) instead.")]] TORCH_API inline void                           \
-  set_autocast_##name##_dtype(at::ScalarType dtype) {                        \
+      ", dtype) instead.")                                                   \
+  TORCH_API inline void set_autocast_##name##_dtype(at::ScalarType dtype) {  \
     TORCH_WARN_DEPRECATION(                                                  \
         "at::autocast::",                                                    \
         __func__,                                                            \
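Every deprecated entry point above forwards to the device-generic API after warning, so runtime behavior is unchanged; only the spelling moves. A sketch of the migrated call sites (libtorch assumed; configure_autocast is hypothetical):

#include <ATen/autocast_mode.h>

void configure_autocast() {
  // was: at::autocast::set_enabled(true) and set_autocast_gpu_dtype(...)
  at::autocast::set_autocast_enabled(at::kCUDA, true);
  at::autocast::set_autocast_dtype(at::kCUDA, at::kBFloat16);
  bool on = at::autocast::is_autocast_enabled(at::kCUDA);  // was is_enabled()
  (void)on;
}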
@@ -263,8 +263,9 @@ public:
 // Can't put this directly into the macro function args because of commas
 #define AT_X GenericPackedTensorAccessor<T, N, PtrTraits, index_t>
 
 // Old name for `GenericPackedTensorAccessor`
 template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
-using PackedTensorAccessor [[deprecated("Old name for `GenericPackedTensorAccessor`")]] = AT_X;
+C10_DEFINE_DEPRECATED_USING(PackedTensorAccessor, AT_X)
 
 #undef AT_X
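The macro matters here because `using X [[deprecated]] = Y;` is rejected by some toolchains (notably nvcc), while C10_DEFINE_DEPRECATED_USING can degrade to a plain alias; the full dispatch appears in the c10/util/Deprecated.h hunk later in this diff. A simplified standalone sketch of the idea (hypothetical names):

// Degrade to an undecorated alias under nvcc, warn everywhere else
// (simplified; the real logic lives in c10/util/Deprecated.h).
#if defined(__CUDACC__)
#define DEFINE_DEPRECATED_USING(TypeName, TypeThingy) using TypeName = TypeThingy;
#else
#define DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
  using TypeName [[deprecated]] = TypeThingy;
#endif

template <typename T>
struct GenericAccessor { T* data; };

DEFINE_DEPRECATED_USING(Accessor, GenericAccessor<float>)

int main() {
  Accessor a{nullptr};  // warns on host compilers, compiles silently under nvcc
  (void)a;
  return 0;
}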
@@ -809,9 +809,12 @@ struct TORCH_API IValue final {
   IValue(c10::Dict<Key, Value> v);
 
   template <class Key, class Value>
-  [[deprecated(
-      "IValues based on std::unordered_map<K, V> are slow and deprecated. Please use c10::Dict<K, V> instead.")]]
-  IValue(std::unordered_map<Key, Value> v);
+  /// \cond
+  /// DOXYGEN_CANNOT_HANDLE_CONSTRUCTORS_WITH_MACROS_SO_EXCLUDE_THIS_LINE_FROM_DOXYGEN
+  C10_DEPRECATED_MESSAGE(
+      "IValues based on std::unordered_map<K, V> are slow and deprecated. Please use c10::Dict<K, V> instead.")
+  /// \endcond
+  IValue(std::unordered_map<Key, Value> v);
 
   template <class T, enable_if_ivalue_constructible<T> = nullptr>
   IValue(std::optional<T> v);
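The replacement the message asks for is direct construction from c10::Dict; a sketch assuming a libtorch build (make_config is hypothetical):

#include <ATen/core/Dict.h>
#include <ATen/core/ivalue.h>
#include <string>

c10::IValue make_config() {
  // was: c10::IValue(std::unordered_map<std::string, int64_t>{...})
  c10::Dict<std::string, int64_t> dict;
  dict.insert("num_layers", 12);
  dict.insert("hidden_dim", 768);
  return c10::IValue(std::move(dict));
}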
@@ -1771,8 +1771,8 @@ struct _fake_type {};
 template <class Elem>
 // TODO this is deprecated but we don't throw a warning because a lot of ops in
 // native_functions.yaml still return std::vector.
-// [[deprecated("IValues based on std::vector<T> are potentially slow
-// and deprecated. Please use torch::List<T> instead.")]]
+// C10_DEPRECATED_MESSAGE("IValues based on std::vector<T> are potentially slow
+// and deprecated. Please use torch::List<T> instead.")
 std::vector<Elem> generic_to(IValue ivalue, _fake_type<std::vector<Elem>>) {
   // We need to do a deep copy of the vector because there might be other
   // references to this same IValue that also use the list. We can't just
@@ -1908,8 +1908,8 @@ c10::Dict<Key, Value> generic_to(
 }
 
 template <typename K, typename V>
-[[deprecated(
-    "IValues based on std::unordered_map are slow and deprecated. Please use c10::Dict<K, V> instead.")]]
+C10_DEPRECATED_MESSAGE(
+    "IValues based on std::unordered_map are slow and deprecated. Please use c10::Dict<K, V> instead.")
 std::unordered_map<K, V> generic_to(
     IValue ivalue,
     _fake_type<std::unordered_map<K, V>>) {
@@ -562,7 +562,7 @@ public:
   }
 
   template<class Lambda>
-  [[deprecated("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.")]]
+  C10_DEPRECATED_MESSAGE("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.")
   // enable_if: only enable it if Lambda is actually a functor but not a stateless lambda
   std::enable_if_t<guts::is_functor<Lambda>::value && !guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
   op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
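The recommended alternative is a capture-free lambda (or a functor class when state is genuinely needed). A sketch using the legacy registration API (libtorch assumed; the schema name myops::add_one is hypothetical):

#include <torch/script.h>

// Stateless lambda: no capture list, so kernel behavior cannot depend on
// registration-time state.
static auto registry = torch::RegisterOperators().op(
    "myops::add_one(Tensor t) -> Tensor",
    [](at::Tensor t) { return t + 1; });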
@@ -21,7 +21,7 @@ class Operation {
  public:
   template <typename F,
             std::enable_if_t<accepts<F, Stack*>::value, int> = 0>
-  [[deprecated("Please use void(Stack&) to register operator instead.")]]
+  C10_DEPRECATED_MESSAGE("Please use void(Stack&) to register operator instead.")
   // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
   Operation(F&& raw): op_([raw = std::forward<F>(raw)](Stack& stack) {
     raw(&stack);
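The constructor kept here adapts old void(Stack*) callables; new code passes void(Stack&) directly. A sketch (libtorch assumed; it also assumes Operation and Stack are reachable as c10::Operation and c10::Stack from this header):

#include <ATen/core/stack.h>

c10::Operation make_noop() {
  // was: c10::Operation([](c10::Stack* stack) { ... })
  return c10::Operation([](c10::Stack& stack) {
    (void)stack;  // a real op would pop inputs and push outputs here
  });
}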
@@ -45,10 +45,10 @@ __device__ inline int get_interval(accscalar_t sample,
 
 template <typename scalar_t>
 __global__ void fractional_max_pool2d_out_cuda_frame(
-    GenericPackedTensorAccessor<scalar_t, 4> output,
-    GenericPackedTensorAccessor<int64_t, 4> indices,
-    GenericPackedTensorAccessor<const scalar_t, 4> input,
-    GenericPackedTensorAccessor<const scalar_t, 3> samples,
+    PackedTensorAccessor<scalar_t, 4> output,
+    PackedTensorAccessor<int64_t, 4> indices,
+    PackedTensorAccessor<const scalar_t, 4> input,
+    PackedTensorAccessor<const scalar_t, 3> samples,
     int poolSizeH, int poolSizeW) {
 
   using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
@@ -102,9 +102,9 @@ __global__ void fractional_max_pool2d_out_cuda_frame(
 
 template <typename scalar_t>
 __global__ void fractional_max_pool2d_backward_out_cuda_frame(
-    GenericPackedTensorAccessor<scalar_t, 4> gradInput,
-    GenericPackedTensorAccessor<const scalar_t, 4> gradOutput,
-    GenericPackedTensorAccessor<const int64_t, 4> indices) {
+    PackedTensorAccessor<scalar_t, 4> gradInput,
+    PackedTensorAccessor<const scalar_t, 4> gradOutput,
+    PackedTensorAccessor<const int64_t, 4> indices) {
   // Output (h, w) point that this thread is responsible for
   int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
   int plane = blockIdx.y;
@@ -37,8 +37,8 @@ __global__ void upsample_bilinear2d_out_frame(
     const accscalar_t rheight,
     const accscalar_t rwidth,
     const bool align_corners,
-    const GenericPackedTensorAccessor<const scalar_t, 4> idata,
-    GenericPackedTensorAccessor<scalar_t, 4> odata) {
+    const PackedTensorAccessor<const scalar_t, 4> idata,
+    PackedTensorAccessor<scalar_t, 4> odata) {
   int index = threadIdx.x + blockIdx.x * blockDim.x;
 
   const int batchsize = idata.size(0);
@@ -95,8 +95,8 @@ __global__ void cuda_sparse_coo_softmax_kernel(
     int64_t* pool_offsets,
     int64_t nvalues,
     scalar_t* mx_rows,
-    GenericPackedTensorAccessor<scalar_t, 2> input_values_acc,
-    GenericPackedTensorAccessor<scalar_t, 2> output_values_acc) {
+    PackedTensorAccessor<scalar_t, 2> input_values_acc,
+    PackedTensorAccessor<scalar_t, 2> output_values_acc) {
   /*
     See ATen/native/sparse/SoftMax.cpp:cpu_sparse_coo_softmax for the CPU
     implementation of the sparse softmax algorithm that this implementation is
@@ -156,9 +156,9 @@ __global__ void cuda_sparse_coo_softmax_backward_kernel(
     int64_t* grad_offsets,
     int64_t* out_offsets,
     int64_t* lower_bound_values,
-    GenericPackedTensorAccessor<scalar_t, 2> values_accessor,
-    GenericPackedTensorAccessor<scalar_t, 2> out_values_accessor,
-    GenericPackedTensorAccessor<scalar_t, 2> grad_values_accessor) {
+    PackedTensorAccessor<scalar_t, 2> values_accessor,
+    PackedTensorAccessor<scalar_t, 2> out_values_accessor,
+    PackedTensorAccessor<scalar_t, 2> grad_values_accessor) {
   /*
     See ATen/native/sparse/SoftMax.cpp:cpu_sparse_coo_softmax_backward for
     the CPU implementation of the sparse softmax backward algorithm that this
@@ -221,7 +221,7 @@ class TORCH_API Tensor: public TensorBase {
     return copy_(rhs);
   }
 
-  [[deprecated("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")]]
+  C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
   DeprecatedTypeProperties & type() const {
     return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
         dispatchKeyToBackend(legacyExtractDispatchKey(key_set())),
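Tensor.options() bundles the same information type() used to carry; a sketch of the migrated queries (libtorch assumed; describe is hypothetical):

#include <ATen/ATen.h>

void describe(const at::Tensor& t) {
  at::ScalarType st = t.scalar_type();  // was t.type().scalarType()
  at::Device dev = t.device();          // was inferred from t.type().backend()
  at::Tensor like = at::empty({2, 2}, t.options());  // drop-in for constructors
  (void)st; (void)dev; (void)like;
}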
@@ -237,13 +237,13 @@ class TORCH_API Tensor: public TensorBase {
     return to(options().device(backendToDeviceType(b)).layout(layout_from_backend(b)), /*non_blocking*/ false, /*copy*/ false);
   }
 
-  [[deprecated("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())")]]
+  C10_DEPRECATED_MESSAGE("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())")
   bool is_variable() const noexcept {
     return !at::impl::variable_excluded_from_dispatch();
   }
 
   template<typename T>
-  [[deprecated("Tensor.data<T>() is deprecated. Please use Tensor.data_ptr<T>() instead.")]]
+  C10_DEPRECATED_MESSAGE("Tensor.data<T>() is deprecated. Please use Tensor.data_ptr<T>() instead.")
   T * data() const {
     return data_ptr<T>();
   }
@@ -252,12 +252,12 @@ class TORCH_API Tensor: public TensorBase {
   T item() const;
 
   template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
-  [[deprecated("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")]]
+  C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
   GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() const & {
     return generic_packed_accessor<T,N,PtrTraits,index_t>();
   }
   template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
-  [[deprecated("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")]]
+  C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
   GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() && = delete;
 
   Tensor operator~() const {
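packed_accessor32/packed_accessor64 make the index width explicit, which is the point of the deprecation; a sketch (libtorch assumed; first_element is hypothetical):

#include <ATen/ATen.h>

float first_element(const at::Tensor& t) {
  // was: t.packed_accessor<float, 2>()
  auto acc = t.packed_accessor32<float, 2>();  // 32-bit indices; use
                                               // packed_accessor64 for large tensors
  return acc[0][0];
}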
@@ -191,9 +191,9 @@ class C10_API Scalar {
     return Tag::HAS_d == tag || Tag::HAS_sd == tag;
   }
 
-  [[deprecated(
-      "isIntegral is deprecated. Please use the overload with 'includeBool' parameter instead.")]] bool
-  isIntegral() const {
+  C10_DEPRECATED_MESSAGE(
+      "isIntegral is deprecated. Please use the overload with 'includeBool' parameter instead.")
+  bool isIntegral() const {
     return Tag::HAS_i == tag || Tag::HAS_si == tag || Tag::HAS_u == tag;
   }
   bool isIntegral(bool includeBool) const {
@@ -364,9 +364,9 @@ inline bool isIntegralType(ScalarType t, bool includeBool) {
   return isIntegral || (includeBool && t == ScalarType::Bool);
 }
 
-[[deprecated(
-    "isIntegralType is deprecated. Please use the overload with 'includeBool' parameter instead.")]] inline bool
-isIntegralType(ScalarType t) {
+C10_DEPRECATED_MESSAGE(
+    "isIntegralType is deprecated. Please use the overload with 'includeBool' parameter instead.")
+inline bool isIntegralType(ScalarType t) {
   return isIntegralType(t, /*includeBool=*/false);
 }
 
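Both overload sets keep the old zero-argument behavior reachable by passing includeBool=false explicitly; a sketch (libtorch assumed; both_integral is hypothetical):

#include <c10/core/Scalar.h>
#include <c10/core/ScalarType.h>

bool both_integral(const c10::Scalar& s, c10::ScalarType t) {
  // was: s.isIntegral() and c10::isIntegralType(t)
  return s.isIntegral(/*includeBool=*/false) &&
         c10::isIntegralType(t, /*includeBool=*/false);
}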
@@ -378,8 +378,8 @@ bool operator!=(c10::ArrayRef<T> a1, const std::vector<T>& a2) {
 
 using IntArrayRef = ArrayRef<int64_t>;
 
-using IntList [[deprecated(
-    "This alias is deprecated because it doesn't make ownership semantics obvious. Use IntArrayRef instead!")]] =
-    ArrayRef<int64_t>;
+// This alias is deprecated because it doesn't make ownership
+// semantics obvious. Use IntArrayRef instead!
+C10_DEFINE_DEPRECATED_USING(IntList, ArrayRef<int64_t>)
 
 } // namespace c10
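IntArrayRef is a non-owning view over contiguous int64_t values, which is exactly the ownership caveat the alias comment warns about; a sketch (libtorch assumed; product is hypothetical):

#include <c10/util/ArrayRef.h>
#include <cstdint>
#include <vector>

int64_t product(c10::IntArrayRef sizes) {  // was: c10::IntList
  int64_t p = 1;
  for (int64_t s : sizes) p *= s;
  return p;
}

int main() {
  std::vector<int64_t> sizes{2, 3, 4};
  // The vector converts implicitly and must outlive the ArrayRef view.
  return product(sizes) == 24 ? 0 : 1;
}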
@@ -1,10 +1,102 @@
 #pragma once
 
-#if !defined(FBCODE_CAFFE2) && !defined(C10_NO_DEPRECATED)
+/**
+ * This file provides portable macros for marking declarations
+ * as deprecated. You should generally use C10_DEPRECATED,
+ * except when marking 'using' declarations as deprecated,
+ * in which case you should use C10_DEFINE_DEPRECATED_USING
+ * (due to portability concerns).
+ */
+
+// Sample usage:
+//
+//    C10_DEPRECATED void bad_func();
+//    struct C10_DEPRECATED BadStruct {
+//      ...
+//    };
+
+// NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses
+// the "__declspec(deprecated)" implementation and not the C++14
+// "[[deprecated]]" attribute. We tried enabling "[[deprecated]]" for C++14 on
+// MSVC, but ran into issues with some older MSVC versions.
+#if (defined(__cplusplus) && __cplusplus >= 201402L)
+#define C10_DEPRECATED [[deprecated]]
+#define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
+#elif defined(__GNUC__)
+#define C10_DEPRECATED __attribute__((deprecated))
+// TODO Is there some way to implement this?
+#define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated))
+#elif defined(_MSC_VER)
+#define C10_DEPRECATED __declspec(deprecated)
+#define C10_DEPRECATED_MESSAGE(message) __declspec(deprecated(message))
+#else
+#warning "You need to implement C10_DEPRECATED for this compiler"
+#define C10_DEPRECATED
+#endif
+
+// Sample usage:
+//
+//    C10_DEFINE_DEPRECATED_USING(BadType, int)
+//
+// which is the portable version of
+//
+//    using BadType [[deprecated]] = int;
+
+// technically [[deprecated]] syntax is from c++14 standard, but it works in
+// many compilers.
+#if defined(__has_cpp_attribute)
+#if __has_cpp_attribute(deprecated) && !defined(__CUDACC__)
+#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
+  using TypeName [[deprecated]] = TypeThingy;
+#endif
+#endif
+
+#if defined(_MSC_VER)
+#if defined(__CUDACC__)
+// neither [[deprecated]] nor __declspec(deprecated) work on nvcc on Windows;
+// you get the error:
+//
+//    error: attribute does not apply to any entity
+//
+// So we just turn the macro off in this case.
+#if defined(C10_DEFINE_DEPRECATED_USING)
+#undef C10_DEFINE_DEPRECATED_USING
+#endif
+#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
+  using TypeName = TypeThingy;
+#else
+// [[deprecated]] does work in windows without nvcc, though msc doesn't support
+// `__has_cpp_attribute` when c++14 is supported, otherwise
+// __declspec(deprecated) is used as the alternative.
+#ifndef C10_DEFINE_DEPRECATED_USING
+#if defined(_MSVC_LANG) && _MSVC_LANG >= 201402L
+#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
+  using TypeName [[deprecated]] = TypeThingy;
+#else
+#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
+  using TypeName = __declspec(deprecated) TypeThingy;
+#endif
+#endif
+#endif
+#endif
+
+#if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__)
+// nvcc has a bug where it doesn't understand __attribute__((deprecated))
+// declarations even when the host compiler supports it. We'll only use this gcc
+// attribute when not cuda, and when using a GCC compiler that doesn't support
+// the c++14 syntax we checked for above (available in __GNUC__ >= 5)
+#if !defined(__CUDACC__)
+#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
+  using TypeName __attribute__((deprecated)) = TypeThingy;
+#else
+// using cuda + gcc < 5, neither deprecated syntax is available so turning off.
+#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
+  using TypeName = TypeThingy;
+#endif
+#endif
+
+#if !defined(C10_DEFINE_DEPRECATED_USING)
+#warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler"
+#define C10_DEFINE_DEPRECATED_USING
+#endif
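On any C++14 compiler all three macros collapse to the standard attribute, matching the header's sample usage; a standalone sketch of the resulting behavior (hypothetical C10ISH_ names so as not to collide with the real header):

#define C10ISH_DEPRECATED [[deprecated]]
#define C10ISH_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
#define C10ISH_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \
  using TypeName [[deprecated]] = TypeThingy;

C10ISH_DEPRECATED void bad_func() {}
struct C10ISH_DEPRECATED_MESSAGE("use GoodStruct instead") BadStruct {};
C10ISH_DEFINE_DEPRECATED_USING(BadType, int)

int main() {
  bad_func();     // warning: 'bad_func' is deprecated
  BadType x = 0;  // warning: 'BadType' is deprecated
  return x;
}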
@@ -689,28 +689,28 @@ namespace c10::detail {
 
 /*
 // Deprecation disabled until we fix sites in our codebase
-[[deprecated("AT_ERROR(msg) is deprecated, use TORCH_CHECK(false, msg)
-instead.")]]
+C10_DEPRECATED_MESSAGE("AT_ERROR(msg) is deprecated, use TORCH_CHECK(false, msg)
+instead.")
 */
 inline void deprecated_AT_ERROR() {}
 
 /*
 // Deprecation disabled until we fix sites in our codebase
-[[deprecated("AT_ASSERT is deprecated, if you mean to indicate an
+C10_DEPRECATED_MESSAGE("AT_ASSERT is deprecated, if you mean to indicate an
 internal invariant failure, use " \
 "TORCH_INTERNAL_ASSERT instead; if you mean to do user
 error checking, use " \ "TORCH_CHECK. See
-https://github.com/pytorch/pytorch/issues/20287 for more details.")]]
+https://github.com/pytorch/pytorch/issues/20287 for more details.")
 */
 inline void deprecated_AT_ASSERT() {}
 
 /*
 // Deprecation disabled until we fix sites in our codebase
-[[deprecated("AT_ASSERTM is deprecated, if you mean to indicate an
+C10_DEPRECATED_MESSAGE("AT_ASSERTM is deprecated, if you mean to indicate an
 internal invariant failure, use " \
 "TORCH_INTERNAL_ASSERT instead; if you mean to do user
 error checking, use " \ "TORCH_CHECK. See
-https://github.com/pytorch/pytorch/issues/20287 for more details.")]]
+https://github.com/pytorch/pytorch/issues/20287 for more details.")
 */
 inline void deprecated_AT_ASSERTM() {}
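The replacements named in these disabled messages are live macros today; a sketch of the mapping (libtorch assumed; checked_index is hypothetical):

#include <c10/util/Exception.h>
#include <cstdint>

int64_t checked_index(int64_t i, int64_t size) {
  TORCH_CHECK(size > 0, "size must be positive, got ", size);    // user error
  TORCH_INTERNAL_ASSERT(i >= 0, "index should be normalized");   // invariant
  if (i >= size) {
    TORCH_CHECK(false, "index ", i, " out of range for ", size); // was AT_ERROR(...)
  }
  return i;
}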
@@ -540,7 +540,7 @@ class TORCH_API ProcessGroupNCCL : public Backend {
   // This constructor includes the deprecated `groupName` argument.
   // If you have existing code that uses the `groupName`, you can replace
   // it by specifying a `c10d::PrefixStore(groupName, store)` for store.
-  [[deprecated]] ProcessGroupNCCL(
+  C10_DEPRECATED ProcessGroupNCCL(
       const c10::intrusive_ptr<Store>& store,
       int rank,
       int size,