Use C++17 Convention Methods in PyTorch (#137958)

Detailed Descriptions:
- `std::is_same<X, Y>::value` -> `std::is_same_v<X, Y>`
- `std::enable_if<C, T>::type` -> `std::enable_if_t<C, T>`
- and other `std::…::value` / `std::…::type` trait accesses rewritten with their C++17 `_v` / `_t` aliases (e.g. `std::is_integral_v`, `std::is_void_v`); a minimal before/after sketch follows this list
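
The list above is the whole change in miniature. For readers who have not used the C++17 shorthands: `std::is_same_v<X, Y>` is a variable template equal to `std::is_same<X, Y>::value`, and `std::enable_if_t<C, T>` is an alias template for `typename std::enable_if<C, T>::type`, so the rewrite drops the `::value`/`::type` access and the leading `typename` disambiguator without changing behavior. Below is a minimal sketch of the same SFINAE constraint written both ways; it is illustrative only and not taken from the PyTorch sources (`twice_old`/`twice_new` are made-up names).

```cpp
// Minimal sketch (assumed example, not PyTorch code): one SFINAE constraint
// spelled the pre-C++17 way and the C++17 way this commit switches to.
#include <type_traits>

// Before: explicit ::value and ::type, which also requires "typename".
template <typename T,
          typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
T twice_old(T x) {
  return x + x;
}

// After: std::is_integral_v / std::enable_if_t express the same constraint
// with less boilerplate and no leading "typename".
template <typename T,
          std::enable_if_t<std::is_integral_v<T>, int> = 0>
T twice_new(T x) {
  return x + x;
}

int main() {
  // Both overload sets accept integral types only and behave identically.
  return (twice_old(1) + twice_new(2) == 6) ? 0 : 1;
}
```

Both functions compile under `-std=c++17` and are interchangeable; the newer spelling is simply shorter, which is the entire scope of this commit.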

Pull Request resolved: https://github.com/pytorch/pytorch/pull/137958
Approved by: https://github.com/janeyx99
Authored by FFFrog on 2024-10-16 18:18:23 +08:00; committed by PyTorch MergeBot.
Parent: b7cf8fb800
Commit: ad28565ed7
20 changed files with 110 additions and 112 deletions

View File

@ -271,9 +271,9 @@ struct VecConvert<
1,
int64_t,
2,
typename std::enable_if<
std::enable_if_t<
std::is_same_v<dst_t, int8_t> ||
std::is_same_v<dst_t, uint8_t>>::type> {
std::is_same_v<dst_t, uint8_t>>> {
static inline VectorizedN<dst_t, 1> apply(
const VectorizedN<int64_t, 2>& src) {
return VecConvert<dst_t, 1, int32_t, 1>::apply(

View File

@ -2868,7 +2868,7 @@ std::pair<Vectorized<int64_t>, Vectorized<int64_t>> inline deinterleave2<
}
template <typename T>
typename std::enable_if<std::is_same<T, uint8_t>::value, at::vec::Vectorized<float>>::type
std::enable_if_t<std::is_same_v<T, uint8_t>, at::vec::Vectorized<float>>
inline convert_int8_to_float(const Vectorized<T> &src) {
// Note: this function only convert inputs number of elements equal to at::vec::Vectorized<float>.size()
// Only handle first 64 bits
@ -2878,7 +2878,7 @@ inline convert_int8_to_float(const Vectorized<T> &src) {
}
template <typename T>
typename std::enable_if<std::is_same<T, uint8_t>::value, at::vec::Vectorized<T>>::type
std::enable_if_t<std::is_same_v<T, uint8_t>, at::vec::Vectorized<T>>
inline convert_float_to_int8(const Vectorized<float> &src) {
constexpr auto min_val = std::numeric_limits<T>::min();
constexpr auto max_val = std::numeric_limits<T>::max();

View File

@ -281,9 +281,9 @@ struct VecConvert<
1,
int64_t,
2,
typename std::enable_if<
std::enable_if_t<
std::is_same_v<dst_t, int8_t> ||
std::is_same_v<dst_t, uint8_t>>::type> {
std::is_same_v<dst_t, uint8_t>>> {
static inline VectorizedN<dst_t, 1> apply(
const VectorizedN<int64_t, 2>& src) {
return VecConvert<dst_t, 1, int32_t, 1>::apply(

View File

@ -84,9 +84,9 @@ struct VecMaskLoad<
dst_n,
mask_t,
dst_n,
typename std::enable_if<
std::enable_if_t<
std::is_same_v<data_t, BFloat16> ||
std::is_same_v<data_t, Half>>::type> {
std::is_same_v<data_t, Half>>> {
static inline VectorizedN<data_t, dst_n> apply(
const data_t* ptr,
const VecMask<mask_t, dst_n>& vec_mask) {
@ -151,9 +151,9 @@ struct VecMaskLoad<
1,
mask_t,
1,
typename std::enable_if<
std::enable_if_t<
std::is_same_v<data_t, int8_t> ||
std::is_same_v<data_t, uint8_t>>::type> {
std::is_same_v<data_t, uint8_t>>> {
static inline VectorizedN<data_t, 1> apply(
const data_t* ptr,
const VecMask<mask_t, 1>& vec_mask) {
@ -173,9 +173,9 @@ struct VecMaskLoad<
2,
mask_t,
1,
typename std::enable_if<
std::enable_if_t<
std::is_same_v<data_t, int64_t> ||
std::is_same_v<data_t, double>>::type> {
std::is_same_v<data_t, double>>> {
static inline VectorizedN<data_t, 2> apply(
const data_t* ptr,
const VecMask<mask_t, 1>& vec_mask) {

View File

@ -106,7 +106,7 @@ bool is_fast_path(const Tensor& src, const std::optional<Tensor>& scale, Tensor&
// index_add (using add_indices as the index), without creating an intermediary
// tensor to hold the selected embeddings
template <typename data_t, typename index_t>
static typename std::enable_if<std::is_same<data_t, double>::value, void>::type
static std::enable_if_t<std::is_same_v<data_t, double>, void>
index_select_add(
const Tensor& select_indices,
const Tensor& add_indices,
@ -184,10 +184,9 @@ void fbgemm_spmdm_report_error_(
} // namespace
template <typename data_t, typename index_t>
typename std::enable_if<
std::is_same<data_t, at::Half>::value ||
std::is_same<data_t, at::BFloat16>::value,
void>::type
std::enable_if_t<
std::is_same_v<data_t, at::Half> || std::is_same_v<data_t, at::BFloat16>,
void>
index_select_add(
const Tensor& select_indices,
const Tensor& add_indices,
@ -366,7 +365,7 @@ index_select_add(
}
}
template<typename data_t, typename index_t>
typename std::enable_if<std::is_same<data_t, float>::value, void>::type
std::enable_if_t<std::is_same_v<data_t, float>, void>
index_select_add(const Tensor &select_indices,
const Tensor &add_indices,
const Tensor &src,
@ -493,7 +492,7 @@ index_select_add(const Tensor &select_indices,
// mul (scaling by per_sample_weights)
// index_add (using add_indices as the index)
template <typename data_t, typename index_t>
static typename std::enable_if<std::is_same<data_t, double>::value, void>::type
static std::enable_if_t<std::is_same_v<data_t, double>, void>
index_select_scale_add(
const Tensor& select_indices,
const Tensor& add_indices,
@ -548,10 +547,9 @@ index_select_scale_add(
}
template <typename data_t, typename index_t>
typename std::enable_if<
std::is_same<data_t, at::Half>::value ||
std::is_same<data_t, at::BFloat16>::value,
void>::type
std::enable_if_t<
std::is_same_v<data_t, at::Half> || std::is_same_v<data_t, at::BFloat16>,
void>
index_select_scale_add(
const Tensor& select_indices,
const Tensor& add_indices,
@ -741,7 +739,7 @@ index_select_scale_add(
}
}
template<typename data_t, typename index_t>
typename std::enable_if<std::is_same<data_t, float>::value, void>::type
std::enable_if_t<std::is_same_v<data_t, float>, void>
index_select_scale_add(const Tensor &select_indices,
const Tensor &add_indices,
const Tensor &scale,

View File

@ -23,7 +23,7 @@ namespace native {
// e.g. since 2**-1==0.5, the truncated integral result is zero. 1**negative_exponent is the
// only non-zero result.
template <class T,
typename std::enable_if<std::is_integral<T>::value, T>::type* = nullptr>
std::enable_if_t<std::is_integral_v<T>, T>* = nullptr>
inline HOST_DEVICE __ubsan_ignore_signed_int_overflow__ T powi_impl(T a, T b) {
T result = 1;
while (b) {
@ -37,13 +37,13 @@ inline HOST_DEVICE __ubsan_ignore_signed_int_overflow__ T powi_impl(T a, T b) {
}
template <class T,
typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, T>::type* = nullptr>
std::enable_if_t<std::is_integral_v<T> && !std::is_signed_v<T>, T>* = nullptr>
inline HOST_DEVICE T powi(T a, T b) {
return powi_impl(a, b);
}
template <class T,
typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, T>::type* = nullptr>
std::enable_if_t<std::is_integral_v<T> && std::is_signed_v<T>, T>* = nullptr>
inline HOST_DEVICE T powi(T a, T b) {
if ( b < 0 ) {
if ( a == 1 ) {

View File

@ -753,11 +753,11 @@ Tensor cumprod_backward(const Tensor& grad, const Tensor& input, int64_t dim, co
namespace {
#ifdef _MSC_VER
template<typename T>
inline typename std::enable_if<std::is_integral<T>::value, bool>::type isnan_(T x) {
inline std::enable_if_t<std::is_integral_v<T>, bool> isnan_(T x) {
return false;
}
template<typename T>
inline typename std::enable_if<!std::is_integral<T>::value, bool>::type isnan_(T x) {
inline std::enable_if_t<!std::is_integral_v<T>, bool> isnan_(T x) {
return std::isnan(x);
}
#else

View File

@ -96,7 +96,7 @@ auto sum(int64_t N, Func f) {
}
template <typename scalar_t, typename opmath_t>
typename std::enable_if<std::is_same<scalar_t, opmath_t>::value, void>::type
std::enable_if_t<std::is_same_v<scalar_t, opmath_t>, void>
gemm_notrans_(
int64_t m,
int64_t n,
@ -132,7 +132,7 @@ gemm_notrans_(
// std::is_same<scalar_t, at::BFloat16> || std::is_same<scalar_t, at::Half>
template <typename scalar_t, typename opmath_t>
typename std::enable_if<!std::is_same<scalar_t, opmath_t>::value, void>::type
std::enable_if_t<!std::is_same_v<scalar_t, opmath_t>, void>
gemm_notrans_(
int64_t m,
int64_t n,
@ -222,7 +222,7 @@ void gemm_transb_impl(
}
template <typename scalar_t, typename opmath_t>
typename std::enable_if<std::is_same<scalar_t, opmath_t>::value, void>::type
std::enable_if_t<std::is_same_v<scalar_t, opmath_t>, void>
gemm_transb_(
TransposeType transb,
int64_t m,
@ -244,7 +244,7 @@ gemm_transb_(
// std::is_same<scalar_t, at::BFloat16> || std::is_same<scalar_t, at::Half>
template <typename scalar_t, typename opmath_t>
typename std::enable_if<!std::is_same<scalar_t, opmath_t>::value, void>::type
std::enable_if_t<!std::is_same_v<scalar_t, opmath_t>, void>
gemm_transb_(
TransposeType transb,
int64_t m,

View File

@ -12,10 +12,10 @@ namespace at::native {
namespace{
template <typename scalar_t, typename opmath_t>
typename std::enable_if<
std::is_same<scalar_t, Half>::value || std::is_same<scalar_t, BFloat16>::value,
void>::
type inline adagrad_math(
std::enable_if_t<
std::is_same_v<scalar_t, Half> || std::is_same_v<scalar_t, BFloat16>,
void>
inline adagrad_math(
scalar_t* param_ptr,
scalar_t* grad_ptr,
scalar_t* state_sum_ptr,
@ -81,10 +81,10 @@ typename std::enable_if<
template <typename scalar_t, typename opmath_t>
typename std::enable_if<
std::is_same<scalar_t, float>::value || std::is_same<scalar_t, double>::value,
void>::
type inline adagrad_math(
std::enable_if_t<
std::is_same_v<scalar_t, float> || std::is_same_v<scalar_t, double>,
void>
inline adagrad_math(
scalar_t* param_ptr,
scalar_t* grad_ptr,
scalar_t* state_sum_ptr,

View File

@ -12,10 +12,10 @@ namespace at::native {
namespace{
template <typename scalar_t, typename opmath_t, ADAM_MODE adam_mode>
typename std::enable_if<
std::is_same<scalar_t, Half>::value || std::is_same<scalar_t, BFloat16>::value,
void>::
type inline adam_math(
std::enable_if_t<
std::is_same_v<scalar_t, Half> || std::is_same_v<scalar_t, BFloat16>,
void>
inline adam_math(
scalar_t* param_ptr,
scalar_t* exp_avg_ptr,
scalar_t* exp_avg_sq_ptr,
@ -155,10 +155,10 @@ typename std::enable_if<
template <typename scalar_t, typename opmath_t, ADAM_MODE adam_mode>
typename std::enable_if<
std::is_same<scalar_t, float>::value || std::is_same<scalar_t, double>::value,
void>::
type inline adam_math(
std::enable_if_t<
std::is_same_v<scalar_t, float> || std::is_same_v<scalar_t, double>,
void>
inline adam_math(
scalar_t* param_ptr,
scalar_t* exp_avg_ptr,
scalar_t* exp_avg_sq_ptr,

View File

@ -12,10 +12,10 @@ namespace at::native {
namespace{
template <typename scalar_t, typename opmath_t>
typename std::enable_if<
std::is_same<scalar_t, Half>::value || std::is_same<scalar_t, BFloat16>::value,
void>::
type inline sgd_math(
std::enable_if_t<
std::is_same_v<scalar_t, Half> || std::is_same_v<scalar_t, BFloat16>,
void>
inline sgd_math(
scalar_t* param_ptr,
scalar_t* grad_ptr,
scalar_t* momentum_buf_ptr,
@ -104,10 +104,10 @@ typename std::enable_if<
template <typename scalar_t, typename opmath_t>
typename std::enable_if<
std::is_same<scalar_t, float>::value || std::is_same<scalar_t, double>::value,
void>::
type inline sgd_math(
std::enable_if_t<
std::is_same_v<scalar_t, float> || std::is_same_v<scalar_t, double>,
void>
inline sgd_math(
scalar_t* param_ptr,
scalar_t* grad_ptr,
scalar_t* momentum_buf_ptr,

View File

@ -31,14 +31,16 @@ struct IsContiguous<0, -1, traits, s> {
};
// output and all inputs are contiguous
template <typename traits,
typename std::enable_if<std::is_void<typename traits::result_type>::value>::type* = nullptr>
template <
typename traits,
std::enable_if_t<std::is_void_v<typename traits::result_type>>* =
nullptr>
static inline bool is_contiguous(const int64_t* strides) {
return IsContiguous<traits::arity, traits::arity - 1, traits>::eval(strides);
}
template <typename traits,
typename std::enable_if<!std::is_void<typename traits::result_type>::value>::type* = nullptr>
std::enable_if_t<!std::is_void_v<typename traits::result_type>>* = nullptr>
static inline bool is_contiguous(const int64_t* strides) {
return IsContiguous<traits::arity, traits::arity, traits>::eval(strides);
}
@ -46,14 +48,14 @@ static inline bool is_contiguous(const int64_t* strides) {
// input at `s` is scalar (stride 0); output and other inputs are contiguous
// NB: output is typically at strides[0] so first input corresponds to s=1
template <typename traits, int s,
typename std::enable_if<std::is_void<typename traits::result_type>::value>::type* = nullptr>
std::enable_if_t<std::is_void_v<typename traits::result_type>>* = nullptr>
static inline bool is_contiguous_scalar(const int64_t* strides) {
static_assert(s > 0 && s <= traits::arity, "scalar argument index out of bounds");
return IsContiguous<traits::arity, traits::arity - 1, traits, s>::eval(strides);
}
template <typename traits, int s,
typename std::enable_if<!std::is_void<typename traits::result_type>::value>::type* = nullptr>
std::enable_if_t<!std::is_void_v<typename traits::result_type>>* = nullptr>
static inline bool is_contiguous_scalar(const int64_t* strides) {
static_assert(s > 0 && s <= traits::arity, "scalar argument index out of bounds");
return IsContiguous<traits::arity, traits::arity, traits, s>::eval(strides);

View File

@ -64,7 +64,7 @@ vec::Vectorized<int64_t> is_nan_vec<int64_t>(vec::Vectorized<int64_t> vec) {
template <typename scalar_t, typename opmath_t>
inline
typename std::enable_if<std::is_same<scalar_t, opmath_t>::value, void>::type
std::enable_if_t<std::is_same_v<scalar_t, opmath_t>, void>
compute_internal(
const scalar_t* input_data,
scalar_t* out_data,
@ -139,7 +139,7 @@ compute_internal(
// std::is_same<scalar_t, at::BFloat16> || std::is_same<scalar_t, at::Half>
template <typename scalar_t, typename opmath_t>
inline
typename std::enable_if<!std::is_same<scalar_t, opmath_t>::value, void>::type
std::enable_if_t<!std::is_same_v<scalar_t, opmath_t>, void>
compute_internal(
const scalar_t* input_data,
scalar_t* out_data,

View File

@ -129,13 +129,13 @@ static void set_results(const res_t result, const TensorIteratorBase &iter, cons
}
template<typename traits, std::size_t i = 0, typename... tuple_t>
inline typename std::enable_if<i == sizeof...(tuple_t), std::size_t>::type
inline std::enable_if_t<i == sizeof...(tuple_t), std::size_t>
for_each_in_tuple(const std::tuple<tuple_t...>& /*t*/, const TensorIteratorBase& /*iter*/, const int /*num_outputs*/) {
return i;
}
template<typename traits, std::size_t i = 0, typename... tuple_t>
inline typename std::enable_if<i < sizeof...(tuple_t), std::size_t>::type
inline std::enable_if_t<i < sizeof...(tuple_t), std::size_t>
for_each_in_tuple(const std::tuple<tuple_t...>& t, const TensorIteratorBase &iter, const int num_outputs) {
if (i < (size_t)num_outputs) {
set_result<traits>(i, std::get<i>(t), iter, num_outputs);

View File

@ -106,7 +106,7 @@ inline void _init(scalar_t* self_ptr, at::opmath_type<scalar_t>* buffer_ptr, int
}
template <typename scalar_t>
inline typename std::enable_if<!std::is_same<scalar_t, Vec2>::value, scalar_t>::type
inline std::enable_if_t<!std::is_same_v<scalar_t, Vec2>, scalar_t>
_max(const scalar_t& x, const scalar_t& y) {
return at::_isnan(y) ? y : std::max(x, y);
}
@ -118,14 +118,14 @@ inline Vectorized<scalar_t> _max(const Vectorized<scalar_t>& x, const Vectorized
}
template <typename vec_t>
inline typename std::enable_if<std::is_same<vec_t, Vec2>::value, Vec2>::type
inline std::enable_if_t<std::is_same_v<vec_t, Vec2>, Vec2>
_max(const vec_t& x, const vec_t& y) {
// vec::maximum propagates NaN
return maximum(x, y);
}
template <typename scalar_t>
inline typename std::enable_if<!std::is_same<scalar_t, Vec2>::value, scalar_t>::type
inline std::enable_if_t<!std::is_same_v<scalar_t, Vec2>, scalar_t>
_min(const scalar_t& x, const scalar_t& y) {
return at::_isnan(y) ? y : std::min(x, y);
}
@ -137,7 +137,7 @@ inline Vectorized<scalar_t> _min(const Vectorized<scalar_t>& x, const Vectorized
}
template <typename vec_t>
inline typename std::enable_if<std::is_same<vec_t, Vec2>::value, Vec2>::type
inline std::enable_if_t<std::is_same_v<vec_t, Vec2>, Vec2>
_min(const vec_t& x, const vec_t& y) {
// vec::minimum propagates NaN
return minimum(x, y);

View File

@ -85,8 +85,8 @@ void GroupNormKernelImplInternal(
}
template <typename T>
typename std::enable_if<std::is_same<T, at::opmath_type<T>>::value,
std::tuple<T, T>>::type
std::enable_if_t<std::is_same_v<T, at::opmath_type<T>>,
std::tuple<T, T>>
ColumnwiseMoments(
const T* X_data,
int64_t HxW,
@ -118,8 +118,8 @@ ColumnwiseMoments(
// std::is_same<T, at::BFloat16> || std::is_same<T, at::Half>
template <typename T>
typename std::enable_if<!std::is_same<T, at::opmath_type<T>>::value,
std::tuple<at::opmath_type<T>, at::opmath_type<T>>>::type
std::enable_if_t<!std::is_same_v<T, at::opmath_type<T>>,
std::tuple<at::opmath_type<T>, at::opmath_type<T>>>
ColumnwiseMoments(
const T* X_data,
int64_t HxW,
@ -160,7 +160,7 @@ ColumnwiseMoments(
}
template <typename T, typename opmath_t>
inline typename std::enable_if<std::is_same<T, opmath_t>::value, void>::type
inline std::enable_if_t<std::is_same_v<T, opmath_t>, void>
CalcMeanVar(
const T* X_ptr,
opmath_t* mean_ptr,
@ -183,7 +183,7 @@ CalcMeanVar(
// std::is_same<T, at::BFloat16> || std::is_same<T, at::Half>
template <typename T, typename opmath_t>
inline typename std::enable_if<!std::is_same<T, opmath_t>::value, void>::type
inline std::enable_if_t<!std::is_same_v<T, opmath_t>, void>
CalcMeanVar(
const T* X_ptr,
opmath_t* mean_ptr,
@ -227,7 +227,7 @@ CalcMeanVar(
}
template <typename T, typename opmath_t>
inline typename std::enable_if<std::is_same<T, opmath_t>::value, void>::type
inline std::enable_if_t<std::is_same_v<T, opmath_t>, void>
ApplyScaleBias(
T* Y_ptr,
const T* X_ptr,
@ -246,7 +246,7 @@ ApplyScaleBias(
// std::is_same<T, at::BFloat16> || std::is_same<T, at::Half>
template <typename T, typename opmath_t>
inline typename std::enable_if<!std::is_same<T, opmath_t>::value, void>::type
inline std::enable_if_t<!std::is_same_v<T, opmath_t>, void>
ApplyScaleBias(
T* Y_ptr,
const T* X_ptr,
@ -529,7 +529,7 @@ void GroupNormKernelImpl(
template <typename T, typename opmath_t>
typename std::enable_if<std::is_same<T, opmath_t>::value, void>::type
std::enable_if_t<std::is_same_v<T, opmath_t>, void>
ComputeInternalGradients(
int64_t N,
int64_t C,
@ -556,7 +556,7 @@ ComputeInternalGradients(
}
template <typename T, typename opmath_t>
typename std::enable_if<!std::is_same<T, opmath_t>::value, void>::type
std::enable_if_t<!std::is_same_v<T, opmath_t>, void>
ComputeInternalGradients(
int64_t N,
int64_t C,
@ -603,7 +603,7 @@ ComputeInternalGradients(
}
template <typename PT, typename opmath_t>
inline typename std::enable_if<std::is_same<PT, opmath_t>::value, void>::type
inline std::enable_if_t<std::is_same_v<PT, opmath_t>, void>
CalcDsDb(
const opmath_t* ds_ptr,
const opmath_t* db_ptr,
@ -626,7 +626,7 @@ CalcDsDb(
}
template <typename PT, typename opmath_t>
inline typename std::enable_if<!std::is_same<PT, opmath_t>::value, void>::type
inline std::enable_if_t<!std::is_same_v<PT, opmath_t>, void>
CalcDsDb(
const opmath_t* ds_ptr,
const opmath_t* db_ptr,
@ -708,7 +708,7 @@ void GroupNormInputBackward(
}
template <typename PT, typename opmath_t>
typename std::enable_if<std::is_same<PT, opmath_t>::value, void>::type
std::enable_if_t<std::is_same_v<PT, opmath_t>, void>
GammaBackward(
int64_t N,
int64_t C,
@ -755,7 +755,7 @@ GammaBackward(
}
template <typename PT, typename opmath_t>
typename std::enable_if<!std::is_same<PT, opmath_t>::value, void>::type
std::enable_if_t<!std::is_same_v<PT, opmath_t>, void>
GammaBackward(
int64_t N,
int64_t C,
@ -817,7 +817,7 @@ GammaBackward(
}
template <typename PT, typename opmath_t>
typename std::enable_if<std::is_same<PT, opmath_t>::value, void>::type
std::enable_if_t<std::is_same_v<PT, opmath_t>, void>
BetaBackward(int64_t N, int64_t C, const opmath_t* db, PT* dbeta) {
using Vec = at::vec::Vectorized<PT>;
constexpr int64_t K = Vec::size();
@ -841,7 +841,7 @@ BetaBackward(int64_t N, int64_t C, const opmath_t* db, PT* dbeta) {
}
template <typename PT, typename opmath_t>
typename std::enable_if<!std::is_same<PT, opmath_t>::value, void>::type
std::enable_if_t<!std::is_same_v<PT, opmath_t>, void>
BetaBackward(int64_t N, int64_t C, const opmath_t* db, PT* dbeta) {
using Vec = at::vec::Vectorized<PT>;
using fVec = at::vec::Vectorized<opmath_t>;
@ -937,7 +937,7 @@ void GroupNormBackwardKernelImplInternal(
}
template <typename T, typename opmath_t>
inline typename std::enable_if<std::is_same<T, opmath_t>::value, void>::type
inline std::enable_if_t<std::is_same_v<T, opmath_t>, void>
DsDbRowwiseMomentsChannelsLast(
const T* dY_ptr,
const T* X_ptr,
@ -972,7 +972,7 @@ DsDbRowwiseMomentsChannelsLast(
}
template <typename T, typename opmath_t>
inline typename std::enable_if<!std::is_same<T, opmath_t>::value, void>::type
inline std::enable_if_t<!std::is_same_v<T, opmath_t>, void>
DsDbRowwiseMomentsChannelsLast(
const T* dY_ptr,
const T* X_ptr,
@ -1024,10 +1024,10 @@ DsDbRowwiseMomentsChannelsLast(
}
template <typename T>
inline typename std::enable_if<std::is_same<T, at::opmath_type<T>>::value,
inline std::enable_if_t<std::is_same_v<T, at::opmath_type<T>>,
std::tuple<
vec::Vectorized<T>,
vec::Vectorized<T>>>::type
vec::Vectorized<T>>>
load_util(const T* data_ptr, int64_t n) {
using Vec = vec::Vectorized<T>;
auto vec0 = Vec::loadu(data_ptr, n > Vec::size() ? Vec::size() : n);
@ -1037,11 +1037,11 @@ load_util(const T* data_ptr, int64_t n) {
}
template <typename T>
inline typename std::enable_if<!std::is_same<T, at::opmath_type<T>>::value,
inline std::enable_if_t<!std::is_same_v<T, at::opmath_type<T>>,
std::tuple<
vec::Vectorized<at::opmath_type<T>>,
vec::Vectorized<at::opmath_type<T>>>
>::type
>
load_util(const T* data_ptr, int64_t n) {
using Vec = vec::Vectorized<T>;
auto vec = Vec::loadu(data_ptr, n);
@ -1049,7 +1049,7 @@ load_util(const T* data_ptr, int64_t n) {
}
template <typename T, typename PT, typename opmath_t>
inline typename std::enable_if<std::is_same<T, opmath_t>::value, void>::type
inline std::enable_if_t<std::is_same_v<T, opmath_t>, void>
ApplyInputGradientsChannelsLastColMov(
const T* dY_data,
const T* X_data,
@ -1097,7 +1097,7 @@ ApplyInputGradientsChannelsLastColMov(
}
template <typename T, typename PT, typename opmath_t>
inline typename std::enable_if<!std::is_same<T, opmath_t>::value, void>::type
inline std::enable_if_t<!std::is_same_v<T, opmath_t>, void>
ApplyInputGradientsChannelsLastColMov(
const T* dY_data,
const T* X_data,
@ -1154,7 +1154,7 @@ ApplyInputGradientsChannelsLastColMov(
}
template <typename T, typename PT, typename opmath_t>
inline typename std::enable_if<std::is_same<T, opmath_t>::value, void>::type
inline std::enable_if_t<std::is_same_v<T, opmath_t>, void>
ApplyInputGradientsChannelsLastRowMov(
const T* dY_data,
const T* X_data,
@ -1190,7 +1190,7 @@ ApplyInputGradientsChannelsLastRowMov(
}
template <typename T, typename PT, typename opmath_t>
inline typename std::enable_if<!std::is_same<T, opmath_t>::value, void>::type
inline std::enable_if_t<!std::is_same_v<T, opmath_t>, void>
ApplyInputGradientsChannelsLastRowMov(
const T* dY_data,
const T* X_data,

View File

@ -12,8 +12,7 @@ namespace {
// filesystem error and a negative CUfileOpError enum value otherwise).
template <
class T,
typename std::enable_if<std::is_integral<T>::value, std::nullptr_t>::type =
nullptr>
std::enable_if_t<std::is_integral_v<T>, std::nullptr_t> = nullptr>
std::string cuGDSFileGetErrorString(T status) {
status = std::abs(status);
return IS_CUFILE_ERR(status) ? std::string(CUFILE_ERRSTR(status))
@ -24,8 +23,7 @@ std::string cuGDSFileGetErrorString(T status) {
// CUfileError_t
template <
class T,
typename std::enable_if<!std::is_integral<T>::value, std::nullptr_t>::type =
nullptr>
std::enable_if_t<!std::is_integral_v<T>, std::nullptr_t> = nullptr>
std::string cuGDSFileGetErrorString(T status) {
std::string errStr = cuGDSFileGetErrorString(static_cast<int>(status.err));
if (IS_CUDA_ERR(status))

View File

@ -418,9 +418,8 @@ __device__ __inline__ Vec<Alignment> add_vec(
// With world_size specialization: perform balanced load from all peers before
// performing reduction.
template <typename T, int alignment, int k_world_size>
__device__ inline
typename std::enable_if<(k_world_size > 0), Vec<alignment>>::type
load_and_reduce(T** ptrs, size_t rank, size_t world_size, size_t offset) {
__device__ inline std::enable_if_t<(k_world_size > 0), Vec<alignment>>
load_and_reduce(T** ptrs, size_t rank, size_t world_size, size_t offset) {
Vec<alignment> vecs[k_world_size];
#pragma unroll k_world_size
for (size_t step = 0; step < k_world_size; ++step) {
@ -438,9 +437,8 @@ __device__ inline
// Without world_size specialization: perform ordered (unbalanced) load and
// accumulate on each load.
template <typename T, int alignment, int k_world_size>
__device__ inline
typename std::enable_if<(k_world_size <= 0), Vec<alignment>>::type
load_and_reduce(T** ptrs, size_t rank, size_t world_size, size_t offset) {
__device__ inline std::enable_if_t<(k_world_size <= 0), Vec<alignment>>
load_and_reduce(T** ptrs, size_t rank, size_t world_size, size_t offset) {
Vec<alignment> acc{};
for (size_t step = 0; step < world_size; ++step) {
auto vec = ld_vec<alignment>(ptrs[step] + offset);

View File

@ -8,13 +8,13 @@ constexpr auto cpp_intrinsics_definition = R"(
namespace std {
template <typename T,
typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
std::enable_if_t<std::is_floating_point_v<T>, int> = 0>
T rsqrt(T v) {
return 1.0f / std::sqrt(v);
}
template <typename T,
typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
std::enable_if_t<std::is_floating_point_v<T>, int> = 0>
T frac(T v) {
T intpart;
return std::modf(v, &intpart);

View File

@ -1075,14 +1075,16 @@ void LLVMCodeGenImpl::visit(const CompareSelectPtr& v) {
}
template <typename T>
typename std::enable_if<std::is_integral<T>::value, llvm::Value*>::type
getFromType(llvm::Type* type, T value) {
std::enable_if_t<std::is_integral_v<T>, llvm::Value*> getFromType(
llvm::Type* type,
T value) {
return llvm::ConstantInt::get(type, value, std::is_signed<T>::value);
}
template <typename T>
typename std::enable_if<std::is_floating_point<T>::value, llvm::Value*>::type
getFromType(llvm::Type* type, T value) {
std::enable_if_t<std::is_floating_point_v<T>, llvm::Value*> getFromType(
llvm::Type* type,
T value) {
return llvm::ConstantFP::get(type, value);
}