Replace c10::guts::stuff with std::stuff (#30915)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/30915

Since we now have C++14, we no longer need these c10::guts helpers; the standard std:: equivalents can be used directly.
ghstack-source-id: 95777609
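
For context, a minimal sketch (illustrative only, not taken from this diff) of the C++14 standard-library facilities that replace the corresponding c10::guts polyfills:

    // Illustrative sketch: the C++14 standard library already provides the
    // helpers that c10::guts had been polyfilling.
    #include <memory>
    #include <type_traits>
    #include <utility>

    int main() {
      auto p = std::make_unique<int>(42);               // was c10::guts::make_unique
      static_assert(std::is_same<std::decay_t<const int&>, int>::value,
                    "std::decay_t replaces guts::decay_t");
      static_assert(std::is_same<std::conditional_t<true, int, long>, int>::value,
                    "std::conditional_t replaces guts::conditional_t");
      using Is = std::make_index_sequence<3>;           // replaces guts::make_index_sequence
      static_assert(Is::size() == 3, "std::index_sequence replaces guts::index_sequence");
      // std::enable_if_t similarly replaces guts::enable_if_t in SFINAE contexts.
      return *p - 42;
    }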

Test Plan: waitforsandcastle

Differential Revision: D18869639

fbshipit-source-id: 97716f932297c64c6e814410ac47b444c33d4e2e
Sebastian Messmer authored on 2019-12-16 13:48:12 -08:00
Committed by: Facebook Github Bot
Parent: c6a8f884d8
Commit: 643ca5def2
115 changed files with 363 additions and 497 deletions

View File

@ -11,7 +11,7 @@ void DeprecatedTypePropertiesDeleter::operator()(DeprecatedTypeProperties * ptr)
DeprecatedTypePropertiesRegistry::DeprecatedTypePropertiesRegistry() {
for (int b = 0; b < static_cast<int>(Backend::NumOptions); ++b) {
for (int s = 0; s < static_cast<int>(ScalarType::NumOptions); ++s) {
registry[b][s] = c10::guts::make_unique<DeprecatedTypeProperties>(
registry[b][s] = std::make_unique<DeprecatedTypeProperties>(
static_cast<Backend>(b),
static_cast<ScalarType>(s));
}

View File

@ -209,7 +209,7 @@ private:
at::Tensor
>;
using StorageT = guts::conditional_t<
using StorageT = std::conditional_t<
guts::typelist::contains<types_with_direct_list_implementation, T>::value,
T, // The types listed in types_with_direct_list_implementation store the list as std::vector<T>
IValue // All other types store the list as std::vector<IValue>

View File

@ -100,7 +100,7 @@ void internal_set_names_inplace(TensorImpl* impl, optional<DimnameList> names, b
}
auto* meta = get_named_tensor_meta(impl);
if (meta == nullptr) {
impl->set_named_tensor_meta(c10::guts::make_unique<NamedTensorMeta>(*names));
impl->set_named_tensor_meta(std::make_unique<NamedTensorMeta>(*names));
} else {
meta->set_names(*names);
}
@ -112,7 +112,7 @@ void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names,
}
auto* meta = get_named_tensor_meta(impl);
if (meta == nullptr) {
impl->set_named_tensor_meta(c10::guts::make_unique<NamedTensorMeta>(names));
impl->set_named_tensor_meta(std::make_unique<NamedTensorMeta>(names));
} else {
meta->set_names(names);
}

View File

@ -26,7 +26,7 @@ struct CAFFE2_API NamedTensorMeta : public c10::NamedTensorMetaInterface {
: names_(std::move(names)) {}
std::unique_ptr<c10::NamedTensorMetaInterface> clone() const override {
return c10::guts::make_unique<NamedTensorMeta>(names_);
return std::make_unique<NamedTensorMeta>(names_);
}
bool has_names() const;

View File

@ -89,7 +89,7 @@ public:
* > public:
* > Tensor operator()(Tensor a, Tensor b) {...}
* > };
* > KernelFunction func = KernelFunction::makeFromUnboxedFunctor(guts::make_unique<MyFunctor>());
* > KernelFunction func = KernelFunction::makeFromUnboxedFunctor(std::make_unique<MyFunctor>());
*/
template<bool AllowLegacyTypes = false, class KernelFunctor>
static KernelFunction makeFromUnboxedFunctor(std::unique_ptr<OperatorKernel> kernelFunctor);
@ -111,7 +111,7 @@ public:
* > Tensor operator()(Tensor a, Tensor b) {...}
* > };
* > KernelFunction func = KernelFunction::makeFromUnboxedFunctor([] {
* > return guts::make_unique<MyFunctor>();
* > return std::make_unique<MyFunctor>();
* > });
*/
template<class KernelFunctor, bool AllowLegacyTypes = false>
@ -133,7 +133,7 @@ public:
* > public:
* > Tensor operator()(Tensor a, Tensor b) {...}
* > };
* > KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunctor(guts::make_unique<MyFunctor>());
* > KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunctor(std::make_unique<MyFunctor>());
*/
template<class KernelFunctor>
static KernelFunction makeFromUnboxedOnlyFunctor(std::unique_ptr<OperatorKernel> kernelFunctor);

View File

@ -149,8 +149,8 @@ inline KernelFunction KernelFunction::makeFromUnboxedRuntimeFunction(FuncType* f
static_assert(!std::is_same<FuncType, BoxedKernelFunction>::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
TORCH_INTERNAL_ASSERT(func != nullptr, "Kernel function cannot be nullptr");
return makeFromUnboxedFunctor<AllowLegacyTypes, detail::WrapRuntimeKernelFunctor<guts::decay_t<FuncType>>>(
guts::make_unique_base<OperatorKernel, detail::WrapRuntimeKernelFunctor<guts::decay_t<FuncType>>>(func)
return makeFromUnboxedFunctor<AllowLegacyTypes, detail::WrapRuntimeKernelFunctor<std::decay_t<FuncType>>>(
guts::make_unique_base<OperatorKernel, detail::WrapRuntimeKernelFunctor<std::decay_t<FuncType>>>(func)
);
}
@ -160,17 +160,17 @@ inline KernelFunction KernelFunction::makeFromUnboxedOnlyRuntimeFunction(FuncTyp
static_assert(!std::is_same<FuncType, BoxedKernelFunction>::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
TORCH_INTERNAL_ASSERT(func != nullptr, "Kernel function cannot be nullptr");
return makeFromUnboxedOnlyFunctor<detail::WrapRuntimeKernelFunctor<guts::decay_t<FuncType>>>(
guts::make_unique_base<OperatorKernel, detail::WrapRuntimeKernelFunctor<guts::decay_t<FuncType>>>(func)
return makeFromUnboxedOnlyFunctor<detail::WrapRuntimeKernelFunctor<std::decay_t<FuncType>>>(
guts::make_unique_base<OperatorKernel, detail::WrapRuntimeKernelFunctor<std::decay_t<FuncType>>>(func)
);
}
template<bool AllowLegacyTypes, class Lambda>
inline KernelFunction KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) {
static_assert(guts::is_functor<guts::decay_t<Lambda>>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type.");
static_assert(guts::is_functor<std::decay_t<Lambda>>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type.");
return makeFromUnboxedFunctor<AllowLegacyTypes, detail::WrapRuntimeKernelFunctor<guts::decay_t<Lambda>>>(
guts::make_unique_base<OperatorKernel, detail::WrapRuntimeKernelFunctor<guts::decay_t<Lambda>>>(std::forward<Lambda>(lambda))
return makeFromUnboxedFunctor<AllowLegacyTypes, detail::WrapRuntimeKernelFunctor<std::decay_t<Lambda>>>(
guts::make_unique_base<OperatorKernel, detail::WrapRuntimeKernelFunctor<std::decay_t<Lambda>>>(std::forward<Lambda>(lambda))
);
}

View File

@ -58,13 +58,13 @@ struct unboxed_functor_without_return final : OperatorKernel {
struct unboxed_functor_with_return_factory final {
std::unique_ptr<OperatorKernel> operator()() {
return c10::guts::make_unique<unboxed_functor_with_return>();
return std::make_unique<unboxed_functor_with_return>();
}
};
struct unboxed_functor_without_return_factory final {
std::unique_ptr<OperatorKernel> operator()() {
return c10::guts::make_unique<unboxed_functor_without_return>();
return std::make_unique<unboxed_functor_without_return>();
}
};
@ -170,22 +170,22 @@ TEST(KernelFunctionTest, givenBoxedFunction_withoutReturn_whenCallingUnboxed_the
}
TEST(KernelFunctionTest, givenUnboxedFunctor_withReturn_whenCallingBoxed_thenWorks) {
KernelFunction func = KernelFunction::makeFromUnboxedFunctor<false, kernels::unboxed_functor_with_return>(std::unique_ptr<OperatorKernel>(c10::guts::make_unique<kernels::unboxed_functor_with_return>()));
KernelFunction func = KernelFunction::makeFromUnboxedFunctor<false, kernels::unboxed_functor_with_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_with_return>()));
kernels::expectBoxedCallingWithReturnWorks(func);
}
TEST(KernelFunctionTest, givenUnboxedFunctor_withoutReturn_whenCallingBoxed_thenWorks) {
KernelFunction func = KernelFunction::makeFromUnboxedFunctor<false, kernels::unboxed_functor_without_return>(std::unique_ptr<OperatorKernel>(c10::guts::make_unique<kernels::unboxed_functor_without_return>()));
KernelFunction func = KernelFunction::makeFromUnboxedFunctor<false, kernels::unboxed_functor_without_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_without_return>()));
kernels::expectBoxedCallingWithoutReturnWorks(func);
}
TEST(KernelFunctionTest, givenUnboxedFunctor_withReturn_whenCallingUnboxed_thenWorks) {
KernelFunction func = KernelFunction::makeFromUnboxedFunctor<false, kernels::unboxed_functor_with_return>(std::unique_ptr<OperatorKernel>(c10::guts::make_unique<kernels::unboxed_functor_with_return>()));
KernelFunction func = KernelFunction::makeFromUnboxedFunctor<false, kernels::unboxed_functor_with_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_with_return>()));
kernels::expectUnboxedCallingWithReturnWorks(func);
}
TEST(KernelFunctionTest, givenUnboxedFunctor_withoutReturn_whenCallingUnboxed_thenWorks) {
KernelFunction func = KernelFunction::makeFromUnboxedFunctor<false, kernels::unboxed_functor_without_return>(std::unique_ptr<OperatorKernel>(c10::guts::make_unique<kernels::unboxed_functor_without_return>()));
KernelFunction func = KernelFunction::makeFromUnboxedFunctor<false, kernels::unboxed_functor_without_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_without_return>()));
kernels::expectUnboxedCallingWithoutReturnWorks(func);
}
@ -210,22 +210,22 @@ TEST(KernelFunctionTest, givenUnboxedFunctorFactory_withoutReturn_whenCallingUnb
}
TEST(KernelFunctionTest, givenUnboxedOnlyFunctor_withReturn_whenCallingBoxed_thenFails) {
KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunctor<kernels::unboxed_functor_with_return>(std::unique_ptr<OperatorKernel>(c10::guts::make_unique<kernels::unboxed_functor_with_return>()));
KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunctor<kernels::unboxed_functor_with_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_with_return>()));
kernels::expectBoxedCallingFailsWith(func, "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::callUnboxed()");
}
TEST(KernelFunctionTest, givenUnboxedOnlyFunctor_withoutReturn_whenCallingBoxed_thenFails) {
KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunctor<kernels::unboxed_functor_without_return>(std::unique_ptr<OperatorKernel>(c10::guts::make_unique<kernels::unboxed_functor_without_return>()));
KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunctor<kernels::unboxed_functor_without_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_without_return>()));
kernels::expectBoxedCallingFailsWith(func, "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::callUnboxed()");
}
TEST(KernelFunctionTest, givenUnboxedOnlyFunctor_withReturn_whenCallingUnboxed_thenWorks) {
KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunctor<kernels::unboxed_functor_with_return>(std::unique_ptr<OperatorKernel>(c10::guts::make_unique<kernels::unboxed_functor_with_return>()));
KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunctor<kernels::unboxed_functor_with_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_with_return>()));
kernels::expectUnboxedCallingWithReturnWorks(func);
}
TEST(KernelFunctionTest, givenUnboxedOnlyFunctor_withoutReturn_whenCallingUnboxed_thenWorks) {
KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunctor<kernels::unboxed_functor_without_return>(std::unique_ptr<OperatorKernel>(c10::guts::make_unique<kernels::unboxed_functor_without_return>()));
KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunctor<kernels::unboxed_functor_without_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_without_return>()));
kernels::expectUnboxedCallingWithoutReturnWorks(func);
}

View File

@ -17,9 +17,9 @@ namespace impl {
// Assume T is decayed
template <typename T>
using not_ok_to_box =
c10::guts::disjunction<
c10::guts::negation<
c10::guts::disjunction<
guts::disjunction<
guts::negation<
guts::disjunction<
std::is_constructible<IValue, T>,
// TensorOptions are not directly constructible into IValue,
// but torch::jit::push knows how to handle them
@ -39,21 +39,21 @@ using not_ok_to_box =
template <class Result, class... Args>
using supports_boxing =
c10::guts::negation<c10::guts::disjunction<
guts::negation<guts::disjunction<
std::is_lvalue_reference<Result>,
not_ok_to_box<Result>,
std::is_same<IntArrayRef, Result>,
not_ok_to_box<guts::decay_t<Args>>...
not_ok_to_box<std::decay_t<Args>>...
>>;
template<class Result, class... Args>
Result boxAndCallBoxedFunc(KernelFunction::InternalBoxedKernelFunction* boxed_kernel_func, OperatorKernel* functor, const OperatorHandle& opHandle, Args... args, guts::enable_if_t<!supports_boxing<Result, Args...>::value, int> = 0) {
Result boxAndCallBoxedFunc(KernelFunction::InternalBoxedKernelFunction* boxed_kernel_func, OperatorKernel* functor, const OperatorHandle& opHandle, Args... args, std::enable_if_t<!supports_boxing<Result, Args...>::value, int> = 0) {
TORCH_INTERNAL_ASSERT(false, "Tried to call KernelFunction::callUnboxed() for a kernel that only has a boxed kernel and doesn't support calling from an unboxed API yet.");
}
// SFINAE version for ops with returns
template<class Result, class... Args>
guts::enable_if_t<supports_boxing<Result, Args...>::value && !std::is_same<void, Result>::value, Result>
std::enable_if_t<supports_boxing<Result, Args...>::value && !std::is_same<void, Result>::value, Result>
boxAndCallBoxedFunc(KernelFunction::InternalBoxedKernelFunction* boxed_kernel_func, OperatorKernel* functor, const OperatorHandle& opHandle, Args... args) {
// TODO Reuse stack vector instead of allocating?
torch::jit::Stack stack;
@ -67,7 +67,7 @@ boxAndCallBoxedFunc(KernelFunction::InternalBoxedKernelFunction* boxed_kernel_fu
// SFINAE version for ops without returns
template<class Result, class... Args>
guts::enable_if_t<supports_boxing<Result, Args...>::value && std::is_same<void, Result>::value, Result>
std::enable_if_t<supports_boxing<Result, Args...>::value && std::is_same<void, Result>::value, Result>
boxAndCallBoxedFunc(KernelFunction::InternalBoxedKernelFunction* boxed_kernel_func, OperatorKernel* functor, const OperatorHandle& opHandle, Args... args) {
// TODO Reuse stack vector instead of allocating?
torch::jit::Stack stack;
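
Note that in the hunks above the boolean metafunctions keep their guts:: spelling (only the c10:: qualification is dropped): std::conjunction, std::disjunction, and std::negation are C++17 features, so they cannot be replaced yet. A rough C++14-compatible sketch of such polyfills (illustration only, not the actual c10::guts implementation, with hypothetical names):

    #include <type_traits>

    // Hypothetical C++14 stand-ins for the C++17 traits; c10::guts provides equivalents.
    template <class B>
    struct my_negation : std::integral_constant<bool, !bool(B::value)> {};

    template <class...>
    struct my_disjunction : std::false_type {};
    template <class B1, class... Bn>
    struct my_disjunction<B1, Bn...>
        : std::conditional_t<bool(B1::value), B1, my_disjunction<Bn...>> {};

    static_assert(my_disjunction<std::false_type, std::true_type>::value, "");
    static_assert(!my_negation<std::true_type>::value, "");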

View File

@ -15,7 +15,7 @@ namespace detail {
return (*kernel_func)(std::forward<Parameters>(args)...);
}
};
template<class FuncType, FuncType* kernel_func, class Enable = guts::enable_if_t<guts::is_function_type<FuncType>::value>>
template<class FuncType, FuncType* kernel_func, class Enable = std::enable_if_t<guts::is_function_type<FuncType>::value>>
struct WrapKernelFunction final {
using type = WrapKernelFunction_<
FuncType,

View File

@ -20,7 +20,7 @@
using c10::RegisterOperators;
using c10::TensorTypeId;
using c10::Stack;
using c10::guts::make_unique;
using std::make_unique;
using c10::intrusive_ptr;
using c10::Dict;
using at::Tensor;

View File

@ -8,7 +8,7 @@
using c10::RegisterOperators;
using c10::TensorTypeId;
using c10::Stack;
using c10::guts::make_unique;
using std::make_unique;
using c10::intrusive_ptr;
using c10::Dict;
using at::Tensor;

View File

@ -51,7 +51,7 @@ namespace detail {
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, guts::enable_if_t<guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
// everything is ok, this is a primitive type
};
@ -90,20 +90,20 @@ namespace detail {
// there if they didn't exist, but we can show a better error message
// in some common error scenarios.
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, guts::enable_if_t<std::is_same<float, T>::value>> {
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> {
// There is no reason to support float when we have double. Keep the API lean.
static_assert(guts::false_t<T>::value, "You tried to register a kernel with an unsupported input type: float. Please use double instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, guts::enable_if_t<std::is_same<const char*, T>::value>> {
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> {
static_assert(guts::false_t<T>::value, "You tried to register a kernel with an unsupported input type: const char*. Please use std::string instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, guts::enable_if_t<std::is_same<std::vector<bool>, T>::value>> {
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<std::vector<bool>, T>::value>> {
static_assert(guts::false_t<T>::value, "You tried to register a kernel with an unsupported input type: vector<bool>. Please use List<bool> instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, guts::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
static_assert(guts::false_t<T>::value, "You tried to register a kernel with an unsupported integral input type. Please use int64_t instead.");
};
@ -115,7 +115,7 @@ namespace detail {
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, guts::enable_if_t<guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
// everything is ok, this is a primitive type
};
@ -156,20 +156,20 @@ namespace detail {
// there if they didn't exist, but we can show a better error message
// in some common error scenarios.
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, guts::enable_if_t<std::is_same<float, T>::value>> {
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> {
// There is no reason to support float when we have double. Keep the API lean.
static_assert(guts::false_t<T>::value, "You tried to register a kernel with an unsupported output type: float. Please use double instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, guts::enable_if_t<std::is_same<const char*, T>::value>> {
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> {
static_assert(guts::false_t<T>::value, "You tried to register a kernel with an unsupported output type: const char*. Please use std::string instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, guts::enable_if_t<std::is_same<std::vector<bool>, T>::value>> {
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<std::vector<bool>, T>::value>> {
static_assert(guts::false_t<T>::value, "You tried to register a kernel with an unsupported output type: vector<bool>. Please use List<bool> instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, guts::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
static_assert(guts::false_t<T>::value, "You tried to register a kernel with an unsupported integral output type. Please use int64_t instead.");
};
@ -187,13 +187,13 @@ namespace detail {
}
template<class Functor, bool AllowDeprecatedTypes, size_t... ivalue_arg_indices>
typename guts::infer_function_traits_t<Functor>::return_type call_functor_with_args_from_stack_(Functor* functor, Stack* stack, guts::index_sequence<ivalue_arg_indices...>) {
typename guts::infer_function_traits_t<Functor>::return_type call_functor_with_args_from_stack_(Functor* functor, Stack* stack, std::index_sequence<ivalue_arg_indices...>) {
(void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would be unused and we have to silence the compiler warning.
constexpr size_t num_ivalue_args = sizeof...(ivalue_arg_indices);
using IValueArgTypes = typename guts::infer_function_traits_t<Functor>::parameter_types;
return (*functor)(ivalue_to_arg<guts::remove_cv_t<guts::remove_reference_t<guts::typelist::element_t<ivalue_arg_indices, IValueArgTypes>>>, AllowDeprecatedTypes>(
return (*functor)(ivalue_to_arg<std::remove_cv_t<std::remove_reference_t<guts::typelist::element_t<ivalue_arg_indices, IValueArgTypes>>>, AllowDeprecatedTypes>(
std::move(torch::jit::peek(*stack, ivalue_arg_indices, num_ivalue_args))
)...);
}
@ -201,7 +201,7 @@ namespace detail {
template<class Functor, bool AllowDeprecatedTypes>
typename guts::infer_function_traits_t<Functor>::return_type call_functor_with_args_from_stack(Functor* functor, Stack* stack) {
constexpr size_t num_ivalue_args = guts::infer_function_traits_t<Functor>::number_of_parameters;
return call_functor_with_args_from_stack_<Functor, AllowDeprecatedTypes>(functor, stack, guts::make_index_sequence<num_ivalue_args>());
return call_functor_with_args_from_stack_<Functor, AllowDeprecatedTypes>(functor, stack, std::make_index_sequence<num_ivalue_args>());
}
template<class OutputType, bool AllowDeprecatedTypes>
@ -213,12 +213,12 @@ namespace detail {
template<class... OutputTypes, bool AllowDeprecatedTypes>
struct push_outputs<std::tuple<OutputTypes...>, AllowDeprecatedTypes> final {
static void call(std::tuple<OutputTypes...>&& output, Stack* stack) {
call_(std::move(output), stack, guts::make_index_sequence<sizeof...(OutputTypes)>());
call_(std::move(output), stack, std::make_index_sequence<sizeof...(OutputTypes)>());
}
private:
template<size_t... indices>
static void call_(std::tuple<OutputTypes...>&& output, Stack* stack, guts::index_sequence<indices...>) {
static void call_(std::tuple<OutputTypes...>&& output, Stack* stack, std::index_sequence<indices...>) {
torch::jit::push(*stack, return_to_ivalue<OutputTypes, AllowDeprecatedTypes>(std::move(std::get<indices>(output)))...);
}
};
@ -227,7 +227,7 @@ namespace detail {
// SFINAE version for kernels that return an output
template<class KernelFunctor, bool AllowDeprecatedTypes>
struct make_boxed_from_unboxed_functor<KernelFunctor, AllowDeprecatedTypes, guts::enable_if_t<!std::is_same<void, typename guts::infer_function_traits_t<KernelFunctor>::return_type>::value>> final {
struct make_boxed_from_unboxed_functor<KernelFunctor, AllowDeprecatedTypes, std::enable_if_t<!std::is_same<void, typename guts::infer_function_traits_t<KernelFunctor>::return_type>::value>> final {
static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
static void call(OperatorKernel* functor, const OperatorHandle&, Stack* stack) {
@ -241,7 +241,7 @@ namespace detail {
// SFINAE version for kernels that don't return an output
template<class KernelFunctor, bool AllowDeprecatedTypes>
struct make_boxed_from_unboxed_functor<KernelFunctor, AllowDeprecatedTypes, guts::enable_if_t<std::is_same<void, typename guts::infer_function_traits_t<KernelFunctor>::return_type>::value>> final {
struct make_boxed_from_unboxed_functor<KernelFunctor, AllowDeprecatedTypes, std::enable_if_t<std::is_same<void, typename guts::infer_function_traits_t<KernelFunctor>::return_type>::value>> final {
static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
static void call(OperatorKernel* functor, const OperatorHandle&, Stack* stack) {
@ -284,7 +284,7 @@ namespace detail {
template<class FuncType>
std::unique_ptr<FunctionSchema> inferFunctionSchema_() {
return guts::make_unique<FunctionSchema>(inferFunctionSchema<FuncType>("", ""));
return std::make_unique<FunctionSchema>(inferFunctionSchema<FuncType>("", ""));
}
template<class KernelFunctor>
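
The call_functor_with_args_from_stack_ helper above illustrates the std::index_sequence pattern that replaces guts::index_sequence: each index in the pack selects one IValue from the stack, and the pack is expanded into a single call expression. A minimal standalone sketch of the same pattern, with hypothetical names and a plain std::vector<int> standing in for the IValue stack:

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Hypothetical example of the index_sequence expansion used above.
    template <class F, std::size_t... Is>
    int call_with_stack_args_(F&& f, const std::vector<int>& stack, std::index_sequence<Is...>) {
      return std::forward<F>(f)(stack[Is]...);  // expands to f(stack[0], stack[1], ...)
    }

    template <std::size_t N, class F>
    int call_with_stack_args(F&& f, const std::vector<int>& stack) {
      return call_with_stack_args_(std::forward<F>(f), stack, std::make_index_sequence<N>{});
    }

    // call_with_stack_args<2>([](int a, int b) { return a + b; }, {1, 2}) yields 3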

View File

@ -9,7 +9,7 @@ using c10::RegisterOperators;
using c10::OperatorKernel;
using c10::TensorTypeId;
using c10::Stack;
using c10::guts::make_unique;
using std::make_unique;
using c10::intrusive_ptr;
using c10::Dict;
using at::Tensor;

View File

@ -19,7 +19,7 @@
using c10::RegisterOperators;
using c10::TensorTypeId;
using c10::Stack;
using c10::guts::make_unique;
using std::make_unique;
using c10::intrusive_ptr;
using c10::Dict;
using at::Tensor;

View File

@ -8,7 +8,7 @@
using c10::RegisterOperators;
using c10::TensorTypeId;
using c10::Stack;
using c10::guts::make_unique;
using std::make_unique;
using c10::intrusive_ptr;
using c10::Dict;
using at::Tensor;

View File

@ -9,7 +9,7 @@
using c10::RegisterOperators;
using c10::TensorTypeId;
using c10::Stack;
using c10::guts::make_unique;
using std::make_unique;
using c10::OperatorHandle;
using std::unique_ptr;

View File

@ -32,7 +32,7 @@ Dispatcher::Dispatcher()
: operators_()
, operatorLookupTable_()
, backendFallbackKernels_()
, listeners_(guts::make_unique<detail::RegistrationListenerList>())
, listeners_(std::make_unique<detail::RegistrationListenerList>())
, mutex_() {}
Dispatcher::~Dispatcher() {}

View File

@ -177,10 +177,10 @@ struct CAFFE2_API IValue final {
template <
typename... Args,
c10::guts::enable_if_t<
!c10::guts::disjunction<
std::enable_if_t<
!guts::disjunction<
std::is_lvalue_reference<Args>...,
c10::guts::negation<std::is_constructible<IValue, Args>>...>::
guts::negation<std::is_constructible<IValue, Args>>...>::
value,
std::nullptr_t> = nullptr>
IValue(const std::tuple<Args...>& t);

View File

@ -399,7 +399,7 @@ namespace detail {
struct _guarded_unsigned_long_unique_dummy final {
_guarded_unsigned_long_unique_dummy(int64_t){};
};
using _guarded_unsigned_long = c10::guts::conditional_t<
using _guarded_unsigned_long = std::conditional_t<
std::is_same<unsigned long, uint32_t>::value ||
std::is_same<unsigned long, uint64_t>::value,
_guarded_unsigned_long_unique_dummy,
@ -546,7 +546,7 @@ namespace detail {
template <typename Tuple, std::size_t... INDEX>
Tuple generic_to_tuple_impl(
const std::vector<IValue>& t,
c10::guts::index_sequence<INDEX...>) {
std::index_sequence<INDEX...>) {
return std::make_tuple(
t[INDEX].to<typename std::tuple_element<INDEX, Tuple>::type>()...);
}
@ -554,11 +554,11 @@ Tuple generic_to_tuple_impl(
template <
typename... Args,
typename Indices = c10::guts::make_index_sequence<sizeof...(Args)>,
c10::guts::enable_if_t<
!c10::guts::disjunction<
typename Indices = std::make_index_sequence<sizeof...(Args)>,
std::enable_if_t<
!guts::disjunction<
std::is_lvalue_reference<Args>...,
c10::guts::negation<std::is_constructible<IValue, Args>>...>::value,
guts::negation<std::is_constructible<IValue, Args>>...>::value,
std::nullptr_t> = nullptr>
std::tuple<Args...> generic_to(IValue ivalue, _fake_type<std::tuple<Args...>>) {
auto vals = ivalue.toTuple()->elements();
@ -655,10 +655,10 @@ inline IValue::IValue(c10::intrusive_ptr<ivalue::Tuple> v)
}
template <
typename... Args,
c10::guts::enable_if_t<
!c10::guts::disjunction<
std::enable_if_t<
!guts::disjunction<
std::is_lvalue_reference<Args>...,
c10::guts::negation<std::is_constructible<IValue, Args>>...>::value,
guts::negation<std::is_constructible<IValue, Args>>...>::value,
std::nullptr_t>>
inline IValue::IValue(const std::tuple<Args...>& t)
: IValue(

View File

@ -44,13 +44,13 @@ constexpr int checkStaticTypes() {
}
template <typename... Ts, size_t... Is>
constexpr std::array<ArgumentDef, sizeof...(Ts)> createArgumentVectorFromTypes(guts::index_sequence<Is...>) {
constexpr std::array<ArgumentDef, sizeof...(Ts)> createArgumentVectorFromTypes(std::index_sequence<Is...>) {
return (
// Check types for common errors
checkStaticTypes<Ts...>(),
// Create the return value
std::array<ArgumentDef, sizeof...(Ts)>{{ArgumentDef{&getTypePtr_<guts::decay_t<Ts>>::call}...}}
std::array<ArgumentDef, sizeof...(Ts)>{{ArgumentDef{&getTypePtr_<std::decay_t<Ts>>::call}...}}
);
}
@ -61,7 +61,7 @@ template<class... ParameterTypes>
struct createArguments<guts::typelist::typelist<ParameterTypes...>> final {
static constexpr std::array<ArgumentDef, sizeof...(ParameterTypes)> call() {
return createArgumentVectorFromTypes<ParameterTypes...>(
guts::make_index_sequence<sizeof...(ParameterTypes)>()
std::make_index_sequence<sizeof...(ParameterTypes)>()
);
}
};
@ -78,13 +78,13 @@ template<class... ReturnTypes>
struct createReturns<std::tuple<ReturnTypes...>, void> final {
static constexpr std::array<ArgumentDef, sizeof...(ReturnTypes)> call() {
return createArgumentVectorFromTypes<ReturnTypes...>(
guts::make_index_sequence<sizeof...(ReturnTypes)>()
std::make_index_sequence<sizeof...(ReturnTypes)>()
);
}
};
template<class ReturnType>
struct createReturns<ReturnType, guts::enable_if_t<!std::is_same<void, ReturnType>::value && !guts::is_instantiation_of<std::tuple, ReturnType>::value>> final {
struct createReturns<ReturnType, std::enable_if_t<!std::is_same<void, ReturnType>::value && !guts::is_instantiation_of<std::tuple, ReturnType>::value>> final {
static constexpr std::array<ArgumentDef, 1> call() {
return createReturns<std::tuple<ReturnType>>::call();
}

View File

@ -141,13 +141,13 @@ public:
*/
template<class KernelFunctor, class... ConstructorParameters>
// enable_if: only enable it if KernelFunctor is actually a functor
guts::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> kernel(TensorTypeId dispatch_key, ConstructorParameters&&... constructorParameters) && {
std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> kernel(TensorTypeId dispatch_key, ConstructorParameters&&... constructorParameters) && {
static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
return std::move(*this).kernel(
std::move(dispatch_key),
KernelFunction::makeFromUnboxedFunctorFactory<KernelFunctor>(detail::KernelFactory<KernelFunctor, guts::decay_t<ConstructorParameters>...>(std::forward<ConstructorParameters>(constructorParameters)...)),
KernelFunction::makeFromUnboxedFunctorFactory<KernelFunctor>(detail::KernelFactory<KernelFunctor, std::decay_t<ConstructorParameters>...>(std::forward<ConstructorParameters>(constructorParameters)...)),
detail::FunctionSchemaInferer<KernelFunctor>()()
);
}
@ -192,13 +192,13 @@ public:
*/
template<class KernelFunctor, class... ConstructorParameters>
// enable_if: only enable it if KernelFunctor is actually a functor
guts::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> catchAllKernel(ConstructorParameters&&... constructorParameters) && {
std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> catchAllKernel(ConstructorParameters&&... constructorParameters) && {
static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
return std::move(*this).kernel(
c10::nullopt,
KernelFunction::makeFromUnboxedFunctorFactory<KernelFunctor>(detail::KernelFactory<KernelFunctor, guts::decay_t<ConstructorParameters>...>(std::forward<ConstructorParameters>(constructorParameters)...)),
KernelFunction::makeFromUnboxedFunctorFactory<KernelFunctor>(detail::KernelFactory<KernelFunctor, std::decay_t<ConstructorParameters>...>(std::forward<ConstructorParameters>(constructorParameters)...)),
detail::FunctionSchemaInferer<KernelFunctor>()()
);
}
@ -219,7 +219,7 @@ public:
*/
template<class FuncType, FuncType* kernel_func>
// enable_if: only enable it if FuncType is actually a function
guts::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(TensorTypeId dispatch_key) && {
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(TensorTypeId dispatch_key) && {
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
@ -247,7 +247,7 @@ public:
*/
template<class FuncType, FuncType* kernel_func>
// enable_if: only enable it if FuncType is actually a function
guts::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel() && {
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel() && {
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
@ -261,7 +261,7 @@ public:
template<class FuncType>
// enable_if: only enable it if FuncType is actually a function
guts::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(TensorTypeId dispatch_key, FuncType* kernel_func) && {
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(TensorTypeId dispatch_key, FuncType* kernel_func) && {
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");
@ -269,13 +269,13 @@ public:
std::move(dispatch_key),
KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
// TODO Do schema inference without relying on WrapKernelFunction
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<guts::decay_t<FuncType>>>()()
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<std::decay_t<FuncType>>>()()
);
}
template<class FuncType>
// enable_if: only enable it if FuncType is actually a function
guts::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel(FuncType* kernel_func) && {
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel(FuncType* kernel_func) && {
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");
@ -283,14 +283,14 @@ public:
c10::nullopt,
KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
// TODO Do schema inference without relying on WrapKernelFunction
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<guts::decay_t<FuncType>>>()()
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<std::decay_t<FuncType>>>()()
);
}
// TODO Remove impl_unboxedOnlyKernel once all of aten can generate boxed kernels
template<class FuncType, FuncType* kernel_func>
// enable_if: only enable it if FuncType is actually a function
guts::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> impl_unboxedOnlyKernel(TensorTypeId dispatch_key) && {
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> impl_unboxedOnlyKernel(TensorTypeId dispatch_key) && {
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
@ -304,7 +304,7 @@ public:
// TODO Remove impl_unboxedOnlyCatchAllKernel once all of aten can generate boxed kernels
template<class FuncType, FuncType* kernel_func>
// enable_if: only enable it if FuncType is actually a function
guts::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> impl_unboxedOnlyCatchAllKernel() && {
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> impl_unboxedOnlyCatchAllKernel() && {
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
@ -333,11 +333,11 @@ public:
*/
template<class Lambda>
// enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
guts::enable_if_t<
guts::is_functor<guts::decay_t<Lambda>>::value
&& !std::is_same<typename guts::infer_function_traits_t<guts::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
std::enable_if_t<
guts::is_functor<std::decay_t<Lambda>>::value
&& !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
Options&&> kernel(TensorTypeId dispatch_key, Lambda&& functor) && {
static_assert(!std::is_base_of<OperatorKernel, guts::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");
static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");
// We don't support stateful lambdas (i.e. lambdas with a capture), because their
// behavior would be nonobvious. A functor kernel with cache gets a new instance of
@ -345,13 +345,13 @@ public:
// A lambda with a capture would be global and share its capture between all kernel lookups.
// So, instead of making users having to think about it (including the thread-safety
// issues this causes), let's just forbid stateful lambdas altogether.
static_assert(guts::is_stateless_lambda<guts::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");
static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");
return std::move(*this).kernel(
std::move(dispatch_key),
KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(functor)),
// TODO Do schema inference without relying on WrapRuntimeKernelFunctor
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<guts::decay_t<Lambda>>>()()
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<std::decay_t<Lambda>>>()()
);
}
@ -373,11 +373,11 @@ public:
*/
template<class Lambda>
// enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
guts::enable_if_t<
guts::is_functor<guts::decay_t<Lambda>>::value
&& !std::is_same<typename guts::infer_function_traits_t<guts::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
std::enable_if_t<
guts::is_functor<std::decay_t<Lambda>>::value
&& !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
Options&&> catchAllKernel(Lambda&& lambda) && {
static_assert(!std::is_base_of<OperatorKernel, guts::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");
static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");
// We don't support stateful lambdas (i.e. lambdas with a capture), because their
// behavior would be nonobvious.
@ -385,13 +385,13 @@ public:
// This would be a likely source for unexpected race conditions, so we forbid it.
// If a kernel really needs global state, they can just have regular global state
// in their .cpp file next to the kernel lambda.
static_assert(guts::is_stateless_lambda<guts::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");
static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");
return std::move(*this).kernel(
c10::nullopt,
KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(lambda)),
// TODO Do schema inference without relying on WrapRuntimeKernelFunctor
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<guts::decay_t<Lambda>>>()()
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<std::decay_t<Lambda>>>()()
);
}
@ -511,14 +511,14 @@ public:
*/
template<class FuncType>
// enable_if: only enable it if FuncType is actually a function, but not a stack based BoxedKernelFunction.
guts::enable_if_t<guts::is_function_type<FuncType>::value && !std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, RegisterOperators&&>
std::enable_if_t<guts::is_function_type<FuncType>::value && !std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, RegisterOperators&&>
op(const std::string& schemaOrName, FuncType* func, Options&& options = RegisterOperators::options()) && {
constexpr bool AllowLegacyTypes = true;
return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
c10::nullopt,
KernelFunction::makeFromUnboxedRuntimeFunction<AllowLegacyTypes>(func),
// TODO Do schema inference without relying on WrapRuntimeKernelFunctor
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<guts::decay_t<FuncType>>>()()
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<std::decay_t<FuncType>>>()()
));
}
@ -539,7 +539,7 @@ public:
*/
template<class Lambda>
// enable_if: only enable it if Lambda is actually a stateless lambda
guts::enable_if_t<guts::is_functor<Lambda>::value && guts::is_stateless_lambda<guts::decay_t<Lambda>>::value, RegisterOperators&&>
std::enable_if_t<guts::is_functor<Lambda>::value && guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
@ -548,14 +548,14 @@ public:
c10::nullopt,
KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
// TODO Do schema inference without relying on WrapRuntimeKernelFunctor
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<guts::decay_t<Lambda>>>()()
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<std::decay_t<Lambda>>>()()
));
}
template<class Lambda>
C10_DEPRECATED_MESSAGE("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.")
// enable_if: only enable it if Lambda is actually a functor but not a stateless lambda
guts::enable_if_t<guts::is_functor<Lambda>::value && !guts::is_stateless_lambda<guts::decay_t<Lambda>>::value, RegisterOperators&&>
std::enable_if_t<guts::is_functor<Lambda>::value && !guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
@ -564,7 +564,7 @@ public:
c10::nullopt,
KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
// TODO Do schema inference without relying on WrapRuntimeKernelFunctor
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<guts::decay_t<Lambda>>>()()
detail::FunctionSchemaInferer<detail::WrapRuntimeKernelFunctor<std::decay_t<Lambda>>>()()
));
}
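
As the comments in this header explain, only capture-free lambdas are accepted, because captured state would be shared across all kernel lookups. A minimal usage sketch of the lambda-based op() overload shown above (hypothetical operator name and schema):

    #include <ATen/ATen.h>
    #include <ATen/core/op_registration/op_registration.h>

    // Hypothetical registration: a stateless lambda is accepted by the
    // op(schemaOrName, lambda) overload; a lambda with captures would be rejected.
    static auto registry = c10::RegisterOperators().op(
        "my_ns::my_relu(Tensor input) -> Tensor",
        [] (at::Tensor input) { return input.relu(); });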

View File

@ -86,13 +86,13 @@ DEFINE_FLOAT_INT_CAST(int16_t, float, s)
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<int64_t scale = 1>
c10::guts::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vec256<double>>
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vec256<double>>
inline gather(const double* base_addr, const Vec256<int64_t>& vindex) {
return _mm256_i64gather_pd(base_addr, vindex, scale);
}
template<int64_t scale = 1>
c10::guts::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vec256<float>>
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vec256<float>>
inline gather(const float* base_addr, const Vec256<int32_t>& vindex) {
return _mm256_i32gather_ps(base_addr, vindex, scale);
}
@ -100,14 +100,14 @@ inline gather(const float* base_addr, const Vec256<int32_t>& vindex) {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<int64_t scale = 1>
c10::guts::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vec256<double>>
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vec256<double>>
inline mask_gather(const Vec256<double>& src, const double* base_addr,
const Vec256<int64_t>& vindex, const Vec256<double>& mask) {
return _mm256_mask_i64gather_pd(src, base_addr, vindex, mask, scale);
}
template<int64_t scale = 1>
c10::guts::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vec256<float>>
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vec256<float>>
inline mask_gather(const Vec256<float>& src, const float* base_addr,
const Vec256<int32_t>& vindex, const Vec256<float>& mask) {
return _mm256_mask_i32gather_ps(src, base_addr, vindex, mask, scale);

View File

@ -95,7 +95,7 @@ public:
}
}
template<typename... Args,
typename = c10::guts::enable_if_t<(sizeof...(Args) == size())>>
typename = std::enable_if_t<(sizeof...(Args) == size())>>
Vec256(Args... vals) {
values = { vals... };
}
@ -608,7 +608,7 @@ inline T fmadd(const T& a, const T& b, const T& c) {
}
template <int64_t scale = 1, typename T = void>
c10::guts::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vec256<T>>
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vec256<T>>
inline gather(T const* base_addr, const Vec256<int_same_size_t<T>>& vindex) {
static constexpr int size = Vec256<T>::size();
int_same_size_t<T> index_arr[size];
@ -621,7 +621,7 @@ inline gather(T const* base_addr, const Vec256<int_same_size_t<T>>& vindex) {
}
template <int64_t scale = 1, typename T = void>
c10::guts::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vec256<T>>
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vec256<T>>
inline mask_gather(const Vec256<T>& src, T const* base_addr,
const Vec256<int_same_size_t<T>>& vindex, Vec256<T>& mask) {
static constexpr int size = Vec256<T>::size();
@ -687,7 +687,7 @@ inline Vec256<int_same_size_t<T>> convert_to_int_of_same_size(const Vec256<T>& s
// returns: Vec256<float> = {a0, a1, a2, a3, a4, a5, a6, a7}
// Vec256<float> = {b0, b1, b2, b3, b4, b5, b6, b7}
template <typename T>
inline c10::guts::enable_if_t<Vec256<T>::size() % 2 == 0, std::pair<Vec256<T>, Vec256<T>>>
inline std::enable_if_t<Vec256<T>::size() % 2 == 0, std::pair<Vec256<T>, Vec256<T>>>
deinterleave2(const Vec256<T>& a, const Vec256<T>& b) {
static constexpr int size = Vec256<T>::size();
static constexpr int half_size = size / 2;
@ -713,7 +713,7 @@ deinterleave2(const Vec256<T>& a, const Vec256<T>& b) {
// returns: Vec256<float> = {a0, b0, a1, b1, a2, b2, a3, b3}
// Vec256<float> = {a4, b4, a5, b5, a6, b6, a7, b7}
template <typename T>
inline c10::guts::enable_if_t<Vec256<T>::size() % 2 == 0, std::pair<Vec256<T>, Vec256<T>>>
inline std::enable_if_t<Vec256<T>::size() % 2 == 0, std::pair<Vec256<T>, Vec256<T>>>
interleave2(const Vec256<T>& a, const Vec256<T>& b) {
static constexpr int size = Vec256<T>::size();
static constexpr int half_size = size / 2;

View File

@ -269,7 +269,7 @@ Tensor fbgemm_pack_quantized_matrix(const Tensor& weight) {
const int64_t N = weight.size(0);
const Tensor weight_contig = weight.contiguous();
const int8_t* weight_ptr = weight_contig.data_ptr<int8_t>();
auto ptr = guts::make_unique<fbgemm::PackBMatrix<int8_t>>(
auto ptr = std::make_unique<fbgemm::PackBMatrix<int8_t>>(
/*trans=*/fbgemm::matrix_op_t::Transpose,
/*nRow=*/K,
/*nCol=*/N,
@ -357,7 +357,7 @@ Tensor fbgemm_pack_gemm_matrix_fp16(const Tensor& weight) {
// this function. This is perfectly fine if the tensors are created and freed
// within this translation unit. It might be very problematic if that tensor
// flows across dll boundaries.
auto ptr = guts::make_unique<fbgemm::PackedGemmMatrixFP16>(
auto ptr = std::make_unique<fbgemm::PackedGemmMatrixFP16>(
fbgemm::matrix_op_t::Transpose, K, N, 1, weight_contig_ptr);
return cpp_custom_type_hack::create(std::move(ptr), weight.options());
}

View File

@ -45,7 +45,7 @@ using namespace vec256;
template <typename traits, std::size_t... INDEX>
typename traits::ArgsTuple
dereference_impl(char* C10_RESTRICT data[], const int64_t* strides, int64_t i,
c10::guts::index_sequence<INDEX...>) {
std::index_sequence<INDEX...>) {
return std::make_tuple(
*(typename traits::template arg<INDEX>::type*)
(data[INDEX] + i * strides[INDEX])...);
@ -54,7 +54,7 @@ dereference_impl(char* C10_RESTRICT data[], const int64_t* strides, int64_t i,
template <typename traits>
typename traits::ArgsTuple
dereference(char* C10_RESTRICT data[], const int64_t* strides, int64_t i) {
using Indices = c10::guts::make_index_sequence<traits::arity>;
using Indices = std::make_index_sequence<traits::arity>;
return dereference_impl<traits>(data, strides, i, Indices{});
}
@ -64,7 +64,7 @@ dereference_vec_impl(char* C10_RESTRICT data[],
const typename traits::result_type& opt_scalar,
size_t S,
int64_t i,
c10::guts::index_sequence<INDEX...>) {
std::index_sequence<INDEX...>) {
using Vec = typename traits::result_type;
using scalar_t = typename Vec::value_type;
return std::make_tuple(
@ -76,7 +76,7 @@ dereference_vec_impl(char* C10_RESTRICT data[],
template <typename traits>
typename traits::ArgsTuple
dereference_vec(char* C10_RESTRICT data[], const typename traits::result_type& opt_scalar, size_t S, int64_t i) {
using Indices = c10::guts::make_index_sequence<traits::arity>;
using Indices = std::make_index_sequence<traits::arity>;
return dereference_vec_impl<traits>(data, opt_scalar, S, i, Indices{});
}
@ -166,7 +166,7 @@ vectorized_loop(char** C10_RESTRICT data_, int64_t n, int64_t S, func_t&& op, ve
template <typename traits, typename cb_t>
static inline void unroll_contiguous_scalar_checks(
const int64_t* strides,
c10::guts::index_sequence<>,
std::index_sequence<>,
cb_t&& cb) {
cb(0);
}
@ -174,12 +174,12 @@ static inline void unroll_contiguous_scalar_checks(
template <typename traits, typename cb_t, size_t INDEX0, size_t ...INDEX>
static inline void unroll_contiguous_scalar_checks(
const int64_t* strides,
c10::guts::index_sequence<INDEX0, INDEX...>,
std::index_sequence<INDEX0, INDEX...>,
cb_t&& cb) {
if (is_contiguous_scalar<traits, INDEX0 + 1>(strides)) {
cb(INDEX0 + 1);
} else {
unroll_contiguous_scalar_checks<traits>(strides, c10::guts::index_sequence<INDEX...>{}, std::forward<cb_t>(cb));
unroll_contiguous_scalar_checks<traits>(strides, std::index_sequence<INDEX...>{}, std::forward<cb_t>(cb));
}
}
@ -192,7 +192,7 @@ void cpu_kernel(TensorIterator& iter, func_t&& op) {
if (is_contiguous<traits>(strides)) {
basic_loop(data, strides, 0, n, std::forward<func_t>(op));
} else {
using Indices = c10::guts::make_index_sequence<traits::arity>;
using Indices = std::make_index_sequence<traits::arity>;
unroll_contiguous_scalar_checks<traits>(strides, Indices{}, [&](size_t _idx) {
basic_loop(data, strides, 0, n, std::forward<func_t>(op));
});
@ -210,7 +210,7 @@ void cpu_kernel_vec(TensorIterator& iter, func_t&& op, vec_func_t&& vop) {
if (is_contiguous<traits>(strides)) {
return vectorized_loop(data, n, 0, std::forward<func_t>(op), std::forward<vec_func_t>(vop));
} else {
using Indices = c10::guts::make_index_sequence<traits::arity>;
using Indices = std::make_index_sequence<traits::arity>;
unroll_contiguous_scalar_checks<traits>(strides, Indices{}, [&](size_t idx) {
if (idx) {
vectorized_loop(data, n, idx, std::forward<func_t>(op), std::forward<vec_func_t>(vop));
@ -233,7 +233,7 @@ void cpu_serial_kernel(TensorIterator& iter, func_t&& op) {
if (is_contiguous<traits>(strides)) {
basic_loop(data, strides, 0, n, std::forward<func_t>(op));
} else {
using Indices = c10::guts::make_index_sequence<traits::arity>;
using Indices = std::make_index_sequence<traits::arity>;
unroll_contiguous_scalar_checks<traits>(strides, Indices{}, [&](size_t _idx) {
basic_loop(data, strides, 0, n, std::forward<func_t>(op));
});

View File

@ -147,7 +147,7 @@ static void set_results(const std::tuple<res_t...>& result, const TensorIterator
}
template <typename T, typename... Args>
struct all_same : c10::guts::conjunction<
struct all_same : guts::conjunction<
std::is_same<T, Args>...
> {};

View File

@ -106,28 +106,28 @@ static void launch_kernel(int64_t N, const func_t& f) {
template <typename traits, typename func_t, typename index_t, size_t... INDEX>
C10_HOST_DEVICE typename traits::result_type
invoke_impl(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], int i,
c10::guts::index_sequence<INDEX...>) {
std::index_sequence<INDEX...>) {
return f(*(typename traits::template arg<INDEX>::type*)(data[INDEX] + i * strides[INDEX])...);
}
template <typename func_t, typename index_t, typename traits = function_traits<func_t>>
C10_HOST_DEVICE typename traits::result_type
invoke(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], int i) {
using Indices = c10::guts::make_index_sequence<traits::arity>;
using Indices = std::make_index_sequence<traits::arity>;
return invoke_impl<traits>(f, data, strides, i, Indices{});
}
template <typename traits, typename func_t, typename index_t, size_t... I>
C10_HOST_DEVICE typename traits::result_type
invoke_impl(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], const ScalarType dtypes[], int i,
c10::guts::index_sequence<I...>) {
std::index_sequence<I...>) {
return f(c10::fetch_and_cast<typename traits::template arg<I>::type>(dtypes[I], data[I] + i * strides[I])...);
}
template <typename func_t, typename index_t, typename traits = function_traits<func_t>>
C10_HOST_DEVICE typename traits::result_type
invoke(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], const ScalarType dtypes[], int i) {
using Indices = c10::guts::make_index_sequence<traits::arity>;
using Indices = std::make_index_sequence<traits::arity>;
return invoke_impl<traits>(f, data, strides, dtypes, i, Indices{});
}
@ -246,7 +246,7 @@ void gpu_kernel_with_scalars(TensorIterator& iter, const func_t& f) {
template <typename traits, typename func_t, typename index_t, size_t... INDEX>
C10_HOST_DEVICE typename traits::result_type
invoke_with_index_impl(const func_t &f, char* const C10_RESTRICT data[], const index_t strides[], int i, int idx,
c10::guts::index_sequence<INDEX...>) {
std::index_sequence<INDEX...>) {
return f(*(typename traits::template arg<INDEX>::type*)(data[INDEX] + i * strides[INDEX])..., idx);
}
@ -254,14 +254,14 @@ template <typename func_t, typename index_t, typename traits = function_traits<f
C10_HOST_DEVICE typename traits::result_type
invoke_with_index(const func_t &f, char* const C10_RESTRICT data[], const index_t strides[], int i, int idx) {
// index at last position
using Indices = c10::guts::make_index_sequence<traits::arity-1>;
using Indices = std::make_index_sequence<traits::arity-1>;
return invoke_with_index_impl<traits>(f, data, strides, i, idx, Indices{});
}
template <typename traits, typename func_t, typename index_t, size_t... I>
C10_HOST_DEVICE typename traits::result_type
invoke_with_index_impl(const func_t &f, char* const C10_RESTRICT data[], const index_t strides[], const ScalarType dtypes[],
int i, int idx, c10::guts::index_sequence<I...>) {
int i, int idx, std::index_sequence<I...>) {
return f(c10::fetch_and_cast<typename traits::template arg<I>::type>(dtypes[I], data[I] + i * strides[I])..., idx);
}
@ -270,7 +270,7 @@ C10_HOST_DEVICE typename traits::result_type
invoke_with_index(const func_t &f, char* const C10_RESTRICT data[], const index_t strides[], const ScalarType dtypes[],
int i, int idx) {
// index at last position
using Indices = c10::guts::make_index_sequence<traits::arity-1>;
using Indices = std::make_index_sequence<traits::arity-1>;
return invoke_with_index_impl<traits>(f, data, strides, dtypes, i, idx, Indices{});
}
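
The invoke helpers above all rely on the same std::index_sequence idiom: generate one index per argument, then expand one pointer-plus-stride expression per index. A self-contained sketch of that idiom, assuming float arguments and a made-up call_with_offsets name:

#include <cstddef>
#include <utility>

// Expand one "pointer plus stride" expression per argument index.
template <class F, std::size_t... I>
float call_with_offsets(F&& f, char* const data[], const int strides[], int i,
                        std::index_sequence<I...>) {
  return f(*reinterpret_cast<const float*>(data[I] + i * strides[I])...);
}

template <std::size_t Arity, class F>
float call_with_offsets(F&& f, char* const data[], const int strides[], int i) {
  return call_with_offsets(std::forward<F>(f), data, strides, i,
                           std::make_index_sequence<Arity>{});
}

// e.g. a binary add kernel (arity 2):
//   call_with_offsets<2>([](float a, float b) { return a + b; }, data, strides, i);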

View File

@ -588,7 +588,7 @@ class QConvInt8 final : public c10::OperatorKernel {
// Update the input scale to not pack again.
pack_data.input_scale = input_scale;
pack_data.w.reset();
pack_data.w = guts::make_unique<qnnpack::PrePackConvWeights>(
pack_data.w = std::make_unique<qnnpack::PrePackConvWeights>(
conv_p,
reinterpret_cast<uint8_t*>(qnnp_w_data),
reinterpret_cast<int32_t*>(bias.data_ptr<c10::qint32>()));
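
The mechanical part of this migration is identical everywhere: c10::guts::make_unique was a C++11 stopgap, and C++14's std::make_unique is a drop-in replacement. A minimal sketch with a hypothetical Widget type:

#include <memory>
#include <vector>

struct Widget {  // hypothetical example type
  Widget(int n, float v) : values(n, v) {}
  std::vector<float> values;
};

std::unique_ptr<Widget> makeWidget() {
  // before: c10::guts::make_unique<Widget>(8, 0.5f)
  return std::make_unique<Widget>(8, 0.5f);
}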

View File

@ -187,9 +187,9 @@ class QConvPackWeightInt8 final : public c10::OperatorKernel {
bias_contig = bias->contiguous();
}
auto ret_ptr = guts::make_unique<PackedConvWeight<kSpatialDim>>(
auto ret_ptr = std::make_unique<PackedConvWeight<kSpatialDim>>(
PackedConvWeight<kSpatialDim>{
guts::make_unique<fbgemm::PackWeightsForConv<kSpatialDim>>(
std::make_unique<fbgemm::PackWeightsForConv<kSpatialDim>>(
conv_p, weight_data_int8),
bias_contig,
col_offsets,
@ -298,7 +298,7 @@ class QConvPackWeightInt8 final : public c10::OperatorKernel {
// during the first invocation of operator run. Refer to qconv.cpp for more
// details. TODO Update to actually call pre-pack here once bias is removed
// from pre-packing step.
auto wt_ptr = guts::make_unique<PackedConvWeightsQnnp>(
auto wt_ptr = std::make_unique<PackedConvWeightsQnnp>(
PackedConvWeightsQnnp{nullptr, /* PrePackConvWeights */
weight_contig, /* int8_t weight */
bias_fp32.contiguous(), /* fp32 bias */

View File

@ -259,7 +259,7 @@ class QLinearInt8 final : public torch::OperatorKernel {
// Update the input scale to not pack again.
pack_ptr.input_scale = input_scale;
pack_ptr.w.reset();
pack_ptr.w = guts::make_unique<qnnpack::PackBMatrix>(
pack_ptr.w = std::make_unique<qnnpack::PackBMatrix>(
cols_w /* input_channels */,
rows_w /* output_channels */,
kernel_zp,

View File

@ -104,8 +104,8 @@ class QLinearPackWeightInt8 final : public c10::OperatorKernel {
"bias should have N elements: " + std::to_string(N));
bias_contig = bias->contiguous();
}
auto ret_ptr = guts::make_unique<PackedLinearWeight>(PackedLinearWeight{
guts::make_unique<fbgemm::PackBMatrix<int8_t>>(
auto ret_ptr = std::make_unique<PackedLinearWeight>(PackedLinearWeight{
std::make_unique<fbgemm::PackBMatrix<int8_t>>(
/*trans=*/fbgemm::matrix_op_t::Transpose,
/*nRow=*/K,
/*nCol=*/N,
@ -173,7 +173,7 @@ class QLinearPackWeightInt8 final : public c10::OperatorKernel {
// during the first invocation of operator run. Refer to qlinear.cpp for more
// details. TODO Update to actually call pre-pack here once bias is removed
// from pre-packing step.
auto wt_ptr = guts::make_unique<PackedLinearWeightsQnnp>(
auto wt_ptr = std::make_unique<PackedLinearWeightsQnnp>(
PackedLinearWeightsQnnp{nullptr,
weight_contig, /* int8_t weight */
bias_fp32.contiguous(), /* fp32 bias */

View File

@ -470,9 +470,9 @@ class CAFFE2_API Tensor {
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename T>
using hook_return_void_t = c10::guts::enable_if_t<std::is_void<typename std::result_of<T&(Tensor)>::type>::value, unsigned>;
using hook_return_void_t = std::enable_if_t<std::is_void<typename std::result_of<T&(Tensor)>::type>::value, unsigned>;
template <typename T>
using hook_return_var_t = c10::guts::enable_if_t<std::is_same<typename std::result_of<T&(Tensor)>::type, Tensor>::value, unsigned>;
using hook_return_var_t = std::enable_if_t<std::is_same<typename std::result_of<T&(Tensor)>::type, Tensor>::value, unsigned>;
// Returns the index of the hook in the list which can be used to remove hook
// Register a hook with no return value

View File

@ -12,7 +12,7 @@ using at::NamedTensorMeta;
using at::Symbol;
using at::namedinference::TensorName;
using at::namedinference::TensorNames;
using c10::guts::make_unique;
using std::make_unique;
TEST(NamedTensorTest, defaultMetadata) {
int num_names = 4;

View File

@ -303,7 +303,7 @@ void THTensor_(getRNGState)(at::Generator *_generator, THTensor *self)
THGeneratorStateNew* rng_state = (THGeneratorStateNew*)self->data<scalar_t>();
// accumulate generator data to be copied into byte tensor
auto accum_state = c10::guts::make_unique<THGeneratorStateNew>();
auto accum_state = std::make_unique<THGeneratorStateNew>();
auto cast_generator = at::check_generator<at::CPUGenerator>(_generator);
auto rng_data = cast_generator->engine().data();
accum_state->legacy_pod.the_initial_seed = rng_data.seed_;

View File

@ -51,10 +51,10 @@ defined(TARGET_IPHONE_SIMULATOR)
void observerConfig() {
caffe2::ClearGlobalNetObservers();
caffe2::AddGlobalNetObserverCreator([](caffe2::NetBase* subject) {
return caffe2::make_unique<caffe2::PerfNetObserver>(subject);
return std::make_unique<caffe2::PerfNetObserver>(subject);
});
caffe2::ObserverConfig::setReporter(
caffe2::make_unique<caffe2::NetObserverReporterPrint>());
std::make_unique<caffe2::NetObserverReporterPrint>());
}
bool backendCudaSet(const string& backend) {

View File

@ -438,10 +438,10 @@ TensorProtos convertValues(std::string& file_name) {
void observerConfig() {
caffe2::ClearGlobalNetObservers();
caffe2::AddGlobalNetObserverCreator([](caffe2::NetBase* subject) {
return caffe2::make_unique<caffe2::PerfNetObserver>(subject);
return std::make_unique<caffe2::PerfNetObserver>(subject);
});
caffe2::ObserverConfig::setReporter(
caffe2::make_unique<caffe2::NetObserverReporterPrint>());
std::make_unique<caffe2::NetObserverReporterPrint>());
}
bool backendCudaSet(const string& backend) {

View File

@ -40,7 +40,7 @@ void run() {
// Can be large due to constant fills
VLOG(1) << "Init net: " << ProtoDebugString(init_net);
LOG(INFO) << "Predict net: " << ProtoDebugString(predict_net);
auto predictor = caffe2::make_unique<Predictor>(init_net, predict_net);
auto predictor = std::make_unique<Predictor>(init_net, predict_net);
LOG(INFO) << "Checking that a null forward-pass works";
Predictor::TensorList inputVec, outputVec;
(*predictor)(inputVec, &outputVec);

View File

@ -116,7 +116,7 @@ struct C10_API TensorOptions {
/// Constructs a `TensorOptions` object with the given device.
/// See NOTE [ TensorOptions Constructors ] on why this is templatized.
template<typename T,
typename = c10::guts::enable_if_t<std::is_same<c10::guts::decay_t<T>, Device>::value>>
typename = std::enable_if_t<std::is_same<std::decay_t<T>, Device>::value>>
/* implicit */ TensorOptions(T&& device) : TensorOptions() {
this->set_device(std::forward<T>(device));
}
@ -130,7 +130,7 @@ struct C10_API TensorOptions {
/// way to detect them. So we have this one that allows explicit
/// constructors too.
template <typename... Args,
typename = c10::guts::enable_if_t<std::is_constructible<Device, Args&&...>::value>>
typename = std::enable_if_t<std::is_constructible<Device, Args&&...>::value>>
/* implicit */ TensorOptions(Args&&... args)
: TensorOptions(Device(std::forward<Args>(args)...)) {}
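
Both constructors use std::enable_if_t so that an overload silently drops out of resolution when its constraint fails. The same decay_t / is_constructible pattern, sketched with a stand-in FakeDevice type (an assumption for the example, not the real Device):

#include <string>
#include <type_traits>
#include <utility>

struct FakeDevice {  // stand-in for the real Device
  explicit FakeDevice(std::string s) : spec(std::move(s)) {}
  std::string spec;
};

struct Options {
  Options() = default;

  // Enabled only when T decays to FakeDevice; partial ordering prefers this
  // overload when a FakeDevice is passed directly.
  template <typename T,
            typename = std::enable_if_t<std::is_same<std::decay_t<T>, FakeDevice>::value>>
  /* implicit */ Options(T&& device) : dev(std::forward<T>(device)) {}

  // Enabled only when the arguments can construct a FakeDevice (e.g. a string).
  template <typename... Args,
            typename = std::enable_if_t<std::is_constructible<FakeDevice, Args&&...>::value>>
  /* implicit */ Options(Args&&... args)
      : Options(FakeDevice(std::forward<Args>(args)...)) {}

  FakeDevice dev{"cpu"};
};

// Options a(FakeDevice{"cuda:0"});    // first constructor
// Options b(std::string("cuda:1"));   // second constructor builds the device first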

View File

@ -24,7 +24,7 @@ private:
int val;
};
template<class T> using is_my_movable_only_class = std::is_same<MovableOnly, remove_cv_t<remove_reference_t<T>>>;
template<class T> using is_my_movable_only_class = std::is_same<MovableOnly, std::remove_cv_t<std::remove_reference_t<T>>>;
struct CopyCounting {
int move_count;
@ -45,7 +45,7 @@ struct CopyCounting {
}
};
template<class T> using is_my_copy_counting_class = std::is_same<CopyCounting, remove_cv_t<remove_reference_t<T>>>;
template<class T> using is_my_copy_counting_class = std::is_same<CopyCounting, std::remove_cv_t<std::remove_reference_t<T>>>;
namespace test_extract_arg_by_filtered_index {
class MyClass {};

View File

@ -61,9 +61,9 @@ namespace test_true_for_each_type {
namespace test_map {
class MyClass {};
static_assert(std::is_same<typelist<>, map_t<c10::guts::add_lvalue_reference_t, typelist<>>>::value, "");
static_assert(std::is_same<typelist<int&>, map_t<c10::guts::add_lvalue_reference_t, typelist<int>>>::value, "");
static_assert(std::is_same<typelist<int&, double&, const MyClass&>, map_t<c10::guts::add_lvalue_reference_t, typelist<int, double, const MyClass>>>::value, "");
static_assert(std::is_same<typelist<>, map_t<std::add_lvalue_reference_t, typelist<>>>::value, "");
static_assert(std::is_same<typelist<int&>, map_t<std::add_lvalue_reference_t, typelist<int>>>::value, "");
static_assert(std::is_same<typelist<int&, double&, const MyClass&>, map_t<std::add_lvalue_reference_t, typelist<int, double, const MyClass>>>::value, "");
}
namespace test_head {

View File

@ -203,7 +203,7 @@ public:
#if defined(__cpp_deduction_guides) && __cpp_deduction_guides >= 201606
template<typename _Tp, typename... _Up>
array(_Tp, _Up...) ->
array<enable_if_t<(std::is_same<_Tp, _Up>::value && ...), _Tp>, 1 + sizeof...(_Up)>;
array<std::enable_if_t<(std::is_same<_Tp, _Up>::value && ...), _Tp>, 1 + sizeof...(_Up)>;
#endif
// Array comparisons.
@ -260,7 +260,7 @@ template<std::size_t _Int, typename _Tp, std::size_t _Nm>
constexpr _Tp&& get(array<_Tp, _Nm>&& __arr) noexcept
{
static_assert(_Int < _Nm, "array index is within bounds");
return guts::move(get<_Int>(__arr));
return std::move(get<_Int>(__arr));
}
template<std::size_t _Int, typename _Tp, std::size_t _Nm>
@ -279,7 +279,7 @@ constexpr const _Tp& get(const array<_Tp, _Nm>& __arr) noexcept
*/
namespace detail {
template<class T, size_t N, size_t... INDEX>
constexpr inline array<T, N-1> tail_(const array<T, N>& arg, guts::index_sequence<INDEX...>) {
constexpr inline array<T, N-1> tail_(const array<T, N>& arg, std::index_sequence<INDEX...>) {
static_assert(sizeof...(INDEX) == N-1, "invariant");
return {{get<INDEX+1>(arg)...}};
}
@ -287,18 +287,18 @@ constexpr inline array<T, N-1> tail_(const array<T, N>& arg, guts::index_sequenc
template<class T, size_t N>
constexpr inline array<T, N-1> tail(const array<T, N>& arg) {
static_assert(N > 0, "Can only call tail() on an array with at least one element");
return detail::tail_(arg, guts::make_index_sequence<N-1>());
return detail::tail_(arg, std::make_index_sequence<N-1>());
}
namespace detail {
template<class T, size_t N, size_t... INDEX>
constexpr inline array<T, N+1> prepend_(T&& head, const array<T, N>& tail, guts::index_sequence<INDEX...>) {
return {{guts::forward<T>(head), get<INDEX>(tail)...}};
constexpr inline array<T, N+1> prepend_(T&& head, const array<T, N>& tail, std::index_sequence<INDEX...>) {
return {{std::forward<T>(head), get<INDEX>(tail)...}};
}
}
template<class T, size_t N>
constexpr inline array<T, N+1> prepend(T&& head, const array<T, N>& tail) {
return detail::prepend_(guts::forward<T>(head), tail, guts::make_index_sequence<N>());
return detail::prepend_(std::forward<T>(head), tail, std::make_index_sequence<N>());
}
/**
@ -310,14 +310,14 @@ constexpr inline array<T, N+1> prepend(T&& head, const array<T, N>& tail) {
namespace detail {
template<class T, size_t N, size_t... INDEX>
constexpr array<T, N> to_array_(const T (&arr)[N], guts::index_sequence<INDEX...>) {
constexpr array<T, N> to_array_(const T (&arr)[N], std::index_sequence<INDEX...>) {
return {{arr[INDEX]...}};
}
}
template<class T, size_t N>
constexpr array<T, N> to_array(const T (&arr)[N]) {
return detail::to_array_(arr, guts::make_index_sequence<N>());
return detail::to_array_(arr, std::make_index_sequence<N>());
}
}}

View File

@ -21,129 +21,20 @@
#endif
/*
* This header adds some polyfills with C++14 and C++17 functionality
* This header adds some polyfills with C++17 functionality
*/
namespace c10 { namespace guts {
#ifdef __cpp_lib_transformation_trait_aliases
template<bool B, class T, class F> using conditional_t = std::conditional_t<B, T, F>;
template<bool B, class T = void> using enable_if_t = std::enable_if_t<B, T>;
template<class T> using add_lvalue_reference_t = std::add_lvalue_reference_t<T>;
template<class T> using remove_reference_t = std::remove_reference_t<T>;
template<class T> using remove_cv_t = std::remove_cv_t<T>;
template<class T> using result_of_t = std::result_of_t<T>;
template<class T> using decay_t = std::decay_t<T>;
template<class T> using remove_const_t = std::remove_const_t<T>;
template<class T> using remove_pointer_t = std::remove_pointer_t<T>;
template<class... T> using common_type_t = std::common_type_t<T...>;
#else
template<bool B, class T, class F> using conditional_t = typename std::conditional<B, T, F>::type;
template<bool B, class T = void> using enable_if_t = typename std::enable_if<B, T>::type;
template<class T> using add_lvalue_reference_t = typename std::add_lvalue_reference<T>::type;
template<class T> using remove_reference_t = typename std::remove_reference<T>::type;
template<class T> using remove_cv_t = typename std::remove_cv<T>::type;
template<class T> using result_of_t = typename std::result_of<T>::type;
template<class T> using decay_t = typename std::decay<T>::type;
template<class T> using remove_const_t = typename std::remove_const<T>::type;
template<class T> using remove_pointer_t = typename std::remove_pointer<T>::type;
template<class... T> using common_type_t = typename std::common_type<T...>::type;
#endif
// C++11 doesn't have constexpr std::move / std::forward.
// Implementation taken from libc++.
template<class T>
constexpr inline guts::remove_reference_t<T>&& move(T&& t) noexcept {
return static_cast<guts::remove_reference_t<T>&&>(t);
}
template <class T>
constexpr inline T&& forward(guts::remove_reference_t<T>& t) noexcept {
return static_cast<T&&>(t);
}
template <class T>
constexpr inline T&& forward(guts::remove_reference_t<T>&& t) noexcept {
static_assert(!std::is_lvalue_reference<T>::value,
"can not forward an rvalue as an lvalue.");
return static_cast<T&&>(t);
}
#if __cplusplus >= 201402L || defined(__cpp_lib_make_unique) && __cpp_lib_make_unique >= 201304L || \
(defined(__ANDROID__) && __ANDROID__ && __cplusplus >= 201300L) || defined(_MSC_VER) && _MSC_VER >= 1900
/* using override */ using std::make_unique;
#else
// Implementation taken from folly
template <typename T, typename... Args>
typename std::enable_if<!std::is_array<T>::value, std::unique_ptr<T>>::type
make_unique(Args&&... args) {
return std::unique_ptr<T>(new T(c10::guts::forward<Args>(args)...));
}
// Allows 'make_unique<T[]>(10)'. (N3690 s20.9.1.4 p3-4)
template <typename T>
typename std::enable_if<std::is_array<T>::value, std::unique_ptr<T>>::type
make_unique(const size_t n) {
return std::unique_ptr<T>(new typename std::remove_extent<T>::type[n]());
}
// Disallows 'make_unique<T[10]>()'. (N3690 s20.9.1.4 p5)
template <typename T, typename... Args>
typename std::enable_if<std::extent<T>::value != 0, std::unique_ptr<T>>::type
make_unique(Args&&...) = delete;
#endif
template <typename Base, typename Child, typename... Args>
typename std::enable_if<!std::is_array<Base>::value && !std::is_array<Base>::value && std::is_base_of<Base, Child>::value, std::unique_ptr<Base>>::type
make_unique_base(Args&&... args) {
return std::unique_ptr<Base>(new Child(c10::guts::forward<Args>(args)...));
return std::unique_ptr<Base>(new Child(std::forward<Args>(args)...));
}
#ifdef __cpp_lib_integer_sequence
template<class T, T... Ints> using integer_sequence = std::integer_sequence<T, Ints...>;
template<std::size_t... Ints> using index_sequence = std::index_sequence<Ints...>;
template<class T, T N> using make_integer_sequence = std::make_integer_sequence<T, N>;
template<std::size_t N> using make_index_sequence = std::make_index_sequence<N>;
template<class... T> using index_sequence_for = std::index_sequence_for<T...>;
#else
template<class T, T... Ints> struct integer_sequence {
using value_type = T;
static constexpr std::size_t size() noexcept {return sizeof...(Ints);}
};
template<std::size_t... Ints> using index_sequence = integer_sequence<std::size_t, Ints...>;
namespace detail {
template<class T, std::size_t INDEX, std::size_t N, T... Ints>
struct make_integer_sequence_ {
using type = typename make_integer_sequence_<T, INDEX+1, N, Ints..., INDEX>::type;
};
template<class T, std::size_t N, T... Ints>
struct make_integer_sequence_<T, N, N, Ints...> {
using type = integer_sequence<T, Ints...>;
};
}
template<class T, T N> using make_integer_sequence = typename detail::make_integer_sequence_<T, 0, N>::type;
template<std::size_t N> using make_index_sequence = make_integer_sequence<std::size_t, N>;
static_assert(std::is_same<index_sequence<>, make_index_sequence<0>>::value, "");
static_assert(std::is_same<index_sequence<0, 1, 2>, make_index_sequence<3>>::value, "");
template<class... T> using index_sequence_for = make_index_sequence<sizeof...(T)>;
#endif
#ifdef __cpp_lib_logical_traits
@ -163,14 +54,14 @@ template<class...> struct conjunction : std::true_type { };
template<class B1> struct conjunction<B1> : B1 { };
template<class B1, class... Bn>
struct conjunction<B1, Bn...>
: conditional_t<bool(B1::value), conjunction<Bn...>, B1> {};
: std::conditional_t<bool(B1::value), conjunction<Bn...>, B1> {};
// Implementation taken from http://en.cppreference.com/w/cpp/types/disjunction
template<class...> struct disjunction : std::false_type { };
template<class B1> struct disjunction<B1> : B1 { };
template<class B1, class... Bn>
struct disjunction<B1, Bn...>
: conditional_t<bool(B1::value), B1, disjunction<Bn...>> { };
: std::conditional_t<bool(B1::value), B1, disjunction<Bn...>> { };
// Implementation taken from http://en.cppreference.com/w/cpp/types/integral_constant
template <bool B>
@ -184,6 +75,7 @@ struct negation : bool_constant<!bool(B::value)> { };
#ifdef __cpp_lib_void_t
template<class T> using void_t = std::void_t<T>;
@ -215,14 +107,14 @@ template <class F, class Tuple, std::size_t... INDEX>
#if defined(_MSC_VER)
// MSVC has a problem with the decltype() return type, but it also doesn't need it
// Also, nvcc on Windows needs C10_HOST_DEVICE here.
C10_HOST_DEVICE constexpr auto apply_impl(F&& f, Tuple&& t, guts::index_sequence<INDEX...>)
C10_HOST_DEVICE constexpr auto apply_impl(F&& f, Tuple&& t, std::index_sequence<INDEX...>)
#else
// GCC/Clang need the decltype() return type and rocm doesn't like the C10_HOST_DEVICE
constexpr auto apply_impl(F&& f, Tuple&& t, guts::index_sequence<INDEX...>)
-> decltype(c10::guts::forward<F>(f)(std::get<INDEX>(c10::guts::forward<Tuple>(t))...))
constexpr auto apply_impl(F&& f, Tuple&& t, std::index_sequence<INDEX...>)
-> decltype(std::forward<F>(f)(std::get<INDEX>(std::forward<Tuple>(t))...))
#endif
{
return c10::guts::forward<F>(f)(std::get<INDEX>(c10::guts::forward<Tuple>(t))...);
return std::forward<F>(f)(std::get<INDEX>(std::forward<Tuple>(t))...);
}
} // namespace detail
@ -231,12 +123,12 @@ template <class F, class Tuple>
C10_HOST_DEVICE // rocm doesn't like the C10_HOST_DEVICE
#endif
constexpr auto apply(F&& f, Tuple&& t) -> decltype(detail::apply_impl(
c10::guts::forward<F>(f), c10::guts::forward<Tuple>(t),
guts::make_index_sequence<std::tuple_size<guts::remove_reference_t<Tuple>>::value>{}))
std::forward<F>(f), std::forward<Tuple>(t),
std::make_index_sequence<std::tuple_size<std::remove_reference_t<Tuple>>::value>{}))
{
return detail::apply_impl(
c10::guts::forward<F>(f), c10::guts::forward<Tuple>(t),
guts::make_index_sequence<std::tuple_size<guts::remove_reference_t<Tuple>>::value>{});
std::forward<F>(f), std::forward<Tuple>(t),
std::make_index_sequence<std::tuple_size<std::remove_reference_t<Tuple>>::value>{});
}
#endif
@ -306,6 +198,7 @@ template <class T>
constexpr const T& max(const T& a, const T& b) {
return (a < b) ? b : a;
}
}}
#endif // C10_UTIL_CPP17_H_
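
The apply() kept in this header is the standard pre-C++17 fallback. Stripped of the MSVC and ROCm workarounds, the pattern reduces to the following sketch, assuming only C++14:

#include <tuple>
#include <type_traits>
#include <utility>

namespace sketch {

template <class F, class Tuple, std::size_t... I>
constexpr decltype(auto) apply_impl(F&& f, Tuple&& t, std::index_sequence<I...>) {
  return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...);
}

template <class F, class Tuple>
constexpr decltype(auto) apply(F&& f, Tuple&& t) {
  return apply_impl(
      std::forward<F>(f), std::forward<Tuple>(t),
      std::make_index_sequence<std::tuple_size<std::remove_reference_t<Tuple>>::value>{});
}

}  // namespace sketch

// sketch::apply([](int a, int b) { return a + b; }, std::make_tuple(1, 2)) == 3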

View File

@ -411,7 +411,7 @@ struct Converter<
To,
std::complex<FromV>,
typename std::enable_if<
c10::guts::negation<is_complex_t<To>>::value>::type> {
guts::negation<is_complex_t<To>>::value>::type> {
To operator()(std::complex<FromV> f) {
return static_cast<To>(f.real());
}

View File

@ -64,14 +64,14 @@ using infer_function_traits_t = typename infer_function_traits<T>::type;
namespace detail {
template<template <class> class Condition, size_t index, class Enable, class... Args> struct extract_arg_by_filtered_index_;
template<template <class> class Condition, size_t index, class Head, class... Tail>
struct extract_arg_by_filtered_index_<Condition, index, guts::enable_if_t<!Condition<Head>::value>, Head, Tail...> {
struct extract_arg_by_filtered_index_<Condition, index, std::enable_if_t<!Condition<Head>::value>, Head, Tail...> {
static auto call(Head&& /*head*/, Tail&&... tail)
-> decltype(extract_arg_by_filtered_index_<Condition, index, void, Tail...>::call(std::forward<Tail>(tail)...)) {
return extract_arg_by_filtered_index_<Condition, index, void, Tail...>::call(std::forward<Tail>(tail)...);
}
};
template<template <class> class Condition, size_t index, class Head, class... Tail>
struct extract_arg_by_filtered_index_<Condition, index, guts::enable_if_t<Condition<Head>::value && index != 0>, Head, Tail...> {
struct extract_arg_by_filtered_index_<Condition, index, std::enable_if_t<Condition<Head>::value && index != 0>, Head, Tail...> {
static auto call(Head&& /*head*/, Tail&&... tail)
-> decltype(extract_arg_by_filtered_index_<Condition, index-1, void, Tail...>::call(std::forward<Tail>(tail)...)) {
return extract_arg_by_filtered_index_<Condition, index-1, void, Tail...>::call(std::forward<Tail>(tail)...);
@ -84,7 +84,7 @@ struct extract_arg_by_filtered_index_<Condition, index, void> {
}
};
template<template <class> class Condition, size_t index, class Head, class... Tail>
struct extract_arg_by_filtered_index_<Condition, index, guts::enable_if_t<Condition<Head>::value && index == 0>, Head, Tail...> {
struct extract_arg_by_filtered_index_<Condition, index, std::enable_if_t<Condition<Head>::value && index == 0>, Head, Tail...> {
static auto call(Head&& head, Tail&&... /*tail*/)
-> decltype(std::forward<Head>(head)) {
return std::forward<Head>(head);
@ -123,24 +123,24 @@ namespace detail {
template<class ResultType, size_t num_results> struct filter_map_ {
template<template <class> class Condition, class Mapper, class... Args, size_t... INDEX>
static guts::array<ResultType, num_results> call(const Mapper& mapper, guts::index_sequence<INDEX...>, Args&&... args) {
static guts::array<ResultType, num_results> call(const Mapper& mapper, std::index_sequence<INDEX...>, Args&&... args) {
return guts::array<ResultType, num_results> { mapper(extract_arg_by_filtered_index<Condition, INDEX>(std::forward<Args>(args)...))... };
}
};
template<class ResultType> struct filter_map_<ResultType, 0> {
template<template <class> class Condition, class Mapper, class... Args, size_t... INDEX>
static guts::array<ResultType, 0> call(const Mapper& /*mapper*/, guts::index_sequence<INDEX...>, Args&&... /*args*/) {
static guts::array<ResultType, 0> call(const Mapper& /*mapper*/, std::index_sequence<INDEX...>, Args&&... /*args*/) {
return guts::array<ResultType, 0> { };
}
};
}
template<class ResultType, template <class> class Condition, class Mapper, class... Args> auto filter_map(const Mapper& mapper, Args&&... args)
-> decltype(detail::filter_map_<ResultType, typelist::count_if<Condition, typelist::typelist<Args...>>::value>::template call<Condition, Mapper, Args...>(mapper, guts::make_index_sequence<typelist::count_if<Condition, typelist::typelist<Args...>>::value>(), std::forward<Args>(args)...)) {
-> decltype(detail::filter_map_<ResultType, typelist::count_if<Condition, typelist::typelist<Args...>>::value>::template call<Condition, Mapper, Args...>(mapper, std::make_index_sequence<typelist::count_if<Condition, typelist::typelist<Args...>>::value>(), std::forward<Args>(args)...)) {
static_assert(is_type_condition<Condition>::value, "In filter_map<Result, Condition>, the Condition argument must be a condition type trait, i.e. have a static constexpr bool ::value member.");
static constexpr size_t num_results = typelist::count_if<Condition, typelist::typelist<Args...>>::value;
return detail::filter_map_<ResultType, num_results>::template call<Condition, Mapper, Args...>(mapper, guts::make_index_sequence<num_results>(), std::forward<Args>(args)...);
return detail::filter_map_<ResultType, num_results>::template call<Condition, Mapper, Args...>(mapper, std::make_index_sequence<num_results>(), std::forward<Args>(args)...);
}
}}

View File

@ -108,7 +108,7 @@ inline constexpr type_index get_type_index() noexcept {
// type index through std::integral_constant.
return type_index{std::integral_constant<
uint64_t,
detail::type_index_impl<guts::remove_cv_t<guts::decay_t<T>>>()>::value};
detail::type_index_impl<std::remove_cv_t<std::decay_t<T>>>()>::value};
#else
// There's nothing in theory preventing us from running this on device code
// except for nvcc throwing a compiler error if we enable it.

View File

@ -102,7 +102,7 @@ template<template <class> class Condition, class TypeList> struct filter final {
template<template <class> class Condition, class Head, class... Tail>
struct filter<Condition, typelist<Head, Tail...>> final {
static_assert(is_type_condition<Condition>::value, "In typelist::filter<Condition, TypeList>, the Condition argument must be a condition type trait, i.e. have a static constexpr bool ::value member.");
using type = guts::conditional_t<
using type = std::conditional_t<
Condition<Head>::value,
concat_t<typelist<Head>, typename filter<Condition, typelist<Tail...>>::type>,
typename filter<Condition, typelist<Tail...>>::type
@ -142,9 +142,9 @@ namespace detail {
template<class TypeList, class Type, class Enable = void> struct contains {};
template<class Type> struct contains<typelist<>, Type, void> : std::false_type {};
template<class Type, class Head, class... Tail>
struct contains<typelist<Head, Tail...>, Type, guts::enable_if_t<std::is_same<Head, Type>::value>> : std::true_type {};
struct contains<typelist<Head, Tail...>, Type, std::enable_if_t<std::is_same<Head, Type>::value>> : std::true_type {};
template<class Type, class Head, class... Tail>
struct contains<typelist<Head, Tail...>, Type, guts::enable_if_t<!std::is_same<Head, Type>::value>> : contains<typelist<Tail...>, Type> {};
struct contains<typelist<Head, Tail...>, Type, std::enable_if_t<!std::is_same<Head, Type>::value>> : contains<typelist<Tail...>, Type> {};
}
template<class TypeList, class Type>
using contains = typename detail::contains<TypeList, Type>::type;
@ -280,11 +280,11 @@ template<template<class> class Condition> struct find_if<typelist<>, Condition,
static_assert(false_higher_t<Condition>::value, "In typelist::find_if<Type/List, Condition>, didn't find any type fulfilling the Condition.");
};
template<class Head, class... Tail, template<class> class Condition>
struct find_if<typelist<Head, Tail...>, Condition, enable_if_t<Condition<Head>::value>> final {
struct find_if<typelist<Head, Tail...>, Condition, std::enable_if_t<Condition<Head>::value>> final {
static constexpr size_t value = 0;
};
template<class Head, class... Tail, template<class> class Condition>
struct find_if<typelist<Head, Tail...>, Condition, enable_if_t<!Condition<Head>::value>> final {
struct find_if<typelist<Head, Tail...>, Condition, std::enable_if_t<!Condition<Head>::value>> final {
static constexpr size_t value = 1 + find_if<typelist<Tail...>, Condition>::value;
};
@ -328,8 +328,8 @@ template<class TypeList> struct map_types_to_values final {
};
template<class... Types> struct map_types_to_values<typelist<Types...>> final {
template<class Func>
static std::tuple<guts::result_of_t<Func(type_<Types>)>...> call(Func&& func) {
return std::tuple<guts::result_of_t<Func(type_<Types>)>...> { std::forward<Func>(func)(type_<Types>())... };
static std::tuple<std::result_of_t<Func(type_<Types>)>...> call(Func&& func) {
return std::tuple<std::result_of_t<Func(type_<Types>)>...> { std::forward<Func>(func)(type_<Types>())... };
}
};
}

View File

@ -76,7 +76,7 @@ using strip_class_t = typename strip_class<T>::type;
template<class Functor, class Enable = void>
struct is_functor : std::false_type {};
template<class Functor>
struct is_functor<Functor, guts::enable_if_t<is_function_type<detail::strip_class_t<decltype(&Functor::operator())>>::value>> : std::true_type {};
struct is_functor<Functor, std::enable_if_t<is_function_type<detail::strip_class_t<decltype(&Functor::operator())>>::value>> : std::true_type {};
/**
@ -101,11 +101,11 @@ struct is_stateless_lambda__<LambdaType, Result (C::*)(Args...)> : std::is_conve
// case where LambdaType is not even a functor
template<class LambdaType, class Enable = void> struct is_stateless_lambda_ final : std::false_type {};
// case where LambdaType is a functor
template<class LambdaType> struct is_stateless_lambda_<LambdaType, guts::enable_if_t<is_functor<LambdaType>::value>>
template<class LambdaType> struct is_stateless_lambda_<LambdaType, std::enable_if_t<is_functor<LambdaType>::value>>
: is_stateless_lambda__<LambdaType, decltype(&LambdaType::operator())> {};
}
template<class T>
using is_stateless_lambda = detail::is_stateless_lambda_<guts::decay_t<T>>;
using is_stateless_lambda = detail::is_stateless_lambda_<std::decay_t<T>>;
@ -117,7 +117,7 @@ using is_stateless_lambda = detail::is_stateless_lambda_<guts::decay_t<T>>;
template<template<class> class C, class Enable = void>
struct is_type_condition : std::false_type {};
template<template<class> class C>
struct is_type_condition<C, guts::enable_if_t<std::is_same<bool, guts::remove_cv_t<decltype(C<int>::value)>>::value>> : std::true_type {};
struct is_type_condition<C, std::enable_if_t<std::is_same<bool, std::remove_cv_t<decltype(C<int>::value)>>::value>> : std::true_type {};
}
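
A usage sketch of the predicate-detection trait above, restated standalone so the check can be compiled in isolation (the static_assert is an example, not part of the file):

#include <type_traits>

// C<int>::value must exist and be a (possibly cv-qualified) bool.
template <template <class> class C, class Enable = void>
struct is_type_condition : std::false_type {};

template <template <class> class C>
struct is_type_condition<
    C,
    std::enable_if_t<std::is_same<bool, std::remove_cv_t<decltype(C<int>::value)>>::value>>
    : std::true_type {};

static_assert(is_type_condition<std::is_integral>::value,
              "bool-valued predicate traits qualify");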

View File

@ -19,7 +19,7 @@ class either final {
template <
class Head,
class... Tail,
c10::guts::enable_if_t<
std::enable_if_t<
std::is_constructible<Left, Head, Tail...>::value &&
!std::is_constructible<Right, Head, Tail...>::value>* = nullptr>
either(Head&& construct_left_head_arg, Tail&&... construct_left_tail_args)
@ -32,7 +32,7 @@ class either final {
template <
class Head,
class... Tail,
c10::guts::enable_if_t<
std::enable_if_t<
!std::is_constructible<Left, Head, Tail...>::value &&
std::is_constructible<Right, Head, Tail...>::value>* = nullptr>
either(Head&& construct_right_head_arg, Tail&&... construct_right_tail_args)

View File

@ -195,7 +195,7 @@ class intrusive_ptr final {
if (target_ != NullType::singleton() && --target_->refcount_ == 0) {
// justification for const_cast: release_resources is basically a destructor
// and a destructor always mutates the object, even for const objects.
const_cast<c10::guts::remove_const_t<TTarget>*>(target_)->release_resources();
const_cast<std::remove_const_t<TTarget>*>(target_)->release_resources();
// See comment above about weakcount. As long as refcount>0,
// weakcount is one larger than the actual number of weak references.

View File

@ -838,7 +838,7 @@ private:
EntryPointer initSentinel() {
// needs to be a pointer so that hash map can be used with forward declared types
sentinel_val = c10::guts::make_unique<sherwood_v3_entry<T>>();
sentinel_val = std::make_unique<sherwood_v3_entry<T>>();
sentinel = sentinel_val.get();
reset_list();
return sentinel;

View File

@ -7,6 +7,7 @@
#include <limits>
#include <stdexcept>
#include <string>
#include <algorithm>
namespace c10 {
@ -179,7 +180,7 @@ class basic_string_view final {
c10::guts::to_string(pos) +
", size: " + c10::guts::to_string(size()));
}
size_type copy_length = c10::guts::min(count, size_ - pos);
size_type copy_length = guts::min(count, size_ - pos);
for (auto iter = begin() + pos, end = iter + copy_length; iter != end;) {
*(dest++) = *(iter++);
}
@ -204,7 +205,7 @@ class basic_string_view final {
constexpr int compare(basic_string_view rhs) const noexcept {
#if __cpp_constexpr >= 201304
// if we are in C++14, write it iteratively. This is faster.
for (size_t i = 0, end = c10::guts::min(size(), rhs.size()); i < end; ++i) {
for (size_t i = 0, end = guts::min(size(), rhs.size()); i < end; ++i) {
if (at_(i) < rhs.at_(i)) {
return -1;
} else if (at_(i) > rhs.at_(i)) {
@ -377,7 +378,7 @@ class basic_string_view final {
}
if (v.size() <= size()) {
pos = c10::guts::min(size() - v.size(), pos);
pos = guts::min(size() - v.size(), pos);
do {
if (v.at_(0) == at_(pos) &&
v.substr_(1).equals_(substr_(pos + 1, v.size() - 1))) {
@ -523,7 +524,7 @@ class basic_string_view final {
constexpr basic_string_view substr_(size_type pos = 0, size_type count = npos)
const {
return basic_string_view{begin_ + pos, c10::guts::min(count, size() - pos)};
return basic_string_view{begin_ + pos, guts::min(count, size() - pos)};
}
template <class Condition>
@ -555,7 +556,7 @@ class basic_string_view final {
#if __cpp_constexpr >= 201304
// if we are in C++14, write it iteratively. This is faster.
if (size() > 0) {
pos = c10::guts::min(size() - 1, pos);
pos = guts::min(size() - 1, pos);
do {
if (condition(at_(pos))) {
return pos;

View File

@ -74,7 +74,7 @@ namespace detail {
template <class T>
class _guard_long_unique_dummy final {};
template <class T>
using _guard_long_unique = c10::guts::conditional_t<
using _guard_long_unique = std::conditional_t<
std::is_same<long, int32_t>::value || std::is_same<long, int64_t>::value,
_guard_long_unique_dummy<T>,
T>;
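
std::conditional_t here is a plain type-level if/else. Restated on its own, with dummy_wrapper as a placeholder name, it presumably guards against `long` being handled twice on platforms where it aliases int32_t or int64_t:

#include <cstdint>
#include <type_traits>

template <class T>
struct dummy_wrapper {};  // placeholder, standing in for _guard_long_unique_dummy

// When `long` already aliases int32_t or int64_t, substitute the dummy so that
// `long` is not handled a second time; otherwise keep T unchanged.
template <class T>
using guarded_long = std::conditional_t<
    std::is_same<long, std::int32_t>::value || std::is_same<long, std::int64_t>::value,
    dummy_wrapper<T>,
    T>;

// On a typical LP64 platform (long == int64_t) guarded_long<long> is dummy_wrapper<long>;
// on LLP64 Windows (32-bit long distinct from int) it stays long.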

View File

@ -177,7 +177,7 @@ inline void _PlacementNewNotDefault(void* /*ptr*/, size_t /*n*/) {
template <
typename T,
c10::guts::enable_if_t<std::is_default_constructible<T>::value>* = nullptr>
std::enable_if_t<std::is_default_constructible<T>::value>* = nullptr>
inline constexpr TypeMetaData::PlacementNew* _PickPlacementNew() {
return (std::is_fundamental<T>::value || std::is_pointer<T>::value)
? nullptr
@ -186,7 +186,7 @@ inline constexpr TypeMetaData::PlacementNew* _PickPlacementNew() {
template <
typename T,
c10::guts::enable_if_t<!std::is_default_constructible<T>::value>* = nullptr>
std::enable_if_t<!std::is_default_constructible<T>::value>* = nullptr>
inline constexpr TypeMetaData::PlacementNew* _PickPlacementNew() {
static_assert(
!std::is_fundamental<T>::value && !std::is_pointer<T>::value,
@ -208,14 +208,14 @@ inline void* _NewNotDefault() {
template <
typename T,
c10::guts::enable_if_t<std::is_default_constructible<T>::value>* = nullptr>
std::enable_if_t<std::is_default_constructible<T>::value>* = nullptr>
inline constexpr TypeMetaData::New* _PickNew() {
return &_New<T>;
}
template <
typename T,
c10::guts::enable_if_t<!std::is_default_constructible<T>::value>* = nullptr>
std::enable_if_t<!std::is_default_constructible<T>::value>* = nullptr>
inline constexpr TypeMetaData::New* _PickNew() {
return &_NewNotDefault<T>;
}
@ -244,7 +244,7 @@ inline void _CopyNotAllowed(const void* /*src*/, void* /*dst*/, size_t /*n*/) {
template <
typename T,
c10::guts::enable_if_t<std::is_copy_assignable<T>::value>* = nullptr>
std::enable_if_t<std::is_copy_assignable<T>::value>* = nullptr>
inline constexpr TypeMetaData::Copy* _PickCopy() {
return (std::is_fundamental<T>::value || std::is_pointer<T>::value)
? nullptr
@ -253,7 +253,7 @@ inline constexpr TypeMetaData::Copy* _PickCopy() {
template <
typename T,
c10::guts::enable_if_t<!std::is_copy_assignable<T>::value>* = nullptr>
std::enable_if_t<!std::is_copy_assignable<T>::value>* = nullptr>
inline constexpr TypeMetaData::Copy* _PickCopy() {
static_assert(
!std::is_fundamental<T>::value && !std::is_pointer<T>::value,

View File

@ -73,35 +73,7 @@ using std::vector;
#define NOMINMAX
#endif
// make_unique is a C++14 feature. If we don't have 14, we will emulate
// its behavior. This is copied from folly/Memory.h
#if __cplusplus >= 201402L || \
(defined __cpp_lib_make_unique && __cpp_lib_make_unique >= 201304L) || \
(defined(_MSC_VER) && _MSC_VER >= 1900)
/* using override */
using std::make_unique;
#else
template<typename T, typename... Args>
typename std::enable_if<!std::is_array<T>::value, std::unique_ptr<T>>::type
make_unique(Args&&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
// Allows 'make_unique<T[]>(10)'. (N3690 s20.9.1.4 p3-4)
template<typename T>
typename std::enable_if<std::is_array<T>::value, std::unique_ptr<T>>::type
make_unique(const size_t n) {
return std::unique_ptr<T>(new typename std::remove_extent<T>::type[n]());
}
// Disallows 'make_unique<T[10]>()'. (N3690 s20.9.1.4 p5)
template<typename T, typename... Args>
typename std::enable_if<
std::extent<T>::value != 0, std::unique_ptr<T>>::type
make_unique(Args&&...) = delete;
#endif
#if defined(__ANDROID__) && !defined(__NDK_MAJOR__)
using ::round;

View File

@ -222,7 +222,7 @@ createC10OperatorWrapper(const c10::OperatorName& op_name) {
".",
op_name.overload_name,
" with caffe2, but didn't find the c10 operator.");
return c10::guts::make_unique<C10OperatorWrapper<Context>>(
return std::make_unique<C10OperatorWrapper<Context>>(
*op_handle, op_def, ws);
};
}

View File

@ -68,7 +68,7 @@ AsyncNetBase::AsyncNetBase(
Workspace* ws)
: NetBase(net_def, ws), options_(net_def), counters_(net_def) {
operator_nodes_ = dag_utils::prepareOperatorNodes(net_def, ws);
helper_ = caffe2::make_unique<AsyncNetExecutorHelper>(this);
helper_ = std::make_unique<AsyncNetExecutorHelper>(this);
operators_.reserve(operator_nodes_.size());
for (const auto& node : operator_nodes_) {
auto op_ptr = node.operator_.get();

View File

@ -10,7 +10,7 @@ AsyncTaskFuture::AsyncTaskFuture() : completed_(false), failed_(false) {}
AsyncTaskFuture::AsyncTaskFuture(const std::vector<AsyncTaskFuture*>& futures)
: completed_(false), failed_(false) {
if (futures.size() > 1) {
parent_counter_ = caffe2::make_unique<ParentCounter>(futures.size());
parent_counter_ = std::make_unique<ParentCounter>(futures.size());
for (auto future : futures) {
future->SetCallback([this](const AsyncTaskFuture* f) {
if (f->IsFailed()) {

View File

@ -14,7 +14,7 @@ bool AsyncTaskGraph::CreateNode(
const std::vector<OperatorBase*>& ops) {
CAFFE_ENFORCE(!frozen_);
if (!nodes_.count(node_id)) {
nodes_[node_id] = caffe2::make_unique<AsyncTask>(ops);
nodes_[node_id] = std::make_unique<AsyncTask>(ops);
return true;
} else {
return false;
@ -45,7 +45,7 @@ bool AsyncTaskGraph::AddDependency(
AsyncTaskFuture* parents_future = nullptr;
if (parent_futures.size() > 1) {
edge_futures_.push_back(
caffe2::make_unique<AsyncTaskFuture>(parent_futures));
std::make_unique<AsyncTaskFuture>(parent_futures));
parents_future = edge_futures_.back().get();
} else {
CAFFE_ENFORCE_EQ(parent_futures.size(), 1);
@ -100,7 +100,7 @@ void AsyncTaskGraph::FreezeGraph() {
CAFFE_ENFORCE(!root_tasks_.empty());
CAFFE_ENFORCE(!final_futures.empty());
run_future_ = caffe2::make_unique<AsyncTaskFuture>(final_futures);
run_future_ = std::make_unique<AsyncTaskFuture>(final_futures);
frozen_ = true;
}

View File

@ -19,7 +19,7 @@ ParallelNet::ParallelNet(
CAFFE_ENFORCE_GT(
num_workers_, 0, "Expected positive number of worker threads");
helper_ = caffe2::make_unique<ParallelNetExecutorHelper>(this);
helper_ = std::make_unique<ParallelNetExecutorHelper>(this);
// initialize operators
operator_nodes_ = dag_utils::prepareOperatorNodes(net_def, ws);

View File

@ -824,7 +824,7 @@ class AsyncErrorOp final : public Operator<CPUContext> {
if (thread_) {
thread_->join();
}
thread_ = caffe2::make_unique<std::thread>([this]() {
thread_ = std::make_unique<std::thread>([this]() {
try {
std::this_thread::sleep_for(std::chrono::seconds(sleep_time_s_));
if (throw_) {

View File

@ -27,7 +27,7 @@ template <>
void DummyObserver<NetBase>::Start() {
vector<OperatorBase*> operators = subject_->GetOperators();
for (auto& op : operators) {
op->AttachObserver(caffe2::make_unique<DummyObserver<OperatorBase>>(op));
op->AttachObserver(std::make_unique<DummyObserver<OperatorBase>>(op));
}
counter.fetch_add(1000);
}

View File

@ -62,7 +62,7 @@ OperatorBase::OperatorBase(const OperatorDef& operator_def, Workspace* ws)
newstyle_outputs_(),
#endif
input_size_(operator_def.input_size()),
event_(caffe2::make_unique<Event>(device_option_)) {
event_(std::make_unique<Event>(device_option_)) {
static GlobalInitIsCalledGuard guard;
inputs_.reserve(operator_def.input_size());
for (const string& input_str : operator_def.input()) {

View File

@ -266,7 +266,7 @@ OpSchema& OpSchema::ScalarType(::caffe2::TensorProto_DataType dt) {
OpSchema& OpSchema::CostInferenceFunction(CostInferenceFunctionType function) {
cost_inference_function_ =
caffe2::make_unique<CostInferenceFunctionType>(function);
std::make_unique<CostInferenceFunctionType>(function);
return *this;
}

View File

@ -361,7 +361,7 @@ bool ExecuteStepRecursive(ExecutionStepWrapper& stepWrapper) {
std::unique_ptr<Reporter> reporter;
if (step.has_report_net() || compiledStep->reportSubsteps.size() > 0) {
reporter = caffe2::make_unique<Reporter>();
reporter = std::make_unique<Reporter>();
auto* reportNet = compiledStep->reportNet;
if (reportNet) {
VLOG(1) << "Starting reporter net";

View File

@ -257,18 +257,18 @@ void qim2col(const ConvArgs& args, const TensorCPU& XQ, const TensorCPU& WQ, Ten
std::unique_ptr<QConvState> create2b1bConvState(Workspace* ws,
const TensorCPU& W,
const TensorCPU* b) {
auto state = caffe2::make_unique<QConvState>();
auto state = std::make_unique<QConvState>();
state->XQs.resize(k2b1bXBits);
state->YQs.resize(k2b1bXBits);
for (auto i = 0; i < k2b1bXBits; ++i) {
state->XQs[i] = caffe2::make_unique<Tensor>(CPU);
state->YQs[i] = caffe2::make_unique<Tensor>(CPU);
state->XQs[i] = std::make_unique<Tensor>(CPU);
state->YQs[i] = std::make_unique<Tensor>(CPU);
}
state->WQ = caffe2::make_unique<Tensor>(CPU);
state->WQN = caffe2::make_unique<Tensor>(CPU);
state->WQL1Norm = caffe2::make_unique<Tensor>(CPU);
state->scratch = caffe2::make_unique<Tensor>(CPU);
state->scratchColBuffer = caffe2::make_unique<Tensor>(CPU);
state->WQ = std::make_unique<Tensor>(CPU);
state->WQN = std::make_unique<Tensor>(CPU);
state->WQL1Norm = std::make_unique<Tensor>(CPU);
state->scratch = std::make_unique<Tensor>(CPU);
state->scratchColBuffer = std::make_unique<Tensor>(CPU);
signQuantize(W, state->WQ.get());
filterNormalization11(*(state->WQ), state->WQN.get());
@ -289,7 +289,7 @@ std::unique_ptr<QConvState> create2b1bConvState(Workspace* ws,
#endif
};
if (b) {
state->bias = caffe2::make_unique<Tensor>(*b, CPU);
state->bias = std::make_unique<Tensor>(*b, CPU);
}
return state;
}

View File

@ -438,7 +438,7 @@ void run2b1bConvIm2ColGEMM(QConvState* state,
const size_t QK = KH * KW * divRoundUp(X.dim32(3), 8);
Y->Resize(X.dim32(0), OH, OW, OC);
if (!state->WQPacked) {
state->WQPacked = caffe2::make_unique<Tensor>(CPU);
state->WQPacked = std::make_unique<Tensor>(CPU);
qpack_tiles<kGEMMTileSize, kGEMMTileDepthBytes>(state, *(state->WQ), 1, state->WQPacked.get());
CAFFE_ENFORCE_EQ(state->WQPacked->dim32(0), divRoundUp(OC, kGEMMTileSize));
CAFFE_ENFORCE_EQ(state->WQPacked->dim32(1), divRoundUp(QK, kGEMMTileDepthBytes));

View File

@ -241,8 +241,8 @@ void ConvTest2b1b(int IC, int KH, int KW, int H, int W, int OC, int N, ConvArgs
std::vector<std::unique_ptr<TensorCPU>> XQs(k2b1bXBits);
std::vector<std::unique_ptr<TensorCPU>> YQs(k2b1bXBits);
for (auto i = 0; i < k2b1bXBits; ++i) {
XQs[i] = caffe2::make_unique<Tensor>(CPU);
YQs[i] = caffe2::make_unique<Tensor>(CPU);
XQs[i] = std::make_unique<Tensor>(CPU);
YQs[i] = std::make_unique<Tensor>(CPU);
}
Tensor WQN(CPU), WQ(CPU);
uniformQuantize2b1b(X, XQs, 0.5, 1.0);

View File

@ -16,7 +16,7 @@ class OperatorAttachingNetObserver : public ObserverBase<NetBase> {
: ObserverBase<NetBase>(subject_) {
const auto& operators = subject_->GetOperators();
for (auto* op : operators) {
auto observer = caffe2::make_unique<TOpObserver>(op, netObserver);
auto observer = std::make_unique<TOpObserver>(op, netObserver);
const auto* ob = observer.get();
op->AttachObserver(std::move(observer));
operator_observers_.push_back(ob);

View File

@ -57,7 +57,7 @@ TEST(TimeObserverTest, Test3Seconds) {
ws.CreateBlob("in");
NetDef net_def;
unique_ptr<NetBase> net(CreateNetTestHelper(&ws));
auto net_ob = caffe2::make_unique<TimeObserver>(net.get());
auto net_ob = std::make_unique<TimeObserver>(net.get());
const auto* ob = net_ob.get();
net->AttachObserver(std::move(net_ob));
net->Run();

View File

@ -7,7 +7,7 @@ namespace caffe2 { namespace onnx {
void Caffe2BackendRep::CheckInit() {
if (!predictor_) {
predictor_ = caffe2::make_unique<caffe2::Predictor>(
predictor_ = std::make_unique<caffe2::Predictor>(
makePredictorConfig(init_net_, pred_net_));
init_net_.Clear();
pred_net_.Clear();

View File

@ -176,7 +176,7 @@ class CounterDeserializer : public BlobDeserializerBase {
CAFFE_ENFORCE_EQ(
tensorProto.int64_data_size(), 1, "Unexpected size of data");
*blob->GetMutable<std::unique_ptr<Counter<int64_t>>>() =
caffe2::make_unique<Counter<int64_t>>(tensorProto.int64_data(0));
std::make_unique<Counter<int64_t>>(tensorProto.int64_data(0));
}
};
}

View File

@ -77,7 +77,7 @@ class FillerOp : public Operator<Context> {
"data type int64_t");
CAFFE_ENFORCE(input.numel() > 0);
auto* shape_data = input.template data<int64_t>();
std::unique_ptr<int64_t[]> shape_data_copy = caffe2::make_unique<int64_t[]>(input.dim32(0));
std::unique_ptr<int64_t[]> shape_data_copy = std::make_unique<int64_t[]>(input.dim32(0));
context_.template CopyToCPU<int64_t>(input.dim32(0), shape_data, shape_data_copy.get());
shape.insert(shape.end(), shape_data_copy.get(), shape_data_copy.get() + input.dim32(0));
}

View File

@ -22,7 +22,7 @@ inline float addErrorTolerance(float scale) {
inline std::unique_ptr<int8::Int8TensorCPU> q(
const std::vector<int64_t>& dims) {
auto r = caffe2::make_unique<int8::Int8TensorCPU>();
auto r = std::make_unique<int8::Int8TensorCPU>();
r->scale = 0.01;
r->zero_point = static_cast<int32_t>(std::numeric_limits<uint8_t>::max()) / 2;
ReinitializeTensor(&r->t, dims, at::dtype<uint8_t>().device(CPU));
@ -38,7 +38,7 @@ inline std::unique_ptr<int8::Int8TensorCPU> q(
inline std::unique_ptr<int8::Int8TensorCPU> biasq(
const std::vector<int64_t>& dims,
double scale) {
auto r = caffe2::make_unique<int8::Int8TensorCPU>();
auto r = std::make_unique<int8::Int8TensorCPU>();
r->scale = scale;
r->zero_point = 0;
r->t.Resize(dims);
@ -53,7 +53,7 @@ inline std::unique_ptr<int8::Int8TensorCPU> biasq(
}
inline std::unique_ptr<TensorCPU> dq(const int8::Int8TensorCPU& XQ) {
auto r = caffe2::make_unique<Tensor>(CPU);
auto r = std::make_unique<Tensor>(CPU);
r->Resize(XQ.t.sizes());
for (auto i = 0; i < r->numel(); ++i) {
r->mutable_data<float>()[i] =
@ -64,7 +64,7 @@ inline std::unique_ptr<TensorCPU> dq(const int8::Int8TensorCPU& XQ) {
}
inline std::unique_ptr<TensorCPU> biasdq(const int8::Int8TensorCPU& XQ) {
auto r = caffe2::make_unique<Tensor>(CPU);
auto r = std::make_unique<Tensor>(CPU);
r->Resize(XQ.t.sizes());
for (auto i = 0; i < r->numel(); ++i) {
r->mutable_data<float>()[i] =
@ -101,7 +101,7 @@ inline void add_input(
const string& name,
Workspace* ws) {
// auto* t = ws->CreateBlob(name)->GetMutable<TensorCPU>();
auto t = caffe2::make_unique<Tensor>(CPU);
auto t = std::make_unique<Tensor>(CPU);
t->Resize(shape);
std::copy(values.begin(), values.end(), t->mutable_data<float>());
BlobGetMutableTensor(ws->CreateBlob(name), CPU)->CopyFrom(*t);

View File

@ -39,7 +39,7 @@ class StringJoinOpTest : public testing::Test {
TEST_F(StringJoinOpTest, testString1DJoin) {
std::vector<std::string> input = {"a", "xx", "c"};
auto blob = caffe2::make_unique<Blob>();
auto blob = std::make_unique<Blob>();
auto* tensor = BlobGetMutableTensor(blob.get(), CPU);
tensor->Resize(input.size());
auto* data = tensor->template mutable_data<std::string>();
@ -59,7 +59,7 @@ TEST_F(StringJoinOpTest, testString2DJoin) {
std::vector<std::vector<std::string>> input = {{"aa", "bb", "cc"},
{"dd", "ee", "ff"}};
auto blob = caffe2::make_unique<Blob>();
auto blob = std::make_unique<Blob>();
auto* tensor = BlobGetMutableTensor(blob.get(), CPU);
tensor->Resize(input.size(), input[0].size());
auto* data = tensor->template mutable_data<std::string>();
@ -79,7 +79,7 @@ TEST_F(StringJoinOpTest, testString2DJoin) {
TEST_F(StringJoinOpTest, testFloat1DJoin) {
std::vector<float> input = {3.90f, 5.234f, 8.12f};
auto blob = caffe2::make_unique<Blob>();
auto blob = std::make_unique<Blob>();
auto* tensor = BlobGetMutableTensor(blob.get(), CPU);
tensor->Resize(input.size());
auto* data = tensor->template mutable_data<float>();
@ -99,7 +99,7 @@ TEST_F(StringJoinOpTest, testFloat2DJoin) {
std::vector<std::vector<float>> input = {{1.23f, 2.45f, 3.56f},
{4.67f, 5.90f, 6.32f}};
auto blob = caffe2::make_unique<Blob>();
auto blob = std::make_unique<Blob>();
auto* tensor = BlobGetMutableTensor(blob.get(), CPU);
tensor->Resize(input.size(), input[0].size());
auto* data = tensor->template mutable_data<float>();
@ -119,7 +119,7 @@ TEST_F(StringJoinOpTest, testFloat2DJoin) {
TEST_F(StringJoinOpTest, testLong2DJoin) {
std::vector<std::vector<int64_t>> input = {{100, 200}, {1000, 2000}};
auto blob = caffe2::make_unique<Blob>();
auto blob = std::make_unique<Blob>();
auto* tensor = BlobGetMutableTensor(blob.get(), CPU);
tensor->Resize(input.size(), input[0].size());
auto* data = tensor->template mutable_data<int64_t>();

View File

@ -264,7 +264,7 @@ void ReplaceSubgraph(
for (const auto& input : op.input()) {
if (!tensor_map.count(input)) {
tensor_map[input] =
g->createNode(caffe2::make_unique<nom::repr::Tensor>(input));
g->createNode(std::make_unique<nom::repr::Tensor>(input));
}
auto tensor_node = tensor_map[input];
@ -274,7 +274,7 @@ void ReplaceSubgraph(
for (const auto& output : op.output()) {
if (!tensor_map.count(output)) {
tensor_map[output] =
g->createNode(caffe2::make_unique<nom::repr::Tensor>(output));
g->createNode(std::make_unique<nom::repr::Tensor>(output));
}
auto tensor_node = tensor_map[output];
g->createEdge(op_node, tensor_node);

View File

@ -11,16 +11,16 @@ std::unordered_map<std::string, NNGraph::NodeRef> genTensors(
std::vector<std::string> names) {
std::unordered_map<std::string, NNGraph::NodeRef> result;
for (auto& name : names) {
result[name] = graph.createNode(caffe2::make_unique<Tensor>(name));
result[name] = graph.createNode(std::make_unique<Tensor>(name));
}
return result;
}
TEST(Basic, MatchSingleNode) {
NNGraph graph;
auto reluInput = graph.createNode(caffe2::make_unique<Tensor>("reluInput"));
auto relu = graph.createNode(caffe2::make_unique<Relu>());
auto reluOutput = graph.createNode(caffe2::make_unique<Tensor>("reluOutput"));
auto reluInput = graph.createNode(std::make_unique<Tensor>("reluInput"));
auto relu = graph.createNode(std::make_unique<Relu>());
auto reluOutput = graph.createNode(std::make_unique<Tensor>("reluOutput"));
graph.createEdge(reluInput, relu);
graph.createEdge(relu, reluOutput);
@ -41,9 +41,9 @@ TEST(Basic, MatchSingleNode) {
TEST(Basic, SyntaxError) {
NNGraph graph;
auto reluInput = graph.createNode(caffe2::make_unique<Tensor>("reluInput"));
auto relu = graph.createNode(caffe2::make_unique<Relu>());
auto reluOutput = graph.createNode(caffe2::make_unique<Tensor>("reluOutput"));
auto reluInput = graph.createNode(std::make_unique<Tensor>("reluInput"));
auto relu = graph.createNode(std::make_unique<Relu>());
auto reluOutput = graph.createNode(std::make_unique<Tensor>("reluOutput"));
graph.createEdge(reluInput, relu);
graph.createEdge(relu, reluOutput);
@ -75,10 +75,10 @@ TEST(Basic, Diamond) {
x
*/
auto tensors = genTensors(graph, {"a", "b", "c", "d", "e", "f", "x"});
auto relu1 = graph.createNode(caffe2::make_unique<Relu>());
auto relu2 = graph.createNode(caffe2::make_unique<Relu>());
auto concat = graph.createNode(caffe2::make_unique<Concat>());
auto sum = graph.createNode(caffe2::make_unique<Sum>());
auto relu1 = graph.createNode(std::make_unique<Relu>());
auto relu2 = graph.createNode(std::make_unique<Relu>());
auto concat = graph.createNode(std::make_unique<Concat>());
auto sum = graph.createNode(std::make_unique<Sum>());
graph.createEdge(tensors["a"], concat);
graph.createEdge(tensors["b"], concat);
@ -146,11 +146,11 @@ TEST(Basic, BadDiamond) {
x
*/
auto tensors = genTensors(graph, {"a", "b", "c", "d", "e", "f", "x"});
auto relu1 = graph.createNode(caffe2::make_unique<Relu>());
auto relu2 = graph.createNode(caffe2::make_unique<Relu>());
auto concat1 = graph.createNode(caffe2::make_unique<Concat>());
auto concat2 = graph.createNode(caffe2::make_unique<Concat>());
auto sum = graph.createNode(caffe2::make_unique<Sum>());
auto relu1 = graph.createNode(std::make_unique<Relu>());
auto relu2 = graph.createNode(std::make_unique<Relu>());
auto concat1 = graph.createNode(std::make_unique<Concat>());
auto concat2 = graph.createNode(std::make_unique<Concat>());
auto sum = graph.createNode(std::make_unique<Sum>());
graph.createEdge(tensors["a"], concat1);
graph.createEdge(tensors["b"], concat2);
@ -217,11 +217,11 @@ TEST(Basic, StarInputs) {
*/
auto tensors =
genTensors(graph, {"a", "b", "c", "d", "e", "f", "g", "h", "x"});
auto concat = graph.createNode(caffe2::make_unique<Concat>());
auto relu = graph.createNode(caffe2::make_unique<Relu>());
auto flat = graph.createNode(caffe2::make_unique<Flatten>());
auto fc = graph.createNode(caffe2::make_unique<FC>());
auto sum = graph.createNode(caffe2::make_unique<Sum>());
auto concat = graph.createNode(std::make_unique<Concat>());
auto relu = graph.createNode(std::make_unique<Relu>());
auto flat = graph.createNode(std::make_unique<Flatten>());
auto fc = graph.createNode(std::make_unique<FC>());
auto sum = graph.createNode(std::make_unique<Sum>());
graph.createEdge(tensors["a"], relu);
graph.createEdge(relu, tensors["e"]);
@ -307,7 +307,7 @@ TEST(Basic, StarOutputs) {
*/
auto tensors = genTensors(graph, {"a", "b", "c", "d", "e", "f"});
auto concat = graph.createNode(caffe2::make_unique<Concat>());
auto concat = graph.createNode(std::make_unique<Concat>());
graph.createEdge(tensors["a"], concat);
graph.createEdge(tensors["b"], concat);
@ -391,8 +391,8 @@ TEST(Caffe2ToNQL, Basic) {
c
*/
auto tensors = genTensors(graph, {"a", "b", "c"});
auto relu = graph.createNode(caffe2::make_unique<Relu>());
auto concat = graph.createNode(caffe2::make_unique<Concat>());
auto relu = graph.createNode(std::make_unique<Relu>());
auto concat = graph.createNode(std::make_unique<Concat>());
graph.createEdge(tensors["a"], concat);
graph.createEdge(concat, tensors["b"]);
@ -425,12 +425,12 @@ TEST(Caffe2ToNQL, TensorsNameDeduplication) {
std::unordered_map<std::string, NNGraph::NodeRef> tensors;
// Manually create tensors with the same names. NQL will have to disambiguate
// the names by adding a suffix.
tensors["a"] = graph.createNode(caffe2::make_unique<Tensor>("tensor"));
tensors["b"] = graph.createNode(caffe2::make_unique<Tensor>("tensor"));
tensors["c"] = graph.createNode(caffe2::make_unique<Tensor>("tensor"));
tensors["a"] = graph.createNode(std::make_unique<Tensor>("tensor"));
tensors["b"] = graph.createNode(std::make_unique<Tensor>("tensor"));
tensors["c"] = graph.createNode(std::make_unique<Tensor>("tensor"));
auto relu = graph.createNode(caffe2::make_unique<Relu>());
auto concat = graph.createNode(caffe2::make_unique<Concat>());
auto relu = graph.createNode(std::make_unique<Relu>());
auto concat = graph.createNode(std::make_unique<Concat>());
graph.createEdge(tensors["a"], concat);
graph.createEdge(concat, tensors["b"]);

View File

@ -459,14 +459,14 @@ bool fuseConvSum(repr::NNModule* nn, caffe2::Workspace* ws) {
auto newOutputName = repr::nn::get<repr::Tensor>(sumInputX)->getName() +
"_fusion_fix_" + std::to_string(i);
auto newInputTensor = c10::guts::make_unique<repr::Tensor>(newOutputName);
auto newInputTensor = std::make_unique<repr::Tensor>(newOutputName);
auto newInput = nn->dataFlow.createNode(
unique_dyn_cast<repr::NeuralNetData>(newInputTensor));
nn->dataFlow.replaceNode(sumInputX, newInput);
nn->dataFlow.deleteNode(sumInputX);
auto newOutputTensor = c10::guts::make_unique<repr::Tensor>(newOutputName);
auto newOutputTensor = std::make_unique<repr::Tensor>(newOutputName);
auto newOutput = nn->dataFlow.createNode(
unique_dyn_cast<repr::NeuralNetData>(newOutputTensor));
@ -627,7 +627,7 @@ bool enforceFusionInplace(repr::NNModule* nn, caffe2::Workspace* ws) {
return false;
}
auto newOutputTensor = c10::guts::make_unique<repr::Tensor>(inputName);
auto newOutputTensor = std::make_unique<repr::Tensor>(inputName);
auto newOutput = nn->dataFlow.createNode(
unique_dyn_cast<repr::NeuralNetData>(newOutputTensor));
nn->dataFlow.replaceNode(convOutput, newOutput);

View File

@ -166,9 +166,9 @@ class PredictorTest : public testing::Test {
void SetUp() override {
DeviceOption op;
op.set_random_seed(1701);
ctx_ = caffe2::make_unique<CPUContext>(op);
ctx_ = std::make_unique<CPUContext>(op);
NetDef init, run;
p_ = caffe2::make_unique<Predictor>(
p_ = std::make_unique<Predictor>(
makePredictorConfig(parseNetDef(initSpec), parseNetDef(predictSpec)));
}

View File

@ -37,7 +37,7 @@ std::unique_ptr<MetaNetDef> extractMetaNetDef(
Blob blob;
DeserializeBlob(proto, &blob);
CAFFE_ENFORCE(blob.template IsType<string>());
auto def = caffe2::make_unique<MetaNetDef>();
auto def = std::make_unique<MetaNetDef>();
CAFFE_ENFORCE(def->ParseFromString(blob.template Get<string>()));
return def;
}

View File

@ -35,7 +35,7 @@ PYBIND11_MODULE(mpi_utils, m) {
auto comm = GlobalMPIComm();
auto length = str.length();
MPI_Bcast(&length, sizeof(length), MPI_CHAR, 0, comm);
auto ptr = caffe2::make_unique<char[]>(length);
auto ptr = std::make_unique<char[]>(length);
if (MPICommRank(comm) == 0) {
memcpy(ptr.get(), str.data(), str.length());
}
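
Aside: the hunk above shows the first half of a broadcast-a-string pattern — the length is broadcast first so every rank can size its receive buffer, presumably followed (below the visible context) by a broadcast of the bytes themselves. A minimal standalone sketch of that pattern, assuming a plain MPI program rather than the caffe2 pybind wrapper shown here:

#include <mpi.h>
#include <cstring>
#include <memory>
#include <string>

// Broadcast a std::string from rank 0 to every rank in comm:
// first the length, then the raw bytes.
std::string broadcastString(const std::string& str, MPI_Comm comm) {
  int rank = 0;
  MPI_Comm_rank(comm, &rank);

  std::size_t length = str.length();
  // Ship the length as raw bytes so every rank knows how much to allocate.
  MPI_Bcast(&length, sizeof(length), MPI_CHAR, 0, comm);

  auto ptr = std::make_unique<char[]>(length);
  if (rank == 0) {
    std::memcpy(ptr.get(), str.data(), length);
  }
  // Broadcast the payload itself into every rank's buffer.
  MPI_Bcast(ptr.get(), static_cast<int>(length), MPI_CHAR, 0, comm);

  return std::string(ptr.get(), length);
}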

View File

@ -95,7 +95,7 @@ void DynamicHistogram::Add(float f) {
max_ = std::max(max_, f);
if (histogram_ == nullptr) {
histogram_ = caffe2::make_unique<Histogram>(
histogram_ = std::make_unique<Histogram>(
nbins_ * OVER_BINNING_FACTOR, min_, max_);
histogram_->Add(f);
return;
@ -136,7 +136,7 @@ void DynamicHistogram::Add(const float* f, int len) {
max_ = maximum;
if (histogram_ == nullptr) {
histogram_ = caffe2::make_unique<Histogram>(
histogram_ = std::make_unique<Histogram>(
nbins_ * OVER_BINNING_FACTOR, min_, max_);
histogram_->Add(f, len);
return;

View File

@ -25,7 +25,7 @@ class CreateBlobsQueueDBOp : public Operator<CPUContext> {
: Operator<CPUContext>(operator_def, ws) {}
bool RunOnDevice() override {
std::unique_ptr<db::DB> db = caffe2::make_unique<BlobsQueueDB>(
std::unique_ptr<db::DB> db = std::make_unique<BlobsQueueDB>(
"",
db::READ,
OperatorBase::Input<std::shared_ptr<BlobsQueue>>(0),

View File

@ -10,7 +10,7 @@ FileAdapter::FileAdapter(const std::string& file_name) {
if (!file_stream_) {
AT_ERROR("open file failed, file path: ", file_name);
}
istream_adapter_ = caffe2::make_unique<IStreamAdapter>(&file_stream_);
istream_adapter_ = std::make_unique<IStreamAdapter>(&file_stream_);
}
size_t FileAdapter::size() const {

View File

@ -51,20 +51,20 @@ size_t PyTorchStreamReader::read(uint64_t pos, char* buf, size_t n) {
}
PyTorchStreamReader::PyTorchStreamReader(const std::string& file_name)
: ar_(caffe2::make_unique<mz_zip_archive>()),
in_(caffe2::make_unique<FileAdapter>(file_name)) {
: ar_(std::make_unique<mz_zip_archive>()),
in_(std::make_unique<FileAdapter>(file_name)) {
init();
}
PyTorchStreamReader::PyTorchStreamReader(std::istream* in)
: ar_(caffe2::make_unique<mz_zip_archive>()),
in_(caffe2::make_unique<IStreamAdapter>(in)) {
: ar_(std::make_unique<mz_zip_archive>()),
in_(std::make_unique<IStreamAdapter>(in)) {
init();
}
PyTorchStreamReader::PyTorchStreamReader(
std::unique_ptr<ReadAdapterInterface> in)
: ar_(caffe2::make_unique<mz_zip_archive>()), in_(std::move(in)) {
: ar_(std::make_unique<mz_zip_archive>()), in_(std::move(in)) {
init();
}
@ -279,7 +279,7 @@ PyTorchStreamWriter::PyTorchStreamWriter(
}
void PyTorchStreamWriter::setup(const string& file_name) {
ar_ = caffe2::make_unique<mz_zip_archive>();
ar_ = std::make_unique<mz_zip_archive>();
memset(ar_.get(), 0, sizeof(mz_zip_archive));
archive_name_plus_slash_ = archive_name_ + "/"; // for writeRecord().

View File

@ -22,7 +22,7 @@ void MutexSerializer::Serialize(
void MutexDeserializer::Deserialize(const BlobProto& /* unused */, Blob* blob) {
*blob->GetMutable<std::unique_ptr<std::mutex>>() =
caffe2::make_unique<std::mutex>();
std::make_unique<std::mutex>();
}
REGISTER_CPU_OPERATOR(Iter, IterOp<CPUContext>);

View File

@ -74,7 +74,7 @@ std::unique_ptr<ThreadPool> ThreadPool::defaultThreadPool() {
}
}
LOG(INFO) << "Constructing thread pool with " << numThreads << " threads";
return caffe2::make_unique<ThreadPool>(numThreads);
return std::make_unique<ThreadPool>(numThreads);
}
ThreadPool::ThreadPool(int numThreads)

View File

@ -231,7 +231,7 @@ class alignas(kGEMMLOWPCacheLineSize) Worker {
: task_(nullptr),
state_(State::ThreadStartup),
counter_to_decrement_when_ready_(counter_to_decrement_when_ready) {
thread_ = caffe2::make_unique<std::thread>([this]() { this->ThreadFunc(); });
thread_ = std::make_unique<std::thread>([this]() { this->ThreadFunc(); });
}
~Worker() {

View File

@ -65,7 +65,7 @@ void VideoDecoder::getAudioSample(
// resample the audio data
out_samples = swr_convert(swr, &output, out_samples, input, in_samples);
auto sample_size = out_samples * c->channels * sizeof(float);
auto buffer = caffe2::make_unique<float[]>(sample_size);
auto buffer = std::make_unique<float[]>(sample_size);
memcpy(buffer.get(), output, sample_size);
av_freep(&output);

View File

@ -59,13 +59,13 @@ namespace {
bool registerGlobalPerfNetObserverCreator(int* /*pargc*/, char*** /*pargv*/) {
AddGlobalNetObserverCreator([](NetBase* subject) {
return caffe2::make_unique<PerfNetObserver>(subject);
return std::make_unique<PerfNetObserver>(subject);
});
#if !defined(C10_MOBILE)
// for aibench usage
caffe2::ObserverConfig::setReporter(
caffe2::make_unique<caffe2::NetObserverReporterPrint>());
std::make_unique<caffe2::NetObserverReporterPrint>());
caffe2::ObserverConfig::initSampleRate(
FLAGS_aiBench_netInitSampleRate,
@ -208,7 +208,7 @@ void PerfNetObserver::Start() {
const auto& operators = subject_->GetOperators();
for (auto* op : operators) {
observerMap_[op] = op->AttachObserver(
caffe2::make_unique<PerfOperatorObserver>(op, this));
std::make_unique<PerfOperatorObserver>(op, this));
}
}

View File

@ -71,7 +71,7 @@ struct TORCH_API Function {
// The enable_if check is to ensure that the user doesn't explicitly provide
// the parameter X.
template<typename X=T, typename... Args>
static auto apply(Args&&... args) -> c10::guts::enable_if_t<std::is_same<X,T>::value, forward_t<X,Args...>>;
static auto apply(Args&&... args) -> std::enable_if_t<std::is_same<X,T>::value, forward_t<X,Args...>>;
};
// Context to save information during forward that can be accessed in backward
@ -175,7 +175,7 @@ typename std::enable_if<std::is_same<T, Variable>::value, T>::type to_output_typ
template<class T>
template<typename X, typename... Args>
auto Function<T>::apply(Args&&... args) -> c10::guts::enable_if_t<std::is_same<X,T>::value, forward_t<X,Args...>> {
auto Function<T>::apply(Args&&... args) -> std::enable_if_t<std::is_same<X,T>::value, forward_t<X,Args...>> {
std::shared_ptr<CppNode<T>> node(new CppNode<T>(), deleteNode);
variable_list input_vars;
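
Aside: the comment in the hunk above describes a common SFINAE idiom — a defaulted template parameter X=T keeps the return type dependent so it is only checked when apply() is instantiated, and the enable_if rejects any attempt to override X with a different type. A minimal standalone sketch of the same idiom (hypothetical Holder type, not the torch::autograd::Function code):

#include <type_traits>
#include <utility>

template <class T>
struct Holder {
  // X defaults to T so the return type stays dependent; the enable_if
  // removes the overload if a caller explicitly supplies X != T.
  template <typename X = T, typename... Args>
  static auto apply(Args&&... args)
      -> std::enable_if_t<std::is_same<X, T>::value, T> {
    return T(std::forward<Args>(args)...);
  }
};

int main() {
  int v = Holder<int>::apply(42);       // OK: X defaults to T == int
  // Holder<int>::apply<double>(1.0);   // error: enable_if removes this overload
  return v == 42 ? 0 : 1;
}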

View File

@ -12,7 +12,7 @@ namespace autograd {
namespace profiler {
at::Tensor record_function_enter(const std::string& name) {
auto rec = at::guts::make_unique<RecordFunction>();
auto rec = std::make_unique<RecordFunction>();
// Only add new scope if profiling is enabled.
if (auto* current = RecordFunction::current()) {
AT_ASSERT(

View File

@ -47,7 +47,7 @@ at::Tensor singleton_undefined_tensor;
struct ConcreteAutogradMetaFactory : public c10::impl::AutogradMetaFactory {
std::unique_ptr<c10::AutogradMetaInterface> make() const override {
return c10::guts::make_unique<AutogradMeta>();
return std::make_unique<AutogradMeta>();
}
const at::Tensor& undefined_tensor() const override {
return singleton_undefined_tensor;
@ -66,7 +66,7 @@ namespace impl {
TORCH_CHECK(self.defined(), "cannot call materialize_autograd_meta() on undefined tensor");
auto p = self.unsafeGetTensorImpl();
if (!p->autograd_meta()) {
p->set_autograd_meta(c10::guts::make_unique<AutogradMeta>());
p->set_autograd_meta(std::make_unique<AutogradMeta>());
}
return get_autograd_meta(self);
}

View File

@ -364,7 +364,7 @@ inline Variable make_variable_view(
auto data_impl_copy = data.getIntrusivePtr()->shallow_copy_and_detach(
/*version_counter=*/0,
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
data_impl_copy->set_autograd_meta(c10::guts::make_unique<DifferentiableViewMeta>(
data_impl_copy->set_autograd_meta(std::make_unique<DifferentiableViewMeta>(
data_impl_copy.get(), std::move(base)));
return Variable(data_impl_copy);
} else {
@ -397,7 +397,7 @@ inline Variable make_variable(
auto data_impl = data.getIntrusivePtr();
data_impl->set_allow_tensor_metadata_change(allow_tensor_metadata_change);
if (requires_grad) {
data_impl->set_autograd_meta(c10::guts::make_unique<AutogradMeta>(data_impl.get(), requires_grad));
data_impl->set_autograd_meta(std::make_unique<AutogradMeta>(data_impl.get(), requires_grad));
} else {
data_impl->set_autograd_meta(nullptr);
}
@ -407,7 +407,7 @@ inline Variable make_variable(
/*version_counter=*/0,
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
if (requires_grad) {
data_impl_copy->set_autograd_meta(c10::guts::make_unique<AutogradMeta>(
data_impl_copy->set_autograd_meta(std::make_unique<AutogradMeta>(
data_impl_copy.get(), requires_grad));
} else {
data_impl_copy->set_autograd_meta(nullptr);
@ -430,7 +430,7 @@ inline Variable make_variable(
auto data_impl_copy = data.getIntrusivePtr()->shallow_copy_and_detach(
/*version_counter=*/0,
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
data_impl_copy->set_autograd_meta(c10::guts::make_unique<AutogradMeta>(
data_impl_copy->set_autograd_meta(std::make_unique<AutogradMeta>(
data_impl_copy.get(), false, std::move(gradient_edge)));
return Variable(data_impl_copy);
}

View File

@ -33,7 +33,7 @@ std::unique_ptr<CleanupAutogradContextReq> CleanupAutogradContextReq::
// convert ivalue to int and construct request
int64_t context_id = ivalue_context_id.toInt();
return c10::guts::make_unique<CleanupAutogradContextReq>(context_id);
return std::make_unique<CleanupAutogradContextReq>(context_id);
}
} // namespace autograd

View File

@ -140,7 +140,7 @@ std::unique_ptr<RpcWithAutograd> RpcWithAutograd::fromMessage(
wrappedRpc = deserializeResponse(wrappedMessage);
}
return c10::guts::make_unique<RpcWithAutograd>(
return std::make_unique<RpcWithAutograd>(
workerId,
originalMessageType,
autogradMetadata,

View File

@ -94,7 +94,7 @@ Message getMessageWithAutograd(
// Wrap the original rpc with autograd information.
AutogradMetadata autogradMetadata(
autogradContext->contextId(), autogradContainer.newAutogradMessageId());
auto rpcWithAutograd = c10::guts::make_unique<RpcWithAutograd>(
auto rpcWithAutograd = std::make_unique<RpcWithAutograd>(
RpcAgent::getDefaultRpcAgent()->getWorkerInfo().id_,
msgType,
autogradMetadata,

View File

@ -70,7 +70,7 @@ ProcessGroupAgent::ProcessGroupAgent(
std::chrono::milliseconds rpcTimeout)
: RpcAgent(
WorkerInfo(std::move(workerName), pg->getRank()),
c10::guts::make_unique<RequestCallbackImpl>(),
std::make_unique<RequestCallbackImpl>(),
rpcTimeout),
pg_(std::move(pg)),
sendCounts_(pg_->getSize()),
@ -288,7 +288,7 @@ std::shared_ptr<FutureMessage> ProcessGroupAgent::send(
// Unlike the other cases, need to add a tensor deleter, since the
// data outlives the scope of this function. It's shared_ptr<> due
// to c++11 lambda capture limitations with unique_ptr<>.
auto payload = c10::guts::make_unique<std::string>(
auto payload = std::make_unique<std::string>(
wireSerialize(message.payload(), message.tensors()));
const char* data = payload->data();
size_t len = payload->length();
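
Aside: the comment in the hunk above explains why the payload is a shared_ptr — C++11 lambdas can only capture by copy or by reference, so a move-only unique_ptr cannot be moved into the closure. A minimal sketch contrasting the C++11 workaround with C++14 init-capture (standalone example, not the ProcessGroupAgent code):

#include <cstddef>
#include <memory>
#include <string>
#include <utility>

std::size_t cpp11_style(std::unique_ptr<std::string> payload) {
  // C++11: promote the move-only unique_ptr to a shared_ptr so the
  // lambda can copy-capture a handle to the data.
  std::shared_ptr<std::string> shared = std::move(payload);
  auto task = [shared] { return shared->size(); };
  return task();
}

std::size_t cpp14_style(std::unique_ptr<std::string> payload) {
  // C++14 init-capture: move the unique_ptr straight into the closure.
  // The closure now owns the string and is itself move-only.
  auto task = [p = std::move(payload)] { return p->size(); };
  return task();
}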

View File

@ -20,7 +20,7 @@ Message PythonCall::toMessage() && {
}
std::unique_ptr<PythonCall> PythonCall::fromMessage(const Message& message) {
return c10::guts::make_unique<PythonCall>(
return std::make_unique<PythonCall>(
message.payload(), message.tensors());
}

View File

@ -87,7 +87,7 @@ std::shared_ptr<FutureMessage> sendPythonRemoteCall(
SerializedPyObj serializedPyObj,
IValue rrefId,
IValue forkId) {
auto pythonRemoteCall = c10::guts::make_unique<PythonRemoteCall>(
auto pythonRemoteCall = std::make_unique<PythonRemoteCall>(
std::move(serializedPyObj), rrefId, forkId);
// set forceGradRecording to true as even if the args does not contain any
@ -155,7 +155,7 @@ std::shared_ptr<FutureMessage> pyRpcBuiltin(
const py::kwargs& kwargs) {
Stack stack;
auto op = matchBuiltinOp(opName, args, kwargs, stack);
auto scriptCall = c10::guts::make_unique<ScriptCall>(op, std::move(stack));
auto scriptCall = std::make_unique<ScriptCall>(op, std::move(stack));
return sendMessageWithAutograd(
agent, dst, std::move(*scriptCall).toMessage());
}
@ -176,7 +176,7 @@ PyRRef pyRemoteBuiltin(
"Does not support creating RRef on self yet.");
auto userRRef = ctx.createUserRRef<IValue>(dst.id_);
auto scriptRemoteCall = c10::guts::make_unique<ScriptRemoteCall>(
auto scriptRemoteCall = std::make_unique<ScriptRemoteCall>(
op, std::move(stack), userRRef->rrefId(), userRRef->forkId());
auto fm = sendMessageWithAutograd(
@ -192,7 +192,7 @@ std::shared_ptr<FutureMessage> pyRpcPythonUdf(
const WorkerInfo& dst,
std::string& pickledPythonUDF,
std::vector<torch::Tensor>& tensors) {
auto pythonCall = c10::guts::make_unique<PythonCall>(
auto pythonCall = std::make_unique<PythonCall>(
std::vector<char>(pickledPythonUDF.begin(), pickledPythonUDF.end()),
tensors);
return sendMessageWithAutograd(

View File

@ -44,7 +44,7 @@ std::unique_ptr<PythonRemoteCall> PythonRemoteCall::fromMessage(
values.pop_back();
auto serializedPyObj = SerializedPyObj::fromIValues(std::move(values));
return c10::guts::make_unique<PythonRemoteCall>(
return std::make_unique<PythonRemoteCall>(
std::move(serializedPyObj), std::move(retRRefId), std::move(retForkId));
}

Some files were not shown because too many files have changed in this diff.