Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 12:54:11 +08:00)
[codemod] c10::optional -> std::optional (#126135)
Generated by running the following from PyTorch root:

```
find . -regex ".*\.\(cpp\|h\|cu\|hpp\|cc\|cxx\)$" | grep -v "build/" | xargs -n 50 -P 4 perl -pi -e 's/c10::optional/std::optional/'
```

`c10::optional` is just an alias for `std::optional`. This removes usages of that alias in preparation for eliminating it entirely.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/126135
Approved by: https://github.com/Skylion007, https://github.com/malfet, https://github.com/albanD, https://github.com/aaronenyeshi
Committed by: PyTorch MergeBot
Parent: b55f57b7af
Commit: ed327876f5
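Because the change is a pure alias swap, the textual substitution cannot change which type is named. A minimal sketch of why, using a hypothetical stand-in for the alias (illustration only; the real declaration lives in the c10 headers and may carry extra compatibility shims):

```cpp
#include <optional>

// Hypothetical stand-in for the c10 alias, not the actual PyTorch header.
namespace c10 {
template <class T>
using optional = std::optional<T>;  // c10::optional<T> names the same type as std::optional<T>
using std::nullopt;
}  // namespace c10

int main() {
  c10::optional<int> a = 42;  // spelled through the alias
  std::optional<int> b = a;   // same type, so this is an ordinary copy, not a conversion
  return (a == b && a != c10::nullopt) ? 0 : 1;
}
```

Since both spellings denote the same type, a `perl` rewrite like the one above only changes which name appears in the source; overload resolution and ABI are unaffected.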
@@ -81,8 +81,8 @@ inline uint64_t make64BitsFrom32Bits(uint32_t hi, uint32_t lo) {
CPUGeneratorImpl::CPUGeneratorImpl(uint64_t seed_in)
: c10::GeneratorImpl{Device(DeviceType::CPU), DispatchKeySet(c10::DispatchKey::CPU)},
engine_{seed_in},
next_float_normal_sample_{c10::optional<float>()},
next_double_normal_sample_{c10::optional<double>()} { }
next_float_normal_sample_{std::optional<float>()},
next_double_normal_sample_{std::optional<double>()} { }
/**
* Manually seeds the engine with the seed input
@@ -151,8 +151,8 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
detail::check_rng_state(new_state);
at::mt19937 engine;
auto float_normal_sample = c10::optional<float>();
auto double_normal_sample = c10::optional<double>();
auto float_normal_sample = std::optional<float>();
auto double_normal_sample = std::optional<double>();
// Construct the state of at::CPUGeneratorImpl based on input byte tensor size.
CPUGeneratorImplStateLegacy* legacy_pod{nullptr};
@@ -160,7 +160,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
if (new_state_size == size_legacy) {
legacy_pod = (CPUGeneratorImplStateLegacy*)new_state.data();
// Note that in CPUGeneratorImplStateLegacy, we didn't have float version
// of normal sample and hence we leave the c10::optional<float> as is
// of normal sample and hence we leave the std::optional<float> as is
// Update next_double_normal_sample.
// Note that CPUGeneratorImplStateLegacy stores two uniform values (normal_x, normal_y)
@@ -171,14 +171,14 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
auto r = legacy_pod->normal_rho;
auto theta = 2.0 * c10::pi<double> * legacy_pod->normal_x;
// we return the sin version of the normal sample when in caching mode
double_normal_sample = c10::optional<double>(r * ::sin(theta));
double_normal_sample = std::optional<double>(r * ::sin(theta));
}
} else if (new_state_size == size_current) {
auto rng_state = (CPUGeneratorImplState*)new_state.data();
legacy_pod = &rng_state->legacy_pod;
// update next_float_normal_sample
if (rng_state->is_next_float_normal_sample_valid) {
float_normal_sample = c10::optional<float>(rng_state->next_float_normal_sample);
float_normal_sample = std::optional<float>(rng_state->next_float_normal_sample);
}
// Update next_double_normal_sample.
@@ -186,7 +186,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
// and if it's valid in normal_is_valid. The redundant normal_x and normal_rho
// are squashed to 0.0.
if (legacy_pod->normal_is_valid) {
double_normal_sample = c10::optional<double>(legacy_pod->normal_y);
double_normal_sample = std::optional<double>(legacy_pod->normal_y);
}
} else {
AT_ERROR("Expected either a CPUGeneratorImplStateLegacy of size ", size_legacy,
@@ -283,14 +283,14 @@ uint64_t CPUGeneratorImpl::random64() {
/**
* Get the cached normal random in float
*/
c10::optional<float> CPUGeneratorImpl::next_float_normal_sample() {
std::optional<float> CPUGeneratorImpl::next_float_normal_sample() {
return next_float_normal_sample_;
}
/**
* Get the cached normal random in double
*/
c10::optional<double> CPUGeneratorImpl::next_double_normal_sample() {
std::optional<double> CPUGeneratorImpl::next_double_normal_sample() {
return next_double_normal_sample_;
}
@@ -299,7 +299,7 @@ c10::optional<double> CPUGeneratorImpl::next_double_normal_sample() {
*
* See Note [Acquire lock when using random generators]
*/
void CPUGeneratorImpl::set_next_float_normal_sample(c10::optional<float> randn) {
void CPUGeneratorImpl::set_next_float_normal_sample(std::optional<float> randn) {
next_float_normal_sample_ = randn;
}
@@ -308,7 +308,7 @@ void CPUGeneratorImpl::set_next_float_normal_sample(c10::optional<float> randn)
*
* See Note [Acquire lock when using random generators]
*/
void CPUGeneratorImpl::set_next_double_normal_sample(c10::optional<double> randn) {
void CPUGeneratorImpl::set_next_double_normal_sample(std::optional<double> randn) {
next_double_normal_sample_ = randn;
}
@@ -24,18 +24,18 @@ struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
static c10::DeviceType device_type();
uint32_t random();
uint64_t random64();
c10::optional<float> next_float_normal_sample();
c10::optional<double> next_double_normal_sample();
void set_next_float_normal_sample(c10::optional<float> randn);
void set_next_double_normal_sample(c10::optional<double> randn);
std::optional<float> next_float_normal_sample();
std::optional<double> next_double_normal_sample();
void set_next_float_normal_sample(std::optional<float> randn);
void set_next_double_normal_sample(std::optional<double> randn);
at::mt19937 engine();
void set_engine(at::mt19937 engine);
private:
CPUGeneratorImpl* clone_impl() const override;
at::mt19937 engine_;
c10::optional<float> next_float_normal_sample_;
c10::optional<double> next_double_normal_sample_;
std::optional<float> next_float_normal_sample_;
std::optional<double> next_double_normal_sample_;
};
namespace detail {
@@ -59,7 +59,7 @@ class TORCH_API Context {
}
}
const AcceleratorHooksInterface& getAcceleratorHooksInterface(
c10::optional<c10::DeviceType> opt_device_type = c10::nullopt) {
std::optional<c10::DeviceType> opt_device_type = c10::nullopt) {
c10::DeviceType device_type = opt_device_type.has_value()
? opt_device_type.value()
: at::getAccelerator(true).value();
@@ -395,7 +395,7 @@ class TORCH_API Context {
bool release_original_weights = false;
#endif
bool display_vmap_fallback_warnings_ = false;
c10::optional<at::QEngine> quantized_engine = c10::nullopt;
std::optional<at::QEngine> quantized_engine = c10::nullopt;
bool enable_sparse_tensor_invariant_checks = false;
bool allow_fp16_reduction_cpu = false;
@@ -15,7 +15,7 @@ namespace at {
// OptionalDeviceGuard guard(device_of(tensor));
/// Return the Device of a Tensor, if the Tensor is defined.
inline c10::optional<Device> device_of(const Tensor& t) {
inline std::optional<Device> device_of(const Tensor& t) {
if (t.defined()) {
return c10::make_optional(t.device());
} else {
@@ -23,14 +23,14 @@ inline c10::optional<Device> device_of(const Tensor& t) {
}
}
inline c10::optional<Device> device_of(const c10::optional<Tensor>& t) {
inline std::optional<Device> device_of(const c10::optional<Tensor>& t) {
return t.has_value() ? device_of(t.value()) : c10::nullopt;
}
/// Return the Device of a TensorList, if the list is non-empty and
/// the first Tensor is defined. (This function implicitly assumes
/// that all tensors in the list have the same device.)
inline c10::optional<Device> device_of(ITensorListRef t) {
inline std::optional<Device> device_of(ITensorListRef t) {
if (!t.empty()) {
return device_of(t.front());
} else {
@@ -163,7 +163,7 @@ TensorBase _empty_generic(
c10::Allocator* allocator,
c10::DispatchKeySet ks,
ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt) {
std::optional<c10::MemoryFormat> memory_format_opt) {
at::detail::check_size_nonnegative(size);
at::detail::raise_warning_for_complex_half(scalar_type);
caffe2::TypeMeta dtype = scalarTypeToTypeMeta(scalar_type);
@@ -197,7 +197,7 @@ TensorBase empty_generic(
c10::Allocator* allocator,
c10::DispatchKeySet ks,
ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt) {
std::optional<c10::MemoryFormat> memory_format_opt) {
return _empty_generic(size, allocator, ks, scalar_type, memory_format_opt);
}
@@ -206,7 +206,7 @@ TensorBase empty_generic_symint(
c10::Allocator* allocator,
c10::DispatchKeySet ks,
ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt) {
std::optional<c10::MemoryFormat> memory_format_opt) {
return _empty_generic(size, allocator, ks, scalar_type, memory_format_opt);
}
@@ -252,7 +252,7 @@ TensorBase empty_strided_symint_generic(
}
TensorBase empty_cpu(IntArrayRef size, ScalarType dtype, bool pin_memory,
c10::optional<c10::MemoryFormat> memory_format_opt) {
std::optional<c10::MemoryFormat> memory_format_opt) {
auto allocator = GetCPUAllocatorMaybePinned(pin_memory);
constexpr c10::DispatchKeySet cpu_ks(c10::DispatchKey::CPU);
return empty_generic(size, allocator, cpu_ks, dtype, memory_format_opt);
@@ -260,11 +260,11 @@ TensorBase empty_cpu(IntArrayRef size, ScalarType dtype, bool pin_memory,
TensorBase empty_cpu(
IntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt) {
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt,
std::optional<c10::MemoryFormat> memory_format_opt) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
@@ -295,10 +295,10 @@ TensorBase empty_strided_cpu(IntArrayRef size, IntArrayRef stride,
TensorBase empty_strided_cpu(
IntArrayRef size,
IntArrayRef stride,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt) {
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
@@ -342,7 +342,7 @@ static MetaAllocator g_meta_alloc;
REGISTER_ALLOCATOR(kMeta, &g_meta_alloc);
TensorBase empty_meta(IntArrayRef size, ScalarType dtype,
c10::optional<c10::MemoryFormat> memory_format_opt) {
std::optional<c10::MemoryFormat> memory_format_opt) {
auto *allocator = GetAllocator(kMeta);
constexpr c10::DispatchKeySet meta_dks(c10::DispatchKey::Meta);
return at::detail::empty_generic(
@@ -351,11 +351,11 @@ TensorBase empty_meta(IntArrayRef size, ScalarType dtype,
TensorBase empty_meta(
IntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt,
std::optional<c10::MemoryFormat> memory_format_opt
) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
// NB: because there is no SparseMeta (yet), non-strided layout is
@@ -371,11 +371,11 @@ TensorBase empty_meta(
TensorBase empty_symint_meta(
SymIntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt,
std::optional<c10::MemoryFormat> memory_format_opt
) {
auto *allocator = GetAllocator(kMeta);
constexpr c10::DispatchKeySet ks(c10::DispatchKey::Meta);
@@ -405,10 +405,10 @@ TensorBase empty_strided_meta(IntArrayRef size, IntArrayRef stride,
TensorBase empty_strided_meta(
IntArrayRef size,
IntArrayRef stride,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt) {
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
@@ -440,10 +440,10 @@ TensorBase empty_strided_symint_meta(SymIntArrayRef size, SymIntArrayRef stride,
TensorBase empty_strided_symint_meta(
SymIntArrayRef size,
SymIntArrayRef stride,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt) {
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
@@ -49,14 +49,14 @@ TORCH_API TensorBase empty_generic(
c10::Allocator* allocator,
c10::DispatchKeySet ks,
ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt);
std::optional<c10::MemoryFormat> memory_format_opt);
TORCH_API TensorBase empty_generic_symint(
SymIntArrayRef size,
c10::Allocator* allocator,
c10::DispatchKeySet ks,
ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt);
std::optional<c10::MemoryFormat> memory_format_opt);
TORCH_API TensorBase empty_strided_generic(
IntArrayRef size,
@@ -76,15 +76,15 @@ TORCH_API TensorBase empty_cpu(
IntArrayRef size,
ScalarType dtype,
bool pin_memory = false,
c10::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);
std::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);
TORCH_API TensorBase empty_cpu(
IntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt);
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt,
std::optional<c10::MemoryFormat> memory_format_opt);
TORCH_API TensorBase empty_cpu(IntArrayRef size, const TensorOptions& options);
@@ -97,10 +97,10 @@ TORCH_API TensorBase empty_strided_cpu(
TORCH_API TensorBase empty_strided_cpu(
IntArrayRef size,
IntArrayRef stride,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt);
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt);
TORCH_API TensorBase empty_strided_cpu(
IntArrayRef size,
@@ -110,23 +110,23 @@ TORCH_API TensorBase empty_strided_cpu(
TORCH_API TensorBase empty_meta(
IntArrayRef size,
ScalarType dtype,
c10::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);
std::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);
TORCH_API TensorBase empty_meta(
IntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt);
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt,
std::optional<c10::MemoryFormat> memory_format_opt);
TORCH_API TensorBase empty_symint_meta(
SymIntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt);
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt,
std::optional<c10::MemoryFormat> memory_format_opt);
TORCH_API TensorBase empty_meta(IntArrayRef size, const TensorOptions& options);
@@ -136,10 +136,10 @@ empty_strided_meta(IntArrayRef size, IntArrayRef stride, ScalarType dtype);
TORCH_API TensorBase empty_strided_meta(
IntArrayRef size,
IntArrayRef stride,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt);
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt);
TORCH_API TensorBase empty_strided_meta(
IntArrayRef size,
@@ -154,10 +154,10 @@ TORCH_API TensorBase empty_strided_symint_meta(
TORCH_API TensorBase empty_strided_symint_meta(
SymIntArrayRef size,
SymIntArrayRef stride,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt);
std::optional<ScalarType> dtype_opt,
std::optional<Layout> layout_opt,
std::optional<Device> device_opt,
std::optional<bool> pin_memory_opt);
TORCH_API TensorBase empty_strided_symint_meta(
SymIntArrayRef size,
@@ -145,7 +145,7 @@ Tensor FunctionalInverses::_neg_view_inverse(const Tensor& base, const Tensor& m
}
}
Tensor FunctionalInverses::as_strided_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, at::SymIntArrayRef size, at::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
Tensor FunctionalInverses::as_strided_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, at::SymIntArrayRef size, at::SymIntArrayRef stride, std::optional<c10::SymInt> storage_offset) {
if (inverse_return_mode == InverseReturnMode::AlwaysView) {
// NB: assumes mutated_view is a narrowed view of base.
// We should NOT do this for functionalization
@@ -220,7 +220,7 @@ Tensor FunctionalInverses::lift_fresh_inverse(const Tensor& base, const Tensor&
return mutated_view;
}
Tensor FunctionalInverses::slice_Tensor_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
Tensor FunctionalInverses::slice_Tensor_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, int64_t dim, std::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
if (inverse_return_mode == InverseReturnMode::AlwaysView) {
// NB: assumes mutated_view is a narrowed view of base.
// We should NOT do this for functionalization
@@ -526,7 +526,7 @@ Tensor to_functional_tensor(const Tensor& tensor) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!isFunctionalTensor(tensor));
return at::detail::make_tensor<FunctionalTensorWrapper>(tensor);
}
c10::optional<Tensor> to_functional_tensor(const c10::optional<Tensor>& tensor) {
std::optional<Tensor> to_functional_tensor(const c10::optional<Tensor>& tensor) {
if (tensor.has_value()) {
return c10::make_optional<Tensor>(to_functional_tensor(*tensor));
}
@@ -564,7 +564,7 @@ Tensor from_functional_tensor(const Tensor& tensor, bool assert_functional) {
return tensor;
}
}
c10::optional<Tensor> from_functional_tensor(const c10::optional<Tensor>& t, bool assert_functional) {
std::optional<Tensor> from_functional_tensor(const c10::optional<Tensor>& t, bool assert_functional) {
if (t.has_value()) {
return c10::make_optional<Tensor>(from_functional_tensor(*t, assert_functional));
}
@@ -610,7 +610,7 @@ void sync(const Tensor& t) {
auto functional_impl = at::functionalization::impl::unsafeGetFunctionalWrapper(t);
functional_impl->sync_();
}
void sync(const c10::optional<Tensor>& t) {
void sync(const std::optional<Tensor>& t) {
if (t.has_value()) {
sync(*t);
}
@@ -692,7 +692,7 @@ bool isFunctionalTensor(const at::Tensor& tensor) {
return tensor.unsafeGetTensorImpl()->key_set().has(c10::DispatchKey::Functionalize);
}
bool isFunctionalTensor(const c10::optional<Tensor>& t) {
bool isFunctionalTensor(const std::optional<Tensor>& t) {
if (t.has_value()) {
return isFunctionalTensor(*t);
} else {
@@ -286,32 +286,32 @@ TORCH_API inline FunctionalTensorWrapper* unsafeGetFunctionalWrapper(
}
TORCH_API bool isFunctionalTensor(const at::Tensor& tensor);
TORCH_API bool isFunctionalTensor(const c10::optional<Tensor>& t);
TORCH_API bool isFunctionalTensor(const std::optional<Tensor>& t);
TORCH_API bool isFunctionalTensor(
const c10::List<c10::optional<Tensor>>& t_list);
const c10::List<std::optional<Tensor>>& t_list);
TORCH_API bool isFunctionalTensor(ITensorListRef list);
TORCH_API Tensor to_functional_tensor(const Tensor& tensor);
TORCH_API c10::optional<Tensor> to_functional_tensor(
const c10::optional<Tensor>& tensor);
TORCH_API c10::List<c10::optional<Tensor>> to_functional_tensor(
const c10::List<c10::optional<Tensor>>& t_list);
TORCH_API std::optional<Tensor> to_functional_tensor(
const std::optional<Tensor>& tensor);
TORCH_API c10::List<std::optional<Tensor>> to_functional_tensor(
const c10::List<std::optional<Tensor>>& t_list);
TORCH_API std::vector<Tensor> to_functional_tensor(ITensorListRef t_list);
TORCH_API void freeze_functional_tensor(const Tensor& tensor);
TORCH_API Tensor
from_functional_tensor(const Tensor& tensor, bool assert_functional = true);
TORCH_API c10::optional<Tensor> from_functional_tensor(
const c10::optional<Tensor>& t,
TORCH_API std::optional<Tensor> from_functional_tensor(
const std::optional<Tensor>& t,
bool assert_functional = true);
TORCH_API c10::List<c10::optional<Tensor>> from_functional_tensor(
const c10::List<c10::optional<Tensor>>& t_list);
TORCH_API c10::List<std::optional<Tensor>> from_functional_tensor(
const c10::List<std::optional<Tensor>>& t_list);
TORCH_API std::vector<Tensor> from_functional_tensor(ITensorListRef t_list);
TORCH_API void sync(const at::Tensor& t);
TORCH_API void sync(const c10::optional<Tensor>& t);
TORCH_API void sync(const c10::List<c10::optional<Tensor>>& t_list);
TORCH_API void sync(const std::optional<Tensor>& t);
TORCH_API void sync(const c10::List<std::optional<Tensor>>& t_list);
TORCH_API void sync(ITensorListRef t_list);
TORCH_API void replace_(const Tensor& functional_tensor, const Tensor& other);
@@ -125,7 +125,7 @@ namespace {
// - when we resize to a larger size, it acts as a mutation
// - when we resize to a smaller size, it acts as a view
// See Note [resize_ in Functionalization] for more dtails
static const at::Tensor & resize__functionalization(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
static const at::Tensor & resize__functionalization(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, std::optional<at::MemoryFormat> memory_format) {
// First unwrap the tensor arguments
at::Tensor self_;
if (at::functionalization::impl::isFunctionalTensor(self)) {
@@ -216,7 +216,7 @@ static at::Tensor lift_fresh_functionalize_copy(const at::Tensor & self) {
// in the local include TLS. As a result, when we redispatch here,
// we will end up hitting PreDispatch stack first. So, we should
// directly redispatch to the functionalize key manually.
static auto op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clone", "").typed<at::Tensor(const at::Tensor &, c10::optional<at::MemoryFormat>)>();
static auto op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clone", "").typed<at::Tensor(const at::Tensor &, std::optional<at::MemoryFormat>)>();
return op.redispatch(c10::DispatchKeySet({c10::DispatchKey::Functionalize}), self, c10::nullopt);
}
@@ -225,7 +225,7 @@ static at::Tensor lift_fresh_functionalize_copy(const at::Tensor & self) {
return at::functionalization::impl::to_functional_tensor(out);
}
static bool device_opted_into_functionalization(c10::Device self_device, c10::optional<c10::Device> tgt_device) {
static bool device_opted_into_functionalization(c10::Device self_device, std::optional<c10::Device> tgt_device) {
// If the target device is empty, then the output tensor should be on the same device as the input
auto real_tgt_device = tgt_device.has_value() ? tgt_device.value() : self_device;
return real_tgt_device.type() == c10::DeviceType::XLA || real_tgt_device.type() == c10::DeviceType::Lazy;
@@ -235,12 +235,12 @@ static bool device_opted_into_functionalization(c10::Device self_device, c10::op
// We should probably get rid of this though.
static at::Tensor _to_copy_functionalize(
const at::Tensor & self,
c10::optional<at::ScalarType> dtype,
c10::optional<at::Layout> layout,
c10::optional<at::Device> device,
c10::optional<bool> pin_memory,
std::optional<at::ScalarType> dtype,
std::optional<at::Layout> layout,
std::optional<at::Device> device,
std::optional<bool> pin_memory,
bool non_blocking,
c10::optional<at::MemoryFormat> memory_format) {
std::optional<at::MemoryFormat> memory_format) {
at::Tensor self_;
if (at::functionalization::impl::isFunctionalTensor(self)) {
// sync any pending updates
@@ -23,7 +23,7 @@ inline void infer_size_impl(
ResultVec& res) {
NumelType newsize = 1;
// N.B. this is an index, not a sym dim!
auto infer_dim = c10::optional<int64_t>();
auto infer_dim = std::optional<int64_t>();
for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) {
if (shape[dim] == -1) {
if (infer_dim) {
@@ -380,8 +380,8 @@ Tensor select_backward_batching_rule(const Tensor& grad, IntArrayRef input_sizes
Tensor slice_batching_rule(
const Tensor& self,
int64_t dim,
c10::optional<int64_t> start,
c10::optional<int64_t> end,
std::optional<int64_t> start,
std::optional<int64_t> end,
int64_t step) {
auto self_physical = MultiBatchVmapTransform::logicalToPhysical(self);
auto dim_physical = self_physical.getPhysicalDim(dim);
@@ -996,10 +996,10 @@ Tensor new_zeros_batching_rule(
Tensor new_empty_batching_rule(
const Tensor& self,
IntArrayRef size,
c10::optional<ScalarType> dtype,
c10::optional<Layout> layout,
c10::optional<Device> device,
c10::optional<bool> pin_memory) {
std::optional<ScalarType> dtype,
std::optional<Layout> layout,
std::optional<Device> device,
std::optional<bool> pin_memory) {
auto physical_view = MultiBatchVmapTransform::logicalToPhysical(self);
auto physical_size = physical_view.getPhysicalShape(size);
auto result = physical_view.tensor().new_empty(physical_size, TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory));
@@ -1209,10 +1209,10 @@ TORCH_LIBRARY_IMPL(aten, Batched, m) {
BINARY_POINTWISE(mul);
BINARY_POINTWISE(div);
{
using Binop = Tensor (*)(const Tensor&, const Tensor&, c10::optional<c10::string_view>);
using Unop = Tensor (*)(const Tensor&, const Scalar&, c10::optional<c10::string_view>);
m.impl("div.Tensor_mode", binary_pointwise_batching_rule<Binop, at::div, c10::optional<c10::string_view>>);
m.impl("div.Scalar_mode", unwrap_and_call<Unop, at::div, const Scalar&, c10::optional<c10::string_view>>);
using Binop = Tensor (*)(const Tensor&, const Tensor&, std::optional<c10::string_view>);
using Unop = Tensor (*)(const Tensor&, const Scalar&, std::optional<c10::string_view>);
m.impl("div.Tensor_mode", binary_pointwise_batching_rule<Binop, at::div, std::optional<c10::string_view>>);
m.impl("div.Scalar_mode", unwrap_and_call<Unop, at::div, const Scalar&, std::optional<c10::string_view>>);
}
// at::pow has three out-of-place overloads
@@ -128,7 +128,7 @@ static void assert_names_equal(DimnameList a, DimnameList b) {
}
const Tensor& propagate_names_if_present_and_nonempty(const Tensor& result,
c10::optional<DimnameList> maybe_names,
std::optional<DimnameList> maybe_names,
bool validate_names) {
auto maybe_name_list = maybe_names.value_or(at::ArrayRef<Dimname>{});
propagate_names_if_nonempty(result.unsafeGetTensorImpl(), maybe_name_list, validate_names);
@@ -81,7 +81,7 @@ namespace namedinference {
const Tensor& propagate_names_if_present_and_nonempty(
const Tensor& result,
c10::optional<DimnameList> maybe_names,
std::optional<DimnameList> maybe_names,
bool validate_names = false);
// Propagates `names` to `result` if `names` is not empty.
// `names` can be empty; see [NOTE] Writing name inference rules
@@ -236,7 +236,7 @@ NestedTensorImpl::NestedTensorImpl(
set_custom_sizes_strides(c10::TensorImpl::SizesStridesPolicy::CustomSizes);
}
c10::optional<int64_t> NestedTensorImpl::opt_size(int64_t d) const {
std::optional<int64_t> NestedTensorImpl::opt_size(int64_t d) const {
if (C10_UNLIKELY(!opt_sizes_.has_value())) {
// Cache the metadata to avoid recomputing it each time.
opt_sizes_ = c10::make_optional(construct_opt_sizes(nested_sizes_));
@@ -61,10 +61,10 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
// Returns nullopt if the ith dimension is irregular. The ith dimension
// of a NestedTensor is regular if the unbound tensors match in
// size at the (i-1)th dimension.
c10::optional<int64_t> opt_size(int64_t d) const;
std::optional<int64_t> opt_size(int64_t d) const;
int64_t size(int64_t d) const {
c10::optional<int64_t> optional_size = this->opt_size(d);
std::optional<int64_t> optional_size = this->opt_size(d);
TORCH_CHECK(
optional_size.has_value(),
"Given dimension ",
@@ -171,7 +171,7 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
// Optional to allow it to be computed lazily from nested.
// TODO: maybe we can remove this metadata since
// we can compute it from `nested_sizes_`
mutable c10::optional<std::vector<int64_t>> opt_sizes_;
mutable std::optional<std::vector<int64_t>> opt_sizes_;
template <typename VariableVersion>
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core(
@@ -35,7 +35,7 @@ void SavedTensorDefaultHooks::enable() {
tls.disabled_error_message = c10::nullopt;
}
const c10::optional<std::string>& SavedTensorDefaultHooks::get_disabled_error_message() {
const std::optional<std::string>& SavedTensorDefaultHooks::get_disabled_error_message() {
return tls.disabled_error_message;
}
@@ -21,7 +21,7 @@ struct TORCH_API SavedTensorDefaultHooksTLS {
// disabled_error_message is nullopt IFF Saved Tensor hooks is enabled
// We did this for efficiency (so we didn't have to keep a separate bool
// around)
c10::optional<std::string> disabled_error_message;
std::optional<std::string> disabled_error_message;
};
} // namespace impl
@@ -46,7 +46,7 @@ struct TORCH_API SavedTensorDefaultHooks {
static void disable(const std::string& error_message);
static void enable();
static bool is_enabled();
static const c10::optional<std::string>& get_disabled_error_message();
static const std::optional<std::string>& get_disabled_error_message();
};
} // namespace at
@@ -23,7 +23,7 @@ Tensor& scalar_fill(Tensor& self, const Scalar& value) {
return self;
}
Tensor scalar_tensor_static(const Scalar& s, c10::optional<ScalarType> dtype_opt, c10::optional<Device> device_opt) {
Tensor scalar_tensor_static(const Scalar& s, std::optional<ScalarType> dtype_opt, c10::optional<Device> device_opt) {
at::tracer::impl::NoTracerDispatchMode tracer_guard;
at::AutoDispatchBelowAutograd mode;
Tensor result = at::detail::empty_cpu(
@@ -18,8 +18,8 @@ namespace at::detail {
Tensor& scalar_fill(Tensor& self, const Scalar& value);
TORCH_API Tensor scalar_tensor_static(
const Scalar& s,
c10::optional<ScalarType> dtype_opt,
c10::optional<Device> device_opt);
std::optional<ScalarType> dtype_opt,
std::optional<Device> device_opt);
} // namespace at::detail
// This is in the c10 namespace because we use ADL to find the functions in it.
@@ -39,9 +39,9 @@ TORCH_API extern const EllipsisIndexType Ellipsis;
struct TORCH_API Slice final {
public:
Slice(
c10::optional<c10::SymInt> start_index = c10::nullopt,
c10::optional<c10::SymInt> stop_index = c10::nullopt,
c10::optional<c10::SymInt> step_index = c10::nullopt) {
std::optional<c10::SymInt> start_index = c10::nullopt,
std::optional<c10::SymInt> stop_index = c10::nullopt,
std::optional<c10::SymInt> step_index = c10::nullopt) {
if (!step_index.has_value()) {
step_ = c10::SymInt(1);
} else {
@@ -205,7 +205,7 @@ static inline Tensor applySlice(
c10::SymInt step,
bool disable_slice_optimization,
const at::Device& self_device,
const c10::optional<SymIntArrayRef>& self_sizes) {
const std::optional<SymIntArrayRef>& self_sizes) {
// TODO: implement negative step
TORCH_CHECK_VALUE(step > 0, "step must be greater than zero");
@@ -233,7 +233,7 @@ static inline Tensor applySelect(
SymInt index,
int64_t real_dim,
const at::Device& /*self_device*/,
const c10::optional<SymIntArrayRef>& self_sizes) {
const std::optional<SymIntArrayRef>& self_sizes) {
// See NOTE [nested tensor size for indexing]
if (self_sizes.has_value()) {
auto maybe_index = index.maybe_as_int();
@@ -431,7 +431,7 @@ static inline Tensor handleDimInMultiDimIndexing(
std::vector<Tensor>& outIndices,
bool disable_slice_optimization,
const at::Device& original_tensor_device,
const c10::optional<SymIntArrayRef>& prev_dim_result_sizes) {
const std::optional<SymIntArrayRef>& prev_dim_result_sizes) {
if (index.is_integer()) {
return impl::applySelect(
prev_dim_result,
@@ -515,7 +515,7 @@ static inline Tensor applySlicing(
std::vector<Tensor>& outIndices,
bool disable_slice_optimization,
const at::Device& self_device,
const c10::optional<SymIntArrayRef>& self_sizes) {
const std::optional<SymIntArrayRef>& self_sizes) {
int64_t dim = 0;
int64_t specified_dims = impl::count_specified_dimensions(indices);
@@ -531,9 +531,9 @@ static inline Tensor applySlicing(
for (const auto i : c10::irange(indices.size())) {
auto& obj = indices[i];
// See NOTE [nested tensor size for indexing]
c10::optional<SymIntArrayRef> result_sizes = result.is_nested()
? c10::optional<SymIntArrayRef>(c10::nullopt)
: c10::optional<SymIntArrayRef>(result.sym_sizes());
std::optional<SymIntArrayRef> result_sizes = result.is_nested()
? std::optional<SymIntArrayRef>(c10::nullopt)
: std::optional<SymIntArrayRef>(result.sym_sizes());
result = handleDimInMultiDimIndexing(
/*prev_dim_result=*/result,
/*original_tensor=*/self,
@@ -607,9 +607,9 @@ static inline Tensor get_item(
// nested tensor does not have a size (yet) so for now we represent its size
// as null may need to be changed after we reach a better solution for nested
// tensor size
c10::optional<SymIntArrayRef> self_sizes = self.is_nested()
? c10::optional<SymIntArrayRef>(c10::nullopt)
: c10::optional<SymIntArrayRef>(self.sym_sizes());
std::optional<SymIntArrayRef> self_sizes = self.is_nested()
? std::optional<SymIntArrayRef>(c10::nullopt)
: std::optional<SymIntArrayRef>(self.sym_sizes());
// handle simple types: integers, slices, none, ellipsis, bool
if (indices.size() == 1) {
@@ -147,7 +147,7 @@ struct TORCH_API OperandInfo {
/// promotion target_dtype value can become different from tensor's dtype
/// also, during type promotion target_dtype and device can be set for an
/// undefined tensor so that tensor can be properly constructed later.
c10::optional<Device> device = c10::nullopt;
std::optional<Device> device = c10::nullopt;
ScalarType target_dtype = ScalarType::Undefined;
// Caches dtype of the tensor, because scalar_type is an expensive operation
// If dtype of the tensor is changed (e.g. as a result of type promotion or in
@@ -971,9 +971,9 @@ class TORCH_API TensorIteratorConfig final {
int num_outputs_ = 0;
int num_inputs_ = 0;
c10::optional<DimVector> static_shape_ = c10::nullopt;
c10::optional<ScalarType> static_dtype_ = c10::nullopt;
c10::optional<Device> static_device_ = c10::nullopt;
std::optional<DimVector> static_shape_ = c10::nullopt;
std::optional<ScalarType> static_dtype_ = c10::nullopt;
std::optional<Device> static_device_ = c10::nullopt;
bool check_mem_overlap_ = true;
bool allow_cpu_scalars_ = false;
bool is_reduction_ = false;
@@ -61,7 +61,7 @@ inline bool areAnyTensorSubclassLike(TensorList tensors) {
}
inline bool areAnyOptionalTensorSubclassLike(
const c10::List<c10::optional<Tensor>>& tensors) {
const c10::List<std::optional<Tensor>>& tensors) {
if (c10::impl::dispatch_mode_enabled())
return true;
return std::any_of(
@@ -327,7 +327,7 @@ std::vector<int64_t> defaultStrides(IntArrayRef sizes) {
// see overloads of computeStride() below.
//
template <typename ResultVec, typename NewShapeVec, typename Numel>
inline c10::optional<ResultVec> computeStride_impl(
inline std::optional<ResultVec> computeStride_impl(
const NewShapeVec& oldshape,
const NewShapeVec& oldstride,
const NewShapeVec& newshape,
@@ -395,7 +395,7 @@ inline c10::optional<ResultVec> computeStride_impl(
return newstride;
}
c10::optional<std::vector<int64_t>> computeStride(
std::optional<std::vector<int64_t>> computeStride(
IntArrayRef oldshape,
IntArrayRef oldstride,
IntArrayRef newshape) {
@@ -403,7 +403,7 @@ c10::optional<std::vector<int64_t>> computeStride(
return computeStride_impl<std::vector<int64_t>, IntArrayRef, int64_t>(oldshape, oldstride, newshape, toResult);
}
c10::optional<SymDimVector> computeStride(
std::optional<SymDimVector> computeStride(
c10::SymIntArrayRef oldshape,
c10::SymIntArrayRef oldstride,
c10::SymIntArrayRef newshape) {
@@ -411,7 +411,7 @@ c10::optional<SymDimVector> computeStride(
return computeStride_impl<SymDimVector, c10::SymIntArrayRef, c10::SymInt>(oldshape, oldstride, newshape, toResult);
}
c10::optional<DimVector> computeStride(
std::optional<DimVector> computeStride(
IntArrayRef oldshape,
IntArrayRef oldstride,
const DimVector& newshape) {
@@ -171,17 +171,17 @@ TORCH_API void check_dim_size(
namespace detail {
TORCH_API std::vector<int64_t> defaultStrides(IntArrayRef sizes);
TORCH_API c10::optional<std::vector<int64_t>> computeStride(
TORCH_API std::optional<std::vector<int64_t>> computeStride(
IntArrayRef oldshape,
IntArrayRef oldstride,
IntArrayRef newshape);
TORCH_API c10::optional<SymDimVector> computeStride(
TORCH_API std::optional<SymDimVector> computeStride(
c10::SymIntArrayRef oldshape,
c10::SymIntArrayRef oldstride,
c10::SymIntArrayRef newshape);
TORCH_API c10::optional<DimVector> computeStride(
TORCH_API std::optional<DimVector> computeStride(
IntArrayRef oldshape,
IntArrayRef oldstride,
const DimVector& newshape);
@@ -39,7 +39,7 @@ TORCH_LIBRARY_IMPL(aten, VmapMode, m) {
// CppFunction::makeNamedNotSupported() to avoid listing out the types of everything.
// However, registering e.g. CppFunction::makeNamedNotSupported() as an implementation
// only works for operators that support boxing.
#define TENSOROPTIONS c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>
#define TENSOROPTIONS std::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>
// random operations (out-of-place)
m.impl("bernoulli", unsupportedRandomOp<const Tensor&, optional<Generator>>);
@@ -16,7 +16,7 @@ namespace at {
const auto num_arguments = arguments.size();
const auto stack_start = stack->size() - num_arguments;
c10::optional<bool> is_write;
std::optional<bool> is_write;
for (const auto i : c10::irange(num_arguments)) {
const auto& alias_info = arguments[i].alias_info();
if (alias_info != nullptr) {
@@ -144,7 +144,7 @@ Tensor cached_cast(at::ScalarType to_type, const Tensor& arg, DeviceType device_
Banned functions
*******************************/
static Tensor binary_cross_entropy_banned(const Tensor &, const Tensor &, const c10::optional<Tensor>&, int64_t) {
static Tensor binary_cross_entropy_banned(const Tensor &, const Tensor &, const std::optional<Tensor>&, int64_t) {
AT_ERROR("torch.nn.functional.binary_cross_entropy and torch.nn.BCELoss are unsafe to autocast.\n"
"Many models use a sigmoid layer right before the binary cross entropy layer.\n"
"In this case, combine the two layers using torch.nn.functional.binary_cross_entropy_with_logits\n"
@@ -297,9 +297,9 @@ TORCH_API Tensor cached_cast(
c10::DeviceType device_type = c10::DeviceType::CUDA);
// Overload to process optional<Tensor>
inline c10::optional<Tensor> cached_cast(
inline std::optional<Tensor> cached_cast(
at::ScalarType to_type,
const c10::optional<Tensor>& arg,
const std::optional<Tensor>& arg,
c10::DeviceType device_type = c10::DeviceType::CUDA) {
if (arg.has_value()) {
return cached_cast(to_type, *arg, device_type);
@@ -353,9 +353,9 @@ Otherwise, set it to the autocast type.
********************************************************/
// Overload to catch dtype flags
c10::optional<ScalarType> inline set_opt_dtype(
std::optional<ScalarType> inline set_opt_dtype(
at::ScalarType to_type,
const c10::optional<ScalarType>& dtype) {
const std::optional<ScalarType>& dtype) {
return dtype.has_value() ? dtype : to_type;
}
@@ -392,7 +392,7 @@ enum class CastPolicy : uint8_t {
fp32, // Cast all inputs to at::kFloat before running the op.
fp32_set_opt_dtype, // Treats functions (like softmax) that
// 1. we'd like to run in fp32 and
// 2. have a c10::optional<ScalarType> arg that controls
// 2. have a std::optional<ScalarType> arg that controls
// the output type.
// fp32_set_opt_dtype wrappers' policy is: if the output
// type is already set, don't touch it, otherwise, set
@@ -865,24 +865,24 @@ copy pasted in from VariableTypeEverything.cpp with appropriate substitutions.
_(ADD_NS(norm), \
"norm.Scalar", \
Tensor(const Tensor&, const Scalar&), \
Tensor(const Tensor&, const c10::optional<Scalar>&, ScalarType), \
Tensor(const Tensor&, const std::optional<Scalar>&, ScalarType), \
fp32_append_dtype) \
_(ADD_NS(norm), \
"norm.ScalarOpt_dim", \
Tensor(const Tensor&, const c10::optional<Scalar>&, IntArrayRef, bool), \
Tensor(const Tensor&, const std::optional<Scalar>&, IntArrayRef, bool), \
Tensor( \
const Tensor&, \
const c10::optional<Scalar>&, \
const std::optional<Scalar>&, \
IntArrayRef, \
bool, \
ScalarType), \
fp32_append_dtype) \
_(ADD_NS(norm), \
"norm.names_ScalarOpt_dim", \
Tensor(const Tensor&, const c10::optional<Scalar>&, DimnameList, bool), \
Tensor(const Tensor&, const std::optional<Scalar>&, DimnameList, bool), \
Tensor( \
const Tensor&, \
const c10::optional<Scalar>&, \
const std::optional<Scalar>&, \
DimnameList, \
bool, \
ScalarType), \
@@ -152,7 +152,7 @@ struct CachingHostAllocatorImpl {
// do not need to look up the ctx in blocks_.
auto* block = reinterpret_cast<B*>(ctx);
c10::optional<std::vector<E>> events;
std::optional<std::vector<E>> events;
{
std::lock_guard<std::mutex> g(block->mutex_);
block->allocated_ = false;
@@ -263,7 +263,7 @@ struct CachingHostAllocatorImpl {
// Avoid calling cudaEventDestroy while holding a mutex, so move
// intermediate events out of the lock into this object.
// process the last event
c10::optional<std::pair<E, B*>> processed;
std::optional<std::pair<E, B*>> processed;
{
std::lock_guard<std::mutex> g(events_mutex_);
if (!events_.empty()) {
@@ -324,7 +324,7 @@ struct CachingHostAllocatorImpl {
}
// Record an event on stream and store event into events.
virtual void record_stream(c10::optional<std::vector<E>>& events, S stream) {
virtual void record_stream(std::optional<std::vector<E>>& events, S stream) {
TORCH_CHECK_NOT_IMPLEMENTED(false, "Not implemented for record_stream");
}
@@ -2,10 +2,10 @@
namespace c10::impl {
inline c10::optional<MemoryFormat>
inline std::optional<MemoryFormat>
check_tensor_options_and_extract_memory_format(
const TensorOptions& options,
c10::optional<MemoryFormat> memory_format) {
std::optional<MemoryFormat> memory_format) {
TORCH_CHECK(
options.requires_grad_opt() == c10::nullopt ||
options.requires_grad_opt().value() == false,
@@ -14,7 +14,7 @@ Storage DeprecatedTypeProperties::unsafeStorageFromTH(void * th_pointer, bool re
return at::unsafeStorageFromTH(th_pointer, retain);
}
Tensor DeprecatedTypeProperties::copy(const Tensor & src, bool non_blocking, c10::optional<Device> to_device) const {
Tensor DeprecatedTypeProperties::copy(const Tensor & src, bool non_blocking, std::optional<Device> to_device) const {
if (to_device) {
return src.to(src.options().dtype(scalarType()).device(to_device), non_blocking, /*copy=*/true);
}
@@ -107,7 +107,7 @@ class TORCH_API DeprecatedTypeProperties {
/// Constructs the `TensorOptions` from a type and a Device. Asserts that
/// the device type matches the device type of the type.
TensorOptions options(c10::optional<Device> device_opt) const {
TensorOptions options(std::optional<Device> device_opt) const {
if (!device_opt.has_value()) {
return options(-1);
} else {
@@ -129,7 +129,7 @@ class TORCH_API DeprecatedTypeProperties {
Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const;
Storage unsafeStorageFromTH(void * th_pointer, bool retain) const;
Tensor copy(const Tensor & src, bool non_blocking=false, c10::optional<Device> to_device={}) const;
Tensor copy(const Tensor & src, bool non_blocking=false, std::optional<Device> to_device={}) const;
private:
Backend backend_;
@@ -21,7 +21,7 @@ struct TORCH_API Dimname {
bool isWildcard() const { return type_ == NameType::WILDCARD; }
bool matches(Dimname other) const;
c10::optional<Dimname> unify(Dimname other) const;
std::optional<Dimname> unify(Dimname other) const;
private:
Dimname(Symbol name)
@@ -144,7 +144,7 @@ template <typename RNG, typename ret_type,
C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* generator, ret_type* ret) { \
if (generator->next_##TYPE##_normal_sample()) { \
*ret = *(generator->next_##TYPE##_normal_sample()); \
generator->set_next_##TYPE##_normal_sample(c10::optional<TYPE>()); \
generator->set_next_##TYPE##_normal_sample(std::optional<TYPE>()); \
return true; \
} \
return false; \
@@ -150,7 +150,7 @@ Generator make_generator(Args&&... args) {
* the backend generator type (CPU/CUDAGeneratorImpl etc.)
*/
template <typename T>
static inline T * check_generator(c10::optional<Generator> gen) {
static inline T * check_generator(std::optional<Generator> gen) {
TORCH_CHECK(gen.has_value(), "Expected Generator but received nullopt");
TORCH_CHECK(gen->defined(), "Generator with undefined implementation is not allowed");
TORCH_CHECK(T::device_type() == gen->device().type(), "Expected a '", T::device_type(), "' device type for generator but found '", gen->device().type(), "'");
@@ -164,7 +164,7 @@ static inline T * check_generator(c10::optional<Generator> gen) {
* the backend generator type (CPU/CUDAGeneratorImpl etc.)
*/
template <typename T>
static inline T* get_generator_or_default(const c10::optional<Generator>& gen, const Generator& default_gen) {
static inline T* get_generator_or_default(const std::optional<Generator>& gen, const Generator& default_gen) {
return gen.has_value() && gen->defined() ? check_generator<T>(gen) : check_generator<T>(default_gen);
}
@@ -5,8 +5,8 @@ namespace at {
static std::mutex _generator_mutex_lock;
c10::optional<GeneratorFuncType>& GetGeneratorPrivate() {
static c10::optional<GeneratorFuncType> generator_privateuse1 = c10::nullopt;
std::optional<GeneratorFuncType>& GetGeneratorPrivate() {
static std::optional<GeneratorFuncType> generator_privateuse1 = c10::nullopt;
return generator_privateuse1;
}
@@ -7,7 +7,7 @@ namespace at {
using GeneratorFuncType = std::function<at::Generator(c10::DeviceIndex)>;
c10::optional<GeneratorFuncType>& GetGeneratorPrivate();
std::optional<GeneratorFuncType>& GetGeneratorPrivate();
class TORCH_API _GeneratorRegister {
public:
@@ -58,10 +58,10 @@ struct ListElementConstReferenceTraits {
using const_reference = typename c10::detail::ivalue_to_const_ref_overload_return<T>::type;
};
// There is no to() overload for c10::optional<std::string>.
// There is no to() overload for std::optional<std::string>.
template<>
struct ListElementConstReferenceTraits<c10::optional<std::string>> {
using const_reference = c10::optional<std::reference_wrapper<const std::string>>;
struct ListElementConstReferenceTraits<std::optional<std::string>> {
using const_reference = std::optional<std::reference_wrapper<const std::string>>;
};
template<class T, class Iterator>
@@ -168,8 +168,8 @@ list_element_to_const_ref(const IValue& element) {
}
template<>
inline typename ListElementConstReferenceTraits<c10::optional<std::string>>::const_reference
list_element_to_const_ref<c10::optional<std::string>>(const IValue& element) {
inline typename ListElementConstReferenceTraits<std::optional<std::string>>::const_reference
list_element_to_const_ref<std::optional<std::string>>(const IValue& element) {
return element.toOptionalStringRef();
}
@@ -1127,13 +1127,13 @@ TEST(ListTest, canAccessStringByReference) {
}
TEST(ListTest, canAccessOptionalStringByReference) {
List<c10::optional<std::string>> list({"one", "two", c10::nullopt});
List<std::optional<std::string>> list({"one", "two", c10::nullopt});
const auto& listRef = list;
static_assert(
std::is_same_v<decltype(listRef[1]), c10::optional<std::reference_wrapper<const std::string>>>,
"List<c10::optional<std::string>> access should be by const reference");
c10::optional<std::string> str1 = list[1];
c10::optional<std::string> str2 = list[2];
std::is_same_v<decltype(listRef[1]), std::optional<std::reference_wrapper<const std::string>>>,
"List<std::optional<std::string>> access should be by const reference");
std::optional<std::string> str1 = list[1];
std::optional<std::string> str2 = list[2];
decltype(auto) strRef1 = listRef[1];
decltype(auto) strRef2 = listRef[2];
// NOLINTNEXTLINE(bugprone-unchecked-optional-access)
@ -100,7 +100,7 @@ void check_names_valid_for(const TensorBase& tensor, DimnameList names);
void check_names_valid_for(size_t tensor_dim, DimnameList names);

// Sets the names of `tensor` to be `names`.
TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, c10::optional<DimnameList> names);
TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::optional<DimnameList> names);
TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::vector<Dimname>&& names, bool validate_names);

constexpr size_t kMaxNamedTensorDim = 64;
@ -111,7 +111,7 @@ namespace impl {

// Some helper functions on TensorImpl. Useful for working with names in TH.
// XXX: Ideally these would exist as methods on TensorImpl
TORCH_API void internal_set_names_inplace(TensorImpl* impl, c10::optional<DimnameList> names, bool validate_names);
TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::optional<DimnameList> names, bool validate_names);
TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool validate_names);

void check_names_valid_for(TensorImpl* impl, DimnameList names);
@ -132,7 +132,7 @@ TORCH_API DimnameList get_names(const TensorImpl* impl);
// Returns the names of the tensor if they have been allocated; returns nullopt
// instead if the haven't been. The names of a tensor are not allocated if a
// tensor is constructed with names=None.
TORCH_API c10::optional<DimnameList> get_opt_names(const TensorImpl* impl);
TORCH_API std::optional<DimnameList> get_opt_names(const TensorImpl* impl);

} // namespace impl
@ -7,7 +7,7 @@ namespace c10 {
namespace {
bool _eq(const char* op, c10::SymNodeImpl* lhs, c10::SymNodeImpl* rhs) {
TORCH_INTERNAL_ASSERT(lhs->is_nested_int());
c10::optional<int64_t> c = rhs->nested_int();
std::optional<int64_t> c = rhs->nested_int();
return (
c.has_value() && lhs->nested_int() == *c &&
lhs->nested_int_coeff() == rhs->nested_int_coeff());
@ -68,7 +68,7 @@ c10::SymNode NestedIntSymNodeImpl::le(const c10::SymNode& other) {

c10::SymNode NestedIntSymNodeImpl::mul(const c10::SymNode& other) {
TORCH_CHECK(!other->nested_int(), "nested int cannot be multiplied by nested int");
c10::optional<int64_t> c = other->constant_int();
std::optional<int64_t> c = other->constant_int();
TORCH_CHECK(c.has_value());
return SymNode(c10::make_intrusive<NestedIntSymNodeImpl>(val_, coeff_ * *c));
}

@ -134,11 +134,11 @@ class TORCH_API NestedIntSymNodeImpl : public SymNodeImpl {
c10::SymNode le(const c10::SymNode& other) override;
c10::SymNode mul(const c10::SymNode& other) override;

c10::optional<int64_t> nested_int() override {
std::optional<int64_t> nested_int() override {
return val_;
}

c10::optional<int64_t> nested_int_coeff() override {
std::optional<int64_t> nested_int_coeff() override {
return coeff_;
}
@ -14,7 +14,7 @@ namespace {
// To achieve this, we ensure that the tls is empty by default and emptied again both when
// we call into user torch_dispatch or returning back to python after this call.

thread_local c10::optional<c10::impl::LocalDispatchKeySet> tls_on_entry;
thread_local std::optional<c10::impl::LocalDispatchKeySet> tls_on_entry;

c10::impl::LocalDispatchKeySet safe_get_tls_on_entry() {
TORCH_CHECK(tls_on_entry.has_value(), "Accessing torch dispatch state outside of '__torch_dispatch__' "
@ -42,7 +42,7 @@ TensorBase TensorBase::to(
at::TensorOptions options,
bool non_blocking,
bool copy,
c10::optional<at::MemoryFormat> memory_format) const {
std::optional<at::MemoryFormat> memory_format) const {
Tensor self(*this);
return at::_ops::to_dtype_layout::call(
self, optTypeMetaToScalarType(options.dtype_opt()),
@ -134,8 +134,8 @@ bool TensorBase::retains_grad() const {
}

void Tensor::_backward(TensorList inputs,
const c10::optional<Tensor>& gradient,
c10::optional<bool> keep_graph,
const std::optional<Tensor>& gradient,
std::optional<bool> keep_graph,
bool create_graph) const {
return impl::GetVariableHooks()->_backward(*this, inputs, gradient, keep_graph, create_graph);
}
@ -147,7 +147,7 @@ class TORCH_API TensorBase {
const TensorBase& fill_(const c10::Scalar& scalar) const;
const TensorBase& zero_() const;

TensorBase to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) const;
TensorBase to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, std::optional<at::MemoryFormat> memory_format=c10::nullopt) const;

bool is_complex() const {
return at::isComplexType(this->scalar_type());
@ -249,7 +249,7 @@ class TORCH_API TensorBase {
return impl_->strides();
}
// See impl::get_opt_names in ATen/NamedTensor.h for docs.
c10::optional<DimnameList> opt_names() const {
std::optional<DimnameList> opt_names() const {
return impl::get_opt_names(unsafeGetTensorImpl());
}
// See impl::get_names in ATen/NamedTensor.h for docs.
@ -712,7 +712,7 @@ class TORCH_API TensorBase {
/// // f requires grad, has no operation creating it
/// @endcode

/// \fn void backward(const Tensor & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const;
/// \fn void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const;
///
/// Computes the gradient of current tensor with respect to graph leaves.
///
@ -1010,7 +1010,7 @@ struct ExclusivelyOwnedTraits<at::TensorBase> : public c10::ExclusivelyOwnedTens
namespace at {

inline c10::MaybeOwned<TensorBase> borrow_from_optional_tensor(
const c10::optional<TensorBase>& opt) {
const std::optional<TensorBase>& opt) {
return opt.has_value()
? c10::MaybeOwned<TensorBase>::borrowed(*opt)
: c10::MaybeOwned<TensorBase>::owned(std::in_place);
@ -17,7 +17,7 @@ bool tensorlist_has_dispatch(at::ITensorListRef li) {
return false;
}

bool tensorlist_has_dispatch(const c10::List<c10::optional<at::Tensor>>& li) {
bool tensorlist_has_dispatch(const c10::List<std::optional<at::Tensor>>& li) {
for (auto i : c10::irange(li.size())) {
auto t = li.get(i);
if (t && tensor_has_dispatch(*t)) {

@ -10,7 +10,7 @@ namespace at::impl {

TORCH_API bool tensor_has_dispatch(const at::Tensor& t);
TORCH_API bool tensorlist_has_dispatch(at::ITensorListRef li);
TORCH_API bool tensorlist_has_dispatch(const c10::List<c10::optional<at::Tensor>>& li);
TORCH_API bool tensorlist_has_dispatch(const c10::List<std::optional<at::Tensor>>& li);
using c10::impl::dispatch_mode_enabled;

}
@ -60,8 +60,8 @@ struct TORCH_API VariableHooksInterface {
virtual void _backward(
const Tensor&,
TensorList,
const c10::optional<Tensor>&,
c10::optional<bool>,
const std::optional<Tensor>&,
std::optional<bool>,
bool) const = 0;
virtual void requires_grad_(const TensorBase&, bool) const = 0;
virtual void basic_autograd_not_implemented_fallback(
@ -22,7 +22,7 @@ using has_symint =
std::is_same<c10::SymInt, T>,
std::is_same<c10::SymIntArrayRef, T>,
std::is_same<at::OptionalSymIntArrayRef, T>,
std::is_same<c10::optional<c10::SymInt>, T>
std::is_same<std::optional<c10::SymInt>, T>
>;

template <typename T>
@ -46,8 +46,8 @@ struct remove_symint<c10::SymIntArrayRef> {
};

template <>
struct remove_symint<c10::optional<c10::SymInt>> {
using type = c10::optional<int64_t>;
struct remove_symint<std::optional<c10::SymInt>> {
using type = std::optional<int64_t>;
};

@ -71,7 +71,7 @@ inline typename remove_symint<c10::SymIntArrayRef>::type unpackSymInt(c10::SymIn
}

template <>
inline typename remove_symint<c10::optional<c10::SymInt>>::type unpackSymInt(c10::optional<c10::SymInt> x) {
inline typename remove_symint<std::optional<c10::SymInt>>::type unpackSymInt(c10::optional<c10::SymInt> x) {
return x.has_value() ? c10::make_optional(x->guard_int(__FILE__, __LINE__)) : c10::nullopt;
}
@ -6,7 +6,7 @@

using std::vector;
using std::tuple;
using c10::optional;
using std::optional;
using c10::IValue;
using c10::OperatorKernel;
using c10::OperatorHandle;
@ -207,15 +207,15 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernelWithIntListOu
EXPECT_EQ(6, result[0].toIntVector()[2]);
}

std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>> kernelWithMultipleOutputs(Tensor) {
std::tuple<Tensor, int64_t, std::vector<Tensor>, std::optional<int64_t>, Dict<string, Tensor>> kernelWithMultipleOutputs(Tensor) {
Dict<string, Tensor> dict;
dict.insert("first", dummyTensor(DispatchKey::CPU));
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
return std::tuple<Tensor, int64_t, std::vector<Tensor>, std::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
5,
{dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)},
c10::optional<int64_t>(std::in_place, 0),
std::optional<int64_t>(std::in_place, 0),
dict
);
}
@ -808,11 +808,11 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenFallbackKernelWitho
EXPECT_EQ(4, outputs[0].toInt());
}

c10::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt;
std::optional<Tensor> called_arg2 = c10::nullopt;
std::optional<int64_t> called_arg3 = c10::nullopt;
std::optional<std::string> called_arg4 = c10::nullopt;

void kernelWithOptInputWithoutOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
void kernelWithOptInputWithoutOutput(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -846,7 +846,7 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernelWithOptionalI
EXPECT_FALSE(called_arg4.has_value());
}

c10::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
std::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -883,8 +883,8 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernelWithOptionalI
EXPECT_FALSE(called_arg4.has_value());
}

std::tuple<c10::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>>
kernelWithOptInputWithMultipleOutputs(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
std::tuple<std::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>>
kernelWithOptInputWithMultipleOutputs(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4);
}

@ -936,7 +936,7 @@ TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernel_whenRegister
auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""});
ASSERT_TRUE(op.has_value());

c10::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
std::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
EXPECT_FALSE(differences.has_value());
}
@ -223,15 +223,15 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernelWithIntListOutput_w
EXPECT_EQ(6, result[0].toIntVector()[2]);
}

std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>> kernelWithMultipleOutputs(Tensor) {
std::tuple<Tensor, int64_t, c10::List<Tensor>, std::optional<int64_t>, Dict<string, Tensor>> kernelWithMultipleOutputs(Tensor) {
Dict<string, Tensor> dict;
dict.insert("first", dummyTensor(DispatchKey::CPU));
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
return std::tuple<Tensor, int64_t, c10::List<Tensor>, std::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
5,
c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}),
c10::optional<int64_t>(std::in_place, 0),
std::optional<int64_t>(std::in_place, 0),
dict
);
}
@ -550,11 +550,11 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenFallbackKernelWithoutTens
EXPECT_EQ(4, outputs[0].toInt());
}

c10::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt;
std::optional<Tensor> called_arg2 = c10::nullopt;
std::optional<int64_t> called_arg3 = c10::nullopt;
std::optional<std::string> called_arg4 = c10::nullopt;

void kernelWithOptInputWithoutOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
void kernelWithOptInputWithoutOutput(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -588,7 +588,7 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernelWithOptionalInputs_
EXPECT_FALSE(called_arg4.has_value());
}

c10::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
std::optional<Tensor> kernelWithOptInputWithOutput(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -625,8 +625,8 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernelWithOptionalInputs_
EXPECT_FALSE(called_arg4.has_value());
}

std::tuple<c10::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>>
kernelWithOptInputWithMultipleOutputs(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
std::tuple<std::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>>
kernelWithOptInputWithMultipleOutputs(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4);
}

@ -690,7 +690,7 @@ TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernel_whenRegisteredWith
auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""});
ASSERT_TRUE(op.has_value());

c10::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
std::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
EXPECT_FALSE(differences.has_value());
}
@ -188,15 +188,15 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithIntListOutp

TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))", [] (Tensor) -> std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>> {
.op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))", [] (Tensor) -> std::tuple<Tensor, int64_t, std::vector<Tensor>, std::optional<int64_t>, Dict<string, Tensor>> {
Dict<string, Tensor> dict;
dict.insert("first", dummyTensor(DispatchKey::CPU));
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
return std::tuple<Tensor, int64_t, std::vector<Tensor>, std::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
5,
{dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)},
c10::optional<int64_t>(std::in_place, 0),
std::optional<int64_t>(std::in_place, 0),
dict
);
});
@ -733,13 +733,13 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenFallbackKernelWithout
TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool called;
c10::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt;
std::optional<Tensor> called_arg2 = c10::nullopt;
std::optional<int64_t> called_arg3 = c10::nullopt;
std::optional<std::string> called_arg4 = c10::nullopt;

auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()",
[&] (Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
[&] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -773,13 +773,13 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInp
TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool called;
c10::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt;
std::optional<Tensor> called_arg2 = c10::nullopt;
std::optional<int64_t> called_arg3 = c10::nullopt;
std::optional<std::string> called_arg4 = c10::nullopt;

auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> Tensor?",
[&] (Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
[&] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -816,13 +816,13 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInp
TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool called;
c10::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt;
std::optional<Tensor> called_arg2 = c10::nullopt;
std::optional<int64_t> called_arg3 = c10::nullopt;
std::optional<std::string> called_arg4 = c10::nullopt;

auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)",
[] (Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
[] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4);
});
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
@ -866,7 +866,7 @@ TEST(OperatorRegistrationTestLegacyLambdaBasedKernel, givenKernel_whenRegistered
auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""});
ASSERT_TRUE(op.has_value());

c10::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
std::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
EXPECT_FALSE(differences.has_value());
}
@ -187,15 +187,15 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithIntListOutput_whe
TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::multiple_outputs(Tensor dummy) -> (Tensor, int, Tensor[], int?, Dict(str, Tensor))",
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor) -> std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>> {
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor) -> std::tuple<Tensor, int64_t, c10::List<Tensor>, std::optional<int64_t>, Dict<string, Tensor>> {
Dict<string, Tensor> dict;
dict.insert("first", dummyTensor(DispatchKey::CPU));
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
return std::tuple<Tensor, int64_t, c10::List<Tensor>, std::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
5,
c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}),
c10::optional<int64_t>(std::in_place, 0),
std::optional<int64_t>(std::in_place, 0),
dict
);
}));
@ -466,14 +466,14 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenFallbackKernelWithoutTensor
EXPECT_EQ(4, outputs[0].toInt());
}

c10::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt;
std::optional<Tensor> called_arg2 = c10::nullopt;
std::optional<int64_t> called_arg3 = c10::nullopt;
std::optional<std::string> called_arg4 = c10::nullopt;

TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withoutOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> ()",
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -507,7 +507,7 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_wi
TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withOutput_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> Tensor?",
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -544,7 +544,7 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_wi
TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernelWithOptionalInputs_withMultipleOutputs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators().op(
"_test::opt_input(Tensor arg1, Tensor? arg2, int? arg3, str? arg4) -> (Tensor?, int?, str?)",
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
RegisterOperators::options().kernel(DispatchKey::CPU, [] (Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4);
}));
auto op = c10::Dispatcher::singleton().findSchema({"_test::opt_input", ""});
@ -588,7 +588,7 @@ TEST(OperatorRegistrationTestLambdaBasedKernel, givenKernel_whenRegisteredWithou
auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""});
ASSERT_TRUE(op.has_value());

c10::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
std::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
EXPECT_FALSE(differences.has_value());
}
@ -116,7 +116,7 @@ namespace impl {
};

template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<c10::optional<T>, AllowDeprecatedTypes>
struct assert_is_valid_input_type<std::optional<T>, AllowDeprecatedTypes>
: assert_is_valid_input_type<T, AllowDeprecatedTypes> {};

template <bool AllowDeprecatedTypes, class... Args>
@ -226,7 +226,7 @@ namespace impl {
};

template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<c10::optional<T>, AllowDeprecatedTypes>
struct assert_is_valid_output_type<std::optional<T>, AllowDeprecatedTypes>
: assert_is_valid_output_type<T, AllowDeprecatedTypes> {};

template<class T, bool AllowDeprecatedTypes>
@ -205,15 +205,15 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernelWithIntListOutput_wh
}

struct KernelWithMultipleOutputs final : OperatorKernel {
std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>> operator()(Tensor) {
std::tuple<Tensor, int64_t, c10::List<Tensor>, std::optional<int64_t>, Dict<string, Tensor>> operator()(Tensor) {
Dict<string, Tensor> dict;
dict.insert("first", dummyTensor(DispatchKey::CPU));
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
return std::tuple<Tensor, int64_t, c10::List<Tensor>, std::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
5,
c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}),
c10::optional<int64_t>(std::in_place, 0),
std::optional<int64_t>(std::in_place, 0),
dict
);
}
@ -679,12 +679,12 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenFallbackKernelWithoutTenso
EXPECT_EQ(4, outputs[0].toInt());
}

c10::optional<Tensor> called_arg2 = c10::nullopt;
c10::optional<int64_t> called_arg3 = c10::nullopt;
c10::optional<std::string> called_arg4 = c10::nullopt;
std::optional<Tensor> called_arg2 = c10::nullopt;
std::optional<int64_t> called_arg3 = c10::nullopt;
std::optional<std::string> called_arg4 = c10::nullopt;

struct KernelWithOptInputWithoutOutput final : OperatorKernel {
void operator()(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
void operator()(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -720,7 +720,7 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernelWithOptionalInputs_w
}

struct KernelWithOptInputWithOutput final : OperatorKernel {
c10::optional<Tensor> operator()(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
std::optional<Tensor> operator()(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
called = true;
called_arg2 = arg2;
called_arg3 = arg3;
@ -759,8 +759,8 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernelWithOptionalInputs_w
}

struct KernelWithOptInputWithMultipleOutputs final : OperatorKernel {
std::tuple<c10::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>>
operator()(Tensor arg1, const c10::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
std::tuple<std::optional<Tensor>, c10::optional<int64_t>, c10::optional<std::string>>
operator()(Tensor arg1, const std::optional<Tensor>& arg2, c10::optional<int64_t> arg3, c10::optional<std::string> arg4) {
return std::make_tuple(arg2, arg3, arg4);
}
};
@ -821,7 +821,7 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernel_whenRegisteredWitho
auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""});
ASSERT_TRUE(op.has_value());

c10::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
std::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
EXPECT_FALSE(differences.has_value());
}

@ -832,7 +832,7 @@ TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernel_whenRegisteredCatch
auto op = c10::Dispatcher::singleton().findSchema({"_test::no_schema_specified", ""});
ASSERT_TRUE(op.has_value());

c10::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
std::optional<std::string> differences = c10::findSchemaDifferences(torch::jit::parseSchema("_test::no_schema_specified(Tensor arg1, int arg2, Tensor[] arg3) -> (int, Tensor)"), op->schema());
EXPECT_FALSE(differences.has_value());
}
@ -63,7 +63,7 @@ struct BuiltinOpFunction : public Function {

bool call(
Stack& stack,
c10::optional<size_t>,
std::optional<size_t>,
c10::function_ref<void(const Code&)>) override {
run(stack);
return false;
@ -469,7 +469,7 @@ bool ClassType::isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const {
}

ClassTypePtr ClassType::create(
c10::optional<QualifiedName> qualifiedName,
std::optional<QualifiedName> qualifiedName,
std::weak_ptr<CompilationUnit> cu,
bool is_module,
std::string doc_string,
@ -483,7 +483,7 @@ ClassTypePtr ClassType::create(
}

ClassType::ClassType(
c10::optional<QualifiedName> name,
std::optional<QualifiedName> name,
std::weak_ptr<CompilationUnit> cu,
bool is_module,
std::string doc_string,
@ -620,7 +620,7 @@ IValue ClassType::getConstant(size_t slot) const {
return constantValues_[slot];
}

c10::optional<IValue> ClassType::findConstant(const std::string& name) const {
std::optional<IValue> ClassType::findConstant(const std::string& name) const {
TORCH_INTERNAL_ASSERT(constantNames_.size() == constantValues_.size());
size_t pos = 0;
for (const auto& c : constantNames_) {
@ -652,7 +652,7 @@ std::shared_ptr<const CompilationUnit> ClassType::compilation_unit() const {
return cu;
}

c10::optional<ClassType::Property> ClassType::getProperty(const std::string& name) {
std::optional<ClassType::Property> ClassType::getProperty(const std::string& name) {
for (auto& prop : properties_) {
if (name == prop.name) {
return prop;
@ -667,7 +667,7 @@ void ClassType::addProperty(const std::string& name, torch::jit::Function* gette
properties_.push_back({name, getter, setter});
}

c10::optional<size_t> ClassType::findConstantSlot(const std::string& name) const {
std::optional<size_t> ClassType::findConstantSlot(const std::string& name) const {
TORCH_CHECK(constantNames_.size() == constantValues_.size());
size_t slot = 0;
for (const auto& constant : constantNames_) {
@ -74,7 +74,7 @@ struct TORCH_API ClassType : public NamedType {

// Create a class type with name `name` and its methods stored in `cu`.
static ClassTypePtr create(
c10::optional<QualifiedName> qualifiedName,
std::optional<QualifiedName> qualifiedName,
std::weak_ptr<CompilationUnit> cu,
bool is_module = false,
std::string doc_string = "",
@ -152,7 +152,7 @@ struct TORCH_API ClassType : public NamedType {
// Attributes are stored in a specific slot at runtime for effiency.
// When emitting instructions we specify the slot so that attribute access is
// a constant lookup
c10::optional<size_t> findAttributeSlot(const std::string& name) const {
std::optional<size_t> findAttributeSlot(const std::string& name) const {
size_t slot = 0;
for (const auto& attr : attributes_) {
if (name == attr.getName()) {
@ -239,7 +239,7 @@ struct TORCH_API ClassType : public NamedType {
}

// Get the property with the given \p name, if it exists on the class.
c10::optional<ClassType::Property> getProperty(const std::string& name);
std::optional<ClassType::Property> getProperty(const std::string& name);
// Add a property named \p name with \p getter and \p setter as its getter and setter.
void addProperty(const std::string& name, torch::jit::Function* getter, torch::jit::Function* setter);
// Get a list of all properties.
@ -257,7 +257,7 @@ struct TORCH_API ClassType : public NamedType {

size_t addConstant(const std::string& name, const IValue& value);

c10::optional<size_t> findConstantSlot(const std::string& name) const;
std::optional<size_t> findConstantSlot(const std::string& name) const;

size_t getConstantSlot(const std::string& name) const {
if (auto r = findConstantSlot(name)) {
@ -281,7 +281,7 @@ struct TORCH_API ClassType : public NamedType {

IValue getConstant(size_t slot) const;

c10::optional<IValue> findConstant(const std::string& name) const;
std::optional<IValue> findConstant(const std::string& name) const;

size_t numConstants() const;

@ -384,7 +384,7 @@ struct TORCH_API ClassType : public NamedType {

private:
ClassType(
c10::optional<QualifiedName> name,
std::optional<QualifiedName> name,
std::weak_ptr<CompilationUnit> cu,
bool is_module = false,
std::string doc_string = "",
@ -56,7 +56,7 @@ namespace detail {
void operator()(const at::Tensor& x) {
ts = ts | x.key_set();
}
void operator()(const c10::optional<at::Tensor>& x) {
void operator()(const std::optional<at::Tensor>& x) {
if (x.has_value()) {
ts = ts | x->key_set();
}
@ -67,8 +67,8 @@ namespace detail {
}
}
// Tensor?[] translates to this case.
void operator()(const c10::List<c10::optional<at::Tensor>>& xs) {
for (c10::optional<at::Tensor> x : xs) {
void operator()(const c10::List<std::optional<at::Tensor>>& xs) {
for (std::optional<at::Tensor> x : xs) {
if (x.has_value()) {
ts = ts | x.value().key_set();
}
@ -80,7 +80,7 @@ namespace detail {
ts = ts | x.key_set();
}
}
[[noreturn]] void operator()(at::ArrayRef<c10::optional<at::Tensor>>) {
[[noreturn]] void operator()(at::ArrayRef<std::optional<at::Tensor>>) {
// Just checking that the handling of Tensor?[] didn't change.
TORCH_INTERNAL_ASSERT(false);
}
@ -89,7 +89,7 @@ namespace detail {
ts = ts | gen.key_set();
}
}
void operator()(const c10::optional<at::Generator>& gen) {
void operator()(const std::optional<at::Generator>& gen) {
if (gen.has_value() && gen->defined()) {
ts = ts | gen->key_set();
}
@ -76,8 +76,8 @@ C10_EXPORT Dispatcher& Dispatcher::realSingleton() {
return _singleton;
}

c10::optional<OperatorHandle> Dispatcher::findOp(const OperatorName& overload_name) {
return operatorLookupTable_.read([&] (const ska::flat_hash_map<OperatorName, OperatorHandle>& operatorLookupTable) -> c10::optional<OperatorHandle> {
std::optional<OperatorHandle> Dispatcher::findOp(const OperatorName& overload_name) {
return operatorLookupTable_.read([&] (const ska::flat_hash_map<OperatorName, OperatorHandle>& operatorLookupTable) -> std::optional<OperatorHandle> {
auto found = operatorLookupTable.find(overload_name);
if (found == operatorLookupTable.end()) {
return c10::nullopt;
@ -103,7 +103,7 @@ void Dispatcher::waitForDef(const FunctionSchema& schema) {
"the same dependencies.");
}

void Dispatcher::waitForImpl(const OperatorName& op_name, c10::optional<c10::DispatchKey> maybe_dk) {
void Dispatcher::waitForImpl(const OperatorName& op_name, std::optional<c10::DispatchKey> maybe_dk) {
using namespace std::chrono_literals;
std::unique_lock<std::mutex> lock(guard_->mutex);
auto dk = maybe_dk.value_or(DispatchKey::CompositeImplicitAutograd);
@ -121,7 +121,7 @@ void Dispatcher::waitForImpl(const OperatorName& op_name, c10::optional<c10::Dis
"the same dependencies.");
}

c10::optional<OperatorHandle> Dispatcher::findSchema(const OperatorName& overload_name) {
std::optional<OperatorHandle> Dispatcher::findSchema(const OperatorName& overload_name) {
auto it = findOp(overload_name);
if (it.has_value()) {
if (it->hasSchema()) {
@ -275,7 +275,7 @@ PythonModuleMapType& pythonModulesSingleton() {

}

c10::optional<std::pair<const char*, const char*>> Dispatcher::getPyStub(OperatorName op_name) {
std::optional<std::pair<const char*, const char*>> Dispatcher::getPyStub(OperatorName op_name) {
std::lock_guard<std::mutex> lock(guard_->mutex);
auto found = pythonModulesSingleton().find(op_name);
if (found == pythonModulesSingleton().end()) {
@ -332,9 +332,9 @@ void Dispatcher::throwIfHasPythonModule(OperatorName op_name) {

RegistrationHandleRAII Dispatcher::registerImpl(
OperatorName op_name,
c10::optional<DispatchKey> dispatch_key,
std::optional<DispatchKey> dispatch_key,
KernelFunction kernel,
c10::optional<impl::CppSignature> cpp_signature,
std::optional<impl::CppSignature> cpp_signature,
std::unique_ptr<FunctionSchema> inferred_function_schema,
std::string debug
) {
@ -364,7 +364,7 @@ RegistrationHandleRAII Dispatcher::registerImpl(
});
}

void Dispatcher::deregisterImpl_(const OperatorHandle& op, const OperatorName& op_name, c10::optional<DispatchKey> dispatch_key, impl::OperatorEntry::AnnotatedKernelContainerIterator handle) {
void Dispatcher::deregisterImpl_(const OperatorHandle& op, const OperatorName& op_name, std::optional<DispatchKey> dispatch_key, impl::OperatorEntry::AnnotatedKernelContainerIterator handle) {
op.operatorDef_->op.deregisterKernel_(*this, dispatch_key, handle);

TORCH_INTERNAL_ASSERT(op.operator_name() == op_name);
@ -486,7 +486,7 @@ std::vector<OperatorHandle> Dispatcher::findDanglingImpls() const {
});
}

std::vector<OperatorName> Dispatcher::getRegistrationsForDispatchKey(c10::optional<DispatchKey> k) const {
std::vector<OperatorName> Dispatcher::getRegistrationsForDispatchKey(std::optional<DispatchKey> k) const {
return operatorLookupTable_.read([&] (const ska::flat_hash_map<OperatorName, OperatorHandle>& operatorLookupTable) -> std::vector<OperatorName> {
std::vector<OperatorName> op_names;
for (const auto& op : operatorLookupTable) {
@ -137,7 +137,7 @@ public:
* and returns it if it is registered WITH A SCHEMA.
* Returns nullopt otherwise.
*/
c10::optional<OperatorHandle> findSchema(const OperatorName& operator_name);
std::optional<OperatorHandle> findSchema(const OperatorName& operator_name);

/**
* Variant of findSchema that results in less code generated at the call site.
@ -155,7 +155,7 @@ public:
OperatorHandle findSchemaOrThrow(const char* name, const char* overload_name);

// Like findSchema, but also returns OperatorHandle even if there is no schema
c10::optional<OperatorHandle> findOp(const OperatorName& operator_name);
std::optional<OperatorHandle> findOp(const OperatorName& operator_name);

// Returns a list of all operator names present in the operatorLookupTable_
const std::vector<OperatorName> getAllOpNames();
@ -196,7 +196,7 @@ public:

// Used by torchdeploy/multipy for multiple interpreters racing.
void waitForDef(const FunctionSchema& schema);
void waitForImpl(const OperatorName& op_name, c10::optional<DispatchKey> dispatch_key);
void waitForImpl(const OperatorName& op_name, std::optional<DispatchKey> dispatch_key);

// ------------------------------------------------------------------------
//
@ -221,7 +221,7 @@ public:
*/
// NB: steals the inferred function schema, as we may need to hold on to
// it for a bit until the real schema turns up
RegistrationHandleRAII registerImpl(OperatorName op_name, c10::optional<DispatchKey> dispatch_key, KernelFunction kernel, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema> inferred_function_schema, std::string debug);
RegistrationHandleRAII registerImpl(OperatorName op_name, std::optional<DispatchKey> dispatch_key, KernelFunction kernel, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema> inferred_function_schema, std::string debug);

/**
* Given an operator, tells the Dispatcher that we have implemented a fake impl
@ -234,7 +234,7 @@ public:
*/
void throwIfHasPythonModule(OperatorName op_name);

c10::optional<std::pair<const char*, const char*>> getPyStub(OperatorName op_name);
std::optional<std::pair<const char*, const char*>> getPyStub(OperatorName op_name);

/**
* Register a new operator by name.
@ -299,7 +299,7 @@ public:
* Returns the names of all operators with a kernel registered for the specified DispatchKey.
* If no DispatchKey is specified, it returns all registered operators.
*/
std::vector<OperatorName> getRegistrationsForDispatchKey(c10::optional<DispatchKey> k) const;
std::vector<OperatorName> getRegistrationsForDispatchKey(std::optional<DispatchKey> k) const;

private:
Dispatcher();
@ -321,7 +321,7 @@ private:
void deregisterImpl_(
const OperatorHandle& op,
const OperatorName& op_name,
c10::optional<DispatchKey> dispatch_key,
std::optional<DispatchKey> dispatch_key,
impl::OperatorEntry::AnnotatedKernelContainerIterator kernel_handle);
void deregisterName_(const OperatorHandle& op, const OperatorName& op_name);
void deregisterFallback_(DispatchKey dispatchKey);
@ -7,7 +7,7 @@ namespace c10 {
namespace impl {

namespace {
std::string toString(c10::optional<DispatchKey> k) {
std::string toString(std::optional<DispatchKey> k) {
if (k.has_value()) {
return toString(*k);
} else {
@ -39,7 +39,7 @@ namespace {
// TODO: figure out if we can just directly save real schema at def time
FunctionSchema from_def = from_def_.cloneWithRealTypes(kernel.isValidSymUnboxed());
FunctionSchema inferred = inferred_.cloneWithRealTypes();
c10::optional<std::string> schema_difference = findSchemaDifferences(from_def, inferred);
std::optional<std::string> schema_difference = findSchemaDifferences(from_def, inferred);
if (schema_difference.has_value()) {
TORCH_CHECK(false,
"Inferred operator schema for a C++ kernel function doesn't match the expected function schema.\n"
@ -101,9 +101,9 @@ void OperatorEntry::deregisterSchema() {

OperatorEntry::AnnotatedKernelContainerIterator OperatorEntry::registerKernel(
const c10::Dispatcher& dispatcher,
c10::optional<DispatchKey> dispatch_key,
std::optional<DispatchKey> dispatch_key,
KernelFunction kernel,
c10::optional<CppSignature> cpp_signature,
std::optional<CppSignature> cpp_signature,
std::unique_ptr<FunctionSchema> inferred_function_schema,
std::string debug
) {
@ -181,7 +181,7 @@ OperatorEntry::AnnotatedKernelContainerIterator OperatorEntry::registerKernel(

void OperatorEntry::deregisterKernel_(
const c10::Dispatcher& dispatcher,
c10::optional<DispatchKey> dispatch_key,
std::optional<DispatchKey> dispatch_key,
AnnotatedKernelContainerIterator kernel
) {
// Redirect catchAll deregistrations to CompositeImplicitAutograd.
@ -129,9 +129,9 @@ public:
// Postcondition: caller is responsible for disposing of the kernel
AnnotatedKernelContainerIterator registerKernel(
const Dispatcher& dispatcher,
c10::optional<DispatchKey> dispatch_key,
std::optional<DispatchKey> dispatch_key,
KernelFunction kernel,
c10::optional<CppSignature> cpp_signature,
std::optional<CppSignature> cpp_signature,
std::unique_ptr<FunctionSchema> inferred_function_schema,
std::string debug
);
@ -139,7 +139,7 @@ public:
// Precondition: Dispatcher::mutex_ is held
void deregisterKernel_(
const Dispatcher& dispatcher,
c10::optional<DispatchKey> dispatch_key,
std::optional<DispatchKey> dispatch_key,
AnnotatedKernelContainerIterator kernel
);

@ -221,7 +221,7 @@ public:
private:

OperatorName name_;
c10::optional<AnnotatedSchema> schema_;
std::optional<AnnotatedSchema> schema_;
#ifndef C10_MOBILE
std::vector<at::Tag> tags_;
#endif
@ -282,10 +282,10 @@ private:
struct CppSignatureWithDebug {
CppSignature signature;
std::string debug;
c10::optional<DispatchKey> dispatch_key;
std::optional<DispatchKey> dispatch_key;
};
c10::optional<CppSignatureWithDebug> cpp_signature_;
c10::optional<CppSignatureWithDebug> sym_cpp_signature_;
std::optional<CppSignatureWithDebug> cpp_signature_;
std::optional<CppSignatureWithDebug> sym_cpp_signature_;

// A Python custom error handler for OperatorEntry::reportError
std::unique_ptr<c10::SafePyObject> report_error_callback_;
@ -121,7 +121,7 @@ class DynamicType : public SharedType {
* A implementation detail to support NamedTuple.
*/
struct LabeledDynamicType {
c10::optional<std::string> label;
std::optional<std::string> label;
DynamicTypePtr ty;
explicit LabeledDynamicType(DynamicTypePtr t) : ty(std::move(t)) {}

@ -163,7 +163,7 @@ class DynamicType : public SharedType {
Tag tag() const {
return tag_;
}
const c10::optional<std::string>& name() const {
const std::optional<std::string>& name() const {
return name_;
}
const Arguments& arguments() const {
@ -200,7 +200,7 @@ class DynamicType : public SharedType {
}

Tag tag_;
c10::optional<std::string> name_;
std::optional<std::string> name_;
union {
Arguments arguments_;
ClassTypePtr class_;
@ -97,7 +97,7 @@ struct TORCH_API Function {
// executor.
virtual bool call(
Stack&,
c10::optional<size_t>,
std::optional<size_t>,
c10::function_ref<void(const Code&)>) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
return false;
@ -30,7 +30,7 @@ FunctionSchema FunctionSchema::cloneWithRealTypes(bool with_symint) const {
// NB: keep this in sync with unpackSymInt in KernelFunction_impl.h
if (
*a.real_type() == *getTypePtr<c10::SymInt>() ||
*a.real_type() == *getTypePtr<c10::optional<c10::SymInt>>() ||
*a.real_type() == *getTypePtr<std::optional<c10::SymInt>>() ||
*a.real_type() == *getTypePtr<c10::SymIntArrayRef>() ||
*a.real_type() == *getTypePtr<at::OptionalSymIntArrayRef>()
) {
@ -53,7 +53,7 @@ FunctionSchema FunctionSchema::cloneWithRealTypes(bool with_symint) const {
is_varret());
}

bool FunctionSchema::canAliasTypeSetsAlias(const c10::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const {
bool FunctionSchema::canAliasTypeSetsAlias(const std::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const {
if (!lhs || !rhs) {
return false;
}
@ -67,7 +67,7 @@ bool FunctionSchema::canAliasTypeSetsAlias(const c10::optional<AliasTypeSet> &lh
return false;
}

c10::optional<AliasTypeSet> FunctionSchema::getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const {
std::optional<AliasTypeSet> FunctionSchema::getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const {
if (!aliasTypeSet) {
return c10::nullopt;
}
@ -95,7 +95,7 @@ c10::optional<AliasTypeSet> FunctionSchema::getAliasTypeSetContainedTypes(const
return AliasTypeSet(containedTypes.begin(), containedTypes.end());
}

c10::optional<AliasTypeSet> FunctionSchema::mapTypeToAliasTypeSet(const TypePtr& type) const {
std::optional<AliasTypeSet> FunctionSchema::mapTypeToAliasTypeSet(const TypePtr& type) const {
switch(type->kind()) {
case TypeKind::ListType:
case TypeKind::DictType:
@ -155,8 +155,8 @@ bool FunctionSchema::may_alias(const SchemaArgument& lhs, const SchemaArgument&
const Argument lhsArg = getCorrectList(lhs.type)[lhs.index];
const Argument rhsArg = getCorrectList(rhs.type)[rhs.index];

c10::optional<AliasTypeSet> lhsTypes = mapTypeToAliasTypeSet(lhsArg.type());
c10::optional<AliasTypeSet> rhsTypes = mapTypeToAliasTypeSet(rhsArg.type());
std::optional<AliasTypeSet> lhsTypes = mapTypeToAliasTypeSet(lhsArg.type());
std::optional<AliasTypeSet> rhsTypes = mapTypeToAliasTypeSet(rhsArg.type());

// Check to see if lhs and rhs have the same alias set
if (canAliasTypeSetsAlias(lhsTypes, rhsTypes)) {
@ -182,10 +182,10 @@ bool FunctionSchema::may_contain_alias(const SchemaArgument& lhs, const SchemaAr

const c10::Argument lhsArg = getCorrectList(lhs.type)[lhs.index];
const c10::Argument rhsArg = getCorrectList(rhs.type)[rhs.index];
c10::optional<AliasTypeSet> lhsTypes = mapTypeToAliasTypeSet(lhsArg.type());
c10::optional<AliasTypeSet> rhsTypes = mapTypeToAliasTypeSet(rhsArg.type());
c10::optional<AliasTypeSet> lhsContainedTypes = getAliasTypeSetContainedTypes(lhsTypes);
c10::optional<AliasTypeSet> rhsContainedTypes = getAliasTypeSetContainedTypes(rhsTypes);
std::optional<AliasTypeSet> lhsTypes = mapTypeToAliasTypeSet(lhsArg.type());
std::optional<AliasTypeSet> rhsTypes = mapTypeToAliasTypeSet(rhsArg.type());
std::optional<AliasTypeSet> lhsContainedTypes = getAliasTypeSetContainedTypes(lhsTypes);
std::optional<AliasTypeSet> rhsContainedTypes = getAliasTypeSetContainedTypes(rhsTypes);

// Checks if one side is wildcard and the other side is a container of the same type
bool lhsWildcard = lhsArg.alias_info() && lhsArg.alias_info()->isWildcardAfter() && canAliasTypeSetsAlias(lhsTypes, rhsContainedTypes);
@ -29,20 +29,20 @@ struct Argument {
Argument(
std::string name = "",
const TypePtr& type = nullptr,
c10::optional<int32_t> N = c10::nullopt,
c10::optional<IValue> default_value = c10::nullopt,
std::optional<int32_t> N = c10::nullopt,
std::optional<IValue> default_value = c10::nullopt,
bool kwarg_only = false,
c10::optional<AliasInfo> alias_info = c10::nullopt)
std::optional<AliasInfo> alias_info = c10::nullopt)
: Argument(std::move(name), type, type, N, std::move(default_value), kwarg_only, std::move(alias_info)) {}

Argument(
std::string name,
TypePtr fake_type,
TypePtr real_type,
c10::optional<int32_t> N = c10::nullopt,
c10::optional<IValue> default_value = c10::nullopt,
std::optional<int32_t> N = c10::nullopt,
std::optional<IValue> default_value = c10::nullopt,
bool kwarg_only = false,
c10::optional<AliasInfo> alias_info = c10::nullopt)
std::optional<AliasInfo> alias_info = c10::nullopt)
: name_(std::move(name)),
type_(fake_type ? std::move(fake_type) : TensorType::get()),
real_type_(real_type ? std::move(real_type) : type_),
@ -94,10 +94,10 @@ struct Argument {
const TypePtr& real_type() const {
return real_type_;
}
c10::optional<int32_t> N() const {
std::optional<int32_t> N() const {
return N_;
}
const c10::optional<IValue>& default_value() const {
const std::optional<IValue>& default_value() const {
return default_value_;
}
bool kwarg_only() const {
@ -150,7 +150,7 @@ struct Argument {
N_,
default_value_,
kwarg_only_,
alias_info_ ? c10::optional<AliasInfo>(*alias_info_) : c10::nullopt);
alias_info_ ? std::optional<AliasInfo>(*alias_info_) : c10::nullopt);
}

// this function checks whether this Argument is backward compatible with
@ -179,9 +179,9 @@ struct Argument {
// e.g. for int[3]: type = ListType::ofInts(), N = 3
// If present, this will allow scalars to be broadcast to this length to
// become a list.
c10::optional<int32_t> N_;
std::optional<int32_t> N_;

c10::optional<IValue> default_value_;
std::optional<IValue> default_value_;
// AliasInfo is huge, so let's only allocate memory for it if
// necessary (which it isn't during schema parsing on startup, to
// give a pertinent example).
@ -322,7 +322,7 @@ struct TORCH_API FunctionSchema {
// alias information should we infer?
// NB: due to alias analysis kind merging, this may be nullopt. Eventually
// this should always be set no matter what
c10::optional<AliasAnalysisKind> alias_kind_;
std::optional<AliasAnalysisKind> alias_kind_;

template <typename T>
void checkArg(const IValue& value, const Argument& argument, optional<size_t> pos) const;
@ -395,7 +395,7 @@ struct TORCH_API FunctionSchema {
return aliasInfo && aliasInfo->isWrite();
}
bool is_mutable(c10::string_view name) const {
c10::optional<int> index = argumentIndexWithName(name);
std::optional<int> index = argumentIndexWithName(name);
TORCH_INTERNAL_ASSERT(
index != c10::nullopt, "Schema has no argument named ", name);

@ -416,22 +416,22 @@ struct TORCH_API FunctionSchema {

// Returns whether the two AliasTypeSets contain any similarities
// ie: whether the two type sets can alias.
bool canAliasTypeSetsAlias(const c10::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const;
bool canAliasTypeSetsAlias(const std::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const;

// Recursively Finds all contained types within the AliasTypeSet.
c10::optional<AliasTypeSet> getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const;
std::optional<AliasTypeSet> getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const;

// Similar to mapTypeToAliasTypeSet defined in alias_analysis.cpp.
// Used to map types to a type such that all types that can alias will be mapped to the same type.
// For example, calling this method on 'Optional[List[int]]' is the same as calling this method
// on 'List[int]'.
c10::optional<AliasTypeSet> mapTypeToAliasTypeSet(const TypePtr& type) const;
std::optional<AliasTypeSet> mapTypeToAliasTypeSet(const TypePtr& type) const;

// Returns either arguments() or returns() depending on the SchemaArgType
// output => returns(), input => arguments()
const std::vector<Argument>& getCorrectList(SchemaArgType type) const;

c10::optional<int> argumentIndexWithName(c10::string_view name) const {
std::optional<int> argumentIndexWithName(c10::string_view name) const {
for (const auto i : c10::irange(arguments().size())) {
if(name == arguments()[i].name())
return i;
@ -470,8 +470,8 @@ struct TORCH_API FunctionSchema {
std::string formatTypeMismatchMsg(
const Argument& expected,
const std::string& actual_type,
c10::optional<size_t> position = c10::nullopt,
c10::optional<std::string> value = c10::nullopt) const;
std::optional<size_t> position = c10::nullopt,
std::optional<std::string> value = c10::nullopt) const;

FunctionSchema cloneWithRemappedTypes(
const std::function<TypePtr(TypePtr)> type_map) const;
@ -514,7 +514,7 @@ struct TORCH_API FunctionSchema {
alias_kind_ = v;
}

c10::optional<c10::string_view> getNamespace() const {
std::optional<c10::string_view> getNamespace() const {
return name_.getNamespace();
}

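A minimal sketch, assuming the constructor defaults shown in this hunk, of how call sites pass the now-std optional parameters; the argument name and default value are made up for illustration:

```cpp
// Hypothetical argument: optional fields take std::nullopt directly.
c10::Argument alpha(
    "alpha",                // name (illustrative)
    c10::FloatType::get(),  // type
    std::nullopt,           // N: not a fixed-size list
    c10::IValue(1.0));      // default value
```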
@ -162,8 +162,8 @@ inline bool Argument::isForwardCompatibleWith(
inline std::string FunctionSchema::formatTypeMismatchMsg(
const Argument& expected,
const std::string& actual_type,
c10::optional<size_t> position,
c10::optional<std::string> value) const {
std::optional<size_t> position,
std::optional<std::string> value) const {
std::string position_str;
if (position) {
position_str = c10::str("Position: ", *position, "\n");

@ -471,7 +471,7 @@ bool IValue::isOptionalTensorList() const {
return false;
}
const auto& ty = static_cast<detail::ListImpl*>(payload.u.as_intrusive_ptr)->elementType;
const auto& expected_ty = c10::getTypePtr<c10::optional<at::Tensor>>();
const auto& expected_ty = c10::getTypePtr<std::optional<at::Tensor>>();
return expected_ty == ty;
}

@ -886,14 +886,14 @@ c10::intrusive_ptr<ivalue::Object> ivalue::Object::create(
StrongTypePtr(nullptr, std::move(classType)), numSlots);
}

IValue IValue::deepcopy(c10::optional<at::Device> device) const {
IValue IValue::deepcopy(std::optional<at::Device> device) const {
IValue::HashAliasedIValueMap memo;
return deepcopy(memo, device);
}

IValue IValue::deepcopy(
IValue::HashAliasedIValueMap& memo,
c10::optional<at::Device> device) const {
std::optional<at::Device> device) const {
if (memo.count(*this)) {
return memo.at(*this);
}
@ -1027,14 +1027,14 @@ c10::intrusive_ptr<ivalue::Object> ivalue::Object::copy_to_weak_compilation_ref(
}

c10::intrusive_ptr<ivalue::Object> ivalue::Object::deepcopy(
c10::optional<at::Device> device) const {
std::optional<at::Device> device) const {
IValue::HashAliasedIValueMap memo;
return deepcopy(memo, device);
}

c10::intrusive_ptr<ivalue::Object> ivalue::Object::deepcopy(
IValue::HashAliasedIValueMap& memo,
c10::optional<at::Device> device) const {
std::optional<at::Device> device) const {
auto cu = type_.cu_;
auto object = ivalue::Object::create(WeakOrStrongTypePtr(type_.cu_, type_.type_), type()->numAttributes());
for (const auto i : c10::irange(slots_.size())) {

@ -86,20 +86,20 @@ struct StreamData3Holder : c10::intrusive_ptr_target {

} // namespace ivalue

// This is an owning wrapper for a c10::optional<std::vector<T>>
// This is an owning wrapper for a std::optional<std::vector<T>>
// that can be implicitly converted to a (non-owning) optional<ArrayRef<T>>.
// Its purpose is to be used in generated code to keep the vector alive
// either until the end of a statement (as a temporary), or as a saved arg
// in autograd.
template <typename T>
struct OptionalArray {
c10::optional<std::vector<T>> list;
std::optional<std::vector<T>> list;

OptionalArray() = default;
OptionalArray(std::vector<T> val) : list(std::move(val)) {}

// Used when saving an argument for the backwards pass.
OptionalArray& operator=(c10::optional<ArrayRef<T>> ref) {
OptionalArray& operator=(std::optional<ArrayRef<T>> ref) {
if (ref) {
list = std::vector<T>(ref->begin(), ref->end());
} else {
@ -118,7 +118,7 @@ struct OptionalArray {
return *this;
}

operator c10::optional<c10::ArrayRef<T>>() {
operator std::optional<c10::ArrayRef<T>>() {
if (!list) {
return nullopt;
}
@ -697,7 +697,7 @@ struct TORCH_API IValue final {
c10::intrusive_ptr<ivalue::ConstantString> toString() &&;
c10::intrusive_ptr<ivalue::ConstantString> toString() const&;
const std::string& toStringRef() const;
c10::optional<std::reference_wrapper<const std::string>> toOptionalStringRef()
std::optional<std::reference_wrapper<const std::string>> toOptionalStringRef()
const;
c10::string_view toStringView() const;

@ -726,9 +726,9 @@ struct TORCH_API IValue final {

// OptionalTensorList
bool isOptionalTensorList() const;
c10::List<c10::optional<at::Tensor>> toOptionalTensorList() &&;
c10::List<c10::optional<at::Tensor>> toOptionalTensorList() const&;
std::vector<c10::optional<at::Tensor>> toOptionalTensorVector() const;
c10::List<std::optional<at::Tensor>> toOptionalTensorList() &&;
c10::List<std::optional<at::Tensor>> toOptionalTensorList() const&;
std::vector<std::optional<at::Tensor>> toOptionalTensorVector() const;

// GenericList
IValue(c10::List<IValue> v);
@ -817,7 +817,7 @@ struct TORCH_API IValue final {
IValue(std::unordered_map<Key, Value> v);

template <class T, enable_if_ivalue_constructible<T> = nullptr>
IValue(c10::optional<T> v);
IValue(std::optional<T> v);
template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
IValue(c10::OptionalArrayRef<T> v);
IValue(c10::nullopt_t);
@ -1128,10 +1128,10 @@ struct TORCH_API IValue final {
// TODO: There are several places that recurse over IValue. This is fragile.
// This visitor should be used to recurse over ivalues.
void visit(const std::function<bool(const IValue&)>& visitor) const;
IValue deepcopy(c10::optional<at::Device> device = c10::nullopt) const;
IValue deepcopy(std::optional<at::Device> device = c10::nullopt) const;
IValue deepcopy(
HashAliasedIValueMap& memo,
c10::optional<at::Device> device = c10::nullopt) const;
std::optional<at::Device> device = c10::nullopt) const;

private:
static c10::intrusive_ptr_target* null_to_undefined_tensor(
@ -1530,8 +1530,8 @@ struct WeakOrStrongCompilationUnit {
return holdingStrongRef() && *strong_ptr_ == nullptr;
}

c10::optional<std::shared_ptr<torch::jit::CompilationUnit>> strong_ptr_;
c10::optional<std::weak_ptr<torch::jit::CompilationUnit>> weak_ptr_;
std::optional<std::shared_ptr<torch::jit::CompilationUnit>> strong_ptr_;
std::optional<std::weak_ptr<torch::jit::CompilationUnit>> weak_ptr_;
};

// An Object will hold a non-owning Compilation Unit reference if it is a

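For the `OptionalArray` wrapper above, a small usage sketch (assumptions: the ATen header is available and the wrapper is visible as `c10::OptionalArray`); the function name is illustrative:

```cpp
#include <ATen/core/ivalue.h>

// The owning wrapper converts implicitly to a non-owning optional<ArrayRef<T>>;
// the vector inside `holder` must outlive the returned view.
std::optional<c10::ArrayRef<int64_t>> as_ref(c10::OptionalArray<int64_t>& holder) {
  return holder;
}
```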
@ -909,7 +909,7 @@ struct C10_EXPORT ivalue::Future final : c10::intrusive_ptr_target {
using WeakStorage = c10::weak_intrusive_ptr<c10::StorageImpl>;
void markCompleted(
IValue value,
c10::optional<std::vector<WeakStorage>> storages = c10::nullopt) {
std::optional<std::vector<WeakStorage>> storages = c10::nullopt) {
// Start by performing all steps that can throw, before setting any field.
// Do this before even acquiring the mutex, because extractStorages might
// acquire the GIL, which could lead to a lock inversion with our mutex.
@ -1586,11 +1586,11 @@ struct C10_EXPORT ivalue::Object final : c10::intrusive_ptr_target {
c10::intrusive_ptr<Object> copy() const;

c10::intrusive_ptr<Object> deepcopy(
c10::optional<at::Device> device = c10::nullopt) const;
std::optional<at::Device> device = c10::nullopt) const;

c10::intrusive_ptr<Object> deepcopy(
IValue::HashAliasedIValueMap& memo,
c10::optional<at::Device> device = c10::nullopt) const;
std::optional<at::Device> device = c10::nullopt) const;

bool is_weak_compilation_ref() const {
return !type_.holds_strong_ref();
@ -1613,7 +1613,7 @@ struct ivalue::PyObjectHolder : c10::intrusive_ptr_target {
public:
virtual PyObject* getPyObject() = 0;
virtual c10::InferredType tryToInferType() = 0;
virtual IValue toIValue(const TypePtr& type, c10::optional<int32_t> N = c10::nullopt) = 0;
virtual IValue toIValue(const TypePtr& type, std::optional<int32_t> N = c10::nullopt) = 0;
virtual std::string toStr() = 0;
virtual std::vector<at::Tensor> extractTensors() = 0;

@ -1909,7 +1909,7 @@ std::unordered_map<K, V> generic_to(
}

template <typename T>
c10::optional<T> generic_to(IValue ivalue, _fake_type<c10::optional<T>>) {
std::optional<T> generic_to(IValue ivalue, _fake_type<c10::optional<T>>) {
if (ivalue.isNone()) {
return c10::nullopt;
}
@ -1946,11 +1946,11 @@ inline T IValue::to() && {
}

template <>
inline c10::optional<c10::string_view> IValue::to() && {
inline std::optional<c10::string_view> IValue::to() && {
// In the default implementation, the IValue is destroyed with std::move.
// But if the unboxed type is optional<string_view> we cannot destroy
// the IValue.
return generic_to(*this, _fake_type<c10::optional<c10::string_view>>{});
return generic_to(*this, _fake_type<std::optional<c10::string_view>>{});
}

template <typename T>
@ -2046,20 +2046,20 @@ inline std::vector<at::Tensor> IValue::toTensorVector() const {
return createVectorFromList<at::Tensor>(
static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
}
inline c10::List<c10::optional<at::Tensor>> IValue::toOptionalTensorList() && {
inline c10::List<std::optional<at::Tensor>> IValue::toOptionalTensorList() && {
AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind());
return c10::List<c10::optional<at::Tensor>>(moveToIntrusivePtr<c10::detail::ListImpl>());
return c10::List<std::optional<at::Tensor>>(moveToIntrusivePtr<c10::detail::ListImpl>());
}
inline c10::List<c10::optional<at::Tensor>> IValue::toOptionalTensorList() const& {
inline c10::List<std::optional<at::Tensor>> IValue::toOptionalTensorList() const& {
AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind());
return c10::List<c10::optional<at::Tensor>>(toIntrusivePtr<c10::detail::ListImpl>());
return c10::List<std::optional<at::Tensor>>(toIntrusivePtr<c10::detail::ListImpl>());
}
inline std::vector<c10::optional<at::Tensor>> IValue::toOptionalTensorVector() const {
inline std::vector<std::optional<at::Tensor>> IValue::toOptionalTensorVector() const {
AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind());
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(),
"called toOptionalTensorVector on null intrusive_ptr IValue");
return createVectorFromList<c10::optional<at::Tensor>>(
return createVectorFromList<std::optional<at::Tensor>>(
static_cast<const c10::detail::ListImpl*>(payload.u.as_intrusive_ptr));
}
inline c10::List<IValue> IValue::toList() && {
@ -2274,7 +2274,7 @@ inline IValue::IValue(std::unordered_map<Key, Value> v)
}

template <class T, IValue::enable_if_ivalue_constructible<T>>
inline IValue::IValue(c10::optional<T> v) : IValue() {
inline IValue::IValue(std::optional<T> v) : IValue() {
if (v.has_value()) {
*this = IValue(std::move(*v));
}
@ -2360,7 +2360,7 @@ inline const std::string& IValue::toStringRef() const {
payload.u.as_intrusive_ptr)
->string();
}
inline c10::optional<std::reference_wrapper<const std::string>> IValue::
inline std::optional<std::reference_wrapper<const std::string>> IValue::
toOptionalStringRef() const {
if (isNone()) {
return c10::nullopt;

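A minimal sketch of the `IValue` round trip these overloads enable; values are illustrative:

```cpp
#include <ATen/core/ivalue.h>

void roundtrip() {
  c10::IValue boxed{std::optional<int64_t>(42)};  // uses IValue(std::optional<T>)
  c10::IValue empty{std::optional<int64_t>()};    // an empty optional boxes to None
  bool none = empty.isNone();                     // true
  int64_t v = boxed.toInt();                      // 42
}
```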
@ -32,7 +32,7 @@ class Dict;
struct IValue;
struct FunctionSchema;
struct NamedType;
using OptNameList = c10::optional<std::vector<std::string>>;
using OptNameList = std::optional<std::vector<std::string>>;

void standardizeVectorForUnion(std::vector<TypePtr>& reference, std::vector<TypePtr>* to_fill);
void standardizeVectorForUnion(std::vector<TypePtr>* to_flatten);
@ -164,9 +164,9 @@ struct TORCH_API UnionType : public SharedType {
return has_free_variables_;
}

c10::optional<TypePtr> toOptional() const;
std::optional<TypePtr> toOptional() const;

c10::optional<TypePtr> subtractTypeSet(std::vector<TypePtr>& to_subtract) const;
std::optional<TypePtr> subtractTypeSet(std::vector<TypePtr>& to_subtract) const;

protected:
explicit UnionType(std::vector<TypePtr> types, TypeKind kind=TypeKind::UnionType);
@ -247,13 +247,13 @@ struct TORCH_API OptionalType : public UnionType {
};

template <typename T>
inline c10::optional<T> merge_primitive(
const c10::optional<T>& a,
const c10::optional<T>& b) {
inline std::optional<T> merge_primitive(
const std::optional<T>& a,
const std::optional<T>& b) {
if (a.has_value() && b.has_value() && a.value() == b.value()) {
return a;
}
return c10::optional<T>{};
return std::optional<T>{};
}

// If we see `a + b + c` and know that a, b, and c are the same size and have
@ -274,9 +274,9 @@ inline c10::optional<T> merge_primitive(
struct TORCH_API Stride {
Stride() = default;
Stride(
const c10::optional<size_t>& stride_index,
c10::optional<bool> contiguous,
const c10::optional<size_t>& stride)
const std::optional<size_t>& stride_index,
std::optional<bool> contiguous,
const std::optional<size_t>& stride)
: stride_index_(stride_index), contiguous_(contiguous), stride_(stride) {}

bool operator==(const Stride& b) const {
@ -288,17 +288,17 @@ struct TORCH_API Stride {
return stride_index_ && contiguous_ && stride_;
}

c10::optional<size_t> stride_index_;
c10::optional<bool> contiguous_;
c10::optional<size_t> stride_;
std::optional<size_t> stride_index_;
std::optional<bool> contiguous_;
std::optional<size_t> stride_;
};

template <>
inline c10::optional<Stride> merge_primitive(
const c10::optional<Stride>& a,
const c10::optional<Stride>& b) {
c10::optional<Stride> left = a;
c10::optional<Stride> right = b;
inline std::optional<Stride> merge_primitive(
const std::optional<Stride>& a,
const std::optional<Stride>& b) {
std::optional<Stride> left = a;
std::optional<Stride> right = b;
if (!left.has_value()) {
left = {Stride()};
}
@ -314,7 +314,7 @@ inline c10::optional<Stride> merge_primitive(
// normalize
if (!r.stride_index_.has_value() && !r.contiguous_.has_value() &&
!r.stride_.has_value()) {
return c10::optional<Stride>{};
return std::optional<Stride>{};
}

return r;
@ -375,7 +375,7 @@ struct TORCH_API SymbolicShape {
SymbolicShape() : dims_(c10::nullopt) {}

// Known rank but unknown dimentions.
SymbolicShape(c10::optional<size_t> rank) : dims_(c10::nullopt) {
SymbolicShape(std::optional<size_t> rank) : dims_(c10::nullopt) {
if(!rank) {
return;
}
@ -389,10 +389,10 @@ struct TORCH_API SymbolicShape {
}

// Mix of known and unknown ranks
SymbolicShape(const std::vector<c10::optional<int64_t>>& dims) {
SymbolicShape(const std::vector<std::optional<int64_t>>& dims) {
std::vector<ShapeSymbol> shape_symbols;
shape_symbols.reserve(dims.size());
for(c10::optional<int64_t> dim: dims) {
for(std::optional<int64_t> dim: dims) {
if(!dim) {
shape_symbols.push_back(ShapeSymbol::newSymbol());
} else {
@ -430,18 +430,18 @@ struct TORCH_API SymbolicShape {
}

// Returns rank or nullopt in case of unranked shape.
c10::optional<size_t> rank() const {
std::optional<size_t> rank() const {
if(!dims_) {
return c10::nullopt;
}
return dims_->size();
}

c10::optional<std::vector<ShapeSymbol>> sizes() const {
std::optional<std::vector<ShapeSymbol>> sizes() const {
return dims_;
}

c10::optional<std::vector<bool>> symbolicDims() const {
std::optional<std::vector<bool>> symbolicDims() const {
if (!dims_) {
return c10::nullopt;
}
@ -482,7 +482,7 @@ struct TORCH_API SymbolicShape {
}

private:
c10::optional<std::vector<ShapeSymbol>> dims_;
std::optional<std::vector<ShapeSymbol>> dims_;
};

namespace detail {
@ -498,14 +498,14 @@ inline bool isComplete(const T& /*t*/) {

template <typename T>
struct VaryingShape {
using ListOfOptionalElements = std::vector<c10::optional<T>>;
using ListOfOptionalElements = std::vector<std::optional<T>>;
VaryingShape(const std::vector<T>& vec)
: VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {}

VaryingShape(c10::ArrayRef<T> vec)
: VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {}

VaryingShape(c10::optional<size_t> size = c10::nullopt) : dims_(c10::nullopt) {
VaryingShape(std::optional<size_t> size = c10::nullopt) : dims_(c10::nullopt) {
if (size) {
dims_ = ListOfOptionalElements(*size);
}

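A sketch of the `merge_primitive` semantics shown above, assuming the template is visible as `c10::merge_primitive`:

```cpp
std::optional<int64_t> a = 3, b = 3, c = 4;
auto same = c10::merge_primitive(a, b);  // engaged, holds 3
auto diff = c10::merge_primitive(a, c);  // disengaged: the values disagree
```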
@ -513,20 +513,20 @@ struct VaryingShape {

VaryingShape(ListOfOptionalElements dims) : dims_(std::move(dims)) {}

VaryingShape(size_t size) : VaryingShape(c10::optional<size_t>(size)) {}
VaryingShape(size_t size) : VaryingShape(std::optional<size_t>(size)) {}

bool operator==(const VaryingShape& other) const {
return dims_ == other.dims_;
}

const c10::optional<T> &operator[](size_t i) const {
const std::optional<T> &operator[](size_t i) const {
if (!dims_) {
throw std::runtime_error("Rank isn't fixed");
}
return (*dims_).at(i);
}

c10::optional<size_t> size() const {
std::optional<size_t> size() const {
if (!dims_) {
return c10::nullopt;
}
@ -534,13 +534,13 @@ struct VaryingShape {
return dims.size();
}

const c10::optional<ListOfOptionalElements>& sizes() const {
const std::optional<ListOfOptionalElements>& sizes() const {
return dims_;
}

TORCH_API VaryingShape merge(const VaryingShape& other) const;

c10::optional<std::vector<T>> concrete_sizes() const {
std::optional<std::vector<T>> concrete_sizes() const {
if (!dims_) {
return c10::nullopt;
}
@ -568,7 +568,7 @@ struct VaryingShape {
}

private:
c10::optional<ListOfOptionalElements> dims_;
std::optional<ListOfOptionalElements> dims_;
};

struct TensorType;
@ -581,27 +581,27 @@ struct TORCH_API TensorType : public SharedType {
// used by TensorType::create(size_t dim) which in turn used by
// shape_analysis.cpp
static TensorTypePtr create(
c10::optional<at::ScalarType> scalar_type,
c10::optional<Device> device,
std::optional<at::ScalarType> scalar_type,
std::optional<Device> device,
const VaryingShape<int64_t>& sizes,
const VaryingShape<int64_t>& strides,
c10::optional<bool> requires_grad,
c10::optional<bool> undefined = false,
std::optional<bool> requires_grad,
std::optional<bool> undefined = false,
bool tensor_contiguity = false);

static TensorTypePtr create(
c10::optional<at::ScalarType> scalar_type,
c10::optional<Device> device,
std::optional<at::ScalarType> scalar_type,
std::optional<Device> device,
const SymbolicShape& sizes,
const VaryingShape<Stride>& stride_,
c10::optional<bool> requires_grad,
c10::optional<bool> undefined = false);
std::optional<bool> requires_grad,
std::optional<bool> undefined = false);

static TensorTypePtr create(
c10::optional<at::ScalarType> scalar_type,
c10::optional<Device> device,
c10::optional<size_t> dim,
c10::optional<bool> requires_grad);
std::optional<at::ScalarType> scalar_type,
std::optional<Device> device,
std::optional<size_t> dim,
std::optional<bool> requires_grad);

// overloaded create variadic template argument as it could not distinguish
// initializer list
@ -613,7 +613,7 @@ struct TORCH_API TensorType : public SharedType {
static TypePtr fromNumberType(const Type& typ);
static TypePtr fromBoolType();

c10::optional<size_t> dim() const {
std::optional<size_t> dim() const {
return sizes().size();
}

@ -625,13 +625,13 @@ struct TORCH_API TensorType : public SharedType {
return strides_;
}

c10::optional<at::Device> device() const {
std::optional<at::Device> device() const {
return device_;
}
c10::optional<at::ScalarType> scalarType() const {
std::optional<at::ScalarType> scalarType() const {
return scalar_type_;
}
c10::optional<bool> requiresGrad() const {
std::optional<bool> requiresGrad() const {
return requires_grad_;
}
bool requires_grad() const override {
@ -651,32 +651,32 @@ struct TORCH_API TensorType : public SharedType {
}
}

c10::optional<size_t> numel() const {
std::optional<size_t> numel() const {
size_t prod = 1;
const auto& shape = sizes();

for (size_t i = 0; i < shape.size(); i++) {
if (!shape[i]) {
return c10::optional<size_t>{};
return std::optional<size_t>{};
}
prod *= shape[i].value();
}
return prod;
}

TensorTypePtr withRequiresGrad(c10::optional<bool> s) {
TensorTypePtr withRequiresGrad(std::optional<bool> s) {
auto copy = clone();
copy->requires_grad_ = s;
return copy;
}

TensorTypePtr withScalarType(c10::optional<ScalarType> st) {
TensorTypePtr withScalarType(std::optional<ScalarType> st) {
auto copy = clone();
copy->scalar_type_ = st;
return copy;
}

TensorTypePtr withDim(c10::optional<size_t> d) {
TensorTypePtr withDim(std::optional<size_t> d) {
auto copy = clone();
// withDim is only used by the legacy executor
// that only cares about the rank, so create dummy symbols)) :
@ -712,7 +712,7 @@ struct TORCH_API TensorType : public SharedType {
sizes, contiguousStridesOf(sizes));
}

TensorTypePtr withDevice(const c10::optional<at::Device> device) const {
TensorTypePtr withDevice(const std::optional<at::Device> device) const {
auto copy = clone();
copy->device_ = device;
return copy;
@ -784,7 +784,7 @@ struct TORCH_API TensorType : public SharedType {
return r;
}

c10::optional<bool> undefined() const { return undefined_; }
std::optional<bool> undefined() const { return undefined_; }

static const TensorTypePtr& get();

@ -824,12 +824,12 @@ struct TORCH_API TensorType : public SharedType {

private:
TensorType(
c10::optional<at::ScalarType> scalar_type,
c10::optional<Device> device,
std::optional<at::ScalarType> scalar_type,
std::optional<Device> device,
SymbolicShape sizes,
VaryingShape<Stride> strides,
c10::optional<bool> requires_grad,
c10::optional<bool> undefined = false);
std::optional<bool> requires_grad,
std::optional<bool> undefined = false);

TensorTypePtr clone() const {
return TensorTypePtr(new TensorType(
@ -841,11 +841,11 @@ struct TORCH_API TensorType : public SharedType {
at::IntArrayRef strides,
bool tensor_contiguity = false);

c10::optional<at::ScalarType> scalar_type_;
c10::optional<at::Device> device_;
std::optional<at::ScalarType> scalar_type_;
std::optional<at::Device> device_;
SymbolicShape sizes_;
VaryingShape<Stride> strides_;
c10::optional<bool> requires_grad_;
std::optional<bool> requires_grad_;
// we exploit the fact certain tensors must be zero in the autograd to
// optimize gradient computation. Such zero tensors are currently implemented
// with `UndefinedTensorImpl.` They can be handled only by special operators
@ -857,7 +857,7 @@ struct TORCH_API TensorType : public SharedType {
// undefined_ may become `c10::nullopt` if the tensor was observed to be both
// defined and undefined. However, no tensor type starts out with
// `undefined_` set to `c10::nullopt`
c10::optional<bool> undefined_;
std::optional<bool> undefined_;
// Represents whether or not this type was inferred.
bool is_inferred_ = false;
};

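A sketch of calling the rank-only `TensorType::create` overload above, passing `std::nullopt` for unknown properties; the chosen values are illustrative:

```cpp
auto t = c10::TensorType::create(
    at::kFloat,     // scalar type
    std::nullopt,   // device unknown
    std::nullopt,   // rank unknown
    std::nullopt);  // requires_grad unknown
```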
@ -1144,16 +1144,16 @@ using NameList = std::vector<std::string>;
// This type represents a Tuple
struct TORCH_API TupleType : public NamedType {

static TupleTypePtr createNamed(const c10::optional<c10::QualifiedName>& name,
static TupleTypePtr createNamed(const std::optional<c10::QualifiedName>& name,
const std::vector<std::string>& field_names,
const std::vector<TypePtr>& field_types,
std::vector<IValue>& field_defaults);

static TupleTypePtr createNamed(const c10::optional<c10::QualifiedName>& name,
static TupleTypePtr createNamed(const std::optional<c10::QualifiedName>& name,
const std::vector<std::string>& field_names,
const std::vector<TypePtr>& field_types);

static TupleTypePtr createNamed(const c10::optional<c10::QualifiedName>& name,
static TupleTypePtr createNamed(const std::optional<c10::QualifiedName>& name,
const std::vector<c10::string_view>& field_names,
const std::vector<TypePtr>& field_types);

@ -1190,21 +1190,21 @@ struct TORCH_API TupleType : public NamedType {
const std::shared_ptr<FunctionSchema>& schema() const {
return schema_;
}
c10::optional<std::vector<c10::string_view>> names() const;
std::optional<std::vector<c10::string_view>> names() const;

static const TypeKind Kind = TypeKind::TupleType;

private:
template <typename S>
static TupleTypePtr createWithSpec(
const c10::optional<c10::QualifiedName>& name,
const std::optional<c10::QualifiedName>& name,
const std::vector<S>& field_names,
const std::vector<TypePtr>& field_types,
std::vector<IValue>& field_defaults);

TupleType(
std::vector<TypePtr> elements_,
c10::optional<c10::QualifiedName> name,
std::optional<c10::QualifiedName> name,
std::shared_ptr<FunctionSchema> schema);

bool compare(
@ -1747,7 +1747,7 @@ inline TypePtr TensorType::fromBoolType() {
return TensorType::createContiguous(at::kBool, at::kCPU, {});
}

inline c10::optional<c10::ScalarType> tryScalarTypeFromJitType(const Type& type) {
inline std::optional<c10::ScalarType> tryScalarTypeFromJitType(const Type& type) {
if (type == *FloatType::get()) {
return at::typeMetaToScalarType(c10::get_default_dtype());
} else if (type == *IntType::get()) {
@ -1782,13 +1782,13 @@ inline at::ScalarType scalarTypeFromJitType(const Type& type) {
// If `type_hint` is an `InterfaceType`, then we can use that as a
// potential supertype for `ClassType`s in the list. Otherwise, we have
// no way to find and use some common interface type
TORCH_API c10::optional<TypePtr> unifyTypes(
TORCH_API std::optional<TypePtr> unifyTypes(
const TypePtr& t1,
const TypePtr& t2,
bool default_to_union = false,
const TypePtr& type_hint = nullptr);

TORCH_API c10::optional<TypePtr> unifyTypeList(
TORCH_API std::optional<TypePtr> unifyTypeList(
at::ArrayRef<TypePtr> elements,
std::ostream& why_not,
bool default_to_union = false,
@ -2132,7 +2132,7 @@ struct MatchTypeReturn {
private:
MatchTypeReturn()
: reason_(c10::nullopt) {}
c10::optional<std::string> reason_; // is there is no match, this contains the reason
std::optional<std::string> reason_; // is there is no match, this contains the reason
};

// attempt to match the type variables in formal to actual, adding them to type_env.

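A sketch of consuming the `std::optional` returned by `unifyTypes`; the two operand types are chosen only for illustration:

```cpp
std::optional<c10::TypePtr> unified =
    c10::unifyTypes(c10::IntType::get(), c10::FloatType::get());
if (unified) {
  // *unified holds the common supertype when unification succeeds
}
```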
@ -75,7 +75,7 @@ struct SharedType;
// Use this to customize how a Type is printed using `annotation_str()`. If
// c10::nullopt is returned, `annotation_str()` falls through to its default
// implementation.
using TypePrinter = std::function<c10::optional<std::string>(const Type&)>;
using TypePrinter = std::function<std::optional<std::string>(const Type&)>;

namespace detail {
template <typename T>
@ -688,7 +688,7 @@ using NamedTypePtr = std::shared_ptr<NamedType>;
using ConstNamedTypePtr = std::shared_ptr<const NamedType>;

struct TORCH_API NamedType : public SharedType {
NamedType(TypeKind tk, c10::optional<QualifiedName> name)
NamedType(TypeKind tk, std::optional<QualifiedName> name)
: SharedType(tk), name_(std::move(name)) {
TORCH_INTERNAL_ASSERT(
tk == TypeKind::TupleType || tk == TypeKind::FunctionType ||
@ -700,12 +700,12 @@ struct TORCH_API NamedType : public SharedType {

// Fully qualified name of type
// Looks like: "foo.bar.Baz".
const c10::optional<QualifiedName>& name() const {
const std::optional<QualifiedName>& name() const {
return name_;
}

private:
c10::optional<QualifiedName> name_;
std::optional<QualifiedName> name_;
};

} // namespace c10

@ -42,7 +42,7 @@ namespace {
constexpr auto CatchAll = c10::DispatchKey::CatchAll;
} // anonymous namespace

CppFunction::CppFunction(c10::KernelFunction func, c10::optional<c10::impl::CppSignature> cpp_signature, std::unique_ptr<c10::FunctionSchema> schema)
CppFunction::CppFunction(c10::KernelFunction func, std::optional<c10::impl::CppSignature> cpp_signature, std::unique_ptr<c10::FunctionSchema> schema)
: func_(std::move(func))
, cpp_signature_(cpp_signature)
, schema_(std::move(schema))
@ -57,10 +57,10 @@ void Library::reset() {

#define ERROR_CONTEXT "(Error occurred while processing ", toString(kind_), " block at ", file_, ":", line_, ")"

Library::Library(Kind kind, std::string ns, c10::optional<c10::DispatchKey> k, const char* file, uint32_t line)
Library::Library(Kind kind, std::string ns, std::optional<c10::DispatchKey> k, const char* file, uint32_t line)
: kind_(kind)
, ns_(ns == "_" ? c10::nullopt : c10::make_optional(std::move(ns)))
, dispatch_key_(k.value_or(CatchAll) == CatchAll ? c10::optional<c10::DispatchKey>() : k)
, dispatch_key_(k.value_or(CatchAll) == CatchAll ? std::optional<c10::DispatchKey>() : k)
, file_(file)
, line_(line)
{

@ -43,7 +43,7 @@ FunctionSchema make_function_schema(
} // namespace infer_schema
} // namespace detail

c10::optional<std::string> findSchemaDifferences(
std::optional<std::string> findSchemaDifferences(
const FunctionSchema& lhs,
const FunctionSchema& rhs) {
if (lhs.arguments().size() != rhs.arguments().size()) {

@ -155,6 +155,6 @@ FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&&
return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn<guts::infer_function_traits_t<FuncType>>(std::move(name), std::move(overload_name));
}

TORCH_API c10::optional<std::string> findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified);
TORCH_API std::optional<std::string> findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified);

}

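A sketch of the `findSchemaDifferences` contract after the change; `inferred` and `specified` are placeholder `FunctionSchema` values, and the call assumes the declaration shown in this hunk:

```cpp
std::optional<std::string> mismatch = c10::findSchemaDifferences(inferred, specified);
TORCH_CHECK(!mismatch.has_value(), "Schemas differ: ", *mismatch);  // empty optional means "no differences"
```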
@ -17,9 +17,9 @@ void build_feature_required_feature_not_available(const char* feature) {
} // namespace impl

static_assert(std::is_nothrow_move_constructible<
c10::optional<RegistrationHandleRAII>>::value);
std::optional<RegistrationHandleRAII>>::value);
static_assert(std::is_nothrow_move_assignable<
c10::optional<RegistrationHandleRAII>>::value);
std::optional<RegistrationHandleRAII>>::value);

void RegisterOperators::checkSchemaAndRegisterOp_(Options&& options) {
TORCH_CHECK(
@ -71,7 +71,7 @@ c10::FunctionSchema RegisterOperators::inferSchemaFromKernels_(
opName,
" because there is no kernel specified.");

c10::optional<FunctionSchema> inferred_schema = c10::nullopt;
std::optional<FunctionSchema> inferred_schema = c10::nullopt;
for (const auto& kernel : options.kernels) {
if (nullptr != kernel.inferred_function_schema.get()) {
if (!inferred_schema.has_value()) {

@ -399,7 +399,7 @@ public:
}

private:
Options&& kernel(c10::optional<DispatchKey> dispatch_key, KernelFunction&& func, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema>&& inferred_function_schema) && {
Options&& kernel(std::optional<DispatchKey> dispatch_key, KernelFunction&& func, c10::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema>&& inferred_function_schema) && {
KernelRegistrationConfig config;
config.dispatch_key = dispatch_key;
config.func = std::move(func);
@ -425,13 +425,13 @@ public:
, inferred_function_schema(nullptr)
{}

c10::optional<DispatchKey> dispatch_key;
std::optional<DispatchKey> dispatch_key;
KernelFunction func;
c10::optional<impl::CppSignature> cpp_signature;
std::optional<impl::CppSignature> cpp_signature;
std::unique_ptr<FunctionSchema> inferred_function_schema;
};

c10::optional<std::variant<OperatorName, FunctionSchema>> schemaOrName_;
std::optional<std::variant<OperatorName, FunctionSchema>> schemaOrName_;

std::vector<KernelRegistrationConfig> kernels;
optional<AliasAnalysisKind> aliasAnalysisKind_;

@ -882,56 +882,56 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {


// optional types (with has_value() == true)
testArgTypes<c10::optional<double>>::test(
c10::optional<double>(1.5), [] (const c10::optional<double>& v) {EXPECT_EQ(1.5, v.value());},
c10::optional<double>(2.5), [] (const IValue& v) {EXPECT_EQ(2.5, v.toDouble());},
testArgTypes<std::optional<double>>::test(
std::optional<double>(1.5), [] (const c10::optional<double>& v) {EXPECT_EQ(1.5, v.value());},
std::optional<double>(2.5), [] (const IValue& v) {EXPECT_EQ(2.5, v.toDouble());},
"(float? a) -> float?");
testArgTypes<c10::optional<int64_t>>::test(
c10::optional<int64_t>(1), [] (const c10::optional<int64_t>& v) {EXPECT_EQ(1, v.value());},
c10::optional<int64_t>(2), [] (const IValue& v) {EXPECT_EQ(2, v.toInt());},
testArgTypes<std::optional<int64_t>>::test(
std::optional<int64_t>(1), [] (const c10::optional<int64_t>& v) {EXPECT_EQ(1, v.value());},
std::optional<int64_t>(2), [] (const IValue& v) {EXPECT_EQ(2, v.toInt());},
"(int? a) -> int?");
testArgTypes<c10::optional<bool>>::test(
c10::optional<bool>(true), [] (const c10::optional<bool>& v) {EXPECT_EQ(true, v.value());},
c10::optional<bool>(false), [] (const IValue& v) {EXPECT_EQ(false, v.toBool());},
testArgTypes<std::optional<bool>>::test(
std::optional<bool>(true), [] (const c10::optional<bool>& v) {EXPECT_EQ(true, v.value());},
std::optional<bool>(false), [] (const IValue& v) {EXPECT_EQ(false, v.toBool());},
"(bool? a) -> bool?");
testArgTypes<c10::optional<bool>>::test(
c10::optional<bool>(false), [] (const c10::optional<bool>& v) {EXPECT_EQ(false, v.value());},
c10::optional<bool>(true), [] (const IValue& v) {EXPECT_EQ(true, v.toBool());},
testArgTypes<std::optional<bool>>::test(
std::optional<bool>(false), [] (const c10::optional<bool>& v) {EXPECT_EQ(false, v.value());},
std::optional<bool>(true), [] (const IValue& v) {EXPECT_EQ(true, v.toBool());},
"(bool? a) -> bool?");
testArgTypes<c10::optional<std::string>>::test(
c10::optional<std::string>("string1"), [] (const c10::optional<std::string>& v) {EXPECT_EQ("string1", v.value());},
c10::optional<std::string>("string2"), [] (const IValue& v) {EXPECT_EQ("string2", v.toStringRef());},
testArgTypes<std::optional<std::string>>::test(
std::optional<std::string>("string1"), [] (const c10::optional<std::string>& v) {EXPECT_EQ("string1", v.value());},
std::optional<std::string>("string2"), [] (const IValue& v) {EXPECT_EQ("string2", v.toStringRef());},
"(str? a) -> str?");
testArgTypes<c10::optional<Tensor>>::test(
c10::optional<Tensor>(dummyTensor(c10::DispatchKey::CPU)), [] (const c10::optional<Tensor>& v) {EXPECT_EQ(c10::DispatchKey::CPU, extractDispatchKey(v.value()));},
c10::optional<Tensor>(dummyTensor(c10::DispatchKey::CUDA)), [] (const IValue& v) {EXPECT_EQ(c10::DispatchKey::CUDA, extractDispatchKey(v.toTensor()));},
testArgTypes<std::optional<Tensor>>::test(
std::optional<Tensor>(dummyTensor(c10::DispatchKey::CPU)), [] (const c10::optional<Tensor>& v) {EXPECT_EQ(c10::DispatchKey::CPU, extractDispatchKey(v.value()));},
std::optional<Tensor>(dummyTensor(c10::DispatchKey::CUDA)), [] (const IValue& v) {EXPECT_EQ(c10::DispatchKey::CUDA, extractDispatchKey(v.toTensor()));},
"(Tensor? a) -> Tensor?");


// optional types (with has_value() == false)
testArgTypes<c10::optional<double>>::test(
c10::optional<double>(c10::nullopt), [] (const c10::optional<double>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<double>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
testArgTypes<std::optional<double>>::test(
std::optional<double>(c10::nullopt), [] (const c10::optional<double>& v) {EXPECT_FALSE(v.has_value());},
std::optional<double>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(float? a) -> float?");
testArgTypes<c10::optional<int64_t>>::test(
c10::optional<int64_t>(c10::nullopt), [] (const c10::optional<int64_t>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<int64_t>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
testArgTypes<std::optional<int64_t>>::test(
std::optional<int64_t>(c10::nullopt), [] (const c10::optional<int64_t>& v) {EXPECT_FALSE(v.has_value());},
std::optional<int64_t>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(int? a) -> int?");
testArgTypes<c10::optional<bool>>::test(
c10::optional<bool>(c10::nullopt), [] (const c10::optional<bool>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<bool>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
testArgTypes<std::optional<bool>>::test(
std::optional<bool>(c10::nullopt), [] (const c10::optional<bool>& v) {EXPECT_FALSE(v.has_value());},
std::optional<bool>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(bool? a) -> bool?");
testArgTypes<c10::optional<bool>>::test(
c10::optional<bool>(c10::nullopt), [] (const c10::optional<bool>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<bool>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
testArgTypes<std::optional<bool>>::test(
std::optional<bool>(c10::nullopt), [] (const c10::optional<bool>& v) {EXPECT_FALSE(v.has_value());},
std::optional<bool>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(bool? a) -> bool?");
testArgTypes<c10::optional<std::string>>::test(
c10::optional<std::string>(c10::nullopt), [] (const c10::optional<std::string>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<std::string>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
testArgTypes<std::optional<std::string>>::test(
std::optional<std::string>(c10::nullopt), [] (const c10::optional<std::string>& v) {EXPECT_FALSE(v.has_value());},
std::optional<std::string>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(str? a) -> str?");
testArgTypes<c10::optional<Tensor>>::test(
c10::optional<Tensor>(c10::nullopt), [] (const c10::optional<Tensor>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<Tensor>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
testArgTypes<std::optional<Tensor>>::test(
std::optional<Tensor>(c10::nullopt), [] (const c10::optional<Tensor>& v) {EXPECT_FALSE(v.has_value());},
std::optional<Tensor>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(Tensor? a) -> Tensor?");


@ -1136,21 +1136,21 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
"(Tensor[] a) -> Tensor[]");

// Test optional of list (with nullopt)
testArgTypes<c10::optional<c10::List<int64_t>>>::test(
c10::optional<c10::List<int64_t>>(c10::nullopt), [] (const c10::optional<c10::List<int64_t>>& v) {EXPECT_FALSE(v.has_value());},
c10::optional<c10::List<int64_t>>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
testArgTypes<std::optional<c10::List<int64_t>>>::test(
std::optional<c10::List<int64_t>>(c10::nullopt), [] (const c10::optional<c10::List<int64_t>>& v) {EXPECT_FALSE(v.has_value());},
std::optional<c10::List<int64_t>>(c10::nullopt), [] (const IValue& v) {EXPECT_TRUE(v.isNone());},
"(int[]? a) -> int[]?");

// Test optional of list (with empty list)
testArgTypes<c10::optional<c10::List<int64_t>>>::test(
c10::optional<c10::List<int64_t>>(c10::List<int64_t>({})), [] (const c10::optional<c10::List<int64_t>>& v) {EXPECT_EQ(0, v.value().size());},
c10::optional<c10::List<int64_t>>(c10::List<int64_t>({})), [] (const IValue& v) {EXPECT_EQ(0, v.to<c10::List<int64_t>>().size());},
testArgTypes<std::optional<c10::List<int64_t>>>::test(
std::optional<c10::List<int64_t>>(c10::List<int64_t>({})), [] (const c10::optional<c10::List<int64_t>>& v) {EXPECT_EQ(0, v.value().size());},
std::optional<c10::List<int64_t>>(c10::List<int64_t>({})), [] (const IValue& v) {EXPECT_EQ(0, v.to<c10::List<int64_t>>().size());},
"(int[]? a) -> int[]?");

// Test optional of list (with values)
testArgTypes<c10::optional<c10::List<int64_t>>>::test(
c10::optional<c10::List<int64_t>>(c10::List<int64_t>({1, 2})), [] (const c10::optional<c10::List<int64_t>>& v) {expectListEquals({1, 2}, v.value());},
c10::optional<c10::List<int64_t>>(c10::List<int64_t>({3, 4})), [] (const IValue& v) {expectListEquals({3, 4}, v.to<c10::List<int64_t>>());},
testArgTypes<std::optional<c10::List<int64_t>>>::test(
std::optional<c10::List<int64_t>>(c10::List<int64_t>({1, 2})), [] (const c10::optional<c10::List<int64_t>>& v) {expectListEquals({1, 2}, v.value());},
std::optional<c10::List<int64_t>>(c10::List<int64_t>({3, 4})), [] (const IValue& v) {expectListEquals({3, 4}, v.to<c10::List<int64_t>>());},
"(int[]? a) -> int[]?");

// Test list of optional (with empty list)
@ -1161,8 +1161,8 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {

// Test list of optional (with values)
testArgTypes<c10::List<::std::optional<int64_t>>>::test(
c10::List<::std::optional<int64_t>>(c10::List<::std::optional<int64_t>>({3, c10::nullopt, 2})), [] (const c10::List<::std::optional<int64_t>>& v) {expectListEquals<c10::optional<int64_t>>({3, c10::nullopt, 2}, v);},
c10::List<::std::optional<int64_t>>(c10::List<::std::optional<int64_t>>({3, c10::nullopt, 2})), [] (const IValue& v) {expectListEquals<c10::optional<int64_t>>({3, c10::nullopt, 2}, v.to<c10::List<::std::optional<int64_t>>>());},
c10::List<::std::optional<int64_t>>(c10::List<::std::optional<int64_t>>({3, c10::nullopt, 2})), [] (const c10::List<::std::optional<int64_t>>& v) {expectListEquals<std::optional<int64_t>>({3, c10::nullopt, 2}, v);},
c10::List<::std::optional<int64_t>>(c10::List<::std::optional<int64_t>>({3, c10::nullopt, 2})), [] (const IValue& v) {expectListEquals<std::optional<int64_t>>({3, c10::nullopt, 2}, v.to<c10::List<::std::optional<int64_t>>>());},
"(int?[] a) -> int?[]");

// dict types

@ -23,7 +23,7 @@ struct OperatorName final {
// Return the namespace of this OperatorName, if it exists. The
// returned string_view is only live as long as the OperatorName
// exists and name is not mutated
c10::optional<c10::string_view> getNamespace() const {
std::optional<c10::string_view> getNamespace() const {
auto pos = name.find("::");
if (pos == std::string::npos) {
return c10::nullopt;

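A sketch of `OperatorName::getNamespace` with the new return type; the operator name is chosen for illustration:

```cpp
c10::OperatorName op{"aten::add", ""};
if (auto ns = op.getNamespace()) {
  // `ns` views "aten" and is valid only while `op` is alive and unmodified
}
```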
@ -274,12 +274,12 @@ TensorTypePtr TensorType::create(const at::Tensor& t) {
}

TensorTypePtr TensorType::create(
c10::optional<at::ScalarType> scalar_type,
c10::optional<Device> device,
std::optional<at::ScalarType> scalar_type,
std::optional<Device> device,
const VaryingShape<int64_t>& sizes,
const VaryingShape<int64_t>& strides,
c10::optional<bool> requires_grad,
c10::optional<bool> undefined, bool tensor_contiguity) {
std::optional<bool> requires_grad,
std::optional<bool> undefined, bool tensor_contiguity) {
if(strides.concrete_sizes() && strides.concrete_sizes().has_value()){
// handles case where strides are set
// NOLINTNEXTLINE(bugprone-unchecked-optional-access)
@ -304,22 +304,22 @@ TensorTypePtr TensorType::create(
}

TensorTypePtr TensorType::create(
c10::optional<at::ScalarType> scalar_type,
c10::optional<Device> device,
std::optional<at::ScalarType> scalar_type,
std::optional<Device> device,
const SymbolicShape& sizes,
const VaryingShape<Stride>& strides,
c10::optional<bool> requires_grad,
c10::optional<bool> undefined) {
std::optional<bool> requires_grad,
std::optional<bool> undefined) {
auto pt = TensorTypePtr(new TensorType(
scalar_type, device, sizes, strides, requires_grad, undefined));
return pt;
}

TensorTypePtr TensorType::create(
c10::optional<at::ScalarType> scalar_type,
c10::optional<Device> device,
c10::optional<size_t> dim,
c10::optional<bool> requires_grad) {
std::optional<at::ScalarType> scalar_type,
std::optional<Device> device,
std::optional<size_t> dim,
std::optional<bool> requires_grad) {
return TensorType::create(
scalar_type,
device,
@ -349,7 +349,7 @@ VaryingShape<int64_t> TensorType::sizes() const {
fmap(*sizes_.sizes(), [](ShapeSymbol ss) {
// we turn symbolic shapes into unknowns
return ss.is_static()
? c10::optional<int64_t>(ss.static_size())
? std::optional<int64_t>(ss.static_size())
: c10::nullopt;
}));
}
@ -371,7 +371,7 @@ TensorTypePtr TensorType::merge(const TensorType& other, bool merge_sizes) const
}

template <typename T>
bool is_null_or_equal(c10::optional<T> a, c10::IntArrayRef b) {
bool is_null_or_equal(std::optional<T> a, c10::IntArrayRef b) {
return !a.has_value() || a.value() == b;
}

@ -417,7 +417,7 @@ VaryingShape<int64_t> TensorType::strides() const {
if (!strides_.size().has_value()) {
return VaryingShape<int64_t>();
}
std::vector<c10::optional<int64_t>> ss(*strides_.size());
std::vector<std::optional<int64_t>> ss(*strides_.size());
for (size_t i = 0; i < *strides_.size(); i++) {
if (!strides_[i].has_value()) {
continue;
@ -431,12 +431,12 @@ VaryingShape<int64_t> TensorType::strides() const {
}

TensorType::TensorType(
c10::optional<at::ScalarType> scalar_type,
c10::optional<Device> device,
std::optional<at::ScalarType> scalar_type,
std::optional<Device> device,
SymbolicShape sizes,
VaryingShape<Stride> strides,
c10::optional<bool> requires_grad,
c10::optional<bool> undefined)
std::optional<bool> requires_grad,
std::optional<bool> undefined)
: SharedType(TypeKind::TensorType),
scalar_type_(scalar_type),
device_(device),

@ -364,7 +364,7 @@ SymBoolTypePtr SymBoolType::get() {
return value;
}

static c10::optional<TypePtr> unifyTypesImpl(const TypePtr& t1, const TypePtr& t2, bool default_to_union=false, const TypePtr& type_hint=nullptr) {
static std::optional<TypePtr> unifyTypesImpl(const TypePtr& t1, const TypePtr& t2, bool default_to_union=false, const TypePtr& type_hint=nullptr) {
// check direct subtyping relation
if (t1->isSubtypeOf(*t2)) {
return t2;
@ -446,7 +446,7 @@ static c10::optional<TypePtr> unifyTypesImpl(const TypePtr& t1, const TypePtr& t
return c10::nullopt;
}

c10::optional<TypePtr> unifyTypes(const TypePtr& t1, const TypePtr& t2, bool default_to_union, const TypePtr& type_hint) {
std::optional<TypePtr> unifyTypes(const TypePtr& t1, const TypePtr& t2, bool default_to_union, const TypePtr& type_hint) {
auto unified = unifyTypesImpl(t1, t2, default_to_union, type_hint);

if (default_to_union && !unified) {
@ -456,7 +456,7 @@ c10::optional<TypePtr> unifyTypes(const TypePtr& t1, const TypePtr& t2, bool def
return unified;
}

c10::optional<TypePtr> unifyTypeList(
std::optional<TypePtr> unifyTypeList(
at::ArrayRef<TypePtr> elements,
std::ostream& why_not,
bool default_to_union,
@ -468,7 +468,7 @@ c10::optional<TypePtr> unifyTypeList(

TypePtr ret_type = elements.at(0);
for (size_t i = 1; i < elements.size() && ret_type; ++i) {
c10::optional<TypePtr> maybe_unified = unifyTypes(ret_type, elements.at(i), default_to_union, type_hint);
std::optional<TypePtr> maybe_unified = unifyTypes(ret_type, elements.at(i), default_to_union, type_hint);
if (!maybe_unified) {
why_not << "Could not unify type list since element " << i << " of type "
<< elements.at(i)->repr_str()
@ -719,7 +719,7 @@ bool Type::is_module() const {
}

TupleTypePtr TupleType::createNamed(
const c10::optional<c10::QualifiedName>& qualName,
const std::optional<c10::QualifiedName>& qualName,
const std::vector<std::string>& field_names,
const std::vector<TypePtr>& field_types) {
std::vector<IValue> empty_defaults;
@ -727,7 +727,7 @@ TupleTypePtr TupleType::createNamed(
}

TupleTypePtr TupleType::createNamed(
const c10::optional<c10::QualifiedName>& qualName,
const std::optional<c10::QualifiedName>& qualName,
const std::vector<c10::string_view>& field_names,
const std::vector<TypePtr>& field_types) {
std::vector<IValue> empty_defaults;
@ -735,7 +735,7 @@ TupleTypePtr TupleType::createNamed(
}

TupleTypePtr TupleType::createNamed(
const c10::optional<c10::QualifiedName>& qualName,
const std::optional<c10::QualifiedName>& qualName,
const std::vector<std::string>& field_names,
const std::vector<TypePtr>& field_types,
std::vector<IValue>& field_defaults) {
@ -743,7 +743,7 @@ TupleTypePtr TupleType::createNamed(
}

template <typename S>
TupleTypePtr TupleType::createWithSpec(const c10::optional<c10::QualifiedName>& qualName,
TupleTypePtr TupleType::createWithSpec(const std::optional<c10::QualifiedName>& qualName,
const std::vector<S>& field_names,
const std::vector<TypePtr>& field_types,
std::vector<IValue>& field_defaults) {
@ -784,7 +784,7 @@ TupleTypePtr TupleType::createWithSpec(const c10::optional<c10::QualifiedName>&
field_types, qualName, std::move(schema))); // NOLINT(modernize-make-shared)
}

c10::optional<std::vector<c10::string_view>> TupleType::names() const {
std::optional<std::vector<c10::string_view>> TupleType::names() const {
if (!schema_) {
return {};
}
@ -820,7 +820,7 @@ bool NumberType::isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const {

TupleType::TupleType(
std::vector<TypePtr> elements,
c10::optional<c10::QualifiedName> name,
std::optional<c10::QualifiedName> name,
std::shared_ptr<FunctionSchema> schema)
: NamedType(TypeKind::TupleType, std::move(name)),
elements_(std::move(elements)),

@@ -29,7 +29,7 @@ ListTypePtr ListType::ofOptionalTensors() {
 
 namespace {
 
-c10::optional<TypePtr> subtractTypeSetFrom(std::vector<TypePtr>& to_subtract, ArrayRef<TypePtr> from) {
+std::optional<TypePtr> subtractTypeSetFrom(std::vector<TypePtr>& to_subtract, ArrayRef<TypePtr> from) {
   std::vector<TypePtr> types;
 
 // Given a TypePtr `lhs`, this function says whether or not `lhs` (or
@@ -93,7 +93,7 @@ void filterDuplicateSubtypes(std::vector<TypePtr>* types) {
   if (types->empty()) {
     return;
   }
-  auto get_supertype = [](const TypePtr& t1, const TypePtr& t2) -> c10::optional<TypePtr> {
+  auto get_supertype = [](const TypePtr& t1, const TypePtr& t2) -> std::optional<TypePtr> {
     // We don't want nested Optionals. Also, prematurely unifying to
     // `Optional` could prevent us from coalescing other types
     if ((t1->isSubtypeOf(*NoneType::get()) && !t2->isSubtypeOf(*NoneType::get()))
@@ -114,7 +114,7 @@ void filterDuplicateSubtypes(std::vector<TypePtr>* types) {
   size_t end_idx = types->size()-1;
   for (size_t i = types->size()-1; i > 0; --i) {
     for (size_t j = std::min(i-1, end_idx); ; --j) {
-      c10::optional<TypePtr> unified;
+      std::optional<TypePtr> unified;
       unified = get_supertype((*types)[i], (*types)[j]);
       if (unified) {
         (*types)[j] = *unified;
@@ -272,11 +272,11 @@ UnionTypePtr UnionType::create(std::vector<TypePtr> reference) {
   return union_type;
 }
 
-c10::optional<TypePtr> UnionType::subtractTypeSet(std::vector<TypePtr>& to_subtract) const {
+std::optional<TypePtr> UnionType::subtractTypeSet(std::vector<TypePtr>& to_subtract) const {
   return subtractTypeSetFrom(to_subtract, containedTypes());
 }
 
-c10::optional<TypePtr> UnionType::toOptional() const {
+std::optional<TypePtr> UnionType::toOptional() const {
   if (!canHoldType(*NoneType::get())) {
     return c10::nullopt;
   }
@@ -432,7 +432,7 @@ bool UnionType::canHoldType(const Type& type) const {
 bool OptionalType::equals(const Type& rhs) const {
   if (auto union_rhs = rhs.cast<UnionType>()) {
     auto optional_rhs = union_rhs->toOptional();
-    // `**optional_rhs` = `*` to get value of `c10::optional<TypePtr>`,
+    // `**optional_rhs` = `*` to get value of `std::optional<TypePtr>`,
     // then `*` to dereference the pointer
     return optional_rhs && *this == **optional_rhs;
   } else if (auto optional_rhs = rhs.cast<OptionalType>()) {
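The comment in the OptionalType::equals hunk above spells out why `**optional_rhs` needs two dereferences: the first `*` unwraps the std::optional, the second `*` follows the pointer it holds. A minimal self-contained sketch of the same idiom, using generic standard-library types rather than the TypePtr/UnionType classes from this diff:

```
#include <cassert>
#include <memory>
#include <optional>

int main() {
  // An optional that may or may not contain a (shared) pointer.
  std::optional<std::shared_ptr<int>> maybe_ptr = std::make_shared<int>(42);

  // First '*' unwraps the std::optional, second '*' dereferences the pointer.
  assert(maybe_ptr.has_value());
  assert(**maybe_ptr == 42);
  return 0;
}
```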
@@ -105,7 +105,7 @@ struct CUDACachingHostAllocatorImpl
   }
 
   void record_stream(
-      c10::optional<std::vector<EventPool::Event>>& events,
+      std::optional<std::vector<EventPool::Event>>& events,
       CUDAStream stream) override {
     auto event = create_event_internal(stream.device_index());
     event->record(stream);
@@ -8,8 +8,8 @@ namespace at::detail {
 TensorBase empty_cuda(
     IntArrayRef size,
     ScalarType dtype,
-    c10::optional<Device> device_opt,
-    c10::optional<c10::MemoryFormat> memory_format_opt) {
+    std::optional<Device> device_opt,
+    std::optional<c10::MemoryFormat> memory_format_opt) {
   at::globalContext().lazyInitCUDA();
   const auto device = device_or_default(device_opt);
   TORCH_INTERNAL_ASSERT(device.is_cuda());
@@ -22,11 +22,11 @@ TensorBase empty_cuda(
 
 TensorBase empty_cuda(
     IntArrayRef size,
-    c10::optional<ScalarType> dtype_opt,
-    c10::optional<Layout> layout_opt,
-    c10::optional<Device> device_opt,
-    c10::optional<bool> pin_memory_opt,
-    c10::optional<c10::MemoryFormat> memory_format_opt) {
+    std::optional<ScalarType> dtype_opt,
+    std::optional<Layout> layout_opt,
+    std::optional<Device> device_opt,
+    std::optional<bool> pin_memory_opt,
+    std::optional<c10::MemoryFormat> memory_format_opt) {
   TORCH_CHECK(!pin_memory_opt.has_value() || !*pin_memory_opt, "Only dense CPU tensors can be pinned");
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
 
@@ -49,7 +49,7 @@ TensorBase empty_strided_cuda(
     IntArrayRef size,
     IntArrayRef stride,
     ScalarType dtype,
-    c10::optional<Device> device_opt) {
+    std::optional<Device> device_opt) {
   at::globalContext().lazyInitCUDA();
   const auto device = device_or_default(device_opt);
   TORCH_INTERNAL_ASSERT(device.is_cuda());
@@ -63,10 +63,10 @@ TensorBase empty_strided_cuda(
 TensorBase empty_strided_cuda(
     IntArrayRef size,
     IntArrayRef stride,
-    c10::optional<ScalarType> dtype_opt,
-    c10::optional<Layout> layout_opt,
-    c10::optional<Device> device_opt,
-    c10::optional<bool> pin_memory_opt) {
+    std::optional<ScalarType> dtype_opt,
+    std::optional<Layout> layout_opt,
+    std::optional<Device> device_opt,
+    std::optional<bool> pin_memory_opt) {
   TORCH_CHECK(!pin_memory_opt.has_value() || !*pin_memory_opt, "Only dense CPU tensors can be pinned");
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);
 
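The empty_cuda/empty_strided_cuda definitions above resolve their std::optional arguments through helpers such as device_or_default and layout_or_default before using them. A rough sketch of that defaulting pattern with plain std::optional; the std::string stand-in for Device and the chosen default string are illustrative assumptions, not ATen code:

```
#include <iostream>
#include <optional>
#include <string>

// Illustrative stand-in for a helper like device_or_default: fall back to a
// fixed default when the caller did not supply a value.
std::string device_or_default_sketch(const std::optional<std::string>& device_opt) {
  return device_opt.value_or("cuda:0");
}

int main() {
  std::cout << device_or_default_sketch(std::nullopt) << "\n";            // prints the default
  std::cout << device_or_default_sketch(std::string("cuda:1")) << "\n";   // prints the given value
  return 0;
}
```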
@@ -6,16 +6,16 @@ namespace at::detail {
 TORCH_CUDA_CPP_API TensorBase empty_cuda(
     IntArrayRef size,
     ScalarType dtype,
-    c10::optional<Device> device_opt,
-    c10::optional<c10::MemoryFormat> memory_format_opt);
+    std::optional<Device> device_opt,
+    std::optional<c10::MemoryFormat> memory_format_opt);
 
 TORCH_CUDA_CPP_API TensorBase empty_cuda(
     IntArrayRef size,
-    c10::optional<ScalarType> dtype_opt,
-    c10::optional<Layout> layout_opt,
-    c10::optional<Device> device_opt,
-    c10::optional<bool> pin_memory_opt,
-    c10::optional<c10::MemoryFormat> memory_format_opt);
+    std::optional<ScalarType> dtype_opt,
+    std::optional<Layout> layout_opt,
+    std::optional<Device> device_opt,
+    std::optional<bool> pin_memory_opt,
+    std::optional<c10::MemoryFormat> memory_format_opt);
 
 TORCH_CUDA_CPP_API TensorBase empty_cuda(
     IntArrayRef size,
@@ -25,15 +25,15 @@ TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
     IntArrayRef size,
     IntArrayRef stride,
     ScalarType dtype,
-    c10::optional<Device> device_opt);
+    std::optional<Device> device_opt);
 
 TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
     IntArrayRef size,
     IntArrayRef stride,
-    c10::optional<ScalarType> dtype_opt,
-    c10::optional<Layout> layout_opt,
-    c10::optional<Device> device_opt,
-    c10::optional<bool> pin_memory_opt);
+    std::optional<ScalarType> dtype_opt,
+    std::optional<Layout> layout_opt,
+    std::optional<Device> device_opt,
+    std::optional<bool> pin_memory_opt);
 
 TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
     IntArrayRef size,
@@ -8,13 +8,13 @@
 
 namespace at::native {
 
-bool is_pinned_cuda(const Tensor& self, c10::optional<Device> device) {
+bool is_pinned_cuda(const Tensor& self, std::optional<Device> device) {
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_cuda());
   // TODO: unhook this
   return detail::getCUDAHooks().isPinnedPtr(self.storage().data());
 }
 
-Tensor _pin_memory_cuda(const Tensor& self, c10::optional<Device> device) {
+Tensor _pin_memory_cuda(const Tensor& self, std::optional<Device> device) {
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_cuda());
   auto* allocator = at::cuda::getPinnedMemoryAllocator();
   auto storage = Storage(
@@ -22,9 +22,9 @@ std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>
 _cudnn_rnn_cast_reflatten(const Tensor & input,
     TensorList weight,
     int64_t weight_stride0,
-    const c10::optional<Tensor>& weight_buf_opt,
+    const std::optional<Tensor>& weight_buf_opt,
     const Tensor& hx,
-    const c10::optional<Tensor>& cx,
+    const std::optional<Tensor>& cx,
     int64_t mode,
     int64_t hidden_size,
     int64_t proj_size,
@@ -34,7 +34,7 @@ _cudnn_rnn_cast_reflatten(const Tensor & input,
     bool train,
     bool bidirectional,
     IntArrayRef batch_sizes,
-    const c10::optional<Tensor>& dropout_state) {
+    const std::optional<Tensor>& dropout_state) {
 #if AT_CUDNN_ENABLED()
   c10::impl::ExcludeDispatchKeyGuard no_autocast(DispatchKey::Autocast);
 
@@ -303,7 +303,7 @@ static std::tuple<Tensor, optional<int64_t>> log_sigmoid_backward_batch_rule(
   return std::make_tuple(at::log_sigmoid_backward(out_grad, out_self, out_buffer), 0);
 }
 
-static Tensor binomial_wrapper(const Tensor& count, const Tensor& prob, c10::optional<Generator> gen) {
+static Tensor binomial_wrapper(const Tensor& count, const Tensor& prob, std::optional<Generator> gen) {
   return at::binomial(count, prob.contiguous(), std::move(gen)); // Bug in PyTorch, prob shouldn't need to be contiguous
 }
 
@@ -457,7 +457,7 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {
   using TensorScalarInplaceT = Tensor& (Tensor::*)(const Tensor&, const Scalar&) const;
   using ScalarScalarInplaceT = Tensor& (Tensor::*)(const Scalar&, const Scalar&) const;
   using TensorInplaceT = Tensor& (Tensor::*)(const Tensor&) const;
-  using TensorInplaceModeT = Tensor& (Tensor::*)(const Tensor&, c10::optional<c10::string_view>) const;
+  using TensorInplaceModeT = Tensor& (Tensor::*)(const Tensor&, std::optional<c10::string_view>) const;
   using ScalarInplaceT = Tensor& (Tensor::*)(const Scalar&) const;
   using CopyT = Tensor& (Tensor::*)(const Tensor&, bool) const;
 
@@ -471,7 +471,7 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {
   VMAP_SUPPORT2(mul_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceT, &Tensor::mul_>));
   VMAP_SUPPORT2(mul_, Scalar, SINGLE_ARG(unary_inplace_batch_rule<ScalarInplaceT, &Tensor::mul_, const Scalar&>));
   VMAP_SUPPORT2(div_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceT, &Tensor::div_>));
-  VMAP_SUPPORT2(div_, Tensor_mode, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceModeT, &Tensor::div_, c10::optional<c10::string_view>>));
+  VMAP_SUPPORT2(div_, Tensor_mode, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceModeT, &Tensor::div_, std::optional<c10::string_view>>));
   VMAP_SUPPORT2(div_, Scalar, SINGLE_ARG(unary_inplace_batch_rule<ScalarInplaceT, &Tensor::div_, const Scalar&>));
   VMAP_SUPPORT2(clamp_min_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceT, &Tensor::clamp_min_>));
   VMAP_SUPPORT2(clamp_max_, Tensor, SINGLE_ARG(binary_pointwise_inplace_batch_rule<TensorInplaceT, &Tensor::clamp_max_>));
@@ -124,7 +124,7 @@ convolution_batch_rule(const Tensor& lhs, optional<int64_t> lhs_bdim, const Tens
 }
 
 static Tensor _convolution_decomp(
-    const Tensor& input_r, const Tensor& weight_r, const c10::optional<Tensor>& bias_r_opt,
+    const Tensor& input_r, const Tensor& weight_r, const std::optional<Tensor>& bias_r_opt,
     IntArrayRef stride_, IntArrayRef padding_, IntArrayRef dilation_,
     bool transposed_, IntArrayRef output_padding_, int64_t groups_,
     bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
@@ -107,11 +107,11 @@ static std::tuple<Tensor,optional<int64_t>> linspace_logspace_batch_rule_helper(
     const at::Tensor& start, optional<int64_t> start_bdim,
     const at::Tensor& end, optional<int64_t> end_bdim,
     int64_t steps,
-    c10::optional<double> base,
-    c10::optional<at::ScalarType> dtype,
-    c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device,
-    c10::optional<bool> pin_memory)
+    std::optional<double> base,
+    std::optional<at::ScalarType> dtype,
+    std::optional<at::Layout> layout,
+    std::optional<at::Device> device,
+    std::optional<bool> pin_memory)
 {
   auto batch_size = get_bdim_size2(start, start_bdim, end, end_bdim);
   auto start_ = ensure_has_bdim(start, start_bdim.has_value(), batch_size);
@@ -145,10 +145,10 @@ static std::tuple<Tensor,optional<int64_t>> linspace_Tensor_Tensor_batch_rule(
     const at::Tensor& start, optional<int64_t> start_bdim,
     const at::Tensor& end, optional<int64_t> end_bdim,
     int64_t steps,
-    c10::optional<at::ScalarType> dtype,
-    c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device,
-    c10::optional<bool> pin_memory){
+    std::optional<at::ScalarType> dtype,
+    std::optional<at::Layout> layout,
+    std::optional<at::Device> device,
+    std::optional<bool> pin_memory){
   return linspace_logspace_batch_rule_helper(start, start_bdim, end, end_bdim, steps, c10::nullopt, dtype, layout, device, pin_memory);
 }
 
@@ -156,10 +156,10 @@ static std::tuple<Tensor,optional<int64_t>> linspace_Tensor_Scalar_batch_rule(
     const at::Tensor& start, optional<int64_t> start_bdim,
     const at::Scalar& end,
     int64_t steps,
-    c10::optional<at::ScalarType> dtype,
-    c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device,
-    c10::optional<bool> pin_memory){
+    std::optional<at::ScalarType> dtype,
+    std::optional<at::Layout> layout,
+    std::optional<at::Device> device,
+    std::optional<bool> pin_memory){
 
   auto end_t = at::native::wrapped_scalar_tensor(end, start.device());
   return linspace_logspace_batch_rule_helper(start, start_bdim, end_t, c10::nullopt, steps, c10::nullopt, dtype, layout, device, pin_memory);
@@ -169,10 +169,10 @@ static std::tuple<Tensor,optional<int64_t>> linspace_Scalar_Tensor_batch_rule(
     const at::Scalar& start,
     const at::Tensor& end, optional<int64_t> end_bdim,
     int64_t steps,
-    c10::optional<at::ScalarType> dtype,
-    c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device,
-    c10::optional<bool> pin_memory){
+    std::optional<at::ScalarType> dtype,
+    std::optional<at::Layout> layout,
+    std::optional<at::Device> device,
+    std::optional<bool> pin_memory){
 
   auto start_t = at::native::wrapped_scalar_tensor(start, end.device());
   return linspace_logspace_batch_rule_helper(start_t, c10::nullopt, end, end_bdim, steps, c10::nullopt, dtype, layout, device, pin_memory);
@@ -183,10 +183,10 @@ static std::tuple<Tensor,optional<int64_t>> logspace_Tensor_Tensor_batch_rule(
     const at::Tensor& end, optional<int64_t> end_bdim,
     int64_t steps,
     double base,
-    c10::optional<at::ScalarType> dtype,
-    c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device,
-    c10::optional<bool> pin_memory){
+    std::optional<at::ScalarType> dtype,
+    std::optional<at::Layout> layout,
+    std::optional<at::Device> device,
+    std::optional<bool> pin_memory){
   return linspace_logspace_batch_rule_helper(start, start_bdim, end, end_bdim, steps, c10::make_optional(base), dtype, layout, device, pin_memory);
 }
 
@@ -195,10 +195,10 @@ static std::tuple<Tensor,optional<int64_t>> logspace_Tensor_Scalar_batch_rule(
     const at::Scalar& end,
     int64_t steps,
     double base,
-    c10::optional<at::ScalarType> dtype,
-    c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device,
-    c10::optional<bool> pin_memory){
+    std::optional<at::ScalarType> dtype,
+    std::optional<at::Layout> layout,
+    std::optional<at::Device> device,
+    std::optional<bool> pin_memory){
 
   auto end_t = at::native::wrapped_scalar_tensor(end, start.device());
   return linspace_logspace_batch_rule_helper(start, start_bdim, end_t, c10::nullopt, steps, c10::make_optional(base), dtype, layout, device, pin_memory);
@@ -209,10 +209,10 @@ static std::tuple<Tensor,optional<int64_t>> logspace_Scalar_Tensor_batch_rule(
     const at::Tensor& end, optional<int64_t> end_bdim,
     int64_t steps,
     double base,
-    c10::optional<at::ScalarType> dtype,
-    c10::optional<at::Layout> layout,
-    c10::optional<at::Device> device,
-    c10::optional<bool> pin_memory){
+    std::optional<at::ScalarType> dtype,
+    std::optional<at::Layout> layout,
+    std::optional<at::Device> device,
+    std::optional<bool> pin_memory){
 
   auto start_t = at::native::wrapped_scalar_tensor(start, end.device());
   return linspace_logspace_batch_rule_helper(start_t, c10::nullopt, end, end_bdim, steps, c10::make_optional(base), dtype, layout, device, pin_memory);
@@ -157,9 +157,9 @@ void _linalg_check_errors_batch_rule(const Tensor& info, optional<int64_t> info_
   at::_linalg_check_errors(info_, api_name, false);
 }
 
-std::tuple<Tensor, c10::optional<int64_t>>
-householder_product_batch_rule(const Tensor &input, c10::optional<int64_t> input_bdim,
-    const Tensor &tau, c10::optional<int64_t> tau_bdim)
+std::tuple<Tensor, std::optional<int64_t>>
+householder_product_batch_rule(const Tensor &input, std::optional<int64_t> input_bdim,
+    const Tensor &tau, std::optional<int64_t> tau_bdim)
 {
   auto input_ = moveBatchDimToFront(input, input_bdim);
   auto tau_ = moveBatchDimToFront(tau, tau_bdim);
@@ -330,8 +330,8 @@ oneOutput linalg_lu_solve_batch_rule(
 }
 
 oneOutput cholesky_solve_batch_rule(
-    const Tensor& self, c10::optional<int64_t> self_bdim,
-    const Tensor& A, c10::optional<int64_t> A_bdim,
+    const Tensor& self, std::optional<int64_t> self_bdim,
+    const Tensor& A, std::optional<int64_t> A_bdim,
     bool upper) {
   TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) >= 2,
       "b should have at least 2 dimensions, but has ", self.dim(), " dimensions instead");
@@ -345,14 +345,14 @@ oneOutput cholesky_solve_batch_rule(
 }
 
 threeOutputs linalg_lu_factor_ex_batch_rule(
-    const Tensor& A, c10::optional<int64_t> A_bdim, bool pivot, bool check_errors) {
+    const Tensor& A, std::optional<int64_t> A_bdim, bool pivot, bool check_errors) {
   TORCH_CHECK(rankWithoutBatchDim(A, A_bdim) >= 2, "torch.lu_factor_ex: Expected tensor with 2 or more dimensions. Got size: ", A.sizes(), " instead");
   const auto A_ = moveBatchDimToFront(A, A_bdim);
   const auto res = at::linalg_lu_factor_ex(A_, pivot, check_errors);
   return std::make_tuple(std::get<0>(res), 0, std::get<1>(res), 0, std::get<2>(res), 0);
 }
 
-oneOutput matrix_exp_batch_rule(const Tensor& self, c10::optional<int64_t> self_bdim) {
+oneOutput matrix_exp_batch_rule(const Tensor& self, std::optional<int64_t> self_bdim) {
   TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) >= 2, "torch.matrix_exp: The input tensor A must have at least 2 dimensions.");
   const auto self_ = moveBatchDimToFront(self, self_bdim).contiguous(); // seems to be a bug
   return std::make_tuple(at::matrix_exp(self_), 0);
@@ -400,8 +400,8 @@ fourOutputs solve_ex_batch_rule(
   return std::make_tuple(std::get<0>(res), 0, std::get<1>(res), 0, std::get<2>(res), 0, std::get<3>(res), 0);
 }
 
-oneOutput cross_batch_rule(const Tensor& self, c10::optional<int64_t> self_bdim,
-    const Tensor& other, c10::optional<int64_t> other_bdim, const int64_t dim) {
+oneOutput cross_batch_rule(const Tensor& self, std::optional<int64_t> self_bdim,
+    const Tensor& other, std::optional<int64_t> other_bdim, const int64_t dim) {
   // match cross dimension checks
   TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) == rankWithoutBatchDim(other, other_bdim),
       "linalg.cross: inputs must have the same number of dimensions."
@@ -418,16 +418,16 @@ oneOutput cross_batch_rule(const Tensor& self, c10::optional<int64_t> self_bdim,
   return std::make_tuple(linalg_cross(self_, other_, dim_), 0);
 }
 
-c10::optional<int64_t> batch_dim_if_not_empty(const Tensor& t) {
+std::optional<int64_t> batch_dim_if_not_empty(const Tensor& t) {
   if (t.dim() == 1 && t.size(0) == 0) {
-    return c10::optional<int64_t>();
+    return std::optional<int64_t>();
   }
-  return c10::optional<int64_t>(0);
+  return std::optional<int64_t>(0);
 }
 
 fourOutputs linalg_lstsq_batch_rule(
-    const Tensor& self, c10::optional<int64_t> self_bdim, const Tensor& b, c10::optional<int64_t> b_bdim,
-    c10::optional<double> rcond, c10::optional<c10::string_view> driver) {
+    const Tensor& self, std::optional<int64_t> self_bdim, const Tensor& b, c10::optional<int64_t> b_bdim,
+    std::optional<double> rcond, c10::optional<c10::string_view> driver) {
   TORCH_CHECK(rankWithoutBatchDim(self, self_bdim) >= 2, "torch.linalg.lstsq: input must have at least 2 dimensions.");
   TORCH_CHECK(rankWithoutBatchDim(b, b_bdim) >= 1, "torch.linalg.lstsq: other must have at least 1 dimension.");
 
@@ -449,7 +449,7 @@ fourOutputs linalg_lstsq_batch_rule(
 }
 
 template<typename F>
-std::tuple<Tensor, c10::optional<int64_t>>
+std::tuple<Tensor, std::optional<int64_t>>
 atol_rtol_tensor_batch_rule(
     F Func, const Tensor& input, optional<int64_t> input_bdim,
     const optional<Tensor>& atol, const optional<int64_t> atol_bdim,
@@ -478,11 +478,11 @@ atol_rtol_tensor_batch_rule(
   return std::make_tuple(Func(input_, atol_, rtol_, hermitian), 0);
 }
 
-static std::tuple<Tensor, c10::optional<int64_t>>
+static std::tuple<Tensor, std::optional<int64_t>>
 pinv_batch_rule(
-    const Tensor& input, c10::optional<int64_t> input_bdim, const optional<Tensor>& atol,
-    const c10::optional<int64_t> atol_bdim, const optional<Tensor>& rtol,
-    const c10::optional<int64_t> rtol_bdim, bool hermitian) {
+    const Tensor& input, std::optional<int64_t> input_bdim, const optional<Tensor>& atol,
+    const std::optional<int64_t> atol_bdim, const optional<Tensor>& rtol,
+    const std::optional<int64_t> rtol_bdim, bool hermitian) {
   return atol_rtol_tensor_batch_rule(ATEN_FN2(linalg_pinv, atol_rtol_tensor), input, input_bdim, atol, atol_bdim, rtol, rtol_bdim, hermitian, "linalg.pinv");
 }
 }
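Most of the batch rules above thread a std::optional<int64_t> batch dimension alongside each tensor; batch_dim_if_not_empty in the hunk above returns an empty optional for a degenerate input and 0 otherwise. A self-contained sketch of that convention, with std::vector standing in for a Tensor (an assumption made purely for illustration):

```
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Mirrors the shape of batch_dim_if_not_empty above: report batch dim 0
// unless the stand-in "tensor" is empty.
std::optional<int64_t> batch_dim_if_not_empty_sketch(const std::vector<int>& t) {
  if (t.empty()) {
    return std::nullopt;
  }
  return 0;
}

int main() {
  auto bdim = batch_dim_if_not_empty_sketch({1, 2, 3});
  if (bdim.has_value()) {
    std::cout << "batched at dim " << *bdim << "\n";
  } else {
    std::cout << "not batched\n";
  }
  return 0;
}
```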
@@ -123,7 +123,7 @@ static Tensor binary_cross_entropy_plumbing(
 
 static Tensor binary_cross_entropy_backward_plumbing(
     const Tensor& grad, const Tensor& input, const Tensor& target,
-    const c10::optional<Tensor>& weight_opt, int64_t reduction) {
+    const std::optional<Tensor>& weight_opt, int64_t reduction) {
   auto maybe_layer = maybeCurrentDynamicLayer();
   vmap_check_escaped(maybe_layer, "binary_cross_entropy_backward_plumbing");
   int64_t cur_level = maybe_layer->layerId();
@@ -45,10 +45,10 @@ template<typename F, F Func>
 std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>>
 batch_norm_batch_rule(
     const Tensor& input, optional<int64_t> input_bdim,
-    const c10::optional<Tensor>& weight_opt, optional<int64_t> weight_bdim,
-    const c10::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
-    const c10::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim,
-    const c10::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim,
+    const std::optional<Tensor>& weight_opt, optional<int64_t> weight_bdim,
+    const std::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
+    const std::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim,
+    const std::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim,
     bool training, double momentum, double eps) {
   c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
   const Tensor& weight = *weight_maybe_owned;
@@ -63,7 +63,7 @@ batch_norm_batch_rule(
     "were not batched.\nIf you are using a module and do not need eval mode, please set `track_running_stats` to be False.",
     "If you are using a prebuilt module and do not need eval mode, please see the functorch website for resources on ",
     "how to patch your module to work with vmap");
-  c10::optional<int64_t> bdim_size;
+  std::optional<int64_t> bdim_size;
   Tensor result0;
   Tensor mean;
   Tensor rstd;
@@ -80,8 +80,8 @@ batch_norm_batch_rule(
     input_ = ensure_has_bdim(input_, input_bdim.has_value(), bdim_size.value());
     input_ = reshape_dim_into(0, /*channels dim*/1, input_);
 
-    c10::optional<Tensor> running_mean_;
-    c10::optional<Tensor> running_var_;
+    std::optional<Tensor> running_mean_;
+    std::optional<Tensor> running_var_;
     if (running_mean.defined()) {
       running_mean_ = moveBatchDimToFront(running_mean, running_mean_bdim);
       running_mean_ = ensure_has_bdim(*running_mean_, running_mean_bdim.has_value(), bdim_size.value());
@@ -127,8 +127,8 @@ template<typename F, F Func>
 std::tuple<at::Tensor,optional<int64_t>> batch_norm_backward_no_weight_bias_batch_rule(
     const at::Tensor & grad_out, optional<int64_t> grad_out_bdim,
     const at::Tensor & input, optional<int64_t> input_bdim,
-    const c10::optional<at::Tensor> & running_mean_opt, optional<int64_t> running_mean_bdim,
-    const c10::optional<at::Tensor> & running_var_opt, optional<int64_t> running_var_bdim,
+    const std::optional<at::Tensor> & running_mean_opt, optional<int64_t> running_mean_bdim,
+    const std::optional<at::Tensor> & running_var_opt, optional<int64_t> running_var_bdim,
     const at::Tensor & mean, optional<int64_t> mean_bdim,
     const at::Tensor & rstd, optional<int64_t> rstd_bdim,
     bool training, double eps) {
@@ -199,11 +199,11 @@ template<typename F, F Func>
 std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_plumbing(
     const at::Tensor & grad_out,
     const at::Tensor & input,
-    const c10::optional<at::Tensor> & weight_opt,
-    const c10::optional<at::Tensor> & running_mean_opt,
-    const c10::optional<at::Tensor> & running_var_opt,
-    const c10::optional<at::Tensor> & save_mean_opt,
-    const c10::optional<at::Tensor> & save_rstd_opt,
+    const std::optional<at::Tensor> & weight_opt,
+    const std::optional<at::Tensor> & running_mean_opt,
+    const std::optional<at::Tensor> & running_var_opt,
+    const std::optional<at::Tensor> & save_mean_opt,
+    const std::optional<at::Tensor> & save_rstd_opt,
     bool training,
     double eps,
     std::array<bool,3> output_mask) {
@@ -284,8 +284,8 @@ std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_plumbing(
 }
 
 static std::tuple<Tensor,Tensor,Tensor> native_group_norm_plumbing(
-    const Tensor & input, const c10::optional<Tensor> & weight_opt,
-    const c10::optional<Tensor> & bias_opt, int64_t N, int64_t C,
+    const Tensor & input, const std::optional<Tensor> & weight_opt,
+    const std::optional<Tensor> & bias_opt, int64_t N, int64_t C,
     int64_t HxW, int64_t group, double eps) {
   // See [Note: hacky wrapper removal for optional tensor]
   c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
@@ -372,7 +372,7 @@ static std::tuple<at::Tensor,optional<int64_t>> group_norm_backward_no_weight_bi
 
 static std::tuple<Tensor,Tensor,Tensor> native_group_norm_backward_plumbing(
     const Tensor & grad_out, const Tensor & input, const Tensor & mean,
-    const Tensor & rstd, const c10::optional<Tensor> & weight_opt,
+    const Tensor & rstd, const std::optional<Tensor> & weight_opt,
     int64_t N, int64_t C, int64_t HxW, int64_t group, std::array<bool,3> output_mask
 ) {
   // See [Note: hacky wrapper removal for optional tensor]
@@ -488,8 +488,8 @@ static std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optio
 native_layer_norm_batch_rule(
     const Tensor& input, optional<int64_t> input_bdim,
     c10::SymIntArrayRef normalized_shape,
-    const c10::optional<Tensor>& weight_opt, optional<int64_t> weight_bdim,
-    const c10::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
+    const std::optional<Tensor>& weight_opt, optional<int64_t> weight_bdim,
+    const std::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
     double eps) {
   auto input_ = moveBatchDimToFront(input, input_bdim);
   if (!weight_bdim && !bias_bdim) {
@@ -573,8 +573,8 @@ static std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_p
     at::IntArrayRef normalized_shape,
     const at::Tensor & mean,
     const at::Tensor & rstd,
-    const c10::optional<at::Tensor> & weight_opt,
-    const c10::optional<at::Tensor> & bias_opt,
+    const std::optional<at::Tensor> & weight_opt,
+    const std::optional<at::Tensor> & bias_opt,
     std::array<bool,3> output_mask) {
   // See [Note: hacky wrapper removal for optional tensor]
   c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
@@ -653,10 +653,10 @@ template <typename F, F Func>
 struct NativeBatchNormBatchRuleHelper {
   static std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>> apply(
       const Tensor& input, optional<int64_t> input_bdim,
-      const c10::optional<Tensor>& weight_opt, optional<int64_t> weight_bdim,
-      const c10::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
-      const c10::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim,
-      const c10::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim,
+      const std::optional<Tensor>& weight_opt, optional<int64_t> weight_bdim,
+      const std::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
+      const std::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim,
+      const std::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim,
       bool training, double momentum, double eps) {
     return batch_norm_batch_rule<F, Func>(
         input, input_bdim, weight_opt, weight_bdim, bias_opt, bias_bdim,
@@ -669,9 +669,9 @@ struct CudnnBatchNormBatchRuleHelper {
   static std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>> apply(
       const Tensor& input, optional<int64_t> input_bdim,
       const Tensor& weight_opt, optional<int64_t> weight_bdim,
-      const c10::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
-      const c10::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim,
-      const c10::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim,
+      const std::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
+      const std::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim,
+      const std::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim,
      bool training, double momentum, double eps) {
     auto reserve = at::empty({0}, input.options().dtype(kByte)); // in experiments, reserve was never set to anything other than empty by cuda
     auto res = batch_norm_batch_rule<F, Func>(
@@ -686,9 +686,9 @@ struct MiopenBatchNormBatchRuleHelper {
   static std::tuple<Tensor,optional<int64_t>,Tensor,optional<int64_t>,Tensor,optional<int64_t>> apply(
       const Tensor& input, optional<int64_t> input_bdim,
       const Tensor& weight_opt, optional<int64_t> weight_bdim,
-      const c10::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
-      const c10::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim,
-      const c10::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim,
+      const std::optional<Tensor>& bias_opt, optional<int64_t> bias_bdim,
+      const std::optional<Tensor>& running_mean_opt, optional<int64_t> running_mean_bdim,
+      const std::optional<Tensor>& running_var_opt, optional<int64_t> running_var_bdim,
       bool training, double momentum, double eps) {
     return batch_norm_batch_rule<F, Func>(
         input, input_bdim, weight_opt, weight_bdim, bias_opt, bias_bdim,
@@ -716,11 +716,11 @@ struct NativeBatchNormBackwardBatchRuleHelper {
   static std::tuple<Tensor,Tensor,Tensor> apply(
       const at::Tensor & grad_out,
       const at::Tensor & input,
-      const c10::optional<at::Tensor> & weight_opt,
-      const c10::optional<at::Tensor> & running_mean_opt,
-      const c10::optional<at::Tensor> & running_var_opt,
-      const c10::optional<at::Tensor> & save_mean_opt,
-      const c10::optional<at::Tensor> & save_rstd_opt,
+      const std::optional<at::Tensor> & weight_opt,
+      const std::optional<at::Tensor> & running_mean_opt,
+      const std::optional<at::Tensor> & running_var_opt,
+      const std::optional<at::Tensor> & save_mean_opt,
+      const std::optional<at::Tensor> & save_rstd_opt,
       bool training,
       double eps,
       std::array<bool,3> output_mask) {
@@ -748,10 +748,10 @@ struct CudnnBatchNormBackwardBatchRuleHelper {
      const at::Tensor & input,
      const at::Tensor & grad_out,
      const at::Tensor & weight,
-      const c10::optional<at::Tensor> & running_mean_opt,
-      const c10::optional<at::Tensor> & running_var_opt,
-      const c10::optional<at::Tensor> & save_mean_opt,
-      const c10::optional<at::Tensor> & save_rstd_opt,
+      const std::optional<at::Tensor> & running_mean_opt,
+      const std::optional<at::Tensor> & running_var_opt,
+      const std::optional<at::Tensor> & save_mean_opt,
+      const std::optional<at::Tensor> & save_rstd_opt,
      double eps,
      const at::Tensor & reserve) {
 
@@ -777,10 +777,10 @@ struct MiopenBatchNormBackwardBatchRuleHelper {
      const at::Tensor & input,
      const at::Tensor & grad_out,
      const at::Tensor & weight,
-      const c10::optional<at::Tensor> & running_mean_opt,
-      const c10::optional<at::Tensor> & running_var_opt,
-      const c10::optional<at::Tensor> & save_mean_opt,
-      const c10::optional<at::Tensor> & save_rstd_opt,
+      const std::optional<at::Tensor> & running_mean_opt,
+      const std::optional<at::Tensor> & running_var_opt,
+      const std::optional<at::Tensor> & save_mean_opt,
+      const std::optional<at::Tensor> & save_rstd_opt,
      double eps) {
 
     auto maybe_layer = maybeCurrentDynamicLayer();
@@ -818,10 +818,10 @@ static std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward_wr
     const at::Tensor & grad_out,
     const at::Tensor & input,
     const at::Tensor& weight_opt,
-    const c10::optional<at::Tensor> & running_mean_opt,
-    const c10::optional<at::Tensor> & running_var_opt,
-    const c10::optional<at::Tensor> & save_mean_opt,
-    const c10::optional<at::Tensor> & save_rstd_opt,
+    const std::optional<at::Tensor> & running_mean_opt,
+    const std::optional<at::Tensor> & running_var_opt,
+    const std::optional<at::Tensor> & save_mean_opt,
+    const std::optional<at::Tensor> & save_rstd_opt,
     bool training,
     double eps,
     std::array<bool,3> output_mask) {
@@ -834,10 +834,10 @@ static std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward_w
     const at::Tensor & grad_out,
     const at::Tensor & input,
     const at::Tensor& weight_opt,
-    const c10::optional<at::Tensor> & running_mean_opt,
-    const c10::optional<at::Tensor> & running_var_opt,
-    const c10::optional<at::Tensor> & save_mean_opt,
-    const c10::optional<at::Tensor> & save_rstd_opt,
+    const std::optional<at::Tensor> & running_mean_opt,
+    const std::optional<at::Tensor> & running_var_opt,
+    const std::optional<at::Tensor> & save_mean_opt,
+    const std::optional<at::Tensor> & save_rstd_opt,
     bool training,
     double eps,
     std::array<bool,3> output_mask) {
@@ -850,13 +850,13 @@ static std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward_w
 // work with dynamo anyway so we gain some buffer room to do wrong things here. The (reasonable) hope is that we will
 // make native_batch_norm composite implicit within a few weeks and we can fix this before vmap works with dynamo.
 static std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_batch(
-    const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
+    const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
     Tensor& running_mean, Tensor& running_var, bool train, double momentum, double eps) {
   return at::native_batch_norm(self, weight_opt, bias_opt, running_mean, running_var, train, momentum, eps);
 }
 
 static std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_stats_batch(
-    const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
+    const Tensor& self, const std::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
     bool train, double momentum, double eps) {
   return at::native_batch_norm(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, eps);
 }
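The batch-norm rules above also show the "declare an empty std::optional, fill it only when the input is actually present" pattern (running_mean_/running_var_). A small generic sketch of that pattern; std::string stands in for a Tensor and the values are made up for illustration:

```
#include <iostream>
#include <optional>
#include <string>

int main() {
  const bool has_running_stats = true;  // assumption for the sketch

  // Start with an empty optional and populate it only when the data exists,
  // analogous to running_mean_/running_var_ in the hunk above.
  std::optional<std::string> running_mean_;
  if (has_running_stats) {
    running_mean_ = "running_mean moved to the front batch dim";
  }

  std::cout << running_mean_.value_or("<no running stats>") << "\n";
  return 0;
}
```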
@@ -58,7 +58,7 @@ Tensor& random_inplace_batching_rule(Tensor& self, ExtraArgs... extra_args) {
   }
 }
 
-static Tensor& bernoulli_inplace_Tensor_batching_rule(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
+static Tensor& bernoulli_inplace_Tensor_batching_rule(Tensor& self, const Tensor& p_, std::optional<Generator> gen) {
   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode);
   auto maybe_layer = maybeCurrentDynamicLayer();
   auto cur_level = maybe_layer->layerId();
@@ -173,7 +173,7 @@ Tensor tensor_like_random_batch_rule(const Tensor& self, ExtraArgs... extra_args
   return (randomness == RandomnessType::Same) ? res : makeBatched(res, 0, cur_level);
 }
 
-static std::tuple<Tensor,Tensor> native_dropout_batching_rule(const Tensor& tensor, double p, c10::optional<bool> train) {
+static std::tuple<Tensor,Tensor> native_dropout_batching_rule(const Tensor& tensor, double p, std::optional<bool> train) {
   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode);
   auto maybe_layer = maybeCurrentDynamicLayer();
   const auto cur_level = maybe_layer->layerId();
@@ -213,7 +213,7 @@ static std::tuple<Tensor,Tensor> native_dropout_batching_rule(const Tensor& tens
   return std::make_tuple(output, mask);
 }
 
-static Tensor multinomial_batching_rule(const Tensor& self, const int64_t num_samples, const bool replacement, const c10::optional<Generator> generator) {
+static Tensor multinomial_batching_rule(const Tensor& self, const int64_t num_samples, const bool replacement, const std::optional<Generator> generator) {
   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode);
   auto maybe_layer = maybeCurrentDynamicLayer();
   const auto cur_level = maybe_layer->layerId();
@@ -169,7 +169,7 @@ void boxed_reduction_batch_rule(const c10::OperatorHandle& op, torch::jit::Stack
     new_dims.push_back(getPhysicalDim(self, self_bdim.has_value(), dim));
   }
   bool is_scalar_case = logical_dim == 0 && dims.size() == 1 && is_allowed_dim_on_scalar_tensor(dims[0]);
-  c10::optional<bool> maybe_keepdim;
+  std::optional<bool> maybe_keepdim;
   if (is_scalar_case) {
     // NOTE: [boxed_reduction_batch_rule scalar tensor handling]
     // Reduction operations in PyTorch have an edge case where they allow
@@ -321,9 +321,9 @@ static std::tuple<Tensor,optional<int64_t>> searchsorted_batch_rule(
     optional<int64_t> self_bdim,
     bool out_int32,
     bool right,
-    c10::optional<c10::string_view> side,
-    const c10::optional<Tensor>& sorter,
-    c10::optional<int64_t> sorter_bdim) {
+    std::optional<c10::string_view> side,
+    const std::optional<Tensor>& sorter,
+    std::optional<int64_t> sorter_bdim) {
   auto buckets_logical_rank = rankWithoutBatchDim(sorted_sequence, sorted_sequence_bdim);
   auto self_logical_rank = rankWithoutBatchDim(self, self_bdim);
 
Some files were not shown because too many files have changed in this diff.