diff --git a/aten/src/ATen/CPUGeneratorImpl.h b/aten/src/ATen/CPUGeneratorImpl.h index 34dd33a475b9..e15ca23d6bf7 100644 --- a/aten/src/ATen/CPUGeneratorImpl.h +++ b/aten/src/ATen/CPUGeneratorImpl.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include namespace at { diff --git a/aten/src/ATen/InferSize.h b/aten/src/ATen/InferSize.h index 411cf12d5134..4bf820312d2f 100644 --- a/aten/src/ATen/InferSize.h +++ b/aten/src/ATen/InferSize.h @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include diff --git a/aten/src/ATen/SavedTensorHooks.cpp b/aten/src/ATen/SavedTensorHooks.cpp index 7aa9b0f02ea3..6837348c9321 100644 --- a/aten/src/ATen/SavedTensorHooks.cpp +++ b/aten/src/ATen/SavedTensorHooks.cpp @@ -32,7 +32,7 @@ void SavedTensorDefaultHooks::disable(const std::string& message) { } void SavedTensorDefaultHooks::enable() { - tls.disabled_error_message = c10::nullopt; + tls.disabled_error_message = std::nullopt; } /* static */ bool SavedTensorDefaultHooks::set_tracing(bool is_tracing) { diff --git a/aten/src/ATen/SavedTensorHooks.h b/aten/src/ATen/SavedTensorHooks.h index b69b9c25e8e6..9cf1ea37c353 100644 --- a/aten/src/ATen/SavedTensorHooks.h +++ b/aten/src/ATen/SavedTensorHooks.h @@ -1,8 +1,8 @@ #pragma once #include -#include #include +#include #include #include diff --git a/aten/src/ATen/TensorIndexing.h b/aten/src/ATen/TensorIndexing.h index eb36c0e02fa4..1fe9e7ebdcb0 100644 --- a/aten/src/ATen/TensorIndexing.h +++ b/aten/src/ATen/TensorIndexing.h @@ -5,8 +5,8 @@ #include #include #include -#include #include +#include #ifndef AT_PER_OPERATOR_HEADERS #include diff --git a/aten/src/ATen/native/BatchLinearAlgebra.h b/aten/src/ATen/native/BatchLinearAlgebra.h index c8402640aa08..58d46aacd473 100644 --- a/aten/src/ATen/native/BatchLinearAlgebra.h +++ b/aten/src/ATen/native/BatchLinearAlgebra.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include #include diff --git a/aten/src/ATen/record_function.h b/aten/src/ATen/record_function.h index 014260fb220f..63fbcb55e96d 100644 --- a/aten/src/ATen/record_function.h +++ b/aten/src/ATen/record_function.h @@ -3,8 +3,8 @@ #include #include #include -#include #include +#include #include #include diff --git a/c10/core/ConstantSymNodeImpl.h b/c10/core/ConstantSymNodeImpl.h index 3c0fb66f7469..791a81cace41 100644 --- a/c10/core/ConstantSymNodeImpl.h +++ b/c10/core/ConstantSymNodeImpl.h @@ -3,8 +3,8 @@ #include #include #include -#include #include +#include #include #include @@ -73,14 +73,14 @@ class C10_API ConstantSymNodeImpl : public SymNodeImpl { if constexpr (is_int_()) { return ::std::get(value_); } else { - return c10::nullopt; + return std::nullopt; } } std::optional constant_bool() override { if constexpr (is_bool_()) { return ::std::get(value_); } else { - return c10::nullopt; + return std::nullopt; } } bool is_constant() override { diff --git a/c10/core/ScalarTypeToTypeMeta.h b/c10/core/ScalarTypeToTypeMeta.h index d2694c96221e..5e9e1a936af5 100644 --- a/c10/core/ScalarTypeToTypeMeta.h +++ b/c10/core/ScalarTypeToTypeMeta.h @@ -30,7 +30,7 @@ inline ScalarType typeMetaToScalarType(caffe2::TypeMeta dtype) { inline optional optTypeMetaToScalarType( optional type_meta) { if (!type_meta.has_value()) { - return c10::nullopt; + return std::nullopt; } return type_meta->toScalarType(); } diff --git a/c10/core/SymBool.h b/c10/core/SymBool.h index 9f9f141293a3..06ce32c1a716 100644 --- a/c10/core/SymBool.h +++ b/c10/core/SymBool.h @@ -3,9 +3,9 @@ #include #include #include -#include #include #include +#include 
#include #include @@ -68,7 +68,7 @@ class C10_API SymBool { std::optional maybe_as_bool() const { if (!is_heap_allocated()) { - return c10::make_optional(data_); + return std::make_optional(data_); } return toSymNodeImplUnowned()->constant_bool(); } diff --git a/c10/core/SymInt.h b/c10/core/SymInt.h index 025c351334a0..eef34aac24ca 100644 --- a/c10/core/SymInt.h +++ b/c10/core/SymInt.h @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include #include @@ -231,7 +231,7 @@ class C10_API SymInt { std::optional maybe_as_int() const { if (!is_heap_allocated()) { - return c10::make_optional(data_); + return std::make_optional(data_); } auto* node = toSymNodeImplUnowned(); if (auto c = node->constant_int()) { diff --git a/c10/core/SymIntArrayRef.h b/c10/core/SymIntArrayRef.h index 760f4ba4e79a..ce7253c60ec5 100644 --- a/c10/core/SymIntArrayRef.h +++ b/c10/core/SymIntArrayRef.h @@ -3,8 +3,8 @@ #include #include #include -#include #include +#include namespace c10 { using SymIntArrayRef = ArrayRef; @@ -23,7 +23,7 @@ inline std::optional asIntArrayRefSlowOpt( c10::SymIntArrayRef ar) { for (const c10::SymInt& sci : ar) { if (sci.is_heap_allocated()) { - return c10::nullopt; + return std::nullopt; } } diff --git a/c10/core/SymNodeImpl.h b/c10/core/SymNodeImpl.h index bb92b09775b7..39e4bbbc2c6c 100644 --- a/c10/core/SymNodeImpl.h +++ b/c10/core/SymNodeImpl.h @@ -3,9 +3,9 @@ #include #include #include -#include #include #include +#include #include #include @@ -207,19 +207,19 @@ class C10_API SymNodeImpl : public c10::intrusive_ptr_target { TORCH_CHECK(false, "NYI"); }; virtual std::optional nested_int() { - return c10::nullopt; + return std::nullopt; } virtual std::optional nested_int_coeff() { - return c10::nullopt; + return std::nullopt; } virtual std::optional constant_int() { - return c10::nullopt; + return std::nullopt; } virtual std::optional constant_bool() { - return c10::nullopt; + return std::nullopt; } virtual std::optional maybe_as_int() { - return c10::nullopt; + return std::nullopt; } virtual bool is_constant() { return false; diff --git a/c10/core/SymbolicShapeMeta.cpp b/c10/core/SymbolicShapeMeta.cpp index 62b03d36ec71..b59a95a4a2fa 100644 --- a/c10/core/SymbolicShapeMeta.cpp +++ b/c10/core/SymbolicShapeMeta.cpp @@ -56,7 +56,7 @@ normalize_sym_sizes_strides(SymIntArrayRef sizes, SymIntArrayRef strides) { // Couldn't find. 
Tell the caller to do the normal computation // Alternately, if everything is hinted, we want the normal computation // too - return c10::nullopt; + return std::nullopt; } // Populate the SymNode array std::vector size_nodes; @@ -69,7 +69,7 @@ normalize_sym_sizes_strides(SymIntArrayRef sizes, SymIntArrayRef strides) { for (const auto& s : strides) { stride_nodes.emplace_back(s.wrap_node(base)); } - return c10::make_optional( + return std::make_optional( std::tuple, std::vector>( std::move(base), std::move(size_nodes), std::move(stride_nodes))); } diff --git a/c10/core/TensorImpl.cpp b/c10/core/TensorImpl.cpp index 516a61f02004..130292aaa70d 100644 --- a/c10/core/TensorImpl.cpp +++ b/c10/core/TensorImpl.cpp @@ -8,9 +8,9 @@ #include #include #include -#include #include #include +#include #include diff --git a/c10/core/TensorImpl.h b/c10/core/TensorImpl.h index 877c1c09543c..67543614c021 100644 --- a/c10/core/TensorImpl.h +++ b/c10/core/TensorImpl.h @@ -24,12 +24,12 @@ #include #include #include -#include #include #include #include #include #include +#include #include #include @@ -233,8 +233,8 @@ struct C10_API ExtraMeta { std::unique_ptr symbolic_shape_meta_ = nullptr; std::unique_ptr named_tensor_meta_ = nullptr; intrusive_ptr backend_meta_ = nullptr; - std::optional custom_data_ptr_error_msg_ = c10::nullopt; - std::optional custom_storage_error_msg_ = c10::nullopt; + std::optional custom_data_ptr_error_msg_ = std::nullopt; + std::optional custom_storage_error_msg_ = std::nullopt; ExtraMeta() = default; ExtraMeta(const ExtraMeta& other) { @@ -260,8 +260,8 @@ struct C10_API ExtraMeta { std::unique_ptr symbolic_shape_meta, std::unique_ptr named_tensor_meta, intrusive_ptr backend_meta, - std::optional custom_data_ptr_error_msg = c10::nullopt, - std::optional custom_storage_access_error_msg = c10::nullopt) + std::optional custom_data_ptr_error_msg = std::nullopt, + std::optional custom_storage_access_error_msg = std::nullopt) : symbolic_shape_meta_(std::move(symbolic_shape_meta)), named_tensor_meta_(std::move(named_tensor_meta)), backend_meta_(std::move(backend_meta)), @@ -1737,7 +1737,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { void set_sizes_and_strides( c10::SymIntArrayRef sizes, c10::SymIntArrayRef strides, - std::optional storage_offset = c10::nullopt); + std::optional storage_offset = std::nullopt); // This is renamed to avoid breaking overload BC void generic_set_sizes_contiguous(c10::SymIntArrayRef sizes); void generic_set_sizes_contiguous(c10::IntArrayRef sizes) { @@ -1834,7 +1834,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { void set_sizes_and_strides( IntArrayRef new_size, IntArrayRef new_stride, - std::optional storage_offset = c10::nullopt) { + std::optional storage_offset = std::nullopt) { TORCH_CHECK( allow_tensor_metadata_change(), "set_sizes_and_strides ", diff --git a/c10/core/TensorOptions.h b/c10/core/TensorOptions.h index d99005d3d28f..9c23c767ffc5 100644 --- a/c10/core/TensorOptions.h +++ b/c10/core/TensorOptions.h @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include @@ -284,10 +284,10 @@ struct C10_API TensorOptions { return has_device_; } - /// Returns the device of the `TensorOptions`, or `c10::nullopt` if + /// Returns the device of the `TensorOptions`, or `std::nullopt` if /// device is not specified. std::optional device_opt() const noexcept { - return has_device_ ? c10::make_optional(device_) : c10::nullopt; + return has_device_ ? 
std::make_optional(device_) : std::nullopt; } /// Returns the device index of the `TensorOptions`. @@ -305,10 +305,10 @@ struct C10_API TensorOptions { return has_dtype_; } - /// Returns the dtype of the `TensorOptions`, or `c10::nullopt` if + /// Returns the dtype of the `TensorOptions`, or `std::nullopt` if /// device is not specified. std::optional dtype_opt() const noexcept { - return has_dtype_ ? c10::make_optional(dtype_) : c10::nullopt; + return has_dtype_ ? std::make_optional(dtype_) : std::nullopt; } /// Returns the layout of the `TensorOptions`. @@ -321,10 +321,10 @@ struct C10_API TensorOptions { return has_layout_; } - /// Returns the layout of the `TensorOptions`, or `c10::nullopt` if + /// Returns the layout of the `TensorOptions`, or `std::nullopt` if /// layout is not specified. std::optional layout_opt() const noexcept { - return has_layout_ ? c10::make_optional(layout_) : c10::nullopt; + return has_layout_ ? std::make_optional(layout_) : std::nullopt; } /// Returns the `requires_grad` property of the `TensorOptions`. @@ -338,10 +338,10 @@ struct C10_API TensorOptions { } /// Returns the `requires_grad` property of the `TensorOptions`, or - /// `c10::nullopt` if `requires_grad` is not specified. + /// `std::nullopt` if `requires_grad` is not specified. std::optional requires_grad_opt() const noexcept { - return has_requires_grad_ ? c10::make_optional(requires_grad_) - : c10::nullopt; + return has_requires_grad_ ? std::make_optional(requires_grad_) + : std::nullopt; } /// Returns the `pinned_memory` property of the `TensorOptions`. @@ -378,10 +378,10 @@ struct C10_API TensorOptions { } /// Returns the `pinned_memory` property of the `TensorOptions`, or - /// `c10::nullopt` if `pinned_memory` is not specified. + /// `std::nullopt` if `pinned_memory` is not specified. std::optional pinned_memory_opt() const noexcept { - return has_pinned_memory_ ? c10::make_optional(pinned_memory_) - : c10::nullopt; + return has_pinned_memory_ ? std::make_optional(pinned_memory_) + : std::nullopt; } /// Returns whether the `memory_layout` is specified @@ -393,10 +393,10 @@ struct C10_API TensorOptions { // behavior of memory_format varies from function to function. /// Returns the `memory_layout` property of `TensorOptions, or - /// `c10::nullopt` if `memory_format` is not specified. + /// `std::nullopt` if `memory_format` is not specified. std::optional memory_format_opt() const noexcept { - return has_memory_format_ ? c10::make_optional(memory_format_) - : c10::nullopt; + return has_memory_format_ ? std::make_optional(memory_format_) + : std::nullopt; } // Resolves the ATen backend specified by the current construction axes. diff --git a/c10/core/UndefinedTensorImpl.cpp b/c10/core/UndefinedTensorImpl.cpp index 1b16a5d5b9fd..2a715d78bdb7 100644 --- a/c10/core/UndefinedTensorImpl.cpp +++ b/c10/core/UndefinedTensorImpl.cpp @@ -5,7 +5,7 @@ namespace c10 { // should this use the globalContext? Can it get a context passed in somehow? UndefinedTensorImpl::UndefinedTensorImpl() - : TensorImpl(DispatchKey::Undefined, caffe2::TypeMeta(), c10::nullopt) { + : TensorImpl(DispatchKey::Undefined, caffe2::TypeMeta(), std::nullopt) { set_storage_access_should_throw(); // TODO: accessing the sizes on an undefined tensor is not meaningful // and should error too, but empirically it does not! 
diff --git a/c10/core/impl/InlineDeviceGuard.h b/c10/core/impl/InlineDeviceGuard.h index 3e9f91eff617..a70c194efccf 100644 --- a/c10/core/impl/InlineDeviceGuard.h +++ b/c10/core/impl/InlineDeviceGuard.h @@ -404,7 +404,7 @@ class InlineOptionalDeviceGuard { /// Returns the device that was set immediately prior to initialization of /// the, guard, or nullopt if the guard is uninitialized. optional original_device() const { - return guard_.has_value() ? make_optional(guard_->original_device()) + return guard_.has_value() ? std::make_optional(guard_->original_device()) : nullopt; } @@ -412,7 +412,7 @@ class InlineOptionalDeviceGuard { /// either from construction, or via set_device, if the guard is initialized, /// or nullopt if the guard is uninitialized. optional current_device() const { - return guard_.has_value() ? make_optional(guard_->current_device()) + return guard_.has_value() ? std::make_optional(guard_->current_device()) : nullopt; } diff --git a/c10/core/impl/InlineStreamGuard.h b/c10/core/impl/InlineStreamGuard.h index b99e7db72add..5ac913c4ff7f 100644 --- a/c10/core/impl/InlineStreamGuard.h +++ b/c10/core/impl/InlineStreamGuard.h @@ -173,7 +173,7 @@ class InlineOptionalStreamGuard { /// Returns the stream that was set at the time the guard was most recently /// initialized, or nullopt if the guard is uninitialized. optional original_stream() const { - return guard_.has_value() ? make_optional(guard_->original_stream()) + return guard_.has_value() ? std::make_optional(guard_->original_stream()) : nullopt; } @@ -181,7 +181,7 @@ class InlineOptionalStreamGuard { /// either from construction, or via reset_stream, if the guard is /// initialized, or nullopt if the guard is uninitialized. optional current_stream() const { - return guard_.has_value() ? make_optional(guard_->current_stream()) + return guard_.has_value() ? std::make_optional(guard_->current_stream()) : nullopt; } diff --git a/c10/core/impl/PyObjectSlot.h b/c10/core/impl/PyObjectSlot.h index 518b0e63e492..8f2833b5c7da 100644 --- a/c10/core/impl/PyObjectSlot.h +++ b/c10/core/impl/PyObjectSlot.h @@ -2,8 +2,8 @@ #include #include -#include #include +#include #include @@ -106,13 +106,13 @@ struct C10_API PyObjectSlot { // after we query here. The only time when we can conclude a tensor // is definitely uninitialized is when we have just allocated it and // it cannot have escaped to other threads yet - return c10::nullopt; + return std::nullopt; } else if (interpreter == self_interpreter) { // NB: pyobj_ could still be null! 
if (!ignore_hermetic_tls && c10::impl::HermeticPyObjectTLS::get_state()) { - return c10::nullopt; + return std::nullopt; } else { - return c10::make_optional(_unchecked_untagged_pyobj()); + return std::make_optional(_unchecked_untagged_pyobj()); } } else { TORCH_CHECK( diff --git a/c10/core/impl/TorchDispatchModeTLS.cpp b/c10/core/impl/TorchDispatchModeTLS.cpp index f1847cb005b4..c9a3274ed896 100644 --- a/c10/core/impl/TorchDispatchModeTLS.cpp +++ b/c10/core/impl/TorchDispatchModeTLS.cpp @@ -16,7 +16,7 @@ bool TorchDispatchModeTLS::any_modes_set(bool skip_infra_modes) { if (!skip_infra_modes) { for (const auto i : c10::irange( static_cast(TorchDispatchModeKey::NUM_MODE_KEYS))) { - if (torchDispatchModeState.infra_modes_[i] != c10::nullopt) { + if (torchDispatchModeState.infra_modes_[i] != std::nullopt) { return true; } } @@ -48,7 +48,7 @@ const std::shared_ptr TorchDispatchModeTLS:: if (torchDispatchModeState.infra_modes_[i].has_value()) { // NOLINTNEXTLINE(bugprone-unchecked-optional-access) out = std::move(torchDispatchModeState.infra_modes_[i].value()); - torchDispatchModeState.infra_modes_[i] = c10::nullopt; + torchDispatchModeState.infra_modes_[i] = std::nullopt; break; } } @@ -70,7 +70,7 @@ const std:: if (torchDispatchModeState.infra_modes_[i].has_value()) { // NOLINTNEXTLINE(bugprone-unchecked-optional-access) auto out_mode = torchDispatchModeState.infra_modes_[i].value(); - torchDispatchModeState.infra_modes_[i] = c10::nullopt; + torchDispatchModeState.infra_modes_[i] = std::nullopt; if (!any_modes_set()) { c10::impl::tls_set_dispatch_key_included(DispatchKey::Python, false); c10::impl::tls_set_dispatch_key_included( @@ -114,7 +114,7 @@ int64_t TorchDispatchModeTLS::stack_len() { int64_t infra_modes_len = 0; for (const auto i : c10::irange(static_cast(TorchDispatchModeKey::NUM_MODE_KEYS))) { - if (torchDispatchModeState.infra_modes_[i] != c10::nullopt) { + if (torchDispatchModeState.infra_modes_[i] != std::nullopt) { infra_modes_len += 1; } } @@ -131,7 +131,7 @@ void TorchDispatchModeTLS::set_mode( TorchDispatchModeKey mode_key) { TORCH_CHECK( torchDispatchModeState.infra_modes_[static_cast(mode_key)] == - c10::nullopt, + std::nullopt, "trying to set the current ", to_string(mode_key), ", but one already exists"); @@ -149,7 +149,7 @@ const std::optional> TorchDispatchModeTLS::unset_mode(TorchDispatchModeKey mode_key) { auto out = torchDispatchModeState.infra_modes_[static_cast(mode_key)]; torchDispatchModeState.infra_modes_[static_cast(mode_key)] = - c10::nullopt; + std::nullopt; if (out.has_value() && !any_modes_set()) { c10::impl::tls_set_dispatch_key_included(DispatchKey::Python, false); c10::impl::tls_set_dispatch_key_included( diff --git a/c10/cuda/CUDACachingAllocator.cpp b/c10/cuda/CUDACachingAllocator.cpp index 11bea6056e9d..e4535292ceba 100644 --- a/c10/cuda/CUDACachingAllocator.cpp +++ b/c10/cuda/CUDACachingAllocator.cpp @@ -411,7 +411,7 @@ struct ExpandableSegment { return rangeFromHandles(begin, end); } while (end > handles_.size()) { - handles_.emplace_back(c10::nullopt); + handles_.emplace_back(std::nullopt); } for (auto i : c10::irange(begin, end)) { TORCH_INTERNAL_ASSERT(!handles_.at(i)); @@ -426,7 +426,7 @@ struct ExpandableSegment { if (status == CUDA_ERROR_OUT_OF_MEMORY) { for (auto j : c10::irange(begin, i)) { auto h = handles_.at(j).value(); - handles_.at(j) = c10::nullopt; + handles_.at(j) = std::nullopt; C10_CUDA_DRIVER_CHECK(DriverAPI::get()->cuMemRelease_(h)); } trimHandles(); @@ -507,7 +507,7 @@ struct ExpandableSegment { 
C10_CUDA_CHECK(cudaStreamSynchronize(stream_)); for (auto i : c10::irange(begin, end)) { CUmemGenericAllocationHandle h = handles_.at(i).value(); - handles_.at(i) = c10::nullopt; + handles_.at(i) = std::nullopt; C10_CUDA_DRIVER_CHECK(DriverAPI::get()->cuMemUnmap_( ptr_ + segment_size_ * i, segment_size_)); C10_CUDA_DRIVER_CHECK(DriverAPI::get()->cuMemRelease_(h)); diff --git a/c10/cuda/CUDAFunctions.cpp b/c10/cuda/CUDAFunctions.cpp index 2b53eb4d7c7c..8d88000b89db 100644 --- a/c10/cuda/CUDAFunctions.cpp +++ b/c10/cuda/CUDAFunctions.cpp @@ -166,7 +166,7 @@ std::optional getDeviceIndexWithPrimaryContext() { return device_index; } } - return c10::nullopt; + return std::nullopt; } namespace _internal { diff --git a/c10/cuda/CUDAGuard.h b/c10/cuda/CUDAGuard.h index 254522893d5e..65f5c5d191b7 100644 --- a/c10/cuda/CUDAGuard.h +++ b/c10/cuda/CUDAGuard.h @@ -242,7 +242,7 @@ struct OptionalCUDAStreamGuard { optional original_stream() const { auto r = guard_.original_stream(); if (r.has_value()) { - return make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value())); + return std::make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value())); } else { return nullopt; } @@ -254,7 +254,7 @@ struct OptionalCUDAStreamGuard { optional current_stream() const { auto r = guard_.current_stream(); if (r.has_value()) { - return make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value())); + return std::make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value())); } else { return nullopt; } diff --git a/c10/cuda/impl/CUDAGuardImpl.h b/c10/cuda/impl/CUDAGuardImpl.h index ec50c8152b33..1ef2fcb2c08f 100644 --- a/c10/cuda/impl/CUDAGuardImpl.h +++ b/c10/cuda/impl/CUDAGuardImpl.h @@ -14,9 +14,9 @@ #include #include #include -#include #include #include +#include namespace c10::cuda::impl { @@ -45,7 +45,7 @@ struct CUDAGuardImpl final : public c10::impl::DeviceGuardImplInterface { const auto err = C10_CUDA_ERROR_HANDLED(c10::cuda::GetDevice(&device)); C10_CUDA_CHECK_WARN(err); if (err != cudaSuccess) { - return c10::nullopt; + return std::nullopt; } return Device(DeviceType::CUDA, device); } diff --git a/c10/test/core/DeviceGuard_test.cpp b/c10/test/core/DeviceGuard_test.cpp index 63049ae7b555..0869ea1168d1 100644 --- a/c10/test/core/DeviceGuard_test.cpp +++ b/c10/test/core/DeviceGuard_test.cpp @@ -36,6 +36,7 @@ TEST(OptionalDeviceGuard, ResetDeviceDifferentDeviceType) { g.reset_device(Device(DeviceType::HIP, 2), &hip_impl); ASSERT_EQ(FakeGuardImpl::getDeviceIndex(), 0); ASSERT_EQ(FakeGuardImpl::getDeviceIndex(), 2); - ASSERT_EQ(g.current_device(), make_optional(Device(DeviceType::HIP, 2))); - ASSERT_EQ(g.original_device(), make_optional(Device(DeviceType::HIP, 0))); + ASSERT_EQ(g.current_device(), std::make_optional(Device(DeviceType::HIP, 2))); + ASSERT_EQ( + g.original_device(), std::make_optional(Device(DeviceType::HIP, 0))); } diff --git a/c10/test/core/SymInt_test.cpp b/c10/test/core/SymInt_test.cpp index 8055ec7a3251..7cefa1e4a771 100644 --- a/c10/test/core/SymInt_test.cpp +++ b/c10/test/core/SymInt_test.cpp @@ -8,7 +8,7 @@ using namespace c10; #ifndef C10_MOBILE static void check(int64_t value) { const auto i = SymInt(value); - EXPECT_EQ(i.maybe_as_int(), c10::make_optional(value)); + EXPECT_EQ(i.maybe_as_int(), std::make_optional(value)); } TEST(SymIntTest, ConcreteInts) { diff --git a/c10/test/core/impl/InlineDeviceGuard_test.cpp b/c10/test/core/impl/InlineDeviceGuard_test.cpp index 69db93e307bf..2b4ad0c5b238 100644 --- a/c10/test/core/impl/InlineDeviceGuard_test.cpp +++ 
b/c10/test/core/impl/InlineDeviceGuard_test.cpp @@ -170,12 +170,12 @@ TEST(InlineOptionalDeviceGuard, SetDevice) { MaybeTestGuard g; DeviceIndex i = 1; g.set_device(dev(i)); - ASSERT_EQ(g.original_device(), make_optional(dev(init_i))); - ASSERT_EQ(g.current_device(), make_optional(dev(i))); + ASSERT_EQ(g.original_device(), std::make_optional(dev(init_i))); + ASSERT_EQ(g.current_device(), std::make_optional(dev(i))); ASSERT_EQ(TestGuardImpl::getDeviceIndex(), i); g.set_device(dev(i)); - ASSERT_EQ(g.original_device(), make_optional(dev(init_i))); - ASSERT_EQ(g.current_device(), make_optional(dev(i))); + ASSERT_EQ(g.original_device(), std::make_optional(dev(init_i))); + ASSERT_EQ(g.current_device(), std::make_optional(dev(i))); ASSERT_EQ(TestGuardImpl::getDeviceIndex(), i); } @@ -185,11 +185,11 @@ TEST(InlineOptionalDeviceGuard, SetIndex) { DeviceIndex i = 1; MaybeTestGuard g; g.set_index(i); - ASSERT_EQ(g.original_device(), make_optional(dev(init_i))); - ASSERT_EQ(g.current_device(), make_optional(dev(i))); + ASSERT_EQ(g.original_device(), std::make_optional(dev(init_i))); + ASSERT_EQ(g.current_device(), std::make_optional(dev(i))); ASSERT_EQ(TestGuardImpl::getDeviceIndex(), i); g.set_index(i); - ASSERT_EQ(g.original_device(), make_optional(dev(init_i))); - ASSERT_EQ(g.current_device(), make_optional(dev(i))); + ASSERT_EQ(g.original_device(), std::make_optional(dev(init_i))); + ASSERT_EQ(g.current_device(), std::make_optional(dev(i))); ASSERT_EQ(TestGuardImpl::getDeviceIndex(), i); } diff --git a/c10/test/core/impl/InlineStreamGuard_test.cpp b/c10/test/core/impl/InlineStreamGuard_test.cpp index 692504cebd1c..06c4b96ef913 100644 --- a/c10/test/core/impl/InlineStreamGuard_test.cpp +++ b/c10/test/core/impl/InlineStreamGuard_test.cpp @@ -109,8 +109,8 @@ TEST(InlineOptionalStreamGuard, Constructor) { ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 1); ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 2); ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(0), 0); - ASSERT_EQ(g.original_stream(), make_optional(stream(0, 0))); - ASSERT_EQ(g.current_stream(), make_optional(stream(1, 2))); + ASSERT_EQ(g.original_stream(), std::make_optional(stream(0, 0))); + ASSERT_EQ(g.current_stream(), std::make_optional(stream(1, 2))); } ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 0); ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 0); @@ -120,8 +120,8 @@ TEST(InlineOptionalStreamGuard, Constructor) { ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 1); ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 2); ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(0), 0); - ASSERT_EQ(g.original_stream(), make_optional(stream(0, 0))); - ASSERT_EQ(g.current_stream(), make_optional(stream(1, 2))); + ASSERT_EQ(g.original_stream(), std::make_optional(stream(0, 0))); + ASSERT_EQ(g.current_stream(), std::make_optional(stream(1, 2))); } ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 0); ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 0); @@ -146,8 +146,8 @@ TEST(InlineOptionalStreamGuard, ResetStreamSameDevice) { ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 1); ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 3); ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(0), 0); - ASSERT_EQ(g.original_stream(), make_optional(stream(0, 0))); - ASSERT_EQ(g.current_stream(), make_optional(stream(1, 3))); + ASSERT_EQ(g.original_stream(), std::make_optional(stream(0, 0))); + ASSERT_EQ(g.current_stream(), std::make_optional(stream(1, 3))); } ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 0); ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 0); @@ -164,8 +164,8 @@ 
TEST(InlineOptionalStreamGuard, ResetStreamDifferentDevice) { ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(2), 3); ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 0); ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(0), 0); - ASSERT_EQ(g.original_stream(), make_optional(stream(0, 0))); - ASSERT_EQ(g.current_stream(), make_optional(stream(2, 3))); + ASSERT_EQ(g.original_stream(), std::make_optional(stream(0, 0))); + ASSERT_EQ(g.current_stream(), std::make_optional(stream(2, 3))); } ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 0); ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(2), 0); diff --git a/c10/test/util/optional_test.cpp b/c10/test/util/optional_test.cpp index aa4c5a527ce6..e9496d9dc288 100644 --- a/c10/test/util/optional_test.cpp +++ b/c10/test/util/optional_test.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include @@ -67,7 +67,7 @@ TYPED_TEST(OptionalTest, Empty) { EXPECT_FALSE(empty.has_value()); // NOLINTNEXTLINE(bugprone-unchecked-optional-access,hicpp-avoid-goto,cppcoreguidelines-avoid-goto) - EXPECT_THROW(empty.value(), c10::bad_optional_access); + EXPECT_THROW(empty.value(), std::bad_optional_access); } TYPED_TEST(OptionalTest, Initialized) { @@ -111,32 +111,32 @@ TEST_P(SelfCompareTest, SelfCompare) { INSTANTIATE_TEST_SUITE_P( nullopt, SelfCompareTest, - testing::Values(c10::nullopt)); + testing::Values(std::nullopt)); INSTANTIATE_TEST_SUITE_P( int, SelfCompareTest, - testing::Values(c10::make_optional(2))); + testing::Values(std::make_optional(2))); TEST(OptionalTest, Nullopt) { std::optional x = 2; - EXPECT_THAT(c10::nullopt, Not(Eq(x))); - EXPECT_THAT(x, Not(Eq(c10::nullopt))); + EXPECT_THAT(std::nullopt, Not(Eq(x))); + EXPECT_THAT(x, Not(Eq(std::nullopt))); - EXPECT_THAT(x, Ne(c10::nullopt)); - EXPECT_THAT(c10::nullopt, Ne(x)); + EXPECT_THAT(x, Ne(std::nullopt)); + EXPECT_THAT(std::nullopt, Ne(x)); - EXPECT_THAT(x, Not(Lt(c10::nullopt))); - EXPECT_THAT(c10::nullopt, Lt(x)); + EXPECT_THAT(x, Not(Lt(std::nullopt))); + EXPECT_THAT(std::nullopt, Lt(x)); - EXPECT_THAT(x, Not(Le(c10::nullopt))); - EXPECT_THAT(c10::nullopt, Le(x)); + EXPECT_THAT(x, Not(Le(std::nullopt))); + EXPECT_THAT(std::nullopt, Le(x)); - EXPECT_THAT(x, Gt(c10::nullopt)); - EXPECT_THAT(c10::nullopt, Not(Gt(x))); + EXPECT_THAT(x, Gt(std::nullopt)); + EXPECT_THAT(std::nullopt, Not(Gt(x))); - EXPECT_THAT(x, Ge(c10::nullopt)); - EXPECT_THAT(c10::nullopt, Not(Ge(x))); + EXPECT_THAT(x, Ge(std::nullopt)); + EXPECT_THAT(std::nullopt, Not(Ge(x))); } // Ensure comparisons work... 
diff --git a/c10/util/Backtrace.cpp b/c10/util/Backtrace.cpp index 7d0fedbb335a..d461267000be 100644 --- a/c10/util/Backtrace.cpp +++ b/c10/util/Backtrace.cpp @@ -1,7 +1,7 @@ #include -#include #include #include +#include #include #include @@ -150,19 +150,19 @@ std::optional parse_frame_information( auto function_name_start = frame_string.find('('); if (function_name_start == std::string::npos) { - return c10::nullopt; + return std::nullopt; } function_name_start += 1; auto offset_start = frame_string.find('+', function_name_start); if (offset_start == std::string::npos) { - return c10::nullopt; + return std::nullopt; } offset_start += 1; const auto offset_end = frame_string.find(')', offset_start); if (offset_end == std::string::npos) { - return c10::nullopt; + return std::nullopt; } frame.object_file = frame_string.substr(0, function_name_start - 1); @@ -186,7 +186,7 @@ std::optional parse_frame_information( skip >> frame.offset_into_function; #else #warning Unknown standard library, backtraces may have incomplete debug information - return c10::nullopt; + return std::nullopt; #endif // defined(__GLIBCXX__) // Some system-level functions don't have sufficient debug information, so diff --git a/c10/util/OptionalArrayRef.h b/c10/util/OptionalArrayRef.h index 98237bba92f5..ae4f4f1f2c67 100644 --- a/c10/util/OptionalArrayRef.h +++ b/c10/util/OptionalArrayRef.h @@ -12,9 +12,9 @@ #pragma once #include -#include #include #include +#include #include #include @@ -27,16 +27,16 @@ class OptionalArrayRef final { constexpr OptionalArrayRef() noexcept = default; - constexpr OptionalArrayRef(nullopt_t) noexcept {} + constexpr OptionalArrayRef(std::nullopt_t) noexcept {} OptionalArrayRef(const OptionalArrayRef& other) = default; OptionalArrayRef(OptionalArrayRef&& other) noexcept = default; - constexpr OptionalArrayRef(const optional>& other) noexcept + constexpr OptionalArrayRef(const std::optional>& other) noexcept : wrapped_opt_array_ref(other) {} - constexpr OptionalArrayRef(optional>&& other) noexcept + constexpr OptionalArrayRef(std::optional>&& other) noexcept : wrapped_opt_array_ref(std::move(other)) {} constexpr OptionalArrayRef(const T& value) noexcept @@ -89,8 +89,8 @@ class OptionalArrayRef final { // Assignment - constexpr OptionalArrayRef& operator=(nullopt_t) noexcept { - wrapped_opt_array_ref = c10::nullopt; + constexpr OptionalArrayRef& operator=(std::nullopt_t) noexcept { + wrapped_opt_array_ref = std::nullopt; return *this; } @@ -99,13 +99,13 @@ class OptionalArrayRef final { OptionalArrayRef& operator=(OptionalArrayRef&& other) noexcept = default; constexpr OptionalArrayRef& operator=( - const optional>& other) noexcept { + const std::optional>& other) noexcept { wrapped_opt_array_ref = other; return *this; } constexpr OptionalArrayRef& operator=( - optional>&& other) noexcept { + std::optional>&& other) noexcept { wrapped_opt_array_ref = std::move(other); return *this; } @@ -213,7 +213,7 @@ class OptionalArrayRef final { } private: - optional> wrapped_opt_array_ref; + std::optional> wrapped_opt_array_ref; }; using OptionalIntArrayRef = OptionalArrayRef; diff --git a/c10/xpu/test/impl/XPUStreamTest.cpp b/c10/xpu/test/impl/XPUStreamTest.cpp index 01a1dbb62621..6cbe3ae67215 100644 --- a/c10/xpu/test/impl/XPUStreamTest.cpp +++ b/c10/xpu/test/impl/XPUStreamTest.cpp @@ -1,9 +1,9 @@ #include -#include #include #include #include +#include #include #include diff --git a/torch/csrc/Module.cpp b/torch/csrc/Module.cpp index 00a2c0bbe302..96788c5d79f3 100644 --- a/torch/csrc/Module.cpp +++ 
b/torch/csrc/Module.cpp @@ -1,8 +1,8 @@ #include -#include #include #include #include +#include #ifndef _MSC_VER #include @@ -1817,7 +1817,7 @@ Call this whenever a new thread is created in order to propagate values from transposed_, output_padding_, std::move(groups_), - c10::nullopt); + std::nullopt); }, py::arg("input"), py::arg("weight"), @@ -1842,7 +1842,7 @@ Call this whenever a new thread is created in order to propagate values from at::SymIntArrayRef output_padding_, c10::SymInt groups_, std::optional> bias_sizes_opt) { - c10::OptionalArrayRef ref = c10::nullopt; + c10::OptionalArrayRef ref = std::nullopt; if (bias_sizes_opt) { ref = (*bias_sizes_opt); } @@ -2031,7 +2031,7 @@ Call this whenever a new thread is created in order to propagate values from py_module.def( "_get_accelerator", - [](std::optional check = c10::nullopt) { + [](std::optional check = std::nullopt) { return c10::Device( at::getAccelerator(check.value_or(false)) .value_or(c10::DeviceType::CPU), diff --git a/torch/csrc/Storage.cpp b/torch/csrc/Storage.cpp index aa5584abd39e..77520b6f1cdb 100644 --- a/torch/csrc/Storage.cpp +++ b/torch/csrc/Storage.cpp @@ -153,7 +153,7 @@ static bool THPStorage_isPreservable(THPStorage* self) { if (storage.unsafeGetStorageImpl()->pyobj_slot()->check_pyobj( getPyInterpreter(), /*ignore_hermetic_tls=*/true) != - c10::make_optional((PyObject*)self)) { + std::make_optional((PyObject*)self)) { return false; } if (storage.use_count() <= 1) { diff --git a/torch/csrc/api/include/torch/expanding_array.h b/torch/csrc/api/include/torch/expanding_array.h index f0901b06af68..62c12d2e0ac8 100644 --- a/torch/csrc/api/include/torch/expanding_array.h +++ b/torch/csrc/api/include/torch/expanding_array.h @@ -2,8 +2,8 @@ #include #include -#include #include +#include #include #include diff --git a/torch/csrc/api/include/torch/fft.h b/torch/csrc/api/include/torch/fft.h index d9a3430a7a24..ef6d9b1bc236 100644 --- a/torch/csrc/api/include/torch/fft.h +++ b/torch/csrc/api/include/torch/fft.h @@ -15,9 +15,9 @@ namespace fft { /// ``` inline Tensor fft( const Tensor& self, - std::optional n = c10::nullopt, + std::optional n = std::nullopt, int64_t dim = -1, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_fft_symint(self, n, dim, norm); } @@ -31,9 +31,9 @@ inline Tensor fft( /// ``` inline Tensor ifft( const Tensor& self, - std::optional n = c10::nullopt, + std::optional n = std::nullopt, int64_t dim = -1, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_ifft_symint(self, n, dim, norm); } @@ -47,9 +47,9 @@ inline Tensor ifft( /// ``` inline Tensor fft2( const Tensor& self, - OptionalIntArrayRef s = c10::nullopt, + OptionalIntArrayRef s = std::nullopt, IntArrayRef dim = {-2, -1}, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_fft2(self, s, dim, norm); } @@ -63,9 +63,9 @@ inline Tensor fft2( /// ``` inline Tensor ifft2( const Tensor& self, - at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef s = std::nullopt, IntArrayRef dim = {-2, -1}, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_ifft2(self, s, dim, norm); } @@ -79,9 +79,9 @@ inline Tensor ifft2( /// ``` inline Tensor fftn( const Tensor& self, - at::OptionalIntArrayRef s = c10::nullopt, - at::OptionalIntArrayRef dim = c10::nullopt, - std::optional norm = c10::nullopt) { + at::OptionalIntArrayRef s = std::nullopt, + at::OptionalIntArrayRef dim = 
std::nullopt, + std::optional norm = std::nullopt) { return torch::fft_fftn(self, s, dim, norm); } @@ -95,9 +95,9 @@ inline Tensor fftn( /// ``` inline Tensor ifftn( const Tensor& self, - at::OptionalIntArrayRef s = c10::nullopt, - at::OptionalIntArrayRef dim = c10::nullopt, - std::optional norm = c10::nullopt) { + at::OptionalIntArrayRef s = std::nullopt, + at::OptionalIntArrayRef dim = std::nullopt, + std::optional norm = std::nullopt) { return torch::fft_ifftn(self, s, dim, norm); } @@ -112,9 +112,9 @@ inline Tensor ifftn( /// ``` inline Tensor rfft( const Tensor& self, - std::optional n = c10::nullopt, + std::optional n = std::nullopt, int64_t dim = -1, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_rfft_symint(self, n, dim, norm); } @@ -131,9 +131,9 @@ inline Tensor rfft( /// ``` inline Tensor irfft( const Tensor& self, - std::optional n = c10::nullopt, + std::optional n = std::nullopt, int64_t dim = -1, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_irfft_symint(self, n, dim, norm); } @@ -147,9 +147,9 @@ inline Tensor irfft( /// ``` inline Tensor rfft2( const Tensor& self, - at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef s = std::nullopt, IntArrayRef dim = {-2, -1}, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_rfft2(self, s, dim, norm); } @@ -163,9 +163,9 @@ inline Tensor rfft2( /// ``` inline Tensor irfft2( const Tensor& self, - at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef s = std::nullopt, IntArrayRef dim = {-2, -1}, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_irfft2(self, s, dim, norm); } @@ -179,9 +179,9 @@ inline Tensor irfft2( /// ``` inline Tensor rfftn( const Tensor& self, - at::OptionalIntArrayRef s = c10::nullopt, - at::OptionalIntArrayRef dim = c10::nullopt, - std::optional norm = c10::nullopt) { + at::OptionalIntArrayRef s = std::nullopt, + at::OptionalIntArrayRef dim = std::nullopt, + std::optional norm = std::nullopt) { return torch::fft_rfftn(self, s, dim, norm); } @@ -195,9 +195,9 @@ inline Tensor rfftn( /// ``` inline Tensor irfftn( const Tensor& self, - at::OptionalIntArrayRef s = c10::nullopt, - at::OptionalIntArrayRef dim = c10::nullopt, - std::optional norm = c10::nullopt) { + at::OptionalIntArrayRef s = std::nullopt, + at::OptionalIntArrayRef dim = std::nullopt, + std::optional norm = std::nullopt) { return torch::fft_irfftn(self, s, dim, norm); } @@ -215,9 +215,9 @@ inline Tensor irfftn( /// ``` inline Tensor hfft( const Tensor& self, - std::optional n = c10::nullopt, + std::optional n = std::nullopt, int64_t dim = -1, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_hfft_symint(self, n, dim, norm); } @@ -234,9 +234,9 @@ inline Tensor hfft( /// ``` inline Tensor ihfft( const Tensor& self, - std::optional n = c10::nullopt, + std::optional n = std::nullopt, int64_t dim = -1, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_ihfft_symint(self, n, dim, norm); } @@ -253,9 +253,9 @@ inline Tensor ihfft( /// ``` inline Tensor hfft2( const Tensor& self, - at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef s = std::nullopt, IntArrayRef dim = {-2, -1}, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_hfft2(self, s, dim, norm); } @@ -273,9 +273,9 @@ inline Tensor hfft2( /// ``` 
inline Tensor ihfft2( const Tensor& self, - at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef s = std::nullopt, IntArrayRef dim = {-2, -1}, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_ihfft2(self, s, dim, norm); } @@ -292,9 +292,9 @@ inline Tensor ihfft2( /// ``` inline Tensor hfftn( const Tensor& self, - at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef s = std::nullopt, IntArrayRef dim = {-2, -1}, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_hfftn(self, s, dim, norm); } @@ -312,9 +312,9 @@ inline Tensor hfftn( /// ``` inline Tensor ihfftn( const Tensor& self, - at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef s = std::nullopt, IntArrayRef dim = {-2, -1}, - std::optional norm = c10::nullopt) { + std::optional norm = std::nullopt) { return torch::fft_ihfftn(self, s, dim, norm); } @@ -364,7 +364,7 @@ inline Tensor rfftfreq(int64_t n, const TensorOptions& options) { /// ``` inline Tensor fftshift( const Tensor& x, - at::OptionalIntArrayRef dim = c10::nullopt) { + at::OptionalIntArrayRef dim = std::nullopt) { return torch::fft_fftshift(x, dim); } @@ -381,7 +381,7 @@ inline Tensor fftshift( /// ``` inline Tensor ifftshift( const Tensor& x, - at::OptionalIntArrayRef dim = c10::nullopt) { + at::OptionalIntArrayRef dim = std::nullopt) { return torch::fft_ifftshift(x, dim); } diff --git a/torch/csrc/api/include/torch/nested.h b/torch/csrc/api/include/torch/nested.h index 780aab423047..2e4365e0031c 100644 --- a/torch/csrc/api/include/torch/nested.h +++ b/torch/csrc/api/include/torch/nested.h @@ -26,7 +26,7 @@ inline at::Tensor nested_tensor( auto out = at::_nested_tensor_from_tensor_list( nested_tensor_data, c10::typeMetaToScalarType(options.dtype()), - c10::nullopt, + std::nullopt, options.device(), options.pinned_memory()); if (options.has_requires_grad() && options.requires_grad()) { @@ -55,7 +55,7 @@ inline at::Tensor nested_tensor( auto out = at::_nested_tensor_from_tensor_list( tensor_list, c10::typeMetaToScalarType(options.dtype()), - c10::nullopt, + std::nullopt, options.device(), options.pinned_memory()); if (options.has_requires_grad() && options.requires_grad()) { @@ -72,10 +72,10 @@ inline at::Tensor nested_tensor( /// ``` inline at::Tensor as_nested_tensor( at::TensorList list, - std::optional dtype = c10::nullopt, - std::optional device = c10::nullopt) { + std::optional dtype = std::nullopt, + std::optional device = std::nullopt) { return at::_nested_tensor_from_tensor_list( - list, dtype, c10::nullopt, device, c10::nullopt); + list, dtype, std::nullopt, device, std::nullopt); } /// Nested to padded tensor @@ -87,7 +87,7 @@ inline at::Tensor as_nested_tensor( inline at::Tensor to_padded_tensor( const at::Tensor& self, double padding, - at::OptionalIntArrayRef output_size = c10::nullopt) { + at::OptionalIntArrayRef output_size = std::nullopt) { return at::nested_to_padded_tensor(self, padding, output_size); } diff --git a/torch/csrc/api/include/torch/nn/functional/activation.h b/torch/csrc/api/include/torch/nn/functional/activation.h index 89e596f71d14..5ae6fcc31760 100644 --- a/torch/csrc/api/include/torch/nn/functional/activation.h +++ b/torch/csrc/api/include/torch/nn/functional/activation.h @@ -236,7 +236,7 @@ inline Tensor softmax( std::optional dtype) { Tensor ret; - if (dtype == c10::nullopt) { + if (dtype == std::nullopt) { ret = input.softmax(dim); } else { ret = input.softmax(dim, dtype); @@ -273,7 +273,7 @@ inline 
Tensor softmin( std::optional dtype) { Tensor ret; - if (dtype == c10::nullopt) { + if (dtype == std::nullopt) { ret = (-input).softmax(dim); } else { ret = (-input).softmax(dim, dtype); @@ -310,7 +310,7 @@ inline Tensor log_softmax( std::optional dtype) { Tensor ret; - if (dtype == c10::nullopt) { + if (dtype == std::nullopt) { ret = input.log_softmax(dim); } else { ret = input.log_softmax(dim, dtype); diff --git a/torch/csrc/api/include/torch/nn/functional/embedding.h b/torch/csrc/api/include/torch/nn/functional/embedding.h index b06b0a3dc1e8..602268ab2eba 100644 --- a/torch/csrc/api/include/torch/nn/functional/embedding.h +++ b/torch/csrc/api/include/torch/nn/functional/embedding.h @@ -31,7 +31,7 @@ inline Tensor embedding( bool sparse) { auto input_ = input; - if (padding_idx != c10::nullopt) { + if (padding_idx != std::nullopt) { if (*padding_idx > 0) { TORCH_CHECK( *padding_idx < weight.size(0), @@ -46,7 +46,7 @@ inline Tensor embedding( padding_idx = -1; } - if (max_norm != c10::nullopt) { + if (max_norm != std::nullopt) { input_ = input_.contiguous(); // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) _no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type); @@ -149,7 +149,7 @@ inline Tensor embedding_bag( TORCH_CHECK(false, "mode has to be one of sum, mean or max"); } - if (max_norm != c10::nullopt) { + if (max_norm != std::nullopt) { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) _no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type); } diff --git a/torch/csrc/api/include/torch/nn/functional/loss.h b/torch/csrc/api/include/torch/nn/functional/loss.h index d1e285d0a0f1..6a425e606caf 100644 --- a/torch/csrc/api/include/torch/nn/functional/loss.h +++ b/torch/csrc/api/include/torch/nn/functional/loss.h @@ -346,7 +346,7 @@ inline Tensor smooth_l1_loss( const Tensor& input, const Tensor& target, SmoothL1LossFuncOptions::reduction_t reduction, - std::optional beta_opt = c10::nullopt) { + std::optional beta_opt = std::nullopt) { if (target.sizes() != input.sizes()) { TORCH_WARN( "Using a target size (", @@ -405,7 +405,7 @@ inline Tensor smooth_l1_loss( const SmoothL1LossFuncOptions& options, double beta) { TORCH_CHECK( - options.beta() == c10::nullopt, + options.beta() == std::nullopt, "expected beta not to be provided in 'options', but got ", options.beta().value()); return detail::smooth_l1_loss(input, target, options.reduction(), beta); diff --git a/torch/csrc/api/include/torch/nn/functional/normalization.h b/torch/csrc/api/include/torch/nn/functional/normalization.h index 53bd61839f74..965cfcd9ac83 100644 --- a/torch/csrc/api/include/torch/nn/functional/normalization.h +++ b/torch/csrc/api/include/torch/nn/functional/normalization.h @@ -17,7 +17,7 @@ inline Tensor normalize( int64_t dim, double eps, std::optional out) { - if (out == c10::nullopt) { + if (out == std::nullopt) { auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input); return input / denom; } else { @@ -115,7 +115,7 @@ inline Tensor local_response_norm( /*padding=*/0, /*ceil_mode=*/false, /*count_include_pad=*/true, - /*divisor_override=*/c10::nullopt) + /*divisor_override=*/std::nullopt) .squeeze(1); } else { auto sizes = input.sizes(); @@ -132,7 +132,7 @@ inline Tensor local_response_norm( /*padding=*/0, /*ceil_mode=*/false, /*count_include_pad=*/true, - /*divisor_override=*/c10::nullopt) + /*divisor_override=*/std::nullopt) .squeeze(1); div = div.view(sizes); } diff --git 
a/torch/csrc/api/include/torch/nn/functional/pooling.h b/torch/csrc/api/include/torch/nn/functional/pooling.h index be3009f62201..798467c0e0a6 100644 --- a/torch/csrc/api/include/torch/nn/functional/pooling.h +++ b/torch/csrc/api/include/torch/nn/functional/pooling.h @@ -820,15 +820,15 @@ inline std::tuple fractional_max_pool2d_with_indices( const std::optional>& output_size, const std::optional>& output_ratio, const Tensor& _random_samples) { - if (output_size == c10::nullopt && output_ratio == c10::nullopt) { + if (output_size == std::nullopt && output_ratio == std::nullopt) { TORCH_CHECK( false, "fractional_max_pool2d requires specifying either ", "an output_size or an output_ratio"); } std::optional> output_size_ = output_size; - if (output_size_ == c10::nullopt) { - TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt); + if (output_size_ == std::nullopt) { + TORCH_INTERNAL_ASSERT(output_ratio != std::nullopt); output_size_ = { (int64_t)(static_cast(input.size(-2)) * (*output_ratio.value())[0]), @@ -913,7 +913,7 @@ inline std::tuple fractional_max_pool3d_with_indices( const std::optional>& output_size, const std::optional>& output_ratio, const Tensor& _random_samples) { - if (output_size == c10::nullopt && output_ratio == c10::nullopt) { + if (output_size == std::nullopt && output_ratio == std::nullopt) { TORCH_CHECK( false, "fractional_max_pool3d requires specifying either ", @@ -921,8 +921,8 @@ inline std::tuple fractional_max_pool3d_with_indices( } std::optional> output_size_ = output_size; - if (output_size_ == c10::nullopt) { - TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt); + if (output_size_ == std::nullopt) { + TORCH_INTERNAL_ASSERT(output_ratio != std::nullopt); output_size_ = { (int64_t)(static_cast(input.size(-3)) * (*output_ratio.value())[0]), @@ -1066,7 +1066,7 @@ inline Tensor lp_pool2d( /*padding=*/0, ceil_mode, /*count_include_pad=*/true, - /*divisor_override=*/c10::nullopt); + /*divisor_override=*/std::nullopt); return (torch::sign(out) * relu(torch::abs(out))) .mul(kw * kh) @@ -1116,7 +1116,7 @@ inline Tensor lp_pool3d( /*padding=*/0, ceil_mode, /*count_include_pad=*/true, - /*divisor_override=*/c10::nullopt); + /*divisor_override=*/std::nullopt); return (torch::sign(out) * relu(torch::abs(out))) .mul(kd * kw * kh) diff --git a/torch/csrc/api/include/torch/nn/functional/upsampling.h b/torch/csrc/api/include/torch/nn/functional/upsampling.h index 38c5c51f9a47..75707ef091a7 100644 --- a/torch/csrc/api/include/torch/nn/functional/upsampling.h +++ b/torch/csrc/api/include/torch/nn/functional/upsampling.h @@ -19,13 +19,13 @@ inline std::vector _interp_output_size( std::optional>, std::optional> closed_over_args) { auto [input, size, scale_factor, recompute_scale_factor] = closed_over_args; - if (size == c10::nullopt && scale_factor == c10::nullopt) { + if (size == std::nullopt && scale_factor == std::nullopt) { TORCH_CHECK(false, "either size or scale_factor should be defined"); } - if (size != c10::nullopt && scale_factor != c10::nullopt) { + if (size != std::nullopt && scale_factor != std::nullopt) { TORCH_CHECK(false, "only one of size or scale_factor should be defined"); } - if (scale_factor != c10::nullopt) { + if (scale_factor != std::nullopt) { if (static_cast(scale_factor.value().size()) != dim) { TORCH_CHECK( false, @@ -36,14 +36,14 @@ inline std::vector _interp_output_size( torch::ArrayRef(*scale_factor)); } } - if (size != c10::nullopt) { + if (size != std::nullopt) { return *size; } - TORCH_INTERNAL_ASSERT(scale_factor != c10::nullopt); + 
TORCH_INTERNAL_ASSERT(scale_factor != std::nullopt); auto scale_factors = *scale_factor; - if (recompute_scale_factor == c10::nullopt) { + if (recompute_scale_factor == std::nullopt) { // only warn when the scales have floating values since // the result for ints is the same with/without recompute_scale_factor bool is_float_scale_factor = false; @@ -83,14 +83,14 @@ inline Tensor interpolate( bool antialias) { if (std::holds_alternative(mode) || std::get_if(&mode)) { - if (align_corners != c10::nullopt) { + if (align_corners != std::nullopt) { TORCH_CHECK( false, "align_corners option can only be set with the " "interpolating modes: linear | bilinear | bicubic | trilinear"); } } else { - if (align_corners == c10::nullopt) { + if (align_corners == std::nullopt) { TORCH_WARN( "Default upsampling behavior when mode=", enumtype::get_enum_name(mode), @@ -114,8 +114,8 @@ inline Tensor interpolate( auto scale_factor_len = input.dim() - 2; std::vector> scale_factor_list( - scale_factor_len, c10::nullopt); - if (scale_factor != c10::nullopt && !recompute_scale_factor.value_or(false)) { + scale_factor_len, std::nullopt); + if (scale_factor != std::nullopt && !recompute_scale_factor.value_or(false)) { auto _scale_factor_repeated = *scale_factor; scale_factor_list = {}; for (const auto& elem : _scale_factor_repeated) { @@ -181,7 +181,7 @@ inline Tensor interpolate( input, _interp_output_size(3, std::move(closed_over_args))); } else if (input.dim() == 3 && std::get_if(&mode)) { TORCH_CHECK( - align_corners != c10::nullopt, "align_corners should be specified."); + align_corners != std::nullopt, "align_corners should be specified."); return torch::upsample_linear1d( input, _interp_output_size(1, std::move(closed_over_args)), @@ -195,7 +195,7 @@ inline Tensor interpolate( TORCH_CHECK(false, "Got 4D input, but linear mode needs 3D input"); } else if (input.dim() == 4 && std::get_if(&mode)) { TORCH_CHECK( - align_corners != c10::nullopt, "align_corners should be specified."); + align_corners != std::nullopt, "align_corners should be specified."); if (antialias) { return torch::_upsample_bilinear2d_aa( input, @@ -218,7 +218,7 @@ inline Tensor interpolate( TORCH_CHECK(false, "Got 5D input, but bilinear mode needs 4D input"); } else if (input.dim() == 5 && std::get_if(&mode)) { TORCH_CHECK( - align_corners != c10::nullopt, "align_corners should be specified."); + align_corners != std::nullopt, "align_corners should be specified."); return torch::upsample_trilinear3d( input, _interp_output_size(3, std::move(closed_over_args)), @@ -228,7 +228,7 @@ inline Tensor interpolate( scale_factor_list.at(2)); } else if (input.dim() == 4 && std::get_if(&mode)) { TORCH_CHECK( - align_corners != c10::nullopt, "align_corners should be specified."); + align_corners != std::nullopt, "align_corners should be specified."); if (antialias) { return torch::_upsample_bicubic2d_aa( input, diff --git a/torch/csrc/api/include/torch/nn/modules/batchnorm.h b/torch/csrc/api/include/torch/nn/modules/batchnorm.h index ec76c6b4a6fb..0f5e32746936 100644 --- a/torch/csrc/api/include/torch/nn/modules/batchnorm.h +++ b/torch/csrc/api/include/torch/nn/modules/batchnorm.h @@ -106,7 +106,7 @@ class BatchNormImplBase : public NormImplBase { this->_check_input_dim(input); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) double exponential_average_factor; - if (this->options.momentum() == c10::nullopt) { + if (this->options.momentum() == std::nullopt) { exponential_average_factor = 0.0; } else { exponential_average_factor = 
this->options.momentum().value(); @@ -116,7 +116,7 @@ class BatchNormImplBase : public NormImplBase { if (this->num_batches_tracked.defined()) { this->num_batches_tracked += 1; if (this->options.momentum() == - c10::nullopt) { // use cumulative moving average + std::nullopt) { // use cumulative moving average exponential_average_factor = 1.0 / this->num_batches_tracked.template item(); } else { // use exponential moving average diff --git a/torch/csrc/api/include/torch/nn/modules/conv.h b/torch/csrc/api/include/torch/nn/modules/conv.h index 9c55254ddb91..e44fd44b954a 100644 --- a/torch/csrc/api/include/torch/nn/modules/conv.h +++ b/torch/csrc/api/include/torch/nn/modules/conv.h @@ -350,7 +350,7 @@ class TORCH_API ConvTranspose1dImpl explicit ConvTranspose1dImpl(ConvTranspose1dOptions options_); Tensor forward( const Tensor& input, - const std::optional& output_size = c10::nullopt); + const std::optional& output_size = std::nullopt); protected: FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(std::optional())}) @@ -392,7 +392,7 @@ class TORCH_API ConvTranspose2dImpl explicit ConvTranspose2dImpl(ConvTranspose2dOptions options_); Tensor forward( const Tensor& input, - const std::optional& output_size = c10::nullopt); + const std::optional& output_size = std::nullopt); protected: FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(std::optional())}) @@ -434,7 +434,7 @@ class TORCH_API ConvTranspose3dImpl explicit ConvTranspose3dImpl(ConvTranspose3dOptions options_); Tensor forward( const Tensor& input, - const std::optional& output_size = c10::nullopt); + const std::optional& output_size = std::nullopt); protected: FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(std::optional())}) diff --git a/torch/csrc/api/include/torch/nn/modules/pooling.h b/torch/csrc/api/include/torch/nn/modules/pooling.h index 6bcdca463b1b..0fac60edbcde 100644 --- a/torch/csrc/api/include/torch/nn/modules/pooling.h +++ b/torch/csrc/api/include/torch/nn/modules/pooling.h @@ -507,7 +507,7 @@ class TORCH_API MaxUnpool1dImpl : public MaxUnpoolImpl<1, MaxUnpool1dImpl> { Tensor forward( const Tensor& input, const Tensor& indices, - const std::optional>& output_size = c10::nullopt); + const std::optional>& output_size = std::nullopt); protected: FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(std::optional>())}) @@ -539,7 +539,7 @@ class TORCH_API MaxUnpool2dImpl : public MaxUnpoolImpl<2, MaxUnpool2dImpl> { Tensor forward( const Tensor& input, const Tensor& indices, - const std::optional>& output_size = c10::nullopt); + const std::optional>& output_size = std::nullopt); protected: FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(std::optional>())}) @@ -571,7 +571,7 @@ class TORCH_API MaxUnpool3dImpl : public MaxUnpoolImpl<3, MaxUnpool3dImpl> { Tensor forward( const Tensor& input, const Tensor& indices, - const std::optional>& output_size = c10::nullopt); + const std::optional>& output_size = std::nullopt); protected: FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(std::optional>())}) diff --git a/torch/csrc/api/include/torch/nn/modules/utils.h b/torch/csrc/api/include/torch/nn/modules/utils.h index 869027a24149..6eaa0c1fb2c7 100644 --- a/torch/csrc/api/include/torch/nn/modules/utils.h +++ b/torch/csrc/api/include/torch/nn/modules/utils.h @@ -1,8 +1,8 @@ #pragma once #include -#include #include +#include #include diff --git a/torch/csrc/api/include/torch/nn/options/activation.h b/torch/csrc/api/include/torch/nn/options/activation.h index 165212e0e860..ac6cbc4ea4de 100644 --- a/torch/csrc/api/include/torch/nn/options/activation.h +++ b/torch/csrc/api/include/torch/nn/options/activation.h @@ 
-252,7 +252,7 @@ struct TORCH_API SoftmaxFuncOptions { /// If specified, the input tensor is casted to `dtype` before the operation /// is performed. This is useful for preventing data type overflows. Default: /// None. - TORCH_ARG(std::optional, dtype) = c10::nullopt; + TORCH_ARG(std::optional, dtype) = std::nullopt; }; } // namespace functional @@ -293,7 +293,7 @@ struct TORCH_API SoftminFuncOptions { /// If specified, the input tensor is casted to `dtype` before the operation /// is performed. This is useful for preventing data type overflows. Default: /// None. - TORCH_ARG(std::optional, dtype) = c10::nullopt; + TORCH_ARG(std::optional, dtype) = std::nullopt; }; } // namespace functional @@ -334,7 +334,7 @@ struct TORCH_API LogSoftmaxFuncOptions { /// If specified, the input tensor is casted to `dtype` before the operation /// is performed. This is useful for preventing data type overflows. Default: /// None. - TORCH_ARG(std::optional, dtype) = c10::nullopt; + TORCH_ARG(std::optional, dtype) = std::nullopt; }; } // namespace functional @@ -640,10 +640,10 @@ struct TORCH_API MultiheadAttentionOptions { /// add a new batch of zeros to the key and value sequences at dim=1. TORCH_ARG(bool, add_zero_attn) = false; - /// total number of features in key. Default: c10::nullopt. + /// total number of features in key. Default: std::nullopt. TORCH_ARG(int64_t, kdim); - /// total number of features in key. Default: c10::nullopt. + /// total number of features in key. Default: std::nullopt. TORCH_ARG(int64_t, vdim); }; diff --git a/torch/csrc/api/include/torch/nn/options/embedding.h b/torch/csrc/api/include/torch/nn/options/embedding.h index 20eacf907335..a3d2fdb72f54 100644 --- a/torch/csrc/api/include/torch/nn/options/embedding.h +++ b/torch/csrc/api/include/torch/nn/options/embedding.h @@ -28,10 +28,10 @@ struct TORCH_API EmbeddingOptions { /// Embedding, the embedding vector at `padding_idx` will default to all /// zeros, but can be updated to another value to be used as the padding /// vector. - TORCH_ARG(std::optional, padding_idx) = c10::nullopt; + TORCH_ARG(std::optional, padding_idx) = std::nullopt; /// If given, each embedding vector with norm larger than `max_norm` is /// renormalized to have norm `max_norm`. - TORCH_ARG(std::optional, max_norm) = c10::nullopt; + TORCH_ARG(std::optional, max_norm) = std::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the @@ -55,10 +55,10 @@ struct TORCH_API EmbeddingFromPretrainedOptions { /// If specified, the entries at `padding_idx` do not contribute to the /// gradient; therefore, the embedding vector at `padding_idx` is not updated /// during training, i.e. it remains as a fixed "pad". - TORCH_ARG(std::optional, padding_idx) = c10::nullopt; + TORCH_ARG(std::optional, padding_idx) = std::nullopt; /// If given, each embedding vector with norm larger than `max_norm` is /// renormalized to have norm `max_norm`. - TORCH_ARG(std::optional, max_norm) = c10::nullopt; + TORCH_ARG(std::optional, max_norm) = std::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. 
TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the @@ -84,10 +84,10 @@ struct TORCH_API EmbeddingFuncOptions { /// If specified, the entries at `padding_idx` do not contribute to the /// gradient; therefore, the embedding vector at `padding_idx` is not updated /// during training, i.e. it remains as a fixed "pad". - TORCH_ARG(std::optional, padding_idx) = c10::nullopt; + TORCH_ARG(std::optional, padding_idx) = std::nullopt; /// If given, each embedding vector with norm larger than `max_norm` is /// renormalized to have norm `max_norm`. - TORCH_ARG(std::optional, max_norm) = c10::nullopt; + TORCH_ARG(std::optional, max_norm) = std::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the @@ -120,7 +120,7 @@ struct TORCH_API EmbeddingBagOptions { TORCH_ARG(int64_t, embedding_dim); /// If given, each embedding vector with norm larger than `max_norm` is /// renormalized to have norm `max_norm`. - TORCH_ARG(std::optional, max_norm) = c10::nullopt; + TORCH_ARG(std::optional, max_norm) = std::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the @@ -148,7 +148,7 @@ struct TORCH_API EmbeddingBagOptions { /// zeros, but can be updated to another value to be used as the padding /// vector. Note that the embedding vector at `padding_idx` is excluded from /// the reduction. - TORCH_ARG(std::optional, padding_idx) = c10::nullopt; + TORCH_ARG(std::optional, padding_idx) = std::nullopt; }; // ============================================================================ @@ -161,7 +161,7 @@ struct TORCH_API EmbeddingBagFromPretrainedOptions { TORCH_ARG(bool, freeze) = true; /// If given, each embedding vector with norm larger than `max_norm` is /// renormalized to have norm `max_norm`. - TORCH_ARG(std::optional, max_norm) = c10::nullopt; + TORCH_ARG(std::optional, max_norm) = std::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the @@ -184,7 +184,7 @@ struct TORCH_API EmbeddingBagFromPretrainedOptions { /// gradient; therefore, the embedding vector at padding_idx is not updated /// during training, i.e. it remains as a fixed "pad". Note that the embedding /// vector at `padding_idx` is excluded from the reduction. - TORCH_ARG(std::optional, padding_idx) = c10::nullopt; + TORCH_ARG(std::optional, padding_idx) = std::nullopt; }; // ============================================================================ @@ -205,7 +205,7 @@ struct TORCH_API EmbeddingBagFuncOptions { TORCH_ARG(torch::Tensor, offsets) = Tensor(); /// If given, each embedding vector with norm larger than `max_norm` is /// renormalized to have norm `max_norm`. - TORCH_ARG(std::optional, max_norm) = c10::nullopt; + TORCH_ARG(std::optional, max_norm) = std::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the @@ -233,7 +233,7 @@ struct TORCH_API EmbeddingBagFuncOptions { /// gradient; therefore, the embedding vector at padding_idx is not updated /// during training, i.e. it remains as a fixed "pad". 
Note that the embedding /// vector at `padding_idx` is excluded from the reduction. - TORCH_ARG(std::optional, padding_idx) = c10::nullopt; + TORCH_ARG(std::optional, padding_idx) = std::nullopt; }; } // namespace functional diff --git a/torch/csrc/api/include/torch/nn/options/loss.h b/torch/csrc/api/include/torch/nn/options/loss.h index f1fc7a4d4111..5a6e7aa3ab20 100644 --- a/torch/csrc/api/include/torch/nn/options/loss.h +++ b/torch/csrc/api/include/torch/nn/options/loss.h @@ -451,7 +451,7 @@ struct TORCH_API TripletMarginWithDistanceLossOptions { /// closeness of two tensors. If not specified, `F::pairwise_distance` will /// be used. Default: nullopt TORCH_ARG(std::optional, distance_function) = - c10::nullopt; + std::nullopt; /// Specifies a nonnegative margin representing the minimum difference /// between the positive and negative distances required for the loss to be 0. /// Larger margins penalize cases where the negative examples are not distance @@ -548,7 +548,7 @@ struct TORCH_API SmoothL1LossOptions { /// Specifies the threshold at which to change between L1 and L2 loss. /// If beta is not specified, a value of 1.0 will be used. /// Default: nullopt - TORCH_ARG(std::optional, beta) = c10::nullopt; + TORCH_ARG(std::optional, beta) = std::nullopt; }; namespace functional { diff --git a/torch/csrc/api/include/torch/nn/options/normalization.h b/torch/csrc/api/include/torch/nn/options/normalization.h index a1e5b1a0aeab..4b6dcd6ffe0c 100644 --- a/torch/csrc/api/include/torch/nn/options/normalization.h +++ b/torch/csrc/api/include/torch/nn/options/normalization.h @@ -133,7 +133,7 @@ struct TORCH_API NormalizeFuncOptions { TORCH_ARG(double, eps) = 1e-12; /// the output tensor. If `out` is used, this /// operation won't be differentiable. - TORCH_ARG(std::optional, out) = c10::nullopt; + TORCH_ARG(std::optional, out) = std::nullopt; }; } // namespace functional diff --git a/torch/csrc/api/include/torch/nn/options/pooling.h b/torch/csrc/api/include/torch/nn/options/pooling.h index 8f6cee99bff6..75408890e7cd 100644 --- a/torch/csrc/api/include/torch/nn/options/pooling.h +++ b/torch/csrc/api/include/torch/nn/options/pooling.h @@ -32,7 +32,7 @@ struct AvgPoolOptions { /// if specified, it will be used as divisor, otherwise size of the pooling /// region will be used. - TORCH_ARG(std::optional, divisor_override) = c10::nullopt; + TORCH_ARG(std::optional, divisor_override) = std::nullopt; }; /// `AvgPoolOptions` specialized for the `AvgPool1d` module. @@ -401,7 +401,7 @@ struct MaxUnpoolFuncOptions { TORCH_ARG(ExpandingArray, padding) = 0; /// the targeted output size - TORCH_ARG(std::optional>, output_size) = c10::nullopt; + TORCH_ARG(std::optional>, output_size) = std::nullopt; }; /// `MaxUnpoolFuncOptions` specialized for @@ -450,12 +450,12 @@ struct FractionalMaxPoolOptions { TORCH_ARG(ExpandingArray, kernel_size); /// the target output size of the image - TORCH_ARG(std::optional>, output_size) = c10::nullopt; + TORCH_ARG(std::optional>, output_size) = std::nullopt; /// If one wants to have an output size as a ratio of the input size, this /// option can be given. 
This has to be a number or tuple in the range (0, 1) using ExpandingArrayDouble = torch::ExpandingArray; - TORCH_ARG(std::optional, output_ratio) = c10::nullopt; + TORCH_ARG(std::optional, output_ratio) = std::nullopt; TORCH_ARG(torch::Tensor, _random_samples) = Tensor(); }; diff --git a/torch/csrc/api/include/torch/nn/options/upsampling.h b/torch/csrc/api/include/torch/nn/options/upsampling.h index 21df2b89998d..df8eb194180a 100644 --- a/torch/csrc/api/include/torch/nn/options/upsampling.h +++ b/torch/csrc/api/include/torch/nn/options/upsampling.h @@ -20,10 +20,10 @@ namespace nn { /// ``` struct TORCH_API UpsampleOptions { /// output spatial sizes. - TORCH_ARG(std::optional>, size) = c10::nullopt; + TORCH_ARG(std::optional>, size) = std::nullopt; /// multiplier for spatial size. - TORCH_ARG(std::optional>, scale_factor) = c10::nullopt; + TORCH_ARG(std::optional>, scale_factor) = std::nullopt; /// the upsampling algorithm: one of "nearest", "linear", "bilinear", /// "bicubic" and "trilinear". Default: "nearest" @@ -40,7 +40,7 @@ struct TORCH_API UpsampleOptions { /// aligned, and thus preserving the values at those pixels. This only has /// effect when :attr:`mode` is "linear", "bilinear", "bicubic", or /// "trilinear". Default: "False" - TORCH_ARG(std::optional, align_corners) = c10::nullopt; + TORCH_ARG(std::optional, align_corners) = std::nullopt; }; namespace functional { @@ -65,10 +65,10 @@ struct TORCH_API InterpolateFuncOptions { mode_t; /// output spatial sizes. - TORCH_ARG(std::optional>, size) = c10::nullopt; + TORCH_ARG(std::optional>, size) = std::nullopt; /// multiplier for spatial size. - TORCH_ARG(std::optional>, scale_factor) = c10::nullopt; + TORCH_ARG(std::optional>, scale_factor) = std::nullopt; /// the upsampling algorithm: one of "nearest", "linear", "bilinear", /// "bicubic", "trilinear", "area", "nearest-exact". Default: "nearest" @@ -83,7 +83,7 @@ struct TORCH_API InterpolateFuncOptions { /// this operation *independent* of input size when `scale_factor` is /// kept the same. It is *required* when interpolating mode is "linear", /// "bilinear", "bicubic" or "trilinear". Default: "False" - TORCH_ARG(std::optional, align_corners) = c10::nullopt; + TORCH_ARG(std::optional, align_corners) = std::nullopt; /// recompute the scale_factor for use in the /// interpolation calculation. When `scale_factor` is passed as a parameter, @@ -95,7 +95,7 @@ struct TORCH_API InterpolateFuncOptions { /// used in the interpolation computation. Note that when `scale_factor` is /// floating-point, the recomputed scale_factor may differ from the one passed /// in due to rounding and precision issues. - TORCH_ARG(std::optional, recompute_scale_factor) = c10::nullopt; + TORCH_ARG(std::optional, recompute_scale_factor) = std::nullopt; /// flag to apply anti-aliasing. Using anti-alias /// option together with :attr:`align_corners` equals "False", interpolation diff --git a/torch/csrc/api/include/torch/nn/options/vision.h b/torch/csrc/api/include/torch/nn/options/vision.h index c012b40d21f6..a5204f0dffb6 100644 --- a/torch/csrc/api/include/torch/nn/options/vision.h +++ b/torch/csrc/api/include/torch/nn/options/vision.h @@ -28,7 +28,7 @@ struct TORCH_API GridSampleFuncOptions { /// padding mode for outside grid values. Default: Zeros TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros; /// Specifies perspective to pixel as point. 
Default: false - TORCH_ARG(std::optional, align_corners) = c10::nullopt; + TORCH_ARG(std::optional, align_corners) = std::nullopt; }; } // namespace functional diff --git a/torch/csrc/api/include/torch/nn/utils/clip_grad.h b/torch/csrc/api/include/torch/nn/utils/clip_grad.h index fbb533662c7b..8a2a569c0333 100644 --- a/torch/csrc/api/include/torch/nn/utils/clip_grad.h +++ b/torch/csrc/api/include/torch/nn/utils/clip_grad.h @@ -64,7 +64,7 @@ inline double clip_grad_norm_( // synchronizing the CPU and the gradients' device until the very end to // preserve async execution on the device. When checking for finite-ness, this // optional ensures we only sync once. - std::optional total_norm = c10::nullopt; + std::optional total_norm = std::nullopt; if (error_if_nonfinite) { total_norm = total_norm_tensor.item().toDouble(); TORCH_CHECK( @@ -79,7 +79,7 @@ inline double clip_grad_norm_( auto clip_coef = max_norm / (total_norm_tensor + 1e-6); auto clip_coef_clamped = - torch::clamp(clip_coef, c10::nullopt /* min */, 1.0 /* max */); + torch::clamp(clip_coef, std::nullopt /* min */, 1.0 /* max */); for (auto& param : params_with_grad) { param.grad().data().mul_(clip_coef_clamped); } diff --git a/torch/csrc/api/include/torch/nn/utils/convert_parameters.h b/torch/csrc/api/include/torch/nn/utils/convert_parameters.h index 6f62d483c4d8..b8bfee33473f 100644 --- a/torch/csrc/api/include/torch/nn/utils/convert_parameters.h +++ b/torch/csrc/api/include/torch/nn/utils/convert_parameters.h @@ -15,7 +15,7 @@ inline std::optional _check_param_device( const torch::Tensor& param, std::optional old_param_device) { // Meet the first parameter - if (old_param_device == c10::nullopt) { + if (old_param_device == std::nullopt) { old_param_device = param.is_cuda() ? param.get_device() : -1; } else { bool warn = false; diff --git a/torch/csrc/api/include/torch/optim/lbfgs.h b/torch/csrc/api/include/torch/optim/lbfgs.h index 001b0cd33f25..0832afff5f8f 100644 --- a/torch/csrc/api/include/torch/optim/lbfgs.h +++ b/torch/csrc/api/include/torch/optim/lbfgs.h @@ -17,11 +17,11 @@ struct TORCH_API LBFGSOptions : public OptimizerCloneableOptions { LBFGSOptions(double lr = 1); TORCH_ARG(double, lr) = 1; TORCH_ARG(int64_t, max_iter) = 20; - TORCH_ARG(std::optional, max_eval) = c10::nullopt; + TORCH_ARG(std::optional, max_eval) = std::nullopt; TORCH_ARG(double, tolerance_grad) = 1e-7; TORCH_ARG(double, tolerance_change) = 1e-9; TORCH_ARG(int64_t, history_size) = 100; - TORCH_ARG(std::optional, line_search_fn) = c10::nullopt; + TORCH_ARG(std::optional, line_search_fn) = std::nullopt; public: void serialize(torch::serialize::InputArchive& archive) override; @@ -45,7 +45,7 @@ struct TORCH_API LBFGSParamState TORCH_ARG(std::deque, old_dirs); TORCH_ARG(std::deque, old_stps); TORCH_ARG(std::deque, ro); - TORCH_ARG(std::optional>, al) = c10::nullopt; + TORCH_ARG(std::optional>, al) = std::nullopt; public: void serialize(torch::serialize::InputArchive& archive) override; @@ -66,13 +66,13 @@ class TORCH_API LBFGS : public Optimizer { TORCH_CHECK( param_groups_.size() == 1, "LBFGS doesn't support per-parameter options (parameter groups)"); - if (defaults.max_eval() == c10::nullopt) { + if (defaults.max_eval() == std::nullopt) { auto max_eval_val = (defaults.max_iter() * 5) / 4; static_cast(param_groups_[0].options()) .max_eval(max_eval_val); static_cast(*defaults_.get()).max_eval(max_eval_val); } - _numel_cache = c10::nullopt; + _numel_cache = std::nullopt; } explicit LBFGS(std::vector params, LBFGSOptions defaults = {}) : 
LBFGS({OptimizerParamGroup(std::move(params))}, defaults) {} diff --git a/torch/csrc/api/include/torch/optim/optimizer.h b/torch/csrc/api/include/torch/optim/optimizer.h index 1f448e4fffd6..dd5bd600ff3e 100644 --- a/torch/csrc/api/include/torch/optim/optimizer.h +++ b/torch/csrc/api/include/torch/optim/optimizer.h @@ -186,22 +186,22 @@ class TORCH_API Optimizer { }; /* How do we decide whether to serialize undefined tensors or - c10::nullopt values into the output archive? + std::nullopt values into the output archive? Answer: we strictly follow the behavior of Python API. To be more specific: For optimizer options: a) For undefined tensor: currently no tensor is used as an options argument in -Python API, so we don't need to worry about it now. b) For c10::nullopt value: -we serialize c10::nullopt values into the output archive, to follow the exact +Python API, so we don't need to worry about it now. b) For std::nullopt value: +we serialize std::nullopt values into the output archive, to follow the exact same behavior as Python API. For optimizer param state: a) For undefined tensor: in param state, undefined tensor in C++ impl is equivalent to missing key in Python impl. Since we don't serialize missing keys in Python API, we skip undefined tensors when serializing the param state. b) -For c10::nullopt value: in param state, c10::nullopt value in C++ impl is +For std::nullopt value: in param state, std::nullopt value in C++ impl is equivalent to missing key in Python impl. Since we don't serialize missing keys -in Python API, we skip c10::nullopt values when serializing the param state. */ +in Python API, we skip std::nullopt values when serializing the param state. */ /// Serializes an `Optimizer` into an `OutputArchive`. TORCH_API serialize::OutputArchive& operator<<( diff --git a/torch/csrc/api/include/torch/serialize/input-archive.h b/torch/csrc/api/include/torch/serialize/input-archive.h index f77b34aad0bd..3650cfcfea23 100644 --- a/torch/csrc/api/include/torch/serialize/input-archive.h +++ b/torch/csrc/api/include/torch/serialize/input-archive.h @@ -1,10 +1,10 @@ #pragma once #include -#include #include #include #include +#include #include #include @@ -76,27 +76,27 @@ class TORCH_API InputArchive final { /// is not specified, the module is loaded to the original device. void load_from( const std::string& filename, - std::optional device = c10::nullopt); + std::optional device = std::nullopt); /// Loads the `InputArchive` from a serialized representation stored in the /// given `stream`. Storage are remapped using device option. If device /// is not specified, the module is loaded to the original device. void load_from( std::istream& stream, - std::optional device = c10::nullopt); + std::optional device = std::nullopt); // Loads given the specified flat array. void load_from( const char* data, size_t size, - std::optional device = c10::nullopt); + std::optional device = std::nullopt); // Loads given the specified read and size functions. void load_from( const std::function& read_func, const std::function& size_func, - std::optional device = c10::nullopt); + std::optional device = std::nullopt); // Returns the vector of keys in the input archive. 
std::vector keys(); diff --git a/torch/csrc/api/include/torch/types.h b/torch/csrc/api/include/torch/types.h index 8a23cd122b8d..febda7ac6bb8 100644 --- a/torch/csrc/api/include/torch/types.h +++ b/torch/csrc/api/include/torch/types.h @@ -2,7 +2,7 @@ #include -#include +#include #include #include @@ -38,7 +38,7 @@ namespace torch { // the `func()` function defined in `at::` namespace is always hidden. using namespace at; // NOLINT -using c10::nullopt; +using std::nullopt; using std::optional; using Dtype = at::ScalarType; diff --git a/torch/csrc/api/src/jit.cpp b/torch/csrc/api/src/jit.cpp index 16d9d0040a65..07064dbdc9e7 100644 --- a/torch/csrc/api/src/jit.cpp +++ b/torch/csrc/api/src/jit.cpp @@ -11,7 +11,7 @@ namespace jit { std::shared_ptr compile(const std::string& source) { auto module = std::make_shared(); - module->define(c10::nullopt, source, nativeResolver(), nullptr); + module->define(std::nullopt, source, nativeResolver(), nullptr); return module; } diff --git a/torch/csrc/api/src/nn/modules/activation.cpp b/torch/csrc/api/src/nn/modules/activation.cpp index 56218ad091de..518072d0653f 100644 --- a/torch/csrc/api/src/nn/modules/activation.cpp +++ b/torch/csrc/api/src/nn/modules/activation.cpp @@ -130,7 +130,7 @@ void SoftmaxImpl::pretty_print(std::ostream& stream) const { } Tensor SoftmaxImpl::forward(const Tensor& input) { - return F::detail::softmax(input, options.dim(), c10::nullopt); + return F::detail::softmax(input, options.dim(), std::nullopt); } // ============================================================================ @@ -144,7 +144,7 @@ void SoftminImpl::pretty_print(std::ostream& stream) const { } Tensor SoftminImpl::forward(const Tensor& input) { - return F::detail::softmin(input, options.dim(), c10::nullopt); + return F::detail::softmin(input, options.dim(), std::nullopt); } // ============================================================================ @@ -159,7 +159,7 @@ void LogSoftmaxImpl::pretty_print(std::ostream& stream) const { } Tensor LogSoftmaxImpl::forward(const Tensor& input) { - return F::detail::log_softmax(input, options.dim(), c10::nullopt); + return F::detail::log_softmax(input, options.dim(), std::nullopt); } // ============================================================================ @@ -174,7 +174,7 @@ Tensor Softmax2dImpl::forward(const Tensor& input) { TORCH_CHECK( input.dim() == 4 || input.dim() == 3, "Softmax2d requires a 3D or 4D tensor as input"); - return F::detail::softmax(input, /*dim=*/-3, c10::nullopt); + return F::detail::softmax(input, /*dim=*/-3, std::nullopt); } // ============================================================================ diff --git a/torch/csrc/api/src/nn/modules/conv.cpp b/torch/csrc/api/src/nn/modules/conv.cpp index 197c3cf0725c..26e52df637f8 100644 --- a/torch/csrc/api/src/nn/modules/conv.cpp +++ b/torch/csrc/api/src/nn/modules/conv.cpp @@ -176,7 +176,7 @@ std::vector ConvTransposeNdImpl::_output_padding( std::vector ret; std::optional output_size_ = output_size; - if (output_size_ == c10::nullopt) { + if (output_size_ == std::nullopt) { ret = at::IntArrayRef(this->options.output_padding()).vec(); } else { auto k = input.dim() - 2; diff --git a/torch/csrc/api/src/nn/modules/embedding.cpp b/torch/csrc/api/src/nn/modules/embedding.cpp index 553a93875e17..4c6683d1f36b 100644 --- a/torch/csrc/api/src/nn/modules/embedding.cpp +++ b/torch/csrc/api/src/nn/modules/embedding.cpp @@ -20,7 +20,7 @@ EmbeddingImpl::EmbeddingImpl(EmbeddingOptions options_) } void EmbeddingImpl::reset() { - if (options.padding_idx() 
!= c10::nullopt) { + if (options.padding_idx() != std::nullopt) { if (*options.padding_idx() > 0) { TORCH_CHECK( *options.padding_idx() < options.num_embeddings(), @@ -50,7 +50,7 @@ void EmbeddingImpl::reset() { void EmbeddingImpl::reset_parameters() { torch::nn::init::normal_(weight); - if (options.padding_idx() != c10::nullopt) { + if (options.padding_idx() != std::nullopt) { torch::NoGradGuard no_grad; weight[*options.padding_idx()].fill_(0); } @@ -59,10 +59,10 @@ void EmbeddingImpl::reset_parameters() { void EmbeddingImpl::pretty_print(std::ostream& stream) const { stream << "torch::nn::Embedding(num_embeddings=" << options.num_embeddings() << ", embedding_dim=" << options.embedding_dim(); - if (options.padding_idx() != c10::nullopt) { + if (options.padding_idx() != std::nullopt) { stream << ", padding_idx=" << *options.padding_idx(); } - if (options.max_norm() != c10::nullopt) { + if (options.max_norm() != std::nullopt) { stream << ", max_norm=" << *options.max_norm(); } if (options.norm_type() != 2) { @@ -154,7 +154,7 @@ void EmbeddingBagImpl::pretty_print(std::ostream& stream) const { stream << "torch::nn::EmbeddingBag(num_embeddings=" << options.num_embeddings() << ", embedding_dim=" << options.embedding_dim(); - if (options.max_norm() != c10::nullopt) { + if (options.max_norm() != std::nullopt) { stream << ", max_norm=" << *options.max_norm(); } if (options.norm_type() != 2) { diff --git a/torch/csrc/api/src/nn/modules/pooling.cpp b/torch/csrc/api/src/nn/modules/pooling.cpp index 0b11b914dcc1..a02d8cd712aa 100644 --- a/torch/csrc/api/src/nn/modules/pooling.cpp +++ b/torch/csrc/api/src/nn/modules/pooling.cpp @@ -281,19 +281,19 @@ FractionalMaxPool2dImpl::FractionalMaxPool2dImpl( void FractionalMaxPool2dImpl::reset() { _random_samples = register_buffer("_random_samples", options._random_samples()); - if (options.output_size() == c10::nullopt && - options.output_ratio() == c10::nullopt) { + if (options.output_size() == std::nullopt && + options.output_ratio() == std::nullopt) { TORCH_CHECK( false, "FractionalMaxPool2d requires specifying either ", "an output size, or a pooling ratio"); } - if (options.output_size() != c10::nullopt && - options.output_ratio() != c10::nullopt) { + if (options.output_size() != std::nullopt && + options.output_ratio() != std::nullopt) { TORCH_CHECK( false, "only one of output_size and output_ratio may be specified"); } - if (options.output_ratio() != c10::nullopt) { + if (options.output_ratio() != std::nullopt) { at::ArrayRef output_ratio = at::ArrayRef(options.output_ratio().value()); if (!(0 < output_ratio[0] && output_ratio[0] < 1 && 0 < output_ratio[1] && @@ -340,19 +340,19 @@ FractionalMaxPool3dImpl::FractionalMaxPool3dImpl( void FractionalMaxPool3dImpl::reset() { _random_samples = register_buffer("_random_samples", options._random_samples()); - if (options.output_size() == c10::nullopt && - options.output_ratio() == c10::nullopt) { + if (options.output_size() == std::nullopt && + options.output_ratio() == std::nullopt) { TORCH_CHECK( false, "FractionalMaxPool3d requires specifying either ", "an output size, or a pooling ratio"); } - if (options.output_size() != c10::nullopt && - options.output_ratio() != c10::nullopt) { + if (options.output_size() != std::nullopt && + options.output_ratio() != std::nullopt) { TORCH_CHECK( false, "only one of output_size and output_ratio may be specified"); } - if (options.output_ratio() != c10::nullopt) { + if (options.output_ratio() != std::nullopt) { at::ArrayRef output_ratio = 
at::ArrayRef(options.output_ratio().value()); if (!(0 < output_ratio[0] && output_ratio[0] < 1 && 0 < output_ratio[1] && diff --git a/torch/csrc/api/src/nn/modules/upsampling.cpp b/torch/csrc/api/src/nn/modules/upsampling.cpp index 8e7bb2fe33cd..378d5aadb920 100644 --- a/torch/csrc/api/src/nn/modules/upsampling.cpp +++ b/torch/csrc/api/src/nn/modules/upsampling.cpp @@ -15,7 +15,7 @@ void UpsampleImpl::reset() {} void UpsampleImpl::pretty_print(std::ostream& stream) const { stream << "torch::nn::Upsample("; - if (options.scale_factor() != c10::nullopt) { + if (options.scale_factor() != std::nullopt) { stream << "scale_factor=" << at::ArrayRef(*options.scale_factor()); } else { stream << "size=" << at::ArrayRef(*options.size()); @@ -43,7 +43,7 @@ Tensor UpsampleImpl::forward(const Tensor& input) { options.scale_factor(), mode, options.align_corners(), - c10::nullopt, + std::nullopt, false); } diff --git a/torch/csrc/api/src/optim/lbfgs.cpp b/torch/csrc/api/src/optim/lbfgs.cpp index 10739be62386..dbf17f718614 100644 --- a/torch/csrc/api/src/optim/lbfgs.cpp +++ b/torch/csrc/api/src/optim/lbfgs.cpp @@ -68,7 +68,7 @@ bool if_container_equal(T lhs, T rhs) { bool operator==(const LBFGSParamState& lhs, const LBFGSParamState& rhs) { auto isNull = [](const std::optional>& val) { - return val == c10::nullopt; + return val == std::nullopt; }; return (lhs.func_evals() == rhs.func_evals()) && (lhs.n_iter() == rhs.n_iter()) && (lhs.t() == rhs.t()) && @@ -97,7 +97,7 @@ void LBFGSParamState::serialize( _TORCH_OPTIM_SERIALIZE_TORCH_ARG_DEQUE(old_stps); _TORCH_OPTIM_SERIALIZE_TORCH_ARG_DEQUE(ro); // Python version only serializes state vars if explicitly defined - if (al() != c10::nullopt) { + if (al() != std::nullopt) { _TORCH_OPTIM_SERIALIZE_TORCH_ARG(al); } } @@ -131,7 +131,7 @@ Tensor LBFGS::_gather_flat_grad() { } int64_t LBFGS::_numel() { - if (_numel_cache == c10::nullopt) { + if (_numel_cache == std::nullopt) { auto res = 0; for (const auto& p : param_groups_.at(0).params()) { res += p.numel(); @@ -194,12 +194,12 @@ static double _cubic_interpolate( double x2, double f2, double g2, - std::optional> bounds = c10::nullopt) { + std::optional> bounds = std::nullopt) { // ported from https://github.com/torch/optim/blob/master/polyinterp.lua // Compute bounds of interpolation area // NOLINTNEXTLINE(cppcoreguidelines-init-variables) double xmin_bound, xmax_bound; - if (bounds != c10::nullopt) { + if (bounds != std::nullopt) { std::tie(xmin_bound, xmax_bound) = *bounds; } else { std::tie(xmin_bound, xmax_bound) = @@ -509,7 +509,7 @@ Tensor LBFGS::step(LossClosure closure) { // multiplied by the gradient int64_t num_old = static_cast(old_dirs.size()); - if (state.al() == c10::nullopt) { + if (state.al() == std::nullopt) { state.al(std::vector(history_size)); } auto& al = state.al(); @@ -557,7 +557,7 @@ Tensor LBFGS::step(LossClosure closure) { // optional line search: user function auto ls_func_evals = 0; - if (line_search_fn != c10::nullopt) { + if (line_search_fn != std::nullopt) { TORCH_CHECK( *line_search_fn == "strong_wolfe", "only 'strong_wolfe' is supported"); @@ -627,7 +627,7 @@ void LBFGS::load(serialize::InputArchive& archive) { TORCH_WARN( "Your serialized LBFGS optimizer is still using the old serialization format. " "The func_evals and n_iter value in state will be set to 0, ro will be set to an empty deque " - "and al will be set to c10::nullopt because the old LBFGS optimizer didn't save these values." 
+ "and al will be set to std::nullopt because the old LBFGS optimizer didn't save these values." "You should re-save your LBFGS optimizer to use the new serialization format."); Tensor d, t, H_diag, prev_flat_grad, prev_loss; std::deque old_dirs, old_stps; diff --git a/torch/csrc/api/src/serialize/input-archive.cpp b/torch/csrc/api/src/serialize/input-archive.cpp index 852f4eab1b52..8644b6193e0b 100644 --- a/torch/csrc/api/src/serialize/input-archive.cpp +++ b/torch/csrc/api/src/serialize/input-archive.cpp @@ -93,20 +93,20 @@ void InputArchive::read(const std::string& key, InputArchive& archive) { void InputArchive::load_from( const std::string& filename, - std::optional device /*= c10::nullopt*/) { + std::optional device /*= std::nullopt*/) { module_ = torch::jit::load(filename, std::move(device)); } void InputArchive::load_from( std::istream& stream, - std::optional device /*= c10::nullopt*/) { + std::optional device /*= std::nullopt*/) { module_ = torch::jit::load(stream, std::move(device)); } void InputArchive::load_from( const char* data, size_t size, - std::optional device /*= c10::nullopt*/) { + std::optional device /*= std::nullopt*/) { using caffe2::serialize::ReadAdapterInterface; class OurAdapter : public ReadAdapterInterface { public: @@ -136,7 +136,7 @@ void InputArchive::load_from( void InputArchive::load_from( const std::function& read_func, const std::function& size_func, - std::optional device /*= c10::nullopt*/) { + std::optional device /*= std::nullopt*/) { using caffe2::serialize::ReadAdapterInterface; class OurAdapter : public ReadAdapterInterface { public: diff --git a/torch/csrc/autograd/FunctionsManual.cpp b/torch/csrc/autograd/FunctionsManual.cpp index 9d897c667c90..7ca1a1720968 100644 --- a/torch/csrc/autograd/FunctionsManual.cpp +++ b/torch/csrc/autograd/FunctionsManual.cpp @@ -630,7 +630,7 @@ Tensor div_tensor_self_backward( T other, ScalarType self_st) { return div_tensor_self_backward( - grad, std::move(other), self_st, c10::nullopt); + grad, std::move(other), self_st, std::nullopt); } template Tensor div_tensor_self_backward(const Tensor&, Tensor, ScalarType); template Tensor div_tensor_self_backward(const Tensor&, Scalar, ScalarType); @@ -652,7 +652,7 @@ Tensor div_tensor_other_backward( const Tensor& grad, const Tensor& self, const Tensor& other) { - return div_tensor_other_backward(grad, self, other, c10::nullopt); + return div_tensor_other_backward(grad, self, other, std::nullopt); } Tensor permute_backwards(const Tensor& grad, IntArrayRef fwd_dims) { @@ -1282,12 +1282,12 @@ Tensor convolution_jvp( at::SymIntArrayRef output_padding, const c10::SymInt& groups) { auto bias_t_opt = - bias_t.defined() ? std::optional(bias_t) : c10::nullopt; + bias_t.defined() ? std::optional(bias_t) : std::nullopt; return ( at::convolution_symint( input_t, weight_p, - c10::nullopt, + std::nullopt, stride, padding, dilation, @@ -1324,12 +1324,12 @@ Tensor _convolution_jvp( bool cudnn_enabled, bool allow_tf32) { auto bias_t_opt = - bias_t.defined() ? std::optional(bias_t) : c10::nullopt; + bias_t.defined() ? std::optional(bias_t) : std::nullopt; return ( at::_convolution_symint( input_t, weight_p, - c10::nullopt, + std::nullopt, stride, padding, dilation, @@ -6193,7 +6193,7 @@ Tensor batch_norm_jvp( std::optional result_p = weight_p.defined() ? std::optional((input_p - mean_p) * invstd_p) - : c10::nullopt; + : std::nullopt; return _affine_jvp( result_p, result_t, @@ -6232,7 +6232,7 @@ Tensor layer_norm_jvp( std::optional result_p = weight_p.defined() ? 
std::optional((input_p - mean_p) * invstd_p) - : c10::nullopt; + : std::nullopt; return _affine_jvp( result_p, result_t, @@ -6273,7 +6273,7 @@ Tensor group_norm_jvp( /*eps=*/0) .view(input_shape); - std::optional result_p = c10::nullopt; + std::optional result_p = std::nullopt; if (weight_p.defined()) { std::vector view_size(input_t_reshaped.dim(), 1); view_size[1] = input_t_reshaped.size(1); @@ -6706,7 +6706,7 @@ std::tuple _cudnn_convolution_backward( grad_output, self, weight, - c10::nullopt, + std::nullopt, stride, padding, dilation, @@ -6956,7 +6956,7 @@ Tensor to_sparse_backward( if (self_layout == c10::kStrided) { return grad.to_dense(); } else { - OptionalIntArrayRef blocksize = c10::nullopt; + OptionalIntArrayRef blocksize = std::nullopt; if (self_blocksize.has_value()) { blocksize = c10::asIntArrayRefSlowOpt(*self_blocksize); } diff --git a/torch/csrc/autograd/FunctionsManual.h b/torch/csrc/autograd/FunctionsManual.h index dedff70be1ba..3c461dd88ee5 100644 --- a/torch/csrc/autograd/FunctionsManual.h +++ b/torch/csrc/autograd/FunctionsManual.h @@ -39,7 +39,7 @@ TORCH_API inline std::optional wrap_opt_if( const Tensor& t, const bool cond) { using OptTensor = std::optional; - return cond ? OptTensor(t) : static_cast(c10::nullopt); + return cond ? OptTensor(t) : static_cast(std::nullopt); } TORCH_API Tensor diff --git a/torch/csrc/autograd/TraceTypeManual.cpp b/torch/csrc/autograd/TraceTypeManual.cpp index 46e4014d8dd1..1473058a3a53 100644 --- a/torch/csrc/autograd/TraceTypeManual.cpp +++ b/torch/csrc/autograd/TraceTypeManual.cpp @@ -1,11 +1,11 @@ #include #include #include -#include #include #include #include #include +#include using namespace at; diff --git a/torch/csrc/autograd/VariableTypeManual.cpp b/torch/csrc/autograd/VariableTypeManual.cpp index 20f66694677e..92096dca9a69 100644 --- a/torch/csrc/autograd/VariableTypeManual.cpp +++ b/torch/csrc/autograd/VariableTypeManual.cpp @@ -2,7 +2,6 @@ #include #include #include -#include #include #include #include @@ -11,6 +10,7 @@ #include #include #include +#include #include diff --git a/torch/csrc/autograd/VariableTypeUtils.h b/torch/csrc/autograd/VariableTypeUtils.h index d5fe8a70dae1..3b598898f80c 100644 --- a/torch/csrc/autograd/VariableTypeUtils.h +++ b/torch/csrc/autograd/VariableTypeUtils.h @@ -217,7 +217,7 @@ inline at::Tensor as_view( tensor, diff_view_meta->get_backward_view().chain( base, tensor, std::move(view_func), std::move(rev_view_func)), - c10::nullopt, + std::nullopt, /*shared_view_info*/ true, creation_meta, allow_tensor_metadata_change); @@ -225,7 +225,7 @@ inline at::Tensor as_view( return make_variable_differentiable_view( tensor, ViewInfo(base, std::move(view_func), std::move(rev_view_func)), - c10::nullopt, + std::nullopt, /*shared_view_info*/ true, creation_meta, allow_tensor_metadata_change); diff --git a/torch/csrc/autograd/autograd.h b/torch/csrc/autograd/autograd.h index 94ee179225a4..bd5d4a462102 100644 --- a/torch/csrc/autograd/autograd.h +++ b/torch/csrc/autograd/autograd.h @@ -47,7 +47,7 @@ namespace torch::autograd { TORCH_API void backward( const variable_list& tensors, const variable_list& grad_tensors = {}, - std::optional retain_graph = c10::nullopt, + std::optional retain_graph = std::nullopt, bool create_graph = false, const variable_list& inputs = {}); @@ -81,7 +81,7 @@ TORCH_API variable_list grad( const variable_list& outputs, const variable_list& inputs, const variable_list& grad_outputs = {}, - std::optional retain_graph = c10::nullopt, + std::optional retain_graph = std::nullopt, bool 
create_graph = false, bool allow_unused = false); diff --git a/torch/csrc/autograd/autograd_not_implemented_fallback.cpp b/torch/csrc/autograd/autograd_not_implemented_fallback.cpp index eff2a27c105f..f922c3fc7632 100644 --- a/torch/csrc/autograd/autograd_not_implemented_fallback.cpp +++ b/torch/csrc/autograd/autograd_not_implemented_fallback.cpp @@ -345,7 +345,7 @@ static void autogradNotImplementedFallbackImpl( [&](size_t idx, size_t _, const at::Tensor& t) { storage_saved.push_back( t.has_storage() ? std::optional(t.storage()) - : c10::nullopt); + : std::nullopt); impl_saved.push_back(t.getIntrusivePtr()); }, &stack_args_copy, diff --git a/torch/csrc/autograd/engine.cpp b/torch/csrc/autograd/engine.cpp index cfacbf0e3be7..cb9f5caca0ee 100644 --- a/torch/csrc/autograd/engine.cpp +++ b/torch/csrc/autograd/engine.cpp @@ -735,10 +735,10 @@ void GraphTask::exec_post_processing() { for (const auto& leaf_stream : leaf_streams) { // stash_current_cuda/privateuse1_streams() stashed streams for all device // IDs that already had a CUDA/privateuse1 context before the GraphTask - // executed. For inactive devices, it stashed a c10::nullopt. I don't + // executed. For inactive devices, it stashed a std::nullopt. I don't // expect GraphTask's backward pass ran leaf nodes on any new devices, so // the stashed streams should be enough. If leaf_stream.device_index() - // happens to be for a new device, operator* on the c10::nullopt should + // happens to be for a new device, operator* on the std::nullopt should // throw an error. const auto caller_current_stream = // NOLINTNEXTLINE(bugprone-unchecked-optional-access) @@ -1554,7 +1554,7 @@ void GraphTask::stash_current_streams() { idx)) { caller_current_streams_[idx] = guard.getStream({accelerator, idx}); } else { - caller_current_streams_[idx] = c10::nullopt; + caller_current_streams_[idx] = std::nullopt; } } } diff --git a/torch/csrc/autograd/function.h b/torch/csrc/autograd/function.h index c8c3538a061f..4f7f53c90ec1 100644 --- a/torch/csrc/autograd/function.h +++ b/torch/csrc/autograd/function.h @@ -242,14 +242,14 @@ struct TORCH_API Node : std::enable_shared_from_this { std::optional stream() { auto opt_device_type = at::getAccelerator(); if (!opt_device_type.has_value()) { - return c10::nullopt; + return std::nullopt; } for (const auto& metadata : input_metadata_) { if (metadata.device().type() == opt_device_type.value()) return metadata.stream(); } - return c10::nullopt; + return std::nullopt; } void clear_input_metadata() { diff --git a/torch/csrc/autograd/functions/accumulate_grad.h b/torch/csrc/autograd/functions/accumulate_grad.h index 2efde9d5f2f2..99597a73762f 100644 --- a/torch/csrc/autograd/functions/accumulate_grad.h +++ b/torch/csrc/autograd/functions/accumulate_grad.h @@ -224,7 +224,7 @@ struct TORCH_API AccumulateGrad : public Node { // variable_grad += new_grad; // } else { // result = at::empty_strided(variable.sizes(), variable.strides(), - // variable.options().memory_format(c10::nullopt)); + // variable.options().memory_format(std::nullopt)); // update_grad(at::native::add_out(result, variable_grad, // new_grad, 1.0); // } diff --git a/torch/csrc/autograd/functions/comm.cpp b/torch/csrc/autograd/functions/comm.cpp index 1aed18cb79a5..5093f51e7eff 100644 --- a/torch/csrc/autograd/functions/comm.cpp +++ b/torch/csrc/autograd/functions/comm.cpp @@ -105,7 +105,7 @@ variable_list Gather::apply(variable_list&& inputs) { std::move(source_devices), std::move(input_sizes), dim_, - /*streams=*/c10::nullopt, + /*streams=*/std::nullopt, 
/*unsqueeze_scalars=*/unsqueeze_scalars); grad_fn->set_next_edges(collect_next_edges(inputs)); } diff --git a/torch/csrc/autograd/functions/comm.h b/torch/csrc/autograd/functions/comm.h index 0924cd030fce..2730827a1eb3 100644 --- a/torch/csrc/autograd/functions/comm.h +++ b/torch/csrc/autograd/functions/comm.h @@ -6,7 +6,7 @@ #include #include -#include +#include #include #include @@ -17,10 +17,10 @@ namespace autograd { struct TORCH_CUDA_CU_API Scatter : public Node { explicit Scatter( std::vector devices, - std::optional> chunk_sizes = c10::nullopt, + std::optional> chunk_sizes = std::nullopt, int64_t dim = 0, std::optional>> streams = - c10::nullopt, + std::nullopt, bool unsqueeze_scalars = false); ~Scatter() override; diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp index e6a907ee2f0a..b22199ee1ad6 100644 --- a/torch/csrc/autograd/init.cpp +++ b/torch/csrc/autograd/init.cpp @@ -1084,7 +1084,7 @@ static PyObject* push_on_torch_dispatch_stack( using c10::impl::TorchDispatchModeKey; // When we push a mode onto the mode stack, we need to // check if it's an "infra" mode, by checking its _mode_key attribute. - std::optional mode_key = c10::nullopt; + std::optional mode_key = std::nullopt; py::object maybe_mode_key_obj = PyObject_FastGetAttrString(arg, "_mode_key"); if (maybe_mode_key_obj) { @@ -1108,7 +1108,7 @@ static PyObject* pop_torch_dispatch_stack( PyObject* _unused, PyObject* maybe_mode_key) { HANDLE_TH_ERRORS - std::optional mode_key = c10::nullopt; + std::optional mode_key = std::nullopt; PyObject* r = nullptr; if (maybe_mode_key != Py_None) { mode_key = py::cast(maybe_mode_key); @@ -1174,7 +1174,7 @@ static PyObject* get_dispatch_mode(PyObject* _unused, PyObject* arg) { auto mode_key = py::cast(arg); auto maybe_mode = c10::impl::TorchDispatchModeTLS::get_mode(mode_key); - if (maybe_mode == c10::nullopt) { + if (maybe_mode == std::nullopt) { Py_RETURN_NONE; } // NOLINTNEXTLINE(bugprone-unchecked-optional-access) @@ -1190,7 +1190,7 @@ static PyObject* unset_dispatch_mode(PyObject* _unused, PyObject* arg) { auto mode_key = py::cast(arg); const auto maybe_mode = c10::impl::TorchDispatchModeTLS::unset_mode(mode_key); - if (maybe_mode == c10::nullopt) { + if (maybe_mode == std::nullopt) { Py_RETURN_NONE; } // NOLINTNEXTLINE(bugprone-unchecked-optional-access) diff --git a/torch/csrc/autograd/input_buffer.cpp b/torch/csrc/autograd/input_buffer.cpp index 6c12bbadc5d2..f2b08e364318 100644 --- a/torch/csrc/autograd/input_buffer.cpp +++ b/torch/csrc/autograd/input_buffer.cpp @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include @@ -159,7 +159,7 @@ void InputBuffer::add( // Accumulation happens on the var device's default stream. 
TORCH_INTERNAL_ASSERT(device_of(var)); - std::optional opt_accumulate_stream = c10::nullopt; + std::optional opt_accumulate_stream = std::nullopt; const auto device_type = device_of(var).value().type(); // NOLINTNEXTLINE(bugprone-unchecked-optional-access) if (device_of(var)->is_cuda() || device_of(var)->is_privateuseone()) { @@ -179,7 +179,7 @@ void InputBuffer::add( record_stream_any_impl(var, *opt_accumulate_stream); } } else { - std::optional opt_sync_stream = c10::nullopt; + std::optional opt_sync_stream = std::nullopt; const auto guard = c10::impl::VirtualGuardImpl{device_type}; if (on_consumer && !on_producer) { // (3a) diff --git a/torch/csrc/autograd/input_buffer.h b/torch/csrc/autograd/input_buffer.h index 7e471ef528bb..e445ef897fc1 100644 --- a/torch/csrc/autograd/input_buffer.h +++ b/torch/csrc/autograd/input_buffer.h @@ -9,8 +9,8 @@ #include #include -#include #include +#include namespace torch::autograd { diff --git a/torch/csrc/autograd/profiler_legacy.cpp b/torch/csrc/autograd/profiler_legacy.cpp index b9387479667e..53a24eaa150d 100644 --- a/torch/csrc/autograd/profiler_legacy.cpp +++ b/torch/csrc/autograd/profiler_legacy.cpp @@ -122,7 +122,7 @@ using torch::profiler::impl::ProfilerStateBase; struct ProfilerLegacyThreadLocalState : public ProfilerStateBase { explicit ProfilerLegacyThreadLocalState( const torch::profiler::impl::ProfilerConfig& config) - : ProfilerStateBase(config), remoteProfiledEvents_{c10::nullopt} {} + : ProfilerStateBase(config), remoteProfiledEvents_{std::nullopt} {} ~ProfilerLegacyThreadLocalState() override = default; static ProfilerLegacyThreadLocalState* getTLS() { diff --git a/torch/csrc/autograd/profiler_legacy.h b/torch/csrc/autograd/profiler_legacy.h index 9bd88b0b3dc5..59198129b2b2 100644 --- a/torch/csrc/autograd/profiler_legacy.h +++ b/torch/csrc/autograd/profiler_legacy.h @@ -336,7 +336,7 @@ TORCH_API void enableProfilerLegacy( using thread_event_lists = std::vector>; TORCH_API thread_event_lists disableProfilerLegacy( std::optional profilerDisableOptions = - c10::nullopt); + std::nullopt); // adds profiledEvents to the current thread local recorded events. Each event // will be marked with node ID given by fromNodeId. @@ -377,9 +377,9 @@ struct TORCH_API TLSLegacyProfilerGuard { explicit TLSLegacyProfilerGuard( const torch::profiler::impl::ProfilerConfig& cfg, std::optional> - resultCallback = c10::nullopt, + resultCallback = std::nullopt, std::optional profilerDisableOptions = - c10::nullopt) + std::nullopt) : cb_(std::move(resultCallback)), profilerDisableOptions_(profilerDisableOptions) { enableProfilerLegacy(cfg); diff --git a/torch/csrc/autograd/profiler_python.cpp b/torch/csrc/autograd/profiler_python.cpp index 5fcc7b86a2fa..e930faa1fdeb 100644 --- a/torch/csrc/autograd/profiler_python.cpp +++ b/torch/csrc/autograd/profiler_python.cpp @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -29,6 +28,7 @@ #include #include #include +#include namespace py = pybind11; @@ -349,7 +349,7 @@ TensorMetadata toTensorMetadata(PyObject* self) { std::optional ValueCache::recordIfTensor(py::handle p) { return THPVariable_CheckExact(p.ptr()) ? 
std::optional{toTensorMetadata(p.ptr())} - : c10::nullopt; + : std::nullopt; } std::vector> ValueCache::unpackTensorMap( @@ -379,7 +379,7 @@ void ValueCache::store(const PyCallKey& key, no_ephemeral_t) { template <> ExtraFields::args_t ValueCache::load( const PyCallKey& key) const { - return {std::get(state_).at(key), c10::nullopt}; + return {std::get(state_).at(key), std::nullopt}; } template <> @@ -419,7 +419,7 @@ ExtraFields::args_t ValueCache::load( return { /*frame_state_=*/std::get(state_).at(*cache.location_), /*module_info_=*/std::move(info), - /*optimizer_info_=*/c10::nullopt}; + /*optimizer_info_=*/std::nullopt}; } template <> @@ -465,7 +465,7 @@ ExtraFields::args_t ValueCache::load< return { // NOLINTNEXTLINE(bugprone-unchecked-optional-access) /*frame_state_=*/std::get(state_).at(*cache.location_), - /*module_info_=*/c10::nullopt, + /*module_info_=*/std::nullopt, /*optimizer_info_=*/std::move(info)}; } diff --git a/torch/csrc/autograd/python_function.cpp b/torch/csrc/autograd/python_function.cpp index 0227229d1f7f..a5ba07b2cdb5 100644 --- a/torch/csrc/autograd/python_function.cpp +++ b/torch/csrc/autograd/python_function.cpp @@ -778,7 +778,7 @@ static void _get_tensors_to_save( for (const auto i : c10::irange(num_saved)) { PyObject* obj = PyTuple_GET_ITEM(self->to_save, i); if (obj == Py_None) { - tensors_to_save.emplace_back(c10::nullopt); + tensors_to_save.emplace_back(std::nullopt); continue; } else if (THPVariable_Check(obj)) { const auto& tensor = THPVariable_Unpack(obj); diff --git a/torch/csrc/autograd/python_function.h b/torch/csrc/autograd/python_function.h index c2744f365476..0bf3c8bbab70 100644 --- a/torch/csrc/autograd/python_function.h +++ b/torch/csrc/autograd/python_function.h @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include diff --git a/torch/csrc/autograd/python_variable.cpp b/torch/csrc/autograd/python_variable.cpp index 65f4b0efd3c1..94596c32a705 100644 --- a/torch/csrc/autograd/python_variable.cpp +++ b/torch/csrc/autograd/python_variable.cpp @@ -347,7 +347,7 @@ bool isResurrectable(THPVariable* self) { // Check if this is hermetic. If it is, no resurrection. if (tensor.unsafeGetTensorImpl()->pyobj_slot()->check_pyobj( getPyInterpreter(), /*ignore_hermetic_tls=*/false) != - c10::make_optional((PyObject*)self)) { + std::make_optional((PyObject*)self)) { return false; } return true; @@ -455,7 +455,7 @@ static int THPVariable_clear(THPVariable* self) { if (!self->cdata.unsafeIsBorrowed() && tensor.unsafeGetTensorImpl()->pyobj_slot()->check_pyobj( getPyInterpreter(), /*ignore_hermetic_tls=*/false) == - c10::make_optional((PyObject*)self)) { + std::make_optional((PyObject*)self)) { // TODO: empirically, on OS X this assert appears to be untrue // In test_py_tensors_multi_async_call - ProcessGroupRpcTestWithSpawn // distributed/rpc/test_process_group_agent.py @@ -587,14 +587,14 @@ static PyObject* view_func_impl( auto& view_func = view_info.view_fn(); // Determine new SymInt / tensor state as needed. 
- std::optional> new_symints = c10::nullopt; + std::optional> new_symints = std::nullopt; if (symint_visitor_fn != Py_None) { new_symints = map_py_func( py::cast(symint_visitor_fn), view_func.get_symints()); } - std::optional> new_tensors = c10::nullopt; + std::optional> new_tensors = std::nullopt; if (tensor_visitor_fn != Py_None) { new_tensors = map_py_func( py::cast(tensor_visitor_fn), diff --git a/torch/csrc/autograd/python_variable_indexing.cpp b/torch/csrc/autograd/python_variable_indexing.cpp index fdcafd6cd709..e9b40b0dc8f7 100644 --- a/torch/csrc/autograd/python_variable_indexing.cpp +++ b/torch/csrc/autograd/python_variable_indexing.cpp @@ -100,7 +100,7 @@ static inline Variable sequenceToVariable( c10::TensorOptions options, PyObject* seq) { return torch::utils::indexing_tensor_from_data( - options, kLong, c10::nullopt, seq); + options, kLong, std::nullopt, seq); } inline Variable valueToTensor( @@ -201,7 +201,7 @@ static inline Variable applySlicing( // as null may need to be changed after we reach a better solution for // nested tensor size std::optional result_sizes = result.is_nested() - ? std::optional(c10::nullopt) + ? std::optional(std::nullopt) : std::optional(result.sym_sizes()); result = at::indexing::handleDimInMultiDimIndexing( /*prev_dim_result=*/result, diff --git a/torch/csrc/autograd/record_function_ops.h b/torch/csrc/autograd/record_function_ops.h index a145523c1bf8..a84d47c5b482 100644 --- a/torch/csrc/autograd/record_function_ops.h +++ b/torch/csrc/autograd/record_function_ops.h @@ -1,7 +1,7 @@ #pragma once #include -#include #include +#include namespace torch::autograd::profiler { @@ -17,7 +17,7 @@ struct PythonRecordFunction : public torch::CustomClassHolder { // callbacks. TORCH_API c10::intrusive_ptr record_function_enter_new( const std::string& name, - const std::optional& args = c10::nullopt); + const std::optional& args = std::nullopt); // Schedules RecordFunction's end callbacks to be run on completion of a future. TORCH_API c10::intrusive_ptr _call_end_callbacks_on_fut_new( diff --git a/torch/csrc/autograd/utils/grad_layout_contract.h b/torch/csrc/autograd/utils/grad_layout_contract.h index 1dad10663dd7..7189e0204725 100644 --- a/torch/csrc/autograd/utils/grad_layout_contract.h +++ b/torch/csrc/autograd/utils/grad_layout_contract.h @@ -67,7 +67,7 @@ inline at::Tensor clone_obey_contract( .new_empty_strided_symint( variable.sym_sizes(), variable.sym_strides(), - variable.options().memory_format(c10::nullopt)) + variable.options().memory_format(std::nullopt)) .copy_(new_grad)); } else { // (2) diff --git a/torch/csrc/autograd/utils/python_arg_parsing.h b/torch/csrc/autograd/utils/python_arg_parsing.h index 326221e44d14..e3fd671fb57c 100644 --- a/torch/csrc/autograd/utils/python_arg_parsing.h +++ b/torch/csrc/autograd/utils/python_arg_parsing.h @@ -31,7 +31,7 @@ parse_to_conversion(PythonArgs& r, bool allow_copy) { if (!allow_copy && !r.isNone(2)) throw std::runtime_error(".to() does not accept copy argument"); return std::make_tuple( - c10::nullopt, + std::nullopt, r.scalartype(0), r.toBool(1), r.toBool(2), diff --git a/torch/csrc/autograd/variable.h b/torch/csrc/autograd/variable.h index d60f37085f38..2ce91146dc8d 100644 --- a/torch/csrc/autograd/variable.h +++ b/torch/csrc/autograd/variable.h @@ -351,8 +351,8 @@ struct TORCH_API ViewFunc { /// Returns a clone of this ViewFunc, optionally with the specified saved /// state. 
virtual std::unique_ptr clone_and_set( - std::optional> = c10::nullopt, - std::optional> = c10::nullopt) const = 0; + std::optional> = std::nullopt, + std::optional> = std::nullopt) const = 0; protected: /// Sets the values of any SymInts in the saved state. The input vector size @@ -382,8 +382,8 @@ struct ChainedViewFunc : public ViewFunc { } virtual at::Tensor operator()(const at::Tensor&) const override; virtual std::unique_ptr clone_and_set( - std::optional> = c10::nullopt, - std::optional> = c10::nullopt) const override; + std::optional> = std::nullopt, + std::optional> = std::nullopt) const override; private: std::unique_ptr first; @@ -398,8 +398,8 @@ struct ErroringViewFunc : public ViewFunc { TORCH_CHECK(false, error_msg); } virtual std::unique_ptr clone_and_set( - std::optional> = c10::nullopt, - std::optional> = c10::nullopt) const override { + std::optional> = std::nullopt, + std::optional> = std::nullopt) const override { return std::make_unique(error_msg); } diff --git a/torch/csrc/cuda/comm.cpp b/torch/csrc/cuda/comm.cpp index d8f968eae5f5..52331909fe1d 100644 --- a/torch/csrc/cuda/comm.cpp +++ b/torch/csrc/cuda/comm.cpp @@ -11,9 +11,9 @@ #include #include #include -#include #include #include +#include #include #include diff --git a/torch/csrc/cuda/comm.h b/torch/csrc/cuda/comm.h index 92009a1c40ad..860629bcf2e9 100644 --- a/torch/csrc/cuda/comm.h +++ b/torch/csrc/cuda/comm.h @@ -3,8 +3,8 @@ #include #include #include -#include #include +#include #include #include @@ -29,15 +29,15 @@ TORCH_CUDA_CU_API std::vector& scatter_out( std::vector& out_tensors, int64_t dim = 0, const std::optional>>& - streams = c10::nullopt); + streams = std::nullopt); TORCH_CUDA_CU_API std::vector scatter( const at::Tensor& tensor, at::IntArrayRef devices, - const std::optional>& chunk_sizes = c10::nullopt, + const std::optional>& chunk_sizes = std::nullopt, int64_t dim = 0, const std::optional>>& - streams = c10::nullopt); + streams = std::nullopt); TORCH_CUDA_CU_API at::Tensor& gather_out( at::TensorList tensors, diff --git a/torch/csrc/cuda/memory_snapshot.h b/torch/csrc/cuda/memory_snapshot.h index eb22767a78f9..fe5699af4160 100644 --- a/torch/csrc/cuda/memory_snapshot.h +++ b/torch/csrc/cuda/memory_snapshot.h @@ -1,8 +1,8 @@ #pragma once -#include #include #include +#include #include namespace torch::cuda { diff --git a/torch/csrc/cuda/nccl.h b/torch/csrc/cuda/nccl.h index 37d1be15cbd7..6561ccb6e76c 100644 --- a/torch/csrc/cuda/nccl.h +++ b/torch/csrc/cuda/nccl.h @@ -2,9 +2,9 @@ #include #include -#include #include +#include #include // NCCL BFloat16 is enabled only for CUDA 11+ and NCCL versions 2.10+, or for diff --git a/torch/csrc/cuda/python_nccl.cpp b/torch/csrc/cuda/python_nccl.cpp index 5060f9289a9e..f62311efbd93 100644 --- a/torch/csrc/cuda/python_nccl.cpp +++ b/torch/csrc/cuda/python_nccl.cpp @@ -60,7 +60,7 @@ static std::vector> unpack_streams( PyObject* obj, size_t size) { if (obj == Py_None) { - return std::vector>(size, c10::nullopt); + return std::vector>(size, std::nullopt); } auto streams = THPUtils_PySequence_to_CUDAStreamList(obj); if (streams.size() != size) { diff --git a/torch/csrc/distributed/autograd/engine/dist_engine.cpp b/torch/csrc/distributed/autograd/engine/dist_engine.cpp index 062a15da4964..d37e695c7719 100644 --- a/torch/csrc/distributed/autograd/engine/dist_engine.cpp +++ b/torch/csrc/distributed/autograd/engine/dist_engine.cpp @@ -98,7 +98,7 @@ void DistEngine::globalCpuThread( InputBuffer::variables(std::move(task.inputs_))]() mutable { InputBuffer 
inputs(variables.size()); for (const auto i : c10::irange(variables.size())) { - inputs.add(i, std::move(variables[i]), c10::nullopt, c10::nullopt); + inputs.add(i, std::move(variables[i]), std::nullopt, std::nullopt); } execute_graph_task_until_ready_queue_empty( /*node_task*/ NodeTask(graphTask, graphRoot, std::move(inputs)), diff --git a/torch/csrc/distributed/c10d/NCCLUtils.cpp b/torch/csrc/distributed/c10d/NCCLUtils.cpp index 6507fe6abc2a..98af0d51a3d0 100644 --- a/torch/csrc/distributed/c10d/NCCLUtils.cpp +++ b/torch/csrc/distributed/c10d/NCCLUtils.cpp @@ -18,7 +18,7 @@ namespace c10d { ncclComm_t NCCLComm::getNcclComm() { std::unique_lock lock(mutex_); if (aborted_) { - auto commFailureMsg = commFailureReason_ != c10::nullopt + auto commFailureMsg = commFailureReason_ != std::nullopt ? c10::str(" Original reason for failure was: ", *commFailureReason_) : ""; TORCH_CHECK_WITH( @@ -76,7 +76,7 @@ std::shared_ptr NCCLComm::split( C10D_NCCL_CHECK( ncclCommSplit( source->ncclComm_, color_id, rank, &(comm->ncclComm_), &config), - c10::nullopt); + std::nullopt); ++source->ncclCommSplitCounter_; comm->rank_ = rank; return comm; @@ -186,11 +186,11 @@ std::string ncclGetErrorWithVersion(ncclResult_t error) { // thrown in the NCCL codebase. std::string getNcclErrorDetailStr( ncclResult_t error, - std::optional processGroupFailureReason /* = c10::nullopt */ + std::optional processGroupFailureReason /* = std::nullopt */ ) { // Prioritize failure reason provided by PG NCCL first, as it can abort // communicators when it encounters collective timeouts, etc. - if (processGroupFailureReason != c10::nullopt) { + if (processGroupFailureReason != std::nullopt) { return *processGroupFailureReason; } std::string interpret; diff --git a/torch/csrc/distributed/c10d/NCCLUtils.hpp b/torch/csrc/distributed/c10d/NCCLUtils.hpp index 9ce25b55dc13..06568f6ce7d2 100644 --- a/torch/csrc/distributed/c10d/NCCLUtils.hpp +++ b/torch/csrc/distributed/c10d/NCCLUtils.hpp @@ -11,8 +11,8 @@ #include #include -#include #include +#include #if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ (NCCL_MINOR >= 14) @@ -183,7 +183,7 @@ bool shouldBroadcastNCCLUniqueID(bool isSendRecvSelf); // thrown in the NCCL codebase. TORCH_API std::string getNcclErrorDetailStr( ncclResult_t error, - std::optional processGroupFailureReason = c10::nullopt); + std::optional processGroupFailureReason = std::nullopt); // Write NCCL debug info to local disk or any storage users define. 
// There are some constrains we set for the debug info writer: @@ -221,7 +221,7 @@ class NCCLComm { : ncclComm_(ncclComm), aborted_(false), ncclAsyncErr_(ncclSuccess), - commFailureReason_(c10::nullopt), + commFailureReason_(std::nullopt), initialized_(false) {} NCCLComm() : NCCLComm(nullptr) {} @@ -249,7 +249,7 @@ class NCCLComm { auto comm = std::make_shared(); C10D_NCCL_CHECK( ncclCommInitRank(&(comm->ncclComm_), numRanks, commId, rank), - c10::nullopt); + std::nullopt); comm->ncclId_ = commId; comm->rank_ = rank; comm->initialized_ = true; @@ -271,12 +271,12 @@ class NCCLComm { C10D_NCCL_CHECK_NONBLOCKING( ncclCommInitRankConfig( &(comm->ncclComm_), numRanks, commId, rank, &config), - c10::nullopt); + std::nullopt); } else { C10D_NCCL_CHECK( ncclCommInitRankConfig( &(comm->ncclComm_), numRanks, commId, rank, &config), - c10::nullopt); + std::nullopt); // under blocking mode, comm is initialized after NCCL CHECK isInitialized = true; } @@ -301,7 +301,7 @@ class NCCLComm { LOG(INFO) << "Communicator was aborted before trying to dump its state."; return dump; } - C10D_NCCL_CHECK(::ncclCommDump(ncclComm_, dump), c10::nullopt); + C10D_NCCL_CHECK(::ncclCommDump(ncclComm_, dump), std::nullopt); return dump; } #endif @@ -336,7 +336,7 @@ class NCCLComm { } void ncclCommAbort( - std::optional commFailureReason = c10::nullopt) { + std::optional commFailureReason = std::nullopt) { std::unique_lock lock(mutex_); #ifdef ENABLE_NCCL_ERROR_CHECKING if (aborted_) { diff --git a/torch/csrc/distributed/c10d/ProcessGroupCudaP2P.hpp b/torch/csrc/distributed/c10d/ProcessGroupCudaP2P.hpp index cff4ad09b706..23ee93b91d7a 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupCudaP2P.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroupCudaP2P.hpp @@ -128,7 +128,7 @@ class TORCH_API ProcessGroupCudaP2P : public Backend { const BarrierOptions& opts = BarrierOptions()) override; c10::intrusive_ptr intra_node_barrier( - c10::optional> ranks = c10::nullopt); + c10::optional> ranks = std::nullopt); at::Tensor get_p2p_buffer( size_t rank, @@ -136,7 +136,7 @@ class TORCH_API ProcessGroupCudaP2P : public Backend { c10::ScalarType dtype, int64_t storage_offest = 0); - void shutdown(c10::optional reason = c10::nullopt); + void shutdown(c10::optional reason = std::nullopt); private: c10::intrusive_ptr nccl_backend_; diff --git a/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp b/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp index cba0249829e6..a6ed8fd26a16 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp @@ -2425,7 +2425,7 @@ class AsyncScatterWork : public ProcessGroupGloo::AsyncWork { seq, "gloo:scatter", !inputs.empty() ? 
std::optional>(inputs[0]) - : c10::nullopt), + : std::nullopt), context(context), outputs(outputs), inputs(inputs), @@ -2888,7 +2888,7 @@ class AsyncBarrierWork : public ProcessGroupGloo::AsyncWork { OpType::BARRIER, seq, "gloo:barrier", - c10::nullopt), + std::nullopt), context(context), priorWork(std::move(priorWork)), tag(tag) {} diff --git a/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp b/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp index 87c87b8f1ae9..9f1e63d58adf 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp @@ -74,7 +74,7 @@ class TORCH_API ProcessGroupGloo : public Backend { uint64_t seq, const char* profilingTitle = nullptr, const std::optional>& inputTensors = - c10::nullopt); + std::nullopt); ~AsyncWork() override = default; diff --git a/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp b/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp index 6d02f89f6005..91e9f938f1dd 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp @@ -673,7 +673,7 @@ c10::intrusive_ptr ProcessGroupMPI::scatter( "mpi:scatter", !inputTensors.empty() ? std::optional>(inputTensors[0]) - : c10::nullopt); + : std::nullopt); } else { auto entry = std::make_unique( nullptr, &outputTensors, std::move(runFunc)); @@ -682,7 +682,7 @@ c10::intrusive_ptr ProcessGroupMPI::scatter( "mpi:scatter", !inputTensors.empty() ? std::optional>(inputTensors[0]) - : c10::nullopt); + : std::nullopt); } } @@ -932,7 +932,7 @@ c10::intrusive_ptr ProcessGroupMPI::barrier(const BarrierOptions& opts) { }; auto entry = std::make_unique(nullptr, nullptr, std::move(runFunc)); - return enqueue(std::move(entry), "mpi:barrier", c10::nullopt); + return enqueue(std::move(entry), "mpi:barrier", std::nullopt); } c10::intrusive_ptr ProcessGroupMPI::_allgather_base( diff --git a/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp b/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp index 6e52e680e5c2..5eb06b739557 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp @@ -87,7 +87,7 @@ class TORCH_API ProcessGroupMPI : public Backend { std::vector outputTensors, const char* profilingTitle = nullptr, const std::optional>& inputTensors = - c10::nullopt) + std::nullopt) : Work(-1, OpType::UNKNOWN, profilingTitle, inputTensors), outputTensors_(std::move(outputTensors)), future_(c10::make_intrusive( @@ -115,7 +115,7 @@ class TORCH_API ProcessGroupMPI : public Backend { std::vector outputTensors, const char* profilingTitle = nullptr, const std::optional>& inputTensors = - c10::nullopt); + std::nullopt); ~AsyncWork() override; @@ -244,7 +244,7 @@ class TORCH_API ProcessGroupMPI : public Backend { std::unique_ptr entry, const char* profilingTitle = nullptr, const std::optional>& inputTensors = - c10::nullopt); + std::nullopt); bool stop_; diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp index e7699b552451..af940f53bf24 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -32,6 +31,7 @@ #include #include #include +#include namespace c10d { @@ -376,7 +376,7 @@ std::string dump_nccl_trace( bool includeStackTraces, bool onlyActive) { return NCCLTraceBuffer::get()->dump( - c10::nullopt, includeCollectives, includeStackTraces, onlyActive); + std::nullopt, 
includeCollectives, includeStackTraces, onlyActive); } #endif @@ -393,7 +393,7 @@ std::optional)>>& get_cpp_trace_dumper() { static std::optional< std::function)>> - dumper(c10::nullopt); + dumper(std::nullopt); return dumper; } @@ -658,7 +658,7 @@ void ProcessGroupNCCL::WorkNCCL::synchronizeInternal( if (blockingWait_) { while (!isCompleted()) { bool timedOut = checkTimeout( - timeout == kNoTimeout ? c10::nullopt : c10::make_optional(timeout)); + timeout == kNoTimeout ? std::nullopt : std::make_optional(timeout)); // Explicitly abort ncclComms here before throwing this timed out // exception to users. // If throwing timed out exception without aborting nccl communicators @@ -1245,7 +1245,7 @@ void ProcessGroupNCCL::heartbeatMonitor() { : heartbeatTimeoutInSec_ * 1000; auto lastTimePollStore = std::chrono::steady_clock::now(); auto lastTimeHeartBeatCheck = std::chrono::steady_clock::now(); - std::optional dumpPipe = c10::nullopt; + std::optional dumpPipe = std::nullopt; if (uid_ == 0) { // DumpPipe is one per-trainer process, and it's convenient to name them // after 'global' ranks in the system, so we assume processgroup (uid)==0 is @@ -1881,7 +1881,7 @@ std::exception_ptr ProcessGroupNCCL::checkForNCCLErrorsInternal( // Prioritize commFailureReason over checkForNcclError() result if // commFailureReason is set. auto commFailureReason = ncclComm->getNcclCommFailureReason(); - if (commFailureReason != c10::nullopt) { + if (commFailureReason != std::nullopt) { return std::make_exception_ptr(C10_BUILD_ERROR( DistBackendError, c10::str( @@ -2050,7 +2050,7 @@ std::shared_ptr ProcessGroupNCCL::getNCCLComm( bool singleP2POp = isP2POp(opType, batchP2P); // For point-to-point communication, lower rank of the two will get unique id. if (rank_ == 0 || (singleP2POp && p2pRank == 0)) { - C10D_NCCL_CHECK(ncclGetUniqueId(&ncclID), c10::nullopt); + C10D_NCCL_CHECK(ncclGetUniqueId(&ncclID), std::nullopt); } if (shouldBroadcastNCCLUniqueID(isSendRecvSelf)) { @@ -2086,7 +2086,7 @@ std::shared_ptr ProcessGroupNCCL::getNCCLComm( for (const auto i : c10::irange(ncclActiveGroupCounter_)) { (void)i; // comms have not been initiated yet, so can only check in blocking-way - C10D_NCCL_CHECK(ncclGroupEnd(), c10::nullopt); + C10D_NCCL_CHECK(ncclGroupEnd(), std::nullopt); } // GPU world size and GPU rank @@ -2182,7 +2182,7 @@ std::shared_ptr ProcessGroupNCCL::getNCCLComm( // See [Group Start/End Note] for (const auto i : c10::irange(ncclActiveGroupCounter_)) { (void)i; - C10D_NCCL_CHECK(ncclGroupStart(), c10::nullopt); + C10D_NCCL_CHECK(ncclGroupStart(), std::nullopt); } ncclStreams_.emplace(deviceKey, std::move(streamVal)); @@ -2334,7 +2334,7 @@ c10::intrusive_ptr ProcessGroupNCCL::initWork( seqCollective_, profilingTitle, profilingTitle != nullptr ?
std::optional>(inputs) - : c10::nullopt, + : std::nullopt, desyncDebug_, enableTiming_.load(), dist_debug_level_); @@ -4190,23 +4190,23 @@ c10::intrusive_ptr ProcessGroupNCCL::recv( } void ProcessGroupNCCL::groupStart() { - C10D_NCCL_CHECK(ncclGroupStart(), c10::nullopt); + C10D_NCCL_CHECK(ncclGroupStart(), std::nullopt); ++ncclActiveGroupCounter_; } void ProcessGroupNCCL::groupEnd() { - C10D_NCCL_CHECK(ncclGroupEnd(), c10::nullopt); + C10D_NCCL_CHECK(ncclGroupEnd(), std::nullopt); --ncclActiveGroupCounter_; } void ProcessGroupNCCL::groupEndNonblocking(std::shared_ptr comm) { #ifndef NCCL_HAS_COMM_NONBLOCKING - C10D_NCCL_CHECK(ncclGroupEnd(), c10::nullopt); + C10D_NCCL_CHECK(ncclGroupEnd(), std::nullopt); #else if (!nccl_use_nonblocking()) { - C10D_NCCL_CHECK(ncclGroupEnd(), c10::nullopt); + C10D_NCCL_CHECK(ncclGroupEnd(), std::nullopt); } else { - C10D_NCCL_CHECK_TIMEOUT_GROUPEND(ncclGroupEnd(), comm, c10::nullopt); + C10D_NCCL_CHECK_TIMEOUT_GROUPEND(ncclGroupEnd(), comm, std::nullopt); } #endif --ncclActiveGroupCounter_; diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp index faaabe411bfc..763ef9829618 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp @@ -254,7 +254,7 @@ class TORCH_API ProcessGroupNCCL : public Backend { OpType opType, uint64_t seq, const char* profilingTitle = nullptr, - const std::optional>& inputs = c10::nullopt, + const std::optional>& inputs = std::nullopt, bool desyncDebug = false, bool enableTiming = false, DebugLevel distDebugLevel = DebugLevel::Off); @@ -311,7 +311,7 @@ class TORCH_API ProcessGroupNCCL : public Backend { // and False otherwise. // In case of timeout, set exception on the WorkNCCL object. bool checkTimeout( - std::optional timeout = c10::nullopt); + std::optional timeout = std::nullopt); std::vector result() override; @@ -662,9 +662,9 @@ class TORCH_API ProcessGroupNCCL : public Backend { // Provides an API to abort the ProcessGroup (similar to ncclCommAbort) // instead of relying on ProcessGroupNCCL destructor. // return true if abort is successful, otherwise false - bool abort(std::optional abortReason = c10::nullopt); + bool abort(std::optional abortReason = std::nullopt); - void shutdown(std::optional reason = c10::nullopt); + void shutdown(std::optional reason = std::nullopt); void eagerConnectSingleDevice(at::Device device) override; diff --git a/torch/csrc/distributed/c10d/TCPStore.cpp b/torch/csrc/distributed/c10d/TCPStore.cpp index fe24c31f9068..2de969d135e8 100644 --- a/torch/csrc/distributed/c10d/TCPStore.cpp +++ b/torch/csrc/distributed/c10d/TCPStore.cpp @@ -293,7 +293,7 @@ TCPStore::TCPStore( masterPort, isServer, numWorkers ? 
std::optional(*numWorkers) - : c10::nullopt, + : std::nullopt, waitWorkers, timeout}} {} @@ -376,7 +376,7 @@ TCPStore::~TCPStore() = default; void TCPStore::waitForWorkers() { detail::timing_guard tguard(clientCounters_["waitForWorkers"]); - if (numWorkers_ == c10::nullopt) { + if (numWorkers_ == std::nullopt) { return; } diff --git a/torch/csrc/distributed/c10d/TCPStore.hpp b/torch/csrc/distributed/c10d/TCPStore.hpp index 25783f2d2ace..9fd29b1c844c 100644 --- a/torch/csrc/distributed/c10d/TCPStore.hpp +++ b/torch/csrc/distributed/c10d/TCPStore.hpp @@ -49,7 +49,7 @@ struct TCPStoreOptions { std::uint16_t port = kDefaultPort; bool isServer = false; - std::optional numWorkers = c10::nullopt; + std::optional numWorkers = std::nullopt; bool waitWorkers = true; std::chrono::milliseconds timeout = Store::kDefaultTimeout; @@ -60,7 +60,7 @@ struct TCPStoreOptions { // If specified, and if isServer is true, the underlying TCPServer will take // over the bound socket associated to this fd. This option is useful to avoid // port assignment races in certain scenarios. - std::optional masterListenFd = c10::nullopt; + std::optional masterListenFd = std::nullopt; // A boolean value indicating whether to use the experimental libUV backend. bool useLibUV = true; @@ -73,7 +73,7 @@ class TORCH_API TCPStore : public Store { [[deprecated("Use TCPStore(host, opts) instead.")]] explicit TCPStore( const std::string& masterAddr, std::uint16_t masterPort, - std::optional numWorkers = c10::nullopt, + std::optional numWorkers = std::nullopt, bool isServer = false, const std::chrono::milliseconds& timeout = kDefaultTimeout, bool waitWorkers = true); diff --git a/torch/csrc/distributed/c10d/TraceUtils.h b/torch/csrc/distributed/c10d/TraceUtils.h index de623d77fe9e..9ff71f9d41b8 100644 --- a/torch/csrc/distributed/c10d/TraceUtils.h +++ b/torch/csrc/distributed/c10d/TraceUtils.h @@ -516,7 +516,7 @@ struct NCCLTraceBuffer { std::chrono::milliseconds timeout_ms, bool isP2P) { if (!enabled_) { - return c10::nullopt; + return std::nullopt; } auto traceback = torch::CapturedTraceback::gather(true, true, capture_cpp_stack_); @@ -621,7 +621,7 @@ struct NCCLTraceBuffer { bool can_compute_duration = false; Event* startEvent = nullptr; Event* endEvent = nullptr; - std::optional duration = c10::nullopt; + std::optional duration = std::nullopt; std::unique_lock guard(mutex_); diff --git a/torch/csrc/distributed/c10d/Types.hpp b/torch/csrc/distributed/c10d/Types.hpp index 669957a72673..7cdb9f62ebbb 100644 --- a/torch/csrc/distributed/c10d/Types.hpp +++ b/torch/csrc/distributed/c10d/Types.hpp @@ -121,7 +121,7 @@ struct BroadcastOptions { struct AllreduceOptions { ReduceOp reduceOp = ReduceOp::SUM; std::chrono::milliseconds timeout = kUnsetTimeout; - std::optional sparseIndices = c10::nullopt; + std::optional sparseIndices = std::nullopt; }; struct AllreduceCoalescedOptions : AllreduceOptions {}; diff --git a/torch/csrc/distributed/c10d/Utils.hpp b/torch/csrc/distributed/c10d/Utils.hpp index a03337e97514..b77a914da4e6 100644 --- a/torch/csrc/distributed/c10d/Utils.hpp +++ b/torch/csrc/distributed/c10d/Utils.hpp @@ -440,7 +440,7 @@ inline at::Tensor newLikeFlat( sizes.insert(sizes.end(), t.sizes().begin(), t.sizes().end()); strides.insert(strides.end(), t.strides().begin(), t.strides().end()); return at::empty_strided( - sizes, strides, t.options().memory_format(c10::nullopt)); + sizes, strides, t.options().memory_format(std::nullopt)); } inline at::Tensor newLikeFlat(std::vector& tensors) { diff --git 
a/torch/csrc/distributed/c10d/Work.hpp b/torch/csrc/distributed/c10d/Work.hpp index d29b83832117..c10e5007b9f5 100644 --- a/torch/csrc/distributed/c10d/Work.hpp +++ b/torch/csrc/distributed/c10d/Work.hpp @@ -51,7 +51,7 @@ class TORCH_API Work : public torch::CustomClassHolder { OpType opType = OpType::UNKNOWN, const char* profilingTitle = nullptr, const std::optional>& inputTensors = - c10::nullopt); + std::nullopt); ~Work() override; diff --git a/torch/csrc/distributed/c10d/init.cpp b/torch/csrc/distributed/c10d/init.cpp index 6f1b28886b98..5145c969a95b 100644 --- a/torch/csrc/distributed/c10d/init.cpp +++ b/torch/csrc/distributed/c10d/init.cpp @@ -1415,7 +1415,7 @@ Example:: bool multiTenant, std::optional masterListenFd, bool useLibUV) { - std::optional numWorkers = c10::nullopt; + std::optional numWorkers = std::nullopt; if (worldSize.has_value() && worldSize.value() > -1) { numWorkers = static_cast(worldSize.value()); } @@ -2648,7 +2648,7 @@ options :class:`~torch.distributed.ProcessGroupNCCL.Options`). py::arg("store"), py::arg("rank"), py::arg("world_size"), - py::arg("buffer_size") = c10::nullopt) + py::arg("buffer_size") = std::nullopt) .def("barrier", &IntraNodeComm::barrier, py::arg("ranks") = py::none()); #ifdef NCCL_HAS_COMM_CTA_CGA diff --git a/torch/csrc/distributed/c10d/intra_node_comm.hpp b/torch/csrc/distributed/c10d/intra_node_comm.hpp index 5d7e2d426d30..b4d70f580da5 100644 --- a/torch/csrc/distributed/c10d/intra_node_comm.hpp +++ b/torch/csrc/distributed/c10d/intra_node_comm.hpp @@ -33,7 +33,7 @@ class TORCH_API IntraNodeComm : public c10::intrusive_ptr_target { c10::intrusive_ptr store, size_t rank, size_t worldSize, - std::optional bufferSize = c10::nullopt); + std::optional bufferSize = std::nullopt); ~IntraNodeComm() override; @@ -65,7 +65,7 @@ class TORCH_API IntraNodeComm : public c10::intrusive_ptr_target { /** * Perform a barrier among the specified ranks. */ - void barrier(std::optional> ranks = c10::nullopt); + void barrier(std::optional> ranks = std::nullopt); at::Tensor getBuffer( size_t rank, diff --git a/torch/csrc/distributed/c10d/logger.cpp b/torch/csrc/distributed/c10d/logger.cpp index 711039bf4859..48f8786842f0 100644 --- a/torch/csrc/distributed/c10d/logger.cpp +++ b/torch/csrc/distributed/c10d/logger.cpp @@ -234,7 +234,7 @@ void Logger::set_event_time( Timer& timer, Timer::Event event) { auto timestamp = timer.getTimestamp(event); - if (timestamp != c10::nullopt) { + if (timestamp != std::nullopt) { // TODO: should we set this as human-readable time instead of unixtime? event_time = *timestamp; } diff --git a/torch/csrc/distributed/c10d/reducer.cpp b/torch/csrc/distributed/c10d/reducer.cpp index 6a2812ab24b9..6c5f7a79ff9f 100644 --- a/torch/csrc/distributed/c10d/reducer.cpp +++ b/torch/csrc/distributed/c10d/reducer.cpp @@ -61,7 +61,7 @@ class CpuTimer : public Timer { // calculate the valid avg_time. // In this case, skip calculating the avg_time and return. 
if (end_time < start_time) { - return c10::nullopt; + return std::nullopt; } return end_time - start_time; } @@ -499,7 +499,7 @@ std::vector Reducer::get_grad_buckets( bucket.lengths, bucket.sizes_vec, variables_for_bucket, - c10::nullopt); + std::nullopt); } return gradBuckets; } @@ -1655,9 +1655,9 @@ void Reducer::finalize_backward() { } } - if (installed_futures_ != c10::nullopt) { + if (installed_futures_ != std::nullopt) { c10::collectAll(*installed_futures_)->wait(); - installed_futures_ = c10::nullopt; + installed_futures_ = std::nullopt; } // See Note [Skip allreducing local_used_maps_dev] diff --git a/torch/csrc/distributed/c10d/reducer.hpp b/torch/csrc/distributed/c10d/reducer.hpp index 1f72b0eb37b9..aa3c40ae95bb 100644 --- a/torch/csrc/distributed/c10d/reducer.hpp +++ b/torch/csrc/distributed/c10d/reducer.hpp @@ -262,9 +262,9 @@ class TORCH_API Reducer { // List of futures installed by Reducer::install_futures that should be // awaited at the end of backwards pass. std::optional>> - installed_futures_{c10::nullopt}; + installed_futures_{std::nullopt}; // Mixed precision parameter dtype for bucket type checking. - std::optional mixed_precision_param_dtype_{c10::nullopt}; + std::optional mixed_precision_param_dtype_{std::nullopt}; // Work handle for allreduce on local_used_map_ c10::intrusive_ptr local_used_work_; @@ -389,7 +389,7 @@ class TORCH_API Reducer { bool expect_sparse_gradient = false; // Sparse indices tensor - std::optional sparse_tensor_indices = c10::nullopt; + std::optional sparse_tensor_indices = std::nullopt; // TODO(@pietern) // Memory copies from gradient tensors into the bucket are potentially diff --git a/torch/csrc/distributed/c10d/reducer_cuda.cpp b/torch/csrc/distributed/c10d/reducer_cuda.cpp index 84bff02072b6..a158e44fc047 100644 --- a/torch/csrc/distributed/c10d/reducer_cuda.cpp +++ b/torch/csrc/distributed/c10d/reducer_cuda.cpp @@ -59,7 +59,7 @@ class CudaTimer : public Timer { // If it is never recorded/created, skip synchronize and calculation. // Otherwise it will throw cuda errors. if (!start_event.isCreated() || !end_event.isCreated()) { - return c10::nullopt; + return std::nullopt; } // set_runtime_stats_and_log is called at the beginning of forward call, // when it is cheap to synchronize the cuda events of previous iteration, @@ -74,7 +74,7 @@ class CudaTimer : public Timer { // calculate the valid avg_time. // In this case, skip calculating the avg_time and return. 
if (milliseconds < 0) { - return c10::nullopt; + return std::nullopt; } return int64_t(milliseconds * kMilliSecondToNanosSecond); } diff --git a/torch/csrc/distributed/c10d/reducer_timer.hpp b/torch/csrc/distributed/c10d/reducer_timer.hpp index f9b9f11c8c96..dbea3958db43 100644 --- a/torch/csrc/distributed/c10d/reducer_timer.hpp +++ b/torch/csrc/distributed/c10d/reducer_timer.hpp @@ -47,7 +47,7 @@ class TORCH_API Timer { std::optional getTimestamp(Event event) { auto time = getTimeRef(event); if (time == kUnsetTime) { - return c10::nullopt; + return std::nullopt; } else { return time; } diff --git a/torch/csrc/distributed/c10d/sequence_num.cpp b/torch/csrc/distributed/c10d/sequence_num.cpp index fd76247199f6..3807d629d830 100644 --- a/torch/csrc/distributed/c10d/sequence_num.cpp +++ b/torch/csrc/distributed/c10d/sequence_num.cpp @@ -10,7 +10,7 @@ SequenceNum::SequenceNum(const uint64_t num) : num_(num) {} SequenceNum::SequenceNum(const SequenceNum& other) { if (!other.isSet()) { - num_ = c10::nullopt; + num_ = std::nullopt; } else { num_ = other.get(); } @@ -23,7 +23,7 @@ uint64_t SequenceNum::get() const { void SequenceNum::increment() { std::lock_guard lock(lock_); - TORCH_CHECK(num_ != c10::nullopt); + TORCH_CHECK(num_ != std::nullopt); num_ = ++(*num_); } @@ -32,7 +32,7 @@ void SequenceNum::increment() { uint64_t SequenceNum::getAndIncrement() { uint64_t curVal = 0; std::lock_guard lock(lock_); - TORCH_CHECK(num_ != c10::nullopt); + TORCH_CHECK(num_ != std::nullopt); curVal = *num_; num_ = ++(*num_); return curVal; @@ -45,13 +45,13 @@ void SequenceNum::set(const uint64_t num) { bool SequenceNum::isSet() const { std::lock_guard lock(lock_); - return num_ != c10::nullopt; + return num_ != std::nullopt; } SequenceNum& SequenceNum::operator=(const SequenceNum& other) { std::lock_guard lock(lock_); if (!other.isSet()) { - num_ = c10::nullopt; + num_ = std::nullopt; } else { num_ = other.get(); } diff --git a/torch/csrc/distributed/c10d/sequence_num.hpp b/torch/csrc/distributed/c10d/sequence_num.hpp index ce31f4b55272..38bd4cb5ed9d 100644 --- a/torch/csrc/distributed/c10d/sequence_num.hpp +++ b/torch/csrc/distributed/c10d/sequence_num.hpp @@ -1,9 +1,9 @@ #pragma once #include -#include #include #include +#include #include namespace c10d { diff --git a/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp b/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp index 3a37e7b02a5f..eb45679873f0 100644 --- a/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp +++ b/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp @@ -10,7 +10,7 @@ namespace rpc { const std::string REMOTE_PROFILING_KEY_PREFIX = "#remote_op: "; constexpr int kAutoIncrementBits = 48; /*static */ thread_local std::optional - RemoteProfilerManager::currentThreadLocalKey_ = c10::nullopt; + RemoteProfilerManager::currentThreadLocalKey_ = std::nullopt; /*static */ RemoteProfilerManager& RemoteProfilerManager::getInstance() { static RemoteProfilerManager* handler = new RemoteProfilerManager(); return *handler; @@ -32,7 +32,7 @@ bool RemoteProfilerManager::isCurrentKeySet() const { } void RemoteProfilerManager::unsetCurrentKey() { - currentThreadLocalKey_ = c10::nullopt; + currentThreadLocalKey_ = std::nullopt; } void RemoteProfilerManager::eraseKey(const ProfilingId& globallyUniqueId) { diff --git a/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.h b/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.h index c6f8b353806b..2889120b67ca 100644 --- 
a/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.h +++ b/torch/csrc/distributed/rpc/profiler/remote_profiler_manager.h @@ -1,8 +1,8 @@ #pragma once -#include #include #include #include +#include #include namespace torch { diff --git a/torch/csrc/distributed/rpc/py_rref.cpp b/torch/csrc/distributed/rpc/py_rref.cpp index ed7847a1f5fa..887f25b6c16d 100644 --- a/torch/csrc/distributed/rpc/py_rref.cpp +++ b/torch/csrc/distributed/rpc/py_rref.cpp @@ -119,7 +119,7 @@ TypePtr tryInferTypeWithTypeHint( /////////////////////////// PyRRef ////////////////////////////////// PyRRef::PyRRef(c10::intrusive_ptr rref) - : rref_(std::move(rref)), profilingFuture_(c10::nullopt) { + : rref_(std::move(rref)), profilingFuture_(std::nullopt) { TORCH_CHECK(rref_, "PyRRef must not wrap nullptr"); C10_LOG_API_USAGE_ONCE("torch.distributed.rref"); } diff --git a/torch/csrc/distributed/rpc/python_functions.cpp b/torch/csrc/distributed/rpc/python_functions.cpp index 57acbc037025..51ee554abda7 100644 --- a/torch/csrc/distributed/rpc/python_functions.cpp +++ b/torch/csrc/distributed/rpc/python_functions.cpp @@ -261,7 +261,7 @@ c10::intrusive_ptr pyRpcTorchscript( functionSchema, argsTuple.cast(), kwargsDict.cast(), - c10::nullopt); + std::nullopt); } DCHECK(!PyGILState_Check()); c10::intrusive_ptr fut = rpcTorchscript( @@ -408,7 +408,7 @@ PyRRef pyRemoteTorchscript( // Acquire GIL for py::args and py::kwargs processing. py::gil_scoped_acquire ag; stack = torch::jit::createStackForSchema( - functionSchema, args, kwargs, c10::nullopt); + functionSchema, args, kwargs, std::nullopt); } DCHECK(!PyGILState_Check()); auto rrefPtr = remoteTorchscript( diff --git a/torch/csrc/distributed/rpc/request_callback_no_python.cpp b/torch/csrc/distributed/rpc/request_callback_no_python.cpp index fb73cf2abf48..3b6b04047c4e 100644 --- a/torch/csrc/distributed/rpc/request_callback_no_python.cpp +++ b/torch/csrc/distributed/rpc/request_callback_no_python.cpp @@ -440,7 +440,7 @@ c10::intrusive_ptr RequestCallbackNoPython:: true /* cleanup TLS state */, false /* consolidate events */); { TLSLegacyProfilerGuard g( - profilingConfig, c10::nullopt, requestThreadOptions); + profilingConfig, std::nullopt, requestThreadOptions); TORCH_INTERNAL_ASSERT( profilerEnabled(), "Expected profiler to be enabled!"); // Kick off processing for nested work and get Future result in diff --git a/torch/csrc/distributed/rpc/rref_impl.h b/torch/csrc/distributed/rpc/rref_impl.h index d6da3f2ea455..507d6bc84658 100644 --- a/torch/csrc/distributed/rpc/rref_impl.h +++ b/torch/csrc/distributed/rpc/rref_impl.h @@ -3,10 +3,10 @@ #include #include #include -#include #include #include #include +#include #include diff --git a/torch/csrc/distributed/rpc/script_call.h b/torch/csrc/distributed/rpc/script_call.h index dacded5cc1e6..5db4adf95f85 100644 --- a/torch/csrc/distributed/rpc/script_call.h +++ b/torch/csrc/distributed/rpc/script_call.h @@ -1,10 +1,10 @@ #pragma once -#include #include #include #include #include +#include #include namespace torch { diff --git a/torch/csrc/distributed/rpc/tensorpipe_cuda.cpp b/torch/csrc/distributed/rpc/tensorpipe_cuda.cpp index 50cc97785f61..8259efeee1f9 100644 --- a/torch/csrc/distributed/rpc/tensorpipe_cuda.cpp +++ b/torch/csrc/distributed/rpc/tensorpipe_cuda.cpp @@ -94,7 +94,7 @@ class TensorpipeCudaConverter : public TensorpipeDeviceTypeConverter { message.tensors.push_back(std::move(tensor)); - return c10::nullopt; + return std::nullopt; } at::DataPtr allocateTensorForReceiving( diff --git 
a/torch/csrc/distributed/rpc/tensorpipe_utils.cpp b/torch/csrc/distributed/rpc/tensorpipe_utils.cpp index 929ae30f8a6d..9d38b5538d55 100644 --- a/torch/csrc/distributed/rpc/tensorpipe_utils.cpp +++ b/torch/csrc/distributed/rpc/tensorpipe_utils.cpp @@ -59,7 +59,7 @@ class TensorpipeCpuConverter : public TensorpipeDeviceTypeConverter { message.tensors.push_back(std::move(tensor)); - return c10::make_optional(std::move(storageData)); + return std::make_optional(std::move(storageData)); } else { tensorpipe::CpuBuffer buffer; buffer.ptr = static_cast(storage.mutable_data()); @@ -70,7 +70,7 @@ class TensorpipeCpuConverter : public TensorpipeDeviceTypeConverter { message.tensors.push_back(std::move(tensor)); - return c10::nullopt; + return std::nullopt; } } diff --git a/torch/csrc/dynamo/python_compiled_autograd.cpp b/torch/csrc/dynamo/python_compiled_autograd.cpp index 2e5cb3bfab02..7913caad5449 100644 --- a/torch/csrc/dynamo/python_compiled_autograd.cpp +++ b/torch/csrc/dynamo/python_compiled_autograd.cpp @@ -591,7 +591,7 @@ CacheNode* _compiled_autograd_impl( if (next.is_valid() && output.defined()) { input_buffers.lookup(next.function.get()) .add( - next.input_nr, std::move(output), c10::nullopt, c10::nullopt); + next.input_nr, std::move(output), std::nullopt, std::nullopt); } } } diff --git a/torch/csrc/functorch/init.cpp b/torch/csrc/functorch/init.cpp index 53da5a634746..b54a7285f635 100644 --- a/torch/csrc/functorch/init.cpp +++ b/torch/csrc/functorch/init.cpp @@ -242,7 +242,7 @@ int64_t _grad_increment_nesting() { // See NOTE [grad and vjp interaction with no_grad] bool prev_grad_mode = c10::GradMode::is_enabled(); return initAndPushDynamicLayer( - TransformType::Grad, c10::nullopt, c10::nullopt, prev_grad_mode); + TransformType::Grad, std::nullopt, std::nullopt, prev_grad_mode); } int64_t _grad_decrement_nesting() { @@ -257,9 +257,9 @@ int64_t _jvp_increment_nesting() { c10::AutogradState::get_tls_state().get_fw_grad_mode(); return initAndPushDynamicLayer( TransformType::Jvp, - c10::nullopt, - c10::nullopt, - c10::nullopt, + std::nullopt, + std::nullopt, + std::nullopt, prev_fwd_grad_mode); } @@ -287,10 +287,10 @@ int64_t _vmap_decrement_nesting() { int64_t _func_increment_nesting(bool reapply_views) { return initAndPushDynamicLayer( TransformType::Functionalize, - c10::nullopt, - c10::nullopt, - c10::nullopt, - c10::nullopt, + std::nullopt, + std::nullopt, + std::nullopt, + std::nullopt, /*functionalize_add_back_views=*/reapply_views); } @@ -528,7 +528,7 @@ void initFuncTorchBindings(PyObject* module) { "get_interpreter_stack", []() -> std::optional> { const auto& stack = getDynamicLayerStack(); if (stack.empty()) { - return c10::nullopt; + return std::nullopt; } std::vector result; result.reserve(stack.size()); @@ -540,7 +540,7 @@ void initFuncTorchBindings(PyObject* module) { m.def("peek_interpreter_stack", []() -> std::optional { const auto& stack = getDynamicLayerStack(); if (stack.empty()) { - return c10::nullopt; + return std::nullopt; } auto result = stack.back().interpreter(); return result; diff --git a/torch/csrc/inductor/aoti_torch/utils.h b/torch/csrc/inductor/aoti_torch/utils.h index 6e7bd355c57c..eca21f6bf348 100644 --- a/torch/csrc/inductor/aoti_torch/utils.h +++ b/torch/csrc/inductor/aoti_torch/utils.h @@ -7,9 +7,9 @@ #include #include #include -#include #include #include +#include #define AOTI_TORCH_CONVERT_EXCEPTION_TO_ERROR_CODE(...) 
\ try { \ @@ -66,41 +66,41 @@ inline void assert_inf_and_nan( // utility functions to convert a pointer to an optional value template inline std::optional pointer_to_optional(T* ptr) { - return ptr ? c10::make_optional(*ptr) : c10::nullopt; + return ptr ? std::make_optional(*ptr) : std::nullopt; } template >> inline std::optional pointer_to_optional(U* ptr) { - return ptr ? c10::make_optional(T(*ptr)) : c10::nullopt; + return ptr ? std::make_optional(T(*ptr)) : std::nullopt; } template <> inline std::optional pointer_to_optional(AtenTensorHandle* ptr) { - return ptr ? c10::make_optional(*tensor_handle_to_tensor_pointer(*ptr)) - : c10::nullopt; + return ptr ? std::make_optional(*tensor_handle_to_tensor_pointer(*ptr)) + : std::nullopt; } template <> inline std::optional pointer_to_optional( const AtenTensorHandle* ptr) { - return ptr ? c10::make_optional(*tensor_handle_to_tensor_pointer(*ptr)) - : c10::nullopt; + return ptr ? std::make_optional(*tensor_handle_to_tensor_pointer(*ptr)) + : std::nullopt; } template <> inline std::optional pointer_to_optional( AtenGeneratorHandle* ptr) { - return ptr ? c10::make_optional(*generator_handle_to_generator_pointer(*ptr)) - : c10::nullopt; + return ptr ? std::make_optional(*generator_handle_to_generator_pointer(*ptr)) + : std::nullopt; } inline std::optional pointer_to_optional_device( int32_t* device_type, int32_t device_index) { - return device_type ? c10::make_optional(c10::Device( + return device_type ? std::make_optional(c10::Device( static_cast(*device_type), static_cast(device_index))) - : c10::nullopt; + : std::nullopt; } // utility functions to convert a pointer to a list @@ -180,8 +180,8 @@ inline std::optional> pointer_to_optional_list( U** ptr, int64_t len) { return ptr - ? c10::make_optional>(pointer_to_list(*ptr, len)) - : c10::nullopt; + ? std::make_optional>(pointer_to_list(*ptr, len)) + : std::nullopt; } } // namespace torch::aot_inductor diff --git a/torch/csrc/jit/api/compilation_unit.h b/torch/csrc/jit/api/compilation_unit.h index 8e28ef4717b9..d1c2c829d660 100644 --- a/torch/csrc/jit/api/compilation_unit.h +++ b/torch/csrc/jit/api/compilation_unit.h @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include @@ -97,7 +97,7 @@ struct TORCH_API CompilationUnit { const Self* self, // see [name mangling] bool shouldMangle = false, - std::optional operator_set_version = c10::nullopt); + std::optional operator_set_version = std::nullopt); void define_hooks( const std::optional& prefix, @@ -293,7 +293,7 @@ struct TORCH_API CompilationUnit { const std::unordered_map& function_table, bool shouldMangle = false, FunctionType type = FunctionType::Method, - std::optional version = c10::nullopt) const; + std::optional version = std::nullopt) const; // Define a property on \p self. 
struct PropertyPair; diff --git a/torch/csrc/jit/api/function_impl.h b/torch/csrc/jit/api/function_impl.h index 6ed8cb36199e..01e7a3c98e30 100644 --- a/torch/csrc/jit/api/function_impl.h +++ b/torch/csrc/jit/api/function_impl.h @@ -13,7 +13,7 @@ struct TORCH_API GraphFunction : public Function { std::shared_ptr graph, std::function function_creator, std::optional executor_execution_mode = - c10::nullopt) + std::nullopt) : name_(std::move(name)), graph_(std::move(graph)), executor_execution_mode_(executor_execution_mode), diff --git a/torch/csrc/jit/api/module.cpp b/torch/csrc/jit/api/module.cpp index 45b99eb8e47a..ae878376bab3 100644 --- a/torch/csrc/jit/api/module.cpp +++ b/torch/csrc/jit/api/module.cpp @@ -158,11 +158,11 @@ void Module::to(at::Device device, at::ScalarType dtype, bool non_blocking) { } void Module::to(at::ScalarType dtype, bool non_blocking) { - to_impl(/*device=*/c10::nullopt, dtype, non_blocking); + to_impl(/*device=*/std::nullopt, dtype, non_blocking); } void Module::to(at::Device device, bool non_blocking) { - to_impl(device, /*dtype=*/c10::nullopt, non_blocking); + to_impl(device, /*dtype=*/std::nullopt, non_blocking); } static void module_state_to( diff --git a/torch/csrc/jit/api/module.h b/torch/csrc/jit/api/module.h index 92b9c96c3a6e..9b2648737b0c 100644 --- a/torch/csrc/jit/api/module.h +++ b/torch/csrc/jit/api/module.h @@ -15,8 +15,8 @@ #include #include #include -#include #include +#include #include #include @@ -238,7 +238,7 @@ struct TORCH_API Module : public Object { Module copy() const; - Module deepcopy(std::optional device = c10::nullopt) const; + Module deepcopy(std::optional device = std::nullopt) const; // Clones both the underlying `ClassType` and the module instance(data), this // function creates a new `ClassType` and returns a new instance that has the @@ -334,7 +334,7 @@ struct TORCH_API Module : public Object { TORCH_API Module freeze( const Module& module, const std::optional>& preserved_attrs = - c10::nullopt, + std::nullopt, bool optimize_numerics = true); // C++ equivalent api of `torch.jit.optimize_for_inference`. 
See documentation @@ -552,7 +552,7 @@ struct slot_list_impl { : module_(std::move(module)), recurse_(recurse), return_module_(return_module), - size_(c10::nullopt) { + size_(std::nullopt) { if (!recurse && !return_module && Policy::all_slots) { size_ = module_.num_slots(); } diff --git a/torch/csrc/jit/api/object.cpp b/torch/csrc/jit/api/object.cpp index b707e7677276..f95d576d6c8c 100644 --- a/torch/csrc/jit/api/object.cpp +++ b/torch/csrc/jit/api/object.cpp @@ -20,7 +20,7 @@ std::optional Object::find_method(const std::string& basename) const { return Method(_ivalue(), fn); } } - return c10::nullopt; + return std::nullopt; } void Object::define(const std::string& src, const ResolverPtr& resolver) { diff --git a/torch/csrc/jit/api/object.h b/torch/csrc/jit/api/object.h index 164f6e2ac073..2c0f7e3b164f 100644 --- a/torch/csrc/jit/api/object.h +++ b/torch/csrc/jit/api/object.h @@ -2,8 +2,8 @@ #include #include -#include #include +#include #include @@ -129,7 +129,7 @@ struct TORCH_API Object { const Property get_property(const std::string& name) const { for (const auto& prop : type()->properties()) { if (prop.name == name) { - std::optional setter = c10::nullopt; + std::optional setter = std::nullopt; if (prop.setter) { setter = Method(_ivalue(), prop.setter); } @@ -142,7 +142,7 @@ struct TORCH_API Object { const std::vector get_properties() const { return c10::fmap(type()->properties(), [&](ClassType::Property prop) { - std::optional setter = c10::nullopt; + std::optional setter = std::nullopt; if (prop.setter) { setter = Method(_ivalue(), prop.setter); } diff --git a/torch/csrc/jit/codegen/fuser/compiler.cpp b/torch/csrc/jit/codegen/fuser/compiler.cpp index b4bc3e8f4727..7e03b576d121 100644 --- a/torch/csrc/jit/codegen/fuser/compiler.cpp +++ b/torch/csrc/jit/codegen/fuser/compiler.cpp @@ -231,7 +231,7 @@ std::shared_ptr compileKernel( size_t input_index = 0; for (const auto& p : graph->inputs()) { if (p->type()->isSubtypeOf(*FloatType::get())) { - flat_inputs.emplace_back(p, c10::nullopt); + flat_inputs.emplace_back(p, std::nullopt); } if (!p->type()->isSubtypeOf(*TensorType::get())) { continue; diff --git a/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp b/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp index 5f692d50e6b5..db9d57a679cb 100644 --- a/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp +++ b/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp @@ -3,9 +3,9 @@ #include #include #include -#include #include #include +#include #include #include @@ -65,7 +65,7 @@ std::optional exec(const std::wstring& cmd) { std::unique_ptr pipe( _wpopen(cmd.c_str(), L"r"), _pclose); if (!pipe) { - return c10::nullopt; + return std::nullopt; } while (fgetws(buffer.data(), static_cast(buffer.size()), pipe.get()) != nullptr) { diff --git a/torch/csrc/jit/codegen/fuser/executor.cpp b/torch/csrc/jit/codegen/fuser/executor.cpp index 8abb99283ffc..411dbe62a2e1 100644 --- a/torch/csrc/jit/codegen/fuser/executor.cpp +++ b/torch/csrc/jit/codegen/fuser/executor.cpp @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include @@ -12,6 +11,7 @@ #include #include #include +#include #include #include // TODO: remove, debugging only @@ -44,7 +44,7 @@ static std::optional> getMapSize( try { map_size = at::infer_size(map_size, arg.sizes()); } catch (...) 
{ - return c10::nullopt; + return std::nullopt; } } else { auto tensor_sizes = arg.sizes().vec(); @@ -52,13 +52,13 @@ static std::optional> getMapSize( const auto dim = at::maybe_wrap_dim(chunk_desc.dim(), tensor_sizes.size()); if (tensor_sizes[dim] % num_chunks != 0) { - return c10::nullopt; + return std::nullopt; } tensor_sizes[dim] /= num_chunks; try { map_size = at::infer_size(map_size, tensor_sizes); } catch (...) { - return c10::nullopt; + return std::nullopt; } } } @@ -83,12 +83,12 @@ static std::optional> canRunKernel( if (!map_size) { map_size = getMapSize(spec, args, broadcast_group); if (!map_size) - return c10::nullopt; + return std::nullopt; } else { const auto group_map_size = getMapSize(spec, args, broadcast_group); // Note: this checks that group_map_size is defined AND equal to map_size if (map_size != group_map_size) - return c10::nullopt; + return std::nullopt; } } diff --git a/torch/csrc/jit/codegen/fuser/kernel_spec.h b/torch/csrc/jit/codegen/fuser/kernel_spec.h index 2fc52f2d76f0..eacdbc7ec3f3 100644 --- a/torch/csrc/jit/codegen/fuser/kernel_spec.h +++ b/torch/csrc/jit/codegen/fuser/kernel_spec.h @@ -2,13 +2,13 @@ #include #include -#include #include #include #include #include #include #include +#include #include #include @@ -122,7 +122,7 @@ struct TORCH_API KernelSpec { std::lock_guard guard{mutex_}; const auto it = kernels_.find(arg_spec); if (it == kernels_.end()) - return c10::nullopt; + return std::nullopt; return it->second; } void cacheKernel(const ArgSpec& arg_spec, std::shared_ptr kernel) diff --git a/torch/csrc/jit/codegen/onednn/graph_helper.cpp b/torch/csrc/jit/codegen/onednn/graph_helper.cpp index 16484dd4653c..30f32f5994c1 100644 --- a/torch/csrc/jit/codegen/onednn/graph_helper.cpp +++ b/torch/csrc/jit/codegen/onednn/graph_helper.cpp @@ -26,7 +26,7 @@ static std::optional getDimensions(Value* v) { if (v->type()->isSubtypeOf(TensorType::get())) { return v->type()->cast()->sizes().size(); } else { - return c10::nullopt; + return std::nullopt; } } diff --git a/torch/csrc/jit/codegen/onednn/graph_rewriter.cpp b/torch/csrc/jit/codegen/onednn/graph_rewriter.cpp index dfbfe467e976..71e745016569 100644 --- a/torch/csrc/jit/codegen/onednn/graph_rewriter.cpp +++ b/torch/csrc/jit/codegen/onednn/graph_rewriter.cpp @@ -132,7 +132,7 @@ std::optional GraphRewriter::tryMerge(Node* consumer, Node* producer) { bool canMerge = llgaHelper_.shouldMerge(producer, consumer) && aliasDb_.moveBeforeTopologicallyValid(producer, consumer); if (!canMerge) { - return c10::nullopt; + return std::nullopt; } llgaHelper_.mergeNodeIntoSubgraph(producer, consumer, aliasDb_); return consumer; diff --git a/torch/csrc/jit/codegen/onednn/prepare_binary.cpp b/torch/csrc/jit/codegen/onednn/prepare_binary.cpp index a4f6d268694e..d09b5777f973 100644 --- a/torch/csrc/jit/codegen/onednn/prepare_binary.cpp +++ b/torch/csrc/jit/codegen/onednn/prepare_binary.cpp @@ -69,7 +69,7 @@ static void handleBinaryOpInputs(Node* node) { auto second_input_typeptr = node->input(1)->type()->expect(); std::optional second_input_type = second_input_typeptr->scalarType(); - if (second_input_type != c10::nullopt) { + if (second_input_type != std::nullopt) { // dtype of the second tensor might not be available in the IR auto dtypeOfSecondInput = second_input_type.value(); if (dtypeOfFirstInput != dtypeOfSecondInput) { diff --git a/torch/csrc/jit/cuda/cuda.h b/torch/csrc/jit/cuda/cuda.h index 80b2e2a82f78..edac94a7357b 100644 --- a/torch/csrc/jit/cuda/cuda.h +++ b/torch/csrc/jit/cuda/cuda.h @@ -15,7 +15,7 @@ class 
CUDAStream final : public CustomClassHolder { public: // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) CUDAStream( - std::optional device = c10::nullopt, + std::optional device = std::nullopt, int64_t priority = 0) { c10::DeviceIndex device_index = device.has_value() ? device->index() : c10::cuda::current_device(); @@ -157,7 +157,7 @@ TORCH_LIBRARY(cuda, m) { auto stream_class = m.class_("Stream").def( torch::init, int64_t>(), "", - {torch::arg("device") = c10::nullopt, torch::arg("priority") = 0}); + {torch::arg("device") = std::nullopt, torch::arg("priority") = 0}); auto event_class = m.class_("Event").def( torch::init(), "", diff --git a/torch/csrc/jit/frontend/builtin_functions.cpp b/torch/csrc/jit/frontend/builtin_functions.cpp index c1c1d87176b7..2b3bdc42e4cc 100644 --- a/torch/csrc/jit/frontend/builtin_functions.cpp +++ b/torch/csrc/jit/frontend/builtin_functions.cpp @@ -121,7 +121,7 @@ struct BuiltinFunctionRegistry { void loadSource(const std::string& source, const std::string& the_namespace) { std::shared_ptr cu = std::make_shared(); modules.emplace_back(cu); - cu->define(c10::nullopt, source, nativeResolver(), /*self=*/nullptr); + cu->define(std::nullopt, source, nativeResolver(), /*self=*/nullptr); for (auto& method : cu->get_functions()) { builtins_by_name_[Symbol::fromQualString( the_namespace + "::" + method->name())] diff --git a/torch/csrc/jit/frontend/canonicalize_modified_loop.cpp b/torch/csrc/jit/frontend/canonicalize_modified_loop.cpp index 943551e80692..f2ef8b0e953c 100644 --- a/torch/csrc/jit/frontend/canonicalize_modified_loop.cpp +++ b/torch/csrc/jit/frontend/canonicalize_modified_loop.cpp @@ -28,7 +28,7 @@ static void canonicalizeModifiedLoop(Node* n) { g->insertConstant(std::numeric_limits::max())); auto inp_condition = toIValue(loop.inputCond()); - if (inp_condition == c10::nullopt || inp_condition->toBool() == false) { + if (inp_condition == std::nullopt || inp_condition->toBool() == false) { condition = g->insert(aten::__and__, {condition, loop.inputCond()}); } loop.replaceInputCondition(condition); diff --git a/torch/csrc/jit/frontend/concrete_module_type.cpp b/torch/csrc/jit/frontend/concrete_module_type.cpp index c15116ac3e24..cfdef51afc31 100644 --- a/torch/csrc/jit/frontend/concrete_module_type.cpp +++ b/torch/csrc/jit/frontend/concrete_module_type.cpp @@ -151,7 +151,7 @@ TypePtr ConcreteModuleType::getJitType() const { std::optional ConcreteModuleType::getPyClass() const { if (!data_.pyClass_) { - return c10::nullopt; + return std::nullopt; } return data_.pyClass_; } @@ -162,7 +162,7 @@ std::optional> ConcreteModuleType::findOverloads( if (it != data_.overloads_.end()) { return it->second; } - return c10::nullopt; + return std::nullopt; } std::optional ConcreteModuleType::findFunctionAttribute( @@ -171,7 +171,7 @@ std::optional ConcreteModuleType::findFunctionAttribute( if (it != data_.functionAttributes_.end()) { return it->second.function_->function(); } - return c10::nullopt; + return std::nullopt; } std::optional ConcreteModuleType::findBuiltinFunction( @@ -180,7 +180,7 @@ std::optional ConcreteModuleType::findBuiltinFunction( if (it != data_.builtinFunctions_.end()) { return it->second; } - return c10::nullopt; + return std::nullopt; } std::optional ConcreteModuleType::findFailedAttribute( @@ -189,7 +189,7 @@ std::optional ConcreteModuleType::findFailedAttribute( if (it != data_.failedAttributes_.end()) { return it->second; } - return c10::nullopt; + return std::nullopt; } bool ConcreteModuleType::isIgnoredAttribute(const std::string& name) 
const { diff --git a/torch/csrc/jit/frontend/function_schema_parser.cpp b/torch/csrc/jit/frontend/function_schema_parser.cpp index ba86a891d31d..00ccce8567fb 100644 --- a/torch/csrc/jit/frontend/function_schema_parser.cpp +++ b/torch/csrc/jit/frontend/function_schema_parser.cpp @@ -2,10 +2,10 @@ #include #include -#include #include #include #include +#include #include #include @@ -25,7 +25,7 @@ struct SchemaParser { explicit SchemaParser(const std::string& str, bool allow_typevars) : L(std::make_shared( c10::string_view(str), - c10::nullopt, + std::nullopt, 0, nullptr, Source::DONT_COPY)), diff --git a/torch/csrc/jit/frontend/ir_emitter.cpp b/torch/csrc/jit/frontend/ir_emitter.cpp index 350305b83567..788483aef224 100644 --- a/torch/csrc/jit/frontend/ir_emitter.cpp +++ b/torch/csrc/jit/frontend/ir_emitter.cpp @@ -32,8 +32,8 @@ #include -#include #include +#include #include #include @@ -292,7 +292,7 @@ struct Environment { if (msg != runner->error_messages.end()) { return msg->second(); } else { - return c10::nullopt; + return std::nullopt; } } @@ -1267,7 +1267,7 @@ struct to_ir { {}); auto refinements = RefinementSet(findIsNoneRefinements( cond_op.lhs(), lhs_val, cond_op.rhs(), rhs_val, expr.kind())); - return CondValue(cond_value, refinements, c10::nullopt); + return CondValue(cond_value, refinements, std::nullopt); } } break; default: { @@ -1294,7 +1294,7 @@ struct to_ir { } } auto expr_out = emitToBool(expr.range(), emitExpr(expr)); - std::optional static_if = c10::nullopt; + std::optional static_if = std::nullopt; auto kind = expr_out->node()->kind(); if (kind == aten::is_scripting) { static_if = true; @@ -2291,7 +2291,7 @@ struct to_ir { Value* result = graph->insertNode(graph->createIsInstance(lhs_val, rhs_types)) ->output(); - return CondValue(result, std::move(refinement), c10::nullopt); + return CondValue(result, std::move(refinement), std::nullopt); } void emitIf(const If& stmt) { @@ -2752,7 +2752,7 @@ struct to_ir { getAugOp(stmt, lhs->type()), /*args=*/{lhs, rhs}, /*kwargs=*/{}, - /*self=*/c10::nullopt); + /*self=*/std::nullopt); } } @@ -2968,7 +2968,7 @@ struct to_ir { auto outputs = rhs_output->asTuple( rhs_loc, method, - starred_unpack ? c10::nullopt : std::optional{n_binders}); + starred_unpack ? std::nullopt : std::optional{n_binders}); if (outputs.size() < n_binders) { throw ErrorReport(tl) << "need " << (starred_unpack ? "at least " : "") << n_binders @@ -4796,11 +4796,11 @@ struct to_ir { tuple_args.reserve(3); start ? tuple_args.emplace_back(start) - : tuple_args.emplace_back(c10::nullopt); + : tuple_args.emplace_back(std::nullopt); end ? tuple_args.emplace_back(end) - : tuple_args.emplace_back(c10::nullopt); + : tuple_args.emplace_back(std::nullopt); step ? 
tuple_args.emplace_back(step) - : tuple_args.emplace_back(c10::nullopt); + : tuple_args.emplace_back(std::nullopt); return emitTupleSlice(loc, args[0], tuple_args); } @@ -4886,7 +4886,7 @@ struct to_ir { }; std::vector dims(subscript_exprs.size()); std::vector> exprs( - subscript_exprs.size(), c10::nullopt); + subscript_exprs.size(), std::nullopt); auto handle_indexing = [&](const Expr& subscript_expr, int expr_idx, @@ -5231,7 +5231,7 @@ struct to_ir { val_range, "begin", emitExpr(Expr(slice.start().get()))); tuple_args.emplace_back(begin); } else { - tuple_args.emplace_back(c10::nullopt); + tuple_args.emplace_back(std::nullopt); } if (slice.end().present()) { @@ -5239,7 +5239,7 @@ struct to_ir { NamedValue(val_range, "end", emitExpr(Expr(slice.end().get()))); tuple_args.emplace_back(end); } else { - tuple_args.emplace_back(c10::nullopt); + tuple_args.emplace_back(std::nullopt); } if (slice.step().present()) { @@ -5247,7 +5247,7 @@ struct to_ir { NamedValue(val_range, "step", emitExpr(Expr(slice.step().get()))); tuple_args.emplace_back(step); } else { - tuple_args.emplace_back(c10::nullopt); + tuple_args.emplace_back(std::nullopt); } auto tupleSliceValue = emitTupleSlice(val_range, s_tuple_val, tuple_args); @@ -5327,7 +5327,7 @@ struct FunctionResolver : public Resolver { CompilationUnit::CompilationUnit(const std::string& source) : CompilationUnit() { // calls the define with native resolver to generate the graph for functions - define(c10::nullopt, source, nativeResolver(), nullptr); + define(std::nullopt, source, nativeResolver(), nullptr); } // This pair represents a pair of functions (getter and setter) obtained from diff --git a/torch/csrc/jit/frontend/parse_string_literal.h b/torch/csrc/jit/frontend/parse_string_literal.h index 5b924864bebd..13bbbf89cc34 100644 --- a/torch/csrc/jit/frontend/parse_string_literal.h +++ b/torch/csrc/jit/frontend/parse_string_literal.h @@ -1,7 +1,7 @@ #pragma once -#include #include #include +#include namespace torch { namespace jit { @@ -15,17 +15,17 @@ inline bool isCharCount(char c, const std::string& str, size_t start, int len) { inline std::optional parseOctal(const std::string& str, size_t pos) { //\xxx where x are 0-7 if (pos + 3 >= str.size()) - return c10::nullopt; + return std::nullopt; size_t c = 0; for (size_t i = 1, b = 64; i < 4; ++i, b /= 8) { // NOLINTNEXTLINE(bugprone-signed-char-misuse) int d = str[pos + i]; if (d < '0' || d > '7') - return c10::nullopt; + return std::nullopt; c += b * (d - '0'); } if (c >= 256) - return c10::nullopt; + return std::nullopt; return c; } diff --git a/torch/csrc/jit/frontend/parser.cpp b/torch/csrc/jit/frontend/parser.cpp index ae2c98028e07..5bf6144d8c7d 100644 --- a/torch/csrc/jit/frontend/parser.cpp +++ b/torch/csrc/jit/frontend/parser.cpp @@ -1,10 +1,10 @@ #include -#include #include #include #include #include +#include namespace torch::jit { @@ -241,7 +241,7 @@ struct ParserImpl { return create_compound('=', r, {}); // no reduction } break; default: - return c10::nullopt; + return std::nullopt; } } TreeRef parseTrinary( diff --git a/torch/csrc/jit/frontend/schema_matching.cpp b/torch/csrc/jit/frontend/schema_matching.cpp index 87ec9992141d..a91f204a404c 100644 --- a/torch/csrc/jit/frontend/schema_matching.cpp +++ b/torch/csrc/jit/frontend/schema_matching.cpp @@ -3,7 +3,6 @@ #include #include #include -#include #include #include #include @@ -13,6 +12,7 @@ #include #include #include +#include namespace torch::jit { @@ -261,7 +261,7 @@ std::optional findInputWithName( return i; } } - return
c10::nullopt; + return std::nullopt; } /// Creates a list with the provided values if each value's type can be matched @@ -364,7 +364,7 @@ static std::optional tryMatchSchema( std::ostream* failure_messages, bool allow_conversions) { if (isBlockListedSchema(schema)) { - return c10::nullopt; + return std::nullopt; } auto err = [&]() -> std::ostream& { @@ -392,7 +392,7 @@ static std::optional tryMatchSchema( std::optional actual_named_value; if (arg.name() == "self" && self) { actual_named_value = self; - self = c10::nullopt; + self = std::nullopt; } else if (!arg.kwarg_only() && used_args < args.size()) { // Try to convert all the remaining non-kwarg arguments (used_args) to a // list. Allow zeros(IntArrayRef sizes) to work with zeros(1, 2) or @@ -417,7 +417,7 @@ static std::optional tryMatchSchema( allow_conversions, type_env); if (!list) { - return c10::nullopt; + return std::nullopt; } used_args = args.size(); positional_inputs.push_back(list); @@ -437,7 +437,7 @@ static std::optional tryMatchSchema( err() << "Argument " << nv.name() << " specified twice in schema, submit a bug report!\n"; } - return c10::nullopt; + return std::nullopt; } used_kwarg[*kwarg_idx] = true; actual_named_value = nv; @@ -450,7 +450,7 @@ static std::optional tryMatchSchema( err() << "Argument " << schema.arguments()[schema_i].name() << " not provided.\n"; } - return c10::nullopt; + return std::nullopt; } // Make sure the actual_named_value found matches the type of arg @@ -464,16 +464,16 @@ static std::optional tryMatchSchema( allow_conversions, type_env); if (!positional) { - return c10::nullopt; + return std::nullopt; } positional_inputs.push_back(positional); } // check for unused self argument - if (self != c10::nullopt) { + if (self != std::nullopt) { if (failure_messages) { err() << "Provided self argument not used in schema.\n"; } - return c10::nullopt; + return std::nullopt; } if (schema.is_vararg()) { @@ -488,7 +488,7 @@ static std::optional tryMatchSchema( err() << "Expected at most " << used_args << " arguments " << "but found " << args.size() << " positional arguments.\n"; } - return c10::nullopt; + return std::nullopt; } // check for unused kwargs for (const auto i : c10::irange(kwargs.size())) { @@ -501,7 +501,7 @@ static std::optional tryMatchSchema( err() << "Keyword argument " << nv.name() << " specified twice.\n"; } } - return c10::nullopt; + return std::nullopt; } } @@ -518,7 +518,7 @@ static std::optional tryMatchSchema( std::all_of(returns.begin(), returns.end(), [&](const Argument& r) { return r.name().length() > 0; }); - c10::OptNameList return_field_names = c10::nullopt; + c10::OptNameList return_field_names = std::nullopt; if (return_has_field_names) { return_field_names = fmap(returns, [&](const Argument& r) { return r.name(); }); @@ -633,7 +633,7 @@ static Value* packOutputs( if (field_names) { auto types = fmap(values, [](Value* v) { return v->type(); }); named_tuple = - TupleType::createNamed(c10::nullopt, field_names.value(), types); + TupleType::createNamed(std::nullopt, field_names.value(), types); } return g.insertNode(g.createTuple(values, named_tuple))->output(); } diff --git a/torch/csrc/jit/frontend/schema_matching.h b/torch/csrc/jit/frontend/schema_matching.h index 0c69df521df6..8a24863cbe71 100644 --- a/torch/csrc/jit/frontend/schema_matching.h +++ b/torch/csrc/jit/frontend/schema_matching.h @@ -10,7 +10,7 @@ namespace jit { // Try to match a list of inputs and keyword 'attributes' to this // schema. 
Return the flat list of positional inputs to the call or -// `c10::nullopt` on failure (`failure_messages` contains a good error +// `std::nullopt` on failure (`failure_messages` contains a good error // report in this case) struct MatchedSchema { @@ -28,7 +28,7 @@ TORCH_API MatchedSchema matchSchema( Graph& graph, at::ArrayRef args, at::ArrayRef kwargs, - const std::optional& self = c10::nullopt); + const std::optional& self = std::nullopt); TORCH_API std::pair matchSchemas( const std::vector& schemas, @@ -36,7 +36,7 @@ TORCH_API std::pair matchSchemas( Graph& graph, at::ArrayRef args, at::ArrayRef kwargs, - const std::optional& self = c10::nullopt, + const std::optional& self = std::nullopt, bool render_errors = false); TORCH_API bool convertibleToList( @@ -51,7 +51,7 @@ TORCH_API Value* emitBuiltinCall( Symbol name, at::ArrayRef args, at::ArrayRef kwargs, - const std::optional& self = c10::nullopt); + const std::optional& self = std::nullopt); TORCH_API std::optional findInputWithName( const std::string& name, diff --git a/torch/csrc/jit/frontend/schema_type_parser.cpp b/torch/csrc/jit/frontend/schema_type_parser.cpp index 2adacb976a04..f7bc4a04cb6c 100644 --- a/torch/csrc/jit/frontend/schema_type_parser.cpp +++ b/torch/csrc/jit/frontend/schema_type_parser.cpp @@ -155,7 +155,7 @@ std::optional SchemaTypeParser::parseAliasAnnotation() { Symbol::fromQualString("alias::$" + std::to_string(next_id++))); alias_info.setIsWrite(true); } else { - return c10::nullopt; + return std::nullopt; } return alias_info; @@ -172,7 +172,7 @@ std::optional SchemaTypeParser::parseTensorDType( if (type != type_map.end()) { return type->second; } - return c10::nullopt; + return std::nullopt; } std::optional SchemaTypeParser::tryToParseDeviceType() { @@ -297,7 +297,7 @@ TypePtr SchemaTypeParser::parseRefinedTensor() { // Parsing ranks, supports mix of sized and unsized ranks, or, just strided // ranks if (L.cur().kind == '*') { - dims.emplace_back(c10::nullopt); + dims.emplace_back(std::nullopt); L.next(); if (L.cur().kind == ':') { throw ErrorReport(L.cur()) << "Strides for unsized ranks not supported"; diff --git a/torch/csrc/jit/frontend/script_type_parser.cpp b/torch/csrc/jit/frontend/script_type_parser.cpp index 9295a3ed4007..db21737f4c4b 100644 --- a/torch/csrc/jit/frontend/script_type_parser.cpp +++ b/torch/csrc/jit/frontend/script_type_parser.cpp @@ -137,10 +137,10 @@ std::optional> ScriptTypeParser::parseBroadcastList( } if (expr.kind() != TK_SUBSCRIPT) - return c10::nullopt; + return std::nullopt; auto subscript = Subscript(expr); if (subscript.value().kind() != TK_VAR) - return c10::nullopt; + return std::nullopt; auto var = Var(subscript.value()); auto subscript_exprs = subscript.subscript_exprs(); @@ -151,10 +151,10 @@ std::optional> ScriptTypeParser::parseBroadcastList( TypePtr opt_type = OptionalType::create(broadcast_list->first); return std::pair(opt_type, broadcast_list->second); } else { - return c10::nullopt; + return std::nullopt; } } else if (var.name().name().find("BroadcastingList") != 0) { - return c10::nullopt; + return std::nullopt; } if (subscript_exprs.size() != 1) @@ -352,7 +352,7 @@ std::vector ScriptTypeParser::evaluateDefaults( CompilationUnit cu; cu.define( - c10::nullopt, + std::nullopt, /*properties=*/{}, /*propResolvers=*/{}, {def}, @@ -407,7 +407,7 @@ std::vector ScriptTypeParser::parseArgsFromDecl( auto decl_arg = *it; TypePtr type; - std::optional N = c10::nullopt; + std::optional N = std::nullopt; if (!decl_arg.type().present()) { // If this param doesn't have a type, 
default to "tensor" type = TensorType::getInferred(); @@ -421,7 +421,7 @@ std::vector ScriptTypeParser::parseArgsFromDecl( type = parseTypeFromExpr(decl_arg.type().get()); } } - std::optional default_value = c10::nullopt; + std::optional default_value = std::nullopt; if (decl_arg.defaultValue().present()) { default_value = *defaults_it++; } @@ -431,7 +431,7 @@ std::vector ScriptTypeParser::parseArgsFromDecl( N, default_value, decl_arg.kwarg_only(), - /*alias_info=*/c10::nullopt); + /*alias_info=*/std::nullopt); retval.push_back(arg); } return retval; @@ -455,8 +455,8 @@ std::vector ScriptTypeParser::parseReturnFromDecl(const Decl& decl) { return {Argument( "", parsed_type, - /*N =*/c10::nullopt, - /*default_value =*/c10::nullopt, + /*N =*/std::nullopt, + /*default_value =*/std::nullopt, /*kwarg_only =*/false)}; } FunctionSchema ScriptTypeParser::parseSchemaFromDef( diff --git a/torch/csrc/jit/frontend/source_range.cpp b/torch/csrc/jit/frontend/source_range.cpp index 20ffbfd4601e..b1dfecbbf641 100644 --- a/torch/csrc/jit/frontend/source_range.cpp +++ b/torch/csrc/jit/frontend/source_range.cpp @@ -154,7 +154,7 @@ size_t SourceRangeHasher::operator()(const torch::jit::SourceRange& key) const { std::optional Source::findSourceRangeThatGenerated( const SourceRange& range) { if (!gen_ranges_) { - return c10::nullopt; + return std::nullopt; } return gen_ranges_->findSourceRangeThatGenerated(range); } diff --git a/torch/csrc/jit/frontend/source_range.h b/torch/csrc/jit/frontend/source_range.h index 1f8715ad0096..a8f22a800b02 100644 --- a/torch/csrc/jit/frontend/source_range.h +++ b/torch/csrc/jit/frontend/source_range.h @@ -1,6 +1,6 @@ #pragma once #include -#include +#include #include #include @@ -190,7 +190,7 @@ struct TORCH_API Source { explicit Source( c10::string_view text_view, - std::optional filename = c10::nullopt, + std::optional filename = std::nullopt, size_t starting_line_no = 0, std::shared_ptr gen_ranges = nullptr, CopiesString copies_str = COPIES_STRING) @@ -210,7 +210,7 @@ struct TORCH_API Source { explicit Source( StringCordView str, - std::optional filename = c10::nullopt, + std::optional filename = std::nullopt, size_t starting_line_no = 0, std::shared_ptr gen_ranges = nullptr) : text_view_(std::move(str)), @@ -360,7 +360,7 @@ struct TORCH_API SourceRange { std::optional> file_line_col() const { if (!source_view_ || !source()->filename()) { - return c10::nullopt; + return std::nullopt; } auto lineno = source_view_->lineno_for_offset(start_); @@ -383,7 +383,7 @@ struct TORCH_API SourceRange { std::optional findSourceRangeThatGenerated() const { if (!source_view_) { - return c10::nullopt; + return std::nullopt; } return source_view_->findSourceRangeThatGenerated(*this); } diff --git a/torch/csrc/jit/frontend/sugared_value.cpp b/torch/csrc/jit/frontend/sugared_value.cpp index 4b65903529d2..94a11b21b1f2 100644 --- a/torch/csrc/jit/frontend/sugared_value.cpp +++ b/torch/csrc/jit/frontend/sugared_value.cpp @@ -658,7 +658,7 @@ void IterableTree::addChild( // iterables run for the minimum length of all its leaves unroll_length_ = std::min(*child_len, *unroll_length_); } else { - unroll_length_ = c10::nullopt; + unroll_length_ = std::nullopt; } } children_.push_back(iter_value); diff --git a/torch/csrc/jit/frontend/sugared_value.h b/torch/csrc/jit/frontend/sugared_value.h index 97b092cad3ce..1ca59ced6e68 100644 --- a/torch/csrc/jit/frontend/sugared_value.h +++ b/torch/csrc/jit/frontend/sugared_value.h @@ -1,7 +1,7 @@ #pragma once -#include #include #include +#include #include #include 
@@ -122,13 +122,13 @@ struct TORCH_API SugaredValue // to support containers of Heterogenous types, like Module Containers & // Tuples virtual std::optional staticLen() { - return c10::nullopt; + return std::nullopt; } // When iterating over this SugaredValue, should we emit the for loop as an // unrolled loop. bool shouldEmitUnrolled() { - return staticLen() != c10::nullopt; + return staticLen() != std::nullopt; } // return length of this thing, if not then it can't be iterated. @@ -323,7 +323,7 @@ struct TORCH_API BuiltinModule : public SugaredValue { } auto sym = Symbol::fromQualString(name + "::" + field); - return std::make_shared(sym, c10::nullopt); + return std::make_shared(sym, std::nullopt); } private: @@ -506,7 +506,7 @@ struct TORCH_API PrintValue : public SugaredValue { // is a noop when the input is a subtype of 'type' struct TORCH_API CastValue : public BuiltinFunction { CastValue(TypePtr type, c10::Symbol method) - : BuiltinFunction(method, c10::nullopt), type_(std::move(type)) {} + : BuiltinFunction(method, std::nullopt), type_(std::move(type)) {} std::shared_ptr call( const SourceRange& loc, GraphFunction& m, @@ -638,7 +638,7 @@ struct TORCH_API RangeValue : SugaredValue { const SourceRange& loc, GraphFunction& m, std::vector input, - std::optional static_len = c10::nullopt); + std::optional static_len = std::nullopt); std::string kind() const override { return "range"; @@ -730,7 +730,7 @@ struct TORCH_API IterableTree : SugaredValue { TypePtr type_hint = nullptr) override; private: - std::optional unroll_length_ = c10::nullopt; + std::optional unroll_length_ = std::nullopt; std::vector children_; }; diff --git a/torch/csrc/jit/frontend/tracer.cpp b/torch/csrc/jit/frontend/tracer.cpp index 9616e0f83dfb..a90d5bb897f4 100644 --- a/torch/csrc/jit/frontend/tracer.cpp +++ b/torch/csrc/jit/frontend/tracer.cpp @@ -818,8 +818,8 @@ void addInputs(Node* n, const char* name, std::optional value) { n, name, value.has_value() - ? c10::make_optional(value->guard_int(__FILE__, __LINE__)) - : c10::nullopt); + ? 
std::make_optional(value->guard_int(__FILE__, __LINE__)) + : std::nullopt); } void addInputs( diff --git a/torch/csrc/jit/ir/alias_analysis.cpp b/torch/csrc/jit/ir/alias_analysis.cpp index f9b2ed5dd7ce..6f674f30b90f 100644 --- a/torch/csrc/jit/ir/alias_analysis.cpp +++ b/torch/csrc/jit/ir/alias_analysis.cpp @@ -105,7 +105,7 @@ class MutableTypePtrHelper { } } if (mutable_types.empty()) { - return c10::nullopt; + return std::nullopt; } return mutable_types; } @@ -121,7 +121,7 @@ class MutableTypePtrHelper { return {AliasTypeSet{ FutureType::create(*toSingleType(*maybe_mut_types))}}; } - return c10::nullopt; + return std::nullopt; } case TypeKind::AwaitType: { if (auto maybe_mut_types = mapTypeToAliasTypeSet( @@ -129,7 +129,7 @@ class MutableTypePtrHelper { return { AliasTypeSet{AwaitType::create(*toSingleType(*maybe_mut_types))}}; } - return c10::nullopt; + return std::nullopt; } case TypeKind::TupleType: { std::vector mutable_types; @@ -142,12 +142,12 @@ class MutableTypePtrHelper { } } if (mutable_types.empty()) { - return c10::nullopt; + return std::nullopt; } return {AliasTypeSet{TupleType::create(mutable_types)}}; } default: - return c10::nullopt; + return std::nullopt; } } ska::flat_hash_map* mutable_type_cache_; @@ -1896,7 +1896,7 @@ bool AliasDb::mayAliasWildcard(const at::ArrayRef vs) const { std::optional AliasDb::tryGetOrCreateWildcard(const TypePtr& type) { auto maybe_mut_types = mapTypeToAliasTypeSetPtr(type); if (!maybe_mut_types) { - return c10::nullopt; + return std::nullopt; } auto mut_type = toSingleType(*maybe_mut_types); auto existing_wildcard = wildcardIndex_.find(*mut_type); @@ -1970,7 +1970,7 @@ std::optional AliasDb::setWildcard(const Value* v) { std::optional maybe_wildcardElement = tryGetOrCreateWildcard(v->type()); if (!maybe_wildcardElement) { - return c10::nullopt; + return std::nullopt; } // Ensure that we create a corresponding Element for `v` still, as it is an // invariant that all mutable values have an Element diff --git a/torch/csrc/jit/ir/constants.cpp b/torch/csrc/jit/ir/constants.cpp index ef697a5af768..a0f8c8760a13 100644 --- a/torch/csrc/jit/ir/constants.cpp +++ b/torch/csrc/jit/ir/constants.cpp @@ -69,7 +69,7 @@ std::optional tryInsertConstant( at::Tensor ref = val.toTensor(); if (!insertableTensor(val.toTensor())) { n->destroy(); - return c10::nullopt; + return std::nullopt; } if (!ref.defined()) { n->destroy(); @@ -99,7 +99,7 @@ std::optional tryInsertConstant( n->output()->setType(val.type()); } else { n->destroy(); - return c10::nullopt; + return std::nullopt; } } else if (val.isString()) { n->s_(attr::value, val.toStringRef()); @@ -125,7 +125,7 @@ std::optional tryInsertConstant( n->output()->setType(val.type()); } else { n->destroy(); - return c10::nullopt; + return std::nullopt; }; } else if (val.isObject()) { const auto& ref = val.toObjectRef(); @@ -137,14 +137,14 @@ std::optional tryInsertConstant( n->output()->setType(val.type()); } else { n->destroy(); - return c10::nullopt; + return std::nullopt; } } else if ((val.isGenericDict() && insertableIValue(val)) || (val.isEnum())) { n->ival_(attr::value, val); n->output()->setType(val.type()); } else { n->destroy(); - return c10::nullopt; + return std::nullopt; } if (loc) n->setSourceRange(*loc); @@ -155,7 +155,7 @@ std::optional tryInsertConstant( std::optional toIValue(const Value* v) { if (v->node()->kind() != prim::Constant || v->type()->cast()) { - return c10::nullopt; + return std::nullopt; } const Node* node = v->node(); const TypePtr& type = v->type(); diff --git 
a/torch/csrc/jit/ir/constants.h b/torch/csrc/jit/ir/constants.h index 118da1e932d9..160dad5eab4c 100644 --- a/torch/csrc/jit/ir/constants.h +++ b/torch/csrc/jit/ir/constants.h @@ -25,27 +25,27 @@ struct TORCH_API constant_not_supported_error : public std::runtime_error { TORCH_API Value* insertConstant( Graph& g, const IValue& val, - std::optional loc = c10::nullopt, - std::optional scope = c10::nullopt); + std::optional loc = std::nullopt, + std::optional scope = std::nullopt); // note: prefer g.insertConsant(val, loc) which does exactly the same thing // this function is only declared/defined here because its implementation is // closely related to the implementation of prim::Constant that is also in // constants.cpp. // -// returns a c10::nullopt if the IValue kind cannot be inserted as a constant +// returns a std::nullopt if the IValue kind cannot be inserted as a constant TORCH_API std::optional tryInsertConstant( Graph& g, const IValue& val, - std::optional loc = c10::nullopt, - std::optional scope = c10::nullopt); + std::optional loc = std::nullopt, + std::optional scope = std::nullopt); //////////////////////////////////////////////////////////////////////////////// // Helper for retrieving constants //////////////////////////////////////////////////////////////////////////////// // attempt to convert a (possibly constant) Value* into an interpreter value -// (IValue). returns c10::nullopt if the Value* was not constant +// (IValue). returns std::nullopt if the Value* was not constant TORCH_API std::optional toIValue(const Value* v); // if a value is a constant then try to turn into type T using the @@ -55,7 +55,7 @@ std::optional constant_as(const Value* v) { if (auto ivalue = toIValue(v)) { return ivalue->to(); } - return c10::nullopt; + return std::nullopt; } } // namespace jit } // namespace torch diff --git a/torch/csrc/jit/ir/ir.cpp b/torch/csrc/jit/ir/ir.cpp index a6b0116d7fb6..3b449ea7ea21 100644 --- a/torch/csrc/jit/ir/ir.cpp +++ b/torch/csrc/jit/ir/ir.cpp @@ -412,7 +412,7 @@ std::ostream& operator<<(std::ostream& out, const Graph& g) { static void checkSameDevice(const Node* node) { bool has_device = false; - std::optional device = c10::nullopt; + std::optional device = std::nullopt; auto checkValue = [&](const Value* v) { if (TensorTypePtr type = v->type()->cast()) { if (type->device() && !has_device) { @@ -1297,7 +1297,7 @@ Node::Node(Graph* graph_, NodeKind kind_) graph_(graph_), owning_block_(nullptr), scope_(graph_->current_scope_), - callstack_(c10::nullopt), + callstack_(std::nullopt), op_(nullptr), topo_position_(0) { graph_->all_nodes.emplace(this); @@ -2101,11 +2101,11 @@ std::vector inlineCallTo( std::unordered_map new_callstack_entries; - std::optional module_instance_info = c10::nullopt; + std::optional module_instance_info = std::nullopt; if (to_replace->kind() == prim::CallMethod) { auto class_type_ptr = to_replace->input(0)->type()->cast(); if (to_replace->input(0)->node()->kind() == prim::GetAttr) { - module_instance_info = c10::make_optional(ModuleInstanceInfo( + module_instance_info = std::make_optional(ModuleInstanceInfo( class_type_ptr, to_replace->input(0)->node()->s(attr::name))); } else if ( !to_replace->owningGraph()->inputs().empty() && @@ -2113,11 +2113,11 @@ std::vector inlineCallTo( // This CallMethod must correspond to method of the same object // to which this graph belongs. 
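As an aside on the make_optional rewrites in this hunk: std::make_optional deduces the wrapped type from its argument, so the call shape is unchanged. Illustrative snippet, not code from the patch:

#include <optional>
#include <string>

// Both definitions produce an engaged optional holding the given value.
std::optional<std::string> tag = std::make_optional(std::string("SELF"));
std::optional<long> count = std::make_optional(42L);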
module_instance_info = - c10::make_optional(ModuleInstanceInfo(class_type_ptr, "SELF")); + std::make_optional(ModuleInstanceInfo(class_type_ptr, "SELF")); } else { // Not sure if it is possible to come here ever. // TODO: Remove this else. Or add assert - module_instance_info = c10::make_optional( + module_instance_info = std::make_optional( ModuleInstanceInfo(class_type_ptr, "INSTANCE_NAME_UNKNOWN")); } } diff --git a/torch/csrc/jit/ir/ir.h b/torch/csrc/jit/ir/ir.h index 859da3cb3cae..3db67b2f9798 100644 --- a/torch/csrc/jit/ir/ir.h +++ b/torch/csrc/jit/ir/ir.h @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include @@ -348,7 +348,7 @@ struct TORCH_API Node { // is changed, we need to rely on this name // to retrieve old schemas to successfully apply upgraders // for this operator. - std::optional historic_schema_name_ = c10::nullopt; + std::optional historic_schema_name_ = std::nullopt; protected: Node(Graph* graph_, NodeKind kind_); // defined after graph @@ -534,7 +534,7 @@ struct TORCH_API Node { if (auto v = get(name)) { return v->template to(); } - return c10::nullopt; + return std::nullopt; } // Returns true if the value of input name is statically known @@ -1368,8 +1368,8 @@ struct Graph : std::enable_shared_from_this { // Insert constant IValue into the graph. TORCH_API Value* insertConstant( const IValue& val, - std::optional loc = c10::nullopt, - std::optional scope = c10::nullopt); + std::optional loc = std::nullopt, + std::optional scope = std::nullopt); // Schema-driven insert: // This inserts a node into the graph with inputs determined from args and @@ -1733,14 +1733,14 @@ struct OperatorMap { std::optional find(const Operator& op) { const auto it = map.find(Symbol::fromQualString(op.schema().name())); if (it == map.end()) { - return c10::nullopt; + return std::nullopt; } for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) { if (vit->first->schema() == op.schema()) { return vit->second; } } - return c10::nullopt; + return std::nullopt; } // TODO: return iterator @@ -1809,14 +1809,14 @@ struct FunctionSchemaMap { std::optional find(const FunctionSchema& schema) const { const auto it = map.find(Symbol::fromQualString(schema.name())); if (it == map.end()) { - return c10::nullopt; + return std::nullopt; } for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) { if (vit->first == schema) { return vit->second; } } - return c10::nullopt; + return std::nullopt; } // TODO: return iterator diff --git a/torch/csrc/jit/ir/scope.h b/torch/csrc/jit/ir/scope.h index 544980303223..5eafeb0fc4aa 100644 --- a/torch/csrc/jit/ir/scope.h +++ b/torch/csrc/jit/ir/scope.h @@ -1,10 +1,10 @@ #pragma once #include #include -#include #include #include #include +#include #include namespace torch { diff --git a/torch/csrc/jit/mobile/compatibility/backport_manager.cpp b/torch/csrc/jit/mobile/compatibility/backport_manager.cpp index f0dd562cc1cd..3b8319ad8f90 100644 --- a/torch/csrc/jit/mobile/compatibility/backport_manager.cpp +++ b/torch/csrc/jit/mobile/compatibility/backport_manager.cpp @@ -408,7 +408,7 @@ std::stringstream backport_v6_to_v5(std::stringstream& input_model_stream) { } // Loading the TS module is required for this backport, because bytecode needs // to be re-emitted (refer to the comments below) - Module torch_script = torch::jit::load(rai, c10::nullopt, extra_files); + Module torch_script = torch::jit::load(rai, std::nullopt, extra_files); // The RAII guard to change the flag, emitBytecodeDefaultInputs, to true, so // that TS 
stores the default argument values in the constant table, and emits @@ -476,7 +476,7 @@ std::stringstream backport_v7_to_v6(std::stringstream& input_model_stream) { } // Loading the TS module is required for this backport, because bytecode needs // to be re-emitted (refer to the comments below) - Module torch_script = torch::jit::load(rai, c10::nullopt, extra_files); + Module torch_script = torch::jit::load(rai, std::nullopt, extra_files); // The RAII guard to change the flag, emit_default_input_instructions, to // false to keep the same behavior in bytecode version 6. Change the flag, @@ -502,7 +502,7 @@ std::stringstream backport_v7_to_v6(std::stringstream& input_model_stream) { std::stringstream backport_v9_to_v8(std::stringstream& input_model_stream) { ExtraFilesMap extra_files; Module torch_script = - torch::jit::load(input_model_stream, c10::nullopt, extra_files); + torch::jit::load(input_model_stream, std::nullopt, extra_files); std::stringstream intermediate_model_stream; // TODO(@pavithran) : Check if debug info is available and use load/save while // backporting hardcode debaug info to be false untill supported. @@ -540,7 +540,7 @@ std::stringstream backport_v8_to_v7(std::stringstream& input_model_stream) { extra_files.emplace(record.substr(found + 1), ""); } } - Module torch_script = torch::jit::load(rai, c10::nullopt, extra_files); + Module torch_script = torch::jit::load(rai, std::nullopt, extra_files); std::stringstream intermediate_model_stream; { BytecodeEmitModeGuard argNumGuard( diff --git a/torch/csrc/jit/mobile/compatibility/runtime_compatibility.h b/torch/csrc/jit/mobile/compatibility/runtime_compatibility.h index 2e65f1f38bd8..d89165bb1d29 100644 --- a/torch/csrc/jit/mobile/compatibility/runtime_compatibility.h +++ b/torch/csrc/jit/mobile/compatibility/runtime_compatibility.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include diff --git a/torch/csrc/jit/mobile/flatbuffer_loader.cpp b/torch/csrc/jit/mobile/flatbuffer_loader.cpp index bca407358913..2094d4a87a17 100644 --- a/torch/csrc/jit/mobile/flatbuffer_loader.cpp +++ b/torch/csrc/jit/mobile/flatbuffer_loader.cpp @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -35,6 +34,7 @@ #include #include #include +#include #ifndef DISABLE_UPGRADER #include @@ -364,7 +364,7 @@ std::unique_ptr FlatbufferLoader::parseFunction( (operator_version < caffe2::serialize::kProducedFileFormatVersion); for (const auto* op : *method->operators()) { - std::optional num_args = c10::nullopt; + std::optional num_args = std::nullopt; if (op->num_args_serialized() > -1) { num_args = op->num_args_serialized(); } @@ -399,7 +399,7 @@ std::unique_ptr FlatbufferLoader::parseFunction( auto arg = c10::Argument( arg_tb->name()->str(), std::move(type_ptr), - c10::nullopt /*N*/, + std::nullopt /*N*/, std::move(default_value)); args.emplace_back(std::move(arg)); } diff --git a/torch/csrc/jit/mobile/flatbuffer_loader.h b/torch/csrc/jit/mobile/flatbuffer_loader.h index 9ac9636f3f14..62b2c795bf84 100644 --- a/torch/csrc/jit/mobile/flatbuffer_loader.h +++ b/torch/csrc/jit/mobile/flatbuffer_loader.h @@ -9,8 +9,8 @@ #include #include #include -#include #include +#include /** * Defines the public API for loading flatbuffer-serialized mobile modules. @@ -58,7 +58,7 @@ using ExtraFilesMap = std::unordered_map; TORCH_API mobile::Module parse_and_initialize_mobile_module( void* data, size_t size, // of `data`, in bytes. 
- std::optional device = c10::nullopt, + std::optional device = std::nullopt, ExtraFilesMap* extra_files = nullptr, bool should_copy_tensor_memory = false); @@ -74,7 +74,7 @@ TORCH_API mobile::Module parse_and_initialize_mobile_module( TORCH_API mobile::Module parse_and_initialize_mobile_module( std::shared_ptr data, size_t size, // of `data`, in bytes. - std::optional device = c10::nullopt, + std::optional device = std::nullopt, ExtraFilesMap* extra_files = nullptr); // Parse a mobile::Module from raw bytes, also returning JIT-related metadata. @@ -87,7 +87,7 @@ TORCH_API mobile::Module parse_and_initialize_mobile_module_for_jit( size_t size, // of `data`, in bytes. ExtraFilesMap& jit_sources, std::vector& jit_constants, - std::optional device = c10::nullopt, + std::optional device = std::nullopt, ExtraFilesMap* extra_files = nullptr); // Load a mobile::Module from a filepath. @@ -100,7 +100,7 @@ TORCH_API mobile::Module parse_and_initialize_mobile_module_for_jit( // directly. TORCH_API mobile::Module load_mobile_module_from_file( const std::string& filename, - std::optional device = c10::nullopt, + std::optional device = std::nullopt, ExtraFilesMap* extra_files = nullptr); TORCH_API uint64_t get_bytecode_version(std::istream& in); @@ -114,7 +114,7 @@ TORCH_API mobile::ModuleInfo get_module_info_from_flatbuffer( // its entirity to a buffer TORCH_API mobile::Module load_mobile_module_from_stream_with_copy( std::istream& in, - std::optional device = c10::nullopt, + std::optional device = std::nullopt, ExtraFilesMap* extra_files = nullptr); TORCH_API mobile::Module parse_flatbuffer_no_object( diff --git a/torch/csrc/jit/mobile/frame.h b/torch/csrc/jit/mobile/frame.h index 45c51fef0085..4ad3817af624 100644 --- a/torch/csrc/jit/mobile/frame.h +++ b/torch/csrc/jit/mobile/frame.h @@ -2,8 +2,8 @@ #include -#include #include +#include namespace torch { namespace jit { diff --git a/torch/csrc/jit/mobile/function.cpp b/torch/csrc/jit/mobile/function.cpp index 36f19fb1fac4..9c3626e361da 100644 --- a/torch/csrc/jit/mobile/function.cpp +++ b/torch/csrc/jit/mobile/function.cpp @@ -72,7 +72,7 @@ bool Function::initialize_operators(bool should_check_operators) { const auto& opname = code_.op_names_[i]; int num_args = code_.operator_input_sizes_[i]; std::optional num_specified_args = - num_args < 0 ? c10::nullopt : std::optional(num_args); + num_args < 0 ? std::nullopt : std::optional(num_args); auto func = makeOperatorFunction(opname, num_specified_args); if (!func.has_value()) { unsupported_op_names.insert(operator_str(opname)); @@ -189,7 +189,7 @@ std::optional> makeOperatorFunction( TORCH_CHECK(false, "arguments are missing for operator ", opname); } } else { - return c10::nullopt; + return std::nullopt; } } } diff --git a/torch/csrc/jit/mobile/import.cpp b/torch/csrc/jit/mobile/import.cpp index da7b87bae611..1fa2fe47904b 100644 --- a/torch/csrc/jit/mobile/import.cpp +++ b/torch/csrc/jit/mobile/import.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include @@ -23,6 +22,7 @@ #include #include #include +#include #include #include @@ -267,7 +267,7 @@ void BytecodeDeserializer::parseFunctionSchema( args.emplace_back( name, std::move(type), - c10::nullopt /*N*/, + std::nullopt /*N*/, std::move(default_value)); } tryRegisterMethod(args, *function); @@ -704,7 +704,7 @@ void _load_extra_only_for_mobile( // TODO: the current flatbuffers implementation will always load the // whole module including the extra files. 
Ideally it should be // possible to just get the extra files given data - load_mobile_module_from_file(filename, c10::nullopt, &extra_files); + load_mobile_module_from_file(filename, std::nullopt, &extra_files); break; } default: { diff --git a/torch/csrc/jit/mobile/import.h b/torch/csrc/jit/mobile/import.h index 77a801e62571..73ebe18976d6 100644 --- a/torch/csrc/jit/mobile/import.h +++ b/torch/csrc/jit/mobile/import.h @@ -45,15 +45,15 @@ TORCH_API mobile::Module _load_for_mobile( TORCH_API mobile::Module _load_for_mobile( std::istream& in, - std::optional device = c10::nullopt); + std::optional device = std::nullopt); TORCH_API mobile::Module _load_for_mobile( const std::string& filename, - std::optional device = c10::nullopt); + std::optional device = std::nullopt); TORCH_API mobile::Module _load_for_mobile( std::unique_ptr rai, - std::optional device = c10::nullopt); + std::optional device = std::nullopt); /** * Load only the contents of the "extra/" files whose names are diff --git a/torch/csrc/jit/mobile/import_data.h b/torch/csrc/jit/mobile/import_data.h index 25e1fd81341c..d2d2fa7f998e 100644 --- a/torch/csrc/jit/mobile/import_data.h +++ b/torch/csrc/jit/mobile/import_data.h @@ -2,8 +2,8 @@ #include #include -#include #include +#include #include #include @@ -19,7 +19,7 @@ namespace jit { */ TORCH_API std::map _load_parameters( std::istream& in, - std::optional device = c10::nullopt); + std::optional device = std::nullopt); /** * Loads named parameters from the serialized data in @p filename. @@ -28,7 +28,7 @@ TORCH_API std::map _load_parameters( */ TORCH_API std::map _load_parameters( const std::string& filename, - std::optional device = c10::nullopt); + std::optional device = std::nullopt); // NOTE: Please prefer using _load_parameters over using the function below. 
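The defaulted parameters above keep their meaning after the substitution: omitting the argument still yields an empty optional, so call sites are untouched. A minimal stand-in sketch (the name and parameter types are illustrative, not the real _load_parameters signature):

#include <optional>
#include <string>

void load_sketch(const std::string& path,
                 std::optional<int> device = std::nullopt) {
  if (!device) {
    // no device requested; fall back to a default for `path`
  }
}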
TORCH_API std::map mobile_module_to_parameter_map( diff --git a/torch/csrc/jit/mobile/model_tracer/MobileModelRunner.h b/torch/csrc/jit/mobile/model_tracer/MobileModelRunner.h index b6abe86c0fdc..813d7be7e7a2 100644 --- a/torch/csrc/jit/mobile/model_tracer/MobileModelRunner.h +++ b/torch/csrc/jit/mobile/model_tracer/MobileModelRunner.h @@ -104,7 +104,7 @@ class MobileModelRunner { */ bool has_new_style_bundled_inputs() const { return module_->find_method("get_bundled_inputs_functions_and_info") != - c10::nullopt; + std::nullopt; } /** diff --git a/torch/csrc/jit/mobile/model_tracer/TracerRunner.cpp b/torch/csrc/jit/mobile/model_tracer/TracerRunner.cpp index 585747c14d82..3687f84f7039 100644 --- a/torch/csrc/jit/mobile/model_tracer/TracerRunner.cpp +++ b/torch/csrc/jit/mobile/model_tracer/TracerRunner.cpp @@ -117,10 +117,10 @@ void call_dependent_methods(std::set& root_ops) { if (is_training && has_batchnorm) { at::batch_norm( at::ones({2, 2}), - c10::nullopt, - c10::nullopt, - c10::nullopt, - c10::nullopt, + std::nullopt, + std::nullopt, + std::nullopt, + std::nullopt, true, 0.1, 0.1, diff --git a/torch/csrc/jit/mobile/module.cpp b/torch/csrc/jit/mobile/module.cpp index 23dfe9ff3678..bcf4e5e1f6ba 100644 --- a/torch/csrc/jit/mobile/module.cpp +++ b/torch/csrc/jit/mobile/module.cpp @@ -90,10 +90,10 @@ void Module::unsafeCopyMethod( std::optional Module::find_method(const std::string& basename) const { for (const auto& fn : cu_->methods()) { if (fn->name() == basename) { - return c10::make_optional(Method(this, fn.get())); + return std::make_optional(Method(this, fn.get())); } } - return c10::nullopt; + return std::nullopt; } namespace { @@ -324,7 +324,7 @@ static std::optional print_type(const c10::Type& t) { if (auto dyn = t.castRaw()) { return dyn->fallback()->annotation_str(); } - return c10::nullopt; + return std::nullopt; } TORCH_API ModuleInfo get_module_info(const mobile::Module& module) { diff --git a/torch/csrc/jit/mobile/promoted_prim_ops.cpp b/torch/csrc/jit/mobile/promoted_prim_ops.cpp index 8e4974904242..1d9d6fb3abcf 100644 --- a/torch/csrc/jit/mobile/promoted_prim_ops.cpp +++ b/torch/csrc/jit/mobile/promoted_prim_ops.cpp @@ -118,7 +118,7 @@ void toPrimDType(Stack& stack) { pop(stack, non_blocking, copy); std::optional scalarType = pop(stack).toOptional(); - std::optional device = c10::nullopt; + std::optional device = std::nullopt; at::Tensor self = pop(stack).toTensor(); push(stack, to_dispatch(self, device, scalarType, non_blocking, copy)); } diff --git a/torch/csrc/jit/operator_upgraders/upgraders_entry.cpp b/torch/csrc/jit/operator_upgraders/upgraders_entry.cpp index 3185b0eaf123..21889a84b440 100644 --- a/torch/csrc/jit/operator_upgraders/upgraders_entry.cpp +++ b/torch/csrc/jit/operator_upgraders/upgraders_entry.cpp @@ -122,7 +122,7 @@ std::shared_ptr create_upgrader_graph( const std::string& upgrader_name, const std::string& upgrader_body) { auto cu = std::make_shared(); - cu->define(c10::nullopt, upgrader_body, nativeResolver(), nullptr); + cu->define(std::nullopt, upgrader_body, nativeResolver(), nullptr); Function& jitFunc = cu->get_function(upgrader_name); GraphFunction& graphFunction = toGraphFunction(jitFunc); return graphFunction.graph(); diff --git a/torch/csrc/jit/operator_upgraders/utils.cpp b/torch/csrc/jit/operator_upgraders/utils.cpp index fef7b92c83c9..98819b08d640 100644 --- a/torch/csrc/jit/operator_upgraders/utils.cpp +++ b/torch/csrc/jit/operator_upgraders/utils.cpp @@ -1,9 +1,9 @@ #include -#include #include #include #include +#include #include 
#include #include @@ -27,7 +27,7 @@ std::optional findUpgrader( if (pos != upgraders_for_schema.end()) { return *pos; } - return c10::nullopt; + return std::nullopt; } bool isOpCurrentBasedOnUpgraderEntries( diff --git a/torch/csrc/jit/operator_upgraders/utils.h b/torch/csrc/jit/operator_upgraders/utils.h index a30b8c1182b9..95e794261e6b 100644 --- a/torch/csrc/jit/operator_upgraders/utils.h +++ b/torch/csrc/jit/operator_upgraders/utils.h @@ -1,8 +1,8 @@ #pragma once #include -#include #include #include +#include #include #include diff --git a/torch/csrc/jit/passes/autocast.cpp b/torch/csrc/jit/passes/autocast.cpp index 635162e04953..bbd56744afb7 100644 --- a/torch/csrc/jit/passes/autocast.cpp +++ b/torch/csrc/jit/passes/autocast.cpp @@ -4,10 +4,10 @@ #include #include #include -#include #include #include #include +#include #include #include @@ -65,7 +65,7 @@ std::optional parseAutocast( const AutocastContext& context) { if (!isAutocastNode(value)) { // Not an autocast... - return c10::nullopt; + return std::nullopt; } if (value->node()->kind() == prim::CreateObject) { AutocastScope scope; @@ -135,7 +135,7 @@ std::optional parseAutocast( AT_ERROR("Unsupported autocast syntax"); } - return c10::nullopt; + return std::nullopt; } void castTensorInputs( @@ -269,7 +269,7 @@ void updateAutocastEnabledCheck(Node* node, bool is_jit_enabled) { void handleBlock(Block* block, AutocastContext initial_state) { std::stack autocast_stack; - std::optional incompatible_amp = c10::nullopt; + std::optional incompatible_amp = std::nullopt; // The current autocast enabled/disabled state auto current_state = [&] { diff --git a/torch/csrc/jit/passes/canonicalize.cpp b/torch/csrc/jit/passes/canonicalize.cpp index 20a883a8d06f..2aa6aff76bc1 100644 --- a/torch/csrc/jit/passes/canonicalize.cpp +++ b/torch/csrc/jit/passes/canonicalize.cpp @@ -144,7 +144,7 @@ bool isBeforeOrAfter(const Use& a, const Use& b, bool checking_before) { std::optional firstOrLastUse(Value* v, bool find_first) { if (v->uses().empty()) { - return c10::nullopt; + return std::nullopt; } Use extreme_use = v->uses()[0]; for (size_t i = 1; i < v->uses().size(); ++i) { @@ -176,12 +176,12 @@ static std::vector sort_indexes(at::ArrayRef values) { // if neither has any uses, use original ordering. Since the // only values that jitter are ones added by the compiler and are guaranteed // to have uses, original ordering is fine. 
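The comparisons rewritten below rely on optional's equality with nullopt; a quick sketch of the equivalence, with an illustrative function not from the patch:

#include <optional>

// a == std::nullopt is true exactly when !a.has_value(), so the rewritten
// comparator keeps its semantics.
bool both_unused(const std::optional<int>& a, const std::optional<int>& b) {
  return a == std::nullopt && b == std::nullopt;
}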
- if (first_uses[i1] == c10::nullopt && first_uses[i2] == c10::nullopt) { + if (first_uses[i1] == std::nullopt && first_uses[i2] == std::nullopt) { return i1 < i2; } - if (first_uses[i1] == c10::nullopt) { + if (first_uses[i1] == std::nullopt) { return false; - } else if (first_uses[i2] == c10::nullopt) { + } else if (first_uses[i2] == std::nullopt) { return true; } diff --git a/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp b/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp index 72d419eeb9c1..b3e190445b8f 100644 --- a/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp +++ b/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp @@ -26,14 +26,14 @@ static std::optional> getChunkOutputs(Node* chunk) { // number of chunks if (static_cast(list_use.user->outputs().size()) != chunk->get(attr::chunks).value()) { - return c10::nullopt; + return std::nullopt; } auto unpack_outputs = list_use.user->outputs(); for (const auto i : c10::irange(unpack_outputs.size())) { outputs.emplace_back(unpack_outputs[i], i); } } else { - return c10::nullopt; + return std::nullopt; } } return outputs; diff --git a/torch/csrc/jit/passes/constant_propagation.cpp b/torch/csrc/jit/passes/constant_propagation.cpp index 6334cd75faa9..5ec8b561cba8 100644 --- a/torch/csrc/jit/passes/constant_propagation.cpp +++ b/torch/csrc/jit/passes/constant_propagation.cpp @@ -28,14 +28,14 @@ std::optional> runNodeIfInputsAreConstant( if (auto ival = toIValue(input)) { stack.push_back(*ival); } else { - return c10::nullopt; + return std::nullopt; } } switch (n->kind()) { case prim::ListUnpack: { if (stack.back().toList().size() != n->outputs().size()) { - return c10::nullopt; + return std::nullopt; } listUnpack(stack, n->outputs().size()); } break; @@ -78,14 +78,14 @@ std::optional> runNodeIfInputsAreConstant( // vararg schemas require the number of inputs at the top of the stack // but this is broken in other places in constant prop, so disable it // for now - return c10::nullopt; + return std::nullopt; } try { auto op = n->getOperation(); op(stack); } catch (...) 
{ - return c10::nullopt; + return std::nullopt; } } break; } @@ -95,13 +95,13 @@ std::optional> runNodeIfInputsAreConstant( const at::Tensor& t = v.toTensor(); if (t.defined() && t.requires_grad()) { // requires grad tensors cannot be constants - return c10::nullopt; + return std::nullopt; } } // Weak form of const propagation if (ignore_custom_classes) { if (v.isCustomClass()) { - return c10::nullopt; + return std::nullopt; } } // see [Constant Object Weak CompilationUnit Reference] @@ -123,7 +123,7 @@ std::optional> runNodeIfInputsAreConstant( } if (v.isObject()) { if (!v.toObject()->is_weak_compilation_ref()) { - return c10::nullopt; + return std::nullopt; } } } diff --git a/torch/csrc/jit/passes/create_autodiff_subgraphs.cpp b/torch/csrc/jit/passes/create_autodiff_subgraphs.cpp index c5fe65537669..46eca6f2b221 100644 --- a/torch/csrc/jit/passes/create_autodiff_subgraphs.cpp +++ b/torch/csrc/jit/passes/create_autodiff_subgraphs.cpp @@ -287,7 +287,7 @@ class SubgraphSlicer { aliasDb_.moveBeforeTopologicallyValid(producer, consumer); if (!canMerge) { - return c10::nullopt; + return std::nullopt; } SubgraphUtils::mergeNodeIntoSubgraphAndUpdateAliasing( @@ -305,11 +305,11 @@ class SubgraphSlicer { std::optional getProfileNodeRequiresGrad(Node* n) { TORCH_INTERNAL_ASSERT(n->kind() == prim::profile); if (!n->hasAttribute(attr::profiled_type)) { - return c10::nullopt; + return std::nullopt; } auto& type = n->ty(attr::profiled_type); if (type->castRaw() == nullptr) { - return c10::nullopt; + return std::nullopt; } return type->expectRef().requiresGrad(); } @@ -403,7 +403,7 @@ std::optional findRequiresGradForOutput( } } - return c10::nullopt; + return std::nullopt; } void AddRequiresGradToDifferentiableGraph( diff --git a/torch/csrc/jit/passes/device_type_analysis.cpp b/torch/csrc/jit/passes/device_type_analysis.cpp index 7670292696ae..c9c9188d37dc 100644 --- a/torch/csrc/jit/passes/device_type_analysis.cpp +++ b/torch/csrc/jit/passes/device_type_analysis.cpp @@ -2,12 +2,12 @@ #include #include #include -#include #include #include #include #include #include +#include #include namespace torch { @@ -88,7 +88,7 @@ bool propWithNoDevice(Node* n) { } if (input_num == n->inputs().size()) { // No tensor found - return setReturnsToDevice(n, c10::nullopt); + return setReturnsToDevice(n, std::nullopt); } auto tensor_type = n->inputs()[input_num]->type()->expect(); @@ -108,7 +108,7 @@ bool propWithNoDevice(Node* n) { only_seen_cpu_zerodim = false; } else { // Bail on the type not match case - return setReturnsToDevice(n, c10::nullopt); + return setReturnsToDevice(n, std::nullopt); } } } diff --git a/torch/csrc/jit/passes/dtype_analysis.cpp b/torch/csrc/jit/passes/dtype_analysis.cpp index f63ea6f34194..2311cb791a44 100644 --- a/torch/csrc/jit/passes/dtype_analysis.cpp +++ b/torch/csrc/jit/passes/dtype_analysis.cpp @@ -3,13 +3,13 @@ #include #include #include -#include #include #include #include #include #include #include +#include #ifndef AT_PER_OPERATOR_HEADERS #include @@ -102,7 +102,7 @@ static bool canBeInferredWithMetaTensor(Node* n) { std::optional inferWithMetaTensor(Node* n) { GRAPH_DEBUG("inferWithMetaTensor", getHeader(n)); if (!canBeInferredWithMetaTensor(n)) { - return c10::nullopt; + return std::nullopt; } Operation op = n->getOperation(); try { @@ -116,7 +116,7 @@ std::optional inferWithMetaTensor(Node* n) { } catch (...) 
{ GRAPH_DEBUG("caught exception with Metatensor run!"); }; - return c10::nullopt; + return std::nullopt; } bool setDtype( diff --git a/torch/csrc/jit/passes/erase_number_types.cpp b/torch/csrc/jit/passes/erase_number_types.cpp index 540f1a7e13fb..ccafee9aa4ae 100644 --- a/torch/csrc/jit/passes/erase_number_types.cpp +++ b/torch/csrc/jit/passes/erase_number_types.cpp @@ -41,7 +41,7 @@ void EraseNumberTypesOnBlock(Block* block) { WithInsertPoint guard(*it); Value* r = block->owningGraph()->insertConstant( - scalar_to_tensor(s), c10::nullopt, it->scope()); + scalar_to_tensor(s), std::nullopt, it->scope()); r->copyMetadata(it->output()); it->output()->replaceAllUsesWith(r); it.destroyCurrent(); diff --git a/torch/csrc/jit/passes/freeze_module.cpp b/torch/csrc/jit/passes/freeze_module.cpp index 4d67d5d21781..23bc873addc7 100644 --- a/torch/csrc/jit/passes/freeze_module.cpp +++ b/torch/csrc/jit/passes/freeze_module.cpp @@ -170,7 +170,7 @@ class AttributePropagator { std::optional resolveName(const std::string& name) { auto sub_names = splitName(name); if (sub_names.empty()) { - return c10::nullopt; + return std::nullopt; } auto& attr_name = sub_names.back(); auto cur_module = module_; @@ -189,7 +189,7 @@ class AttributePropagator { } } if (!found) { - return c10::nullopt; + return std::nullopt; } } @@ -207,7 +207,7 @@ class AttributePropagator { return std::make_pair(std::move(cur_module), std::move(attr_name)); } - return c10::nullopt; + return std::nullopt; } bool _loadModulePath(Value* input, std::shared_ptr& graph) { @@ -230,7 +230,7 @@ class AttributePropagator { std::shared_ptr& graph) { bool success = _loadModulePath(input, graph); if (!success) { - return c10::nullopt; + return std::nullopt; } return names_; } diff --git a/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp b/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp index c28e99a44525..b508cd905c58 100644 --- a/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp +++ b/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp @@ -1105,7 +1105,7 @@ class MKLDNNSubgraphSlicer { aliasDb_.moveAfterTopologicallyValid(consumer, producer); if (!canMerge) { - return c10::nullopt; + return std::nullopt; } SubgraphUtils::mergeNodeIntoSubgraphAndUpdateAliasing( diff --git a/torch/csrc/jit/passes/graph_fuser.cpp b/torch/csrc/jit/passes/graph_fuser.cpp index 984878307262..5136615cd2e4 100644 --- a/torch/csrc/jit/passes/graph_fuser.cpp +++ b/torch/csrc/jit/passes/graph_fuser.cpp @@ -494,7 +494,7 @@ struct GraphFuser { AT_ASSERT(group->kind() == prim::FusionGroup); auto it = std::find(group->inputs().begin(), group->inputs().end(), input); if (it == group->inputs().end()) { - return c10::nullopt; + return std::nullopt; } size_t input_index = it - group->inputs().begin(); auto& subgraph = getSubgraph(group); @@ -505,7 +505,7 @@ struct GraphFuser { AT_ASSERT(subgraph_input->uses().size() == 1); return node; } - return c10::nullopt; + return std::nullopt; } void fuseChunkByReusingExistingFusedChunk( diff --git a/torch/csrc/jit/passes/graph_rewrite_helper.cpp b/torch/csrc/jit/passes/graph_rewrite_helper.cpp index edb9f5b9589a..430dbb3fd1c8 100644 --- a/torch/csrc/jit/passes/graph_rewrite_helper.cpp +++ b/torch/csrc/jit/passes/graph_rewrite_helper.cpp @@ -287,7 +287,7 @@ bool isClampFusable( vmap.find("output_max") != vmap.end(), "Expected to find output_max as well given " "output_min exist in pattern graph."); - // If output_min/max are not constant, we get c10::nullopt. + // If output_min/max are not constant, we get std::nullopt. 
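The lookups around this hunk follow the same engaged-or-empty pattern; a small illustrative helper, not part of the patch:

#include <optional>

// Dereference only when the optional is engaged; value_or() expresses the
// same fallback in one call.
int bound_or_default(std::optional<int> bound) {
  return bound.has_value() ? *bound : 0;  // equivalently: bound.value_or(0)
}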
auto output_min = graph_rewrite_helper::getIValue("output_min", match_vmap, vmap); auto output_max = diff --git a/torch/csrc/jit/passes/inline_autodiff_subgraphs.cpp b/torch/csrc/jit/passes/inline_autodiff_subgraphs.cpp index f8d63e87f07b..226826e94609 100644 --- a/torch/csrc/jit/passes/inline_autodiff_subgraphs.cpp +++ b/torch/csrc/jit/passes/inline_autodiff_subgraphs.cpp @@ -68,7 +68,7 @@ graph_node_list::iterator scanNode(Node* node, size_t threshold) { // so the profiles will have outdated requires_grad=False. // conservatively update them to maybe requiring grad, bc we might create // autodiff graphs when the tensors maybe require grad - UpdateDifferentiableGraphRequiresGrad(subgraph, c10::nullopt); + UpdateDifferentiableGraphRequiresGrad(subgraph, std::nullopt); SubgraphUtils::unmergeSubgraph(node); return next_node; } diff --git a/torch/csrc/jit/passes/integer_value_refinement.cpp b/torch/csrc/jit/passes/integer_value_refinement.cpp index 16a329b3b11f..cf9b577f927b 100644 --- a/torch/csrc/jit/passes/integer_value_refinement.cpp +++ b/torch/csrc/jit/passes/integer_value_refinement.cpp @@ -93,7 +93,7 @@ struct IntegerValueRefiner { auto other_output = other_if_block->outputs().at(i); auto other_const_value = other_output->type()->cast() ? constant_as(other_output) - : c10::nullopt; + : std::nullopt; if (!other_const_value || block_output->node()->kind() == prim::Constant) { continue; @@ -211,7 +211,7 @@ struct IntegerValueRefiner { return maybe_refinement->second; } } - return c10::nullopt; + return std::nullopt; } std::shared_ptr graph_; diff --git a/torch/csrc/jit/passes/onnx/constant_fold.cpp b/torch/csrc/jit/passes/onnx/constant_fold.cpp index 4eeba79aae90..61d97057c5b4 100644 --- a/torch/csrc/jit/passes/onnx/constant_fold.cpp +++ b/torch/csrc/jit/passes/onnx/constant_fold.cpp @@ -5,9 +5,9 @@ #include #include -#include #include #include +#include namespace torch { namespace jit { @@ -72,15 +72,15 @@ std::optional runTorchSlice_opset9( TORCH_WARN( "Constant folding - Invalid number of inputs found for opset 9 " "onnx::Slice op. Constant folding not applied."); - return c10::nullopt; + return std::nullopt; } if (!(node->hasAttributeS("starts") && node->hasAttributeS("ends"))) { - return c10::nullopt; + return std::nullopt; } auto startsAttr = node->is(attr::starts); auto endsAttr = node->is(attr::ends); if (startsAttr.size() != endsAttr.size()) { - return c10::nullopt; + return std::nullopt; } std::vector axesAttr; if (node->hasAttributeS("axes")) { @@ -98,7 +98,7 @@ std::optional runTorchSlice_opset9( handleNegativeStartEndIndex(start, end, axis, updated_val.sizes()); int64_t length = end - start; if (length < 0 || start > updated_val.sizes()[axis] - length) - return c10::nullopt; + return std::nullopt; updated_val = at::narrow(updated_val, axis, start, length); } return std::optional(updated_val); @@ -114,7 +114,7 @@ std::optional runTorchSlice_opset10( TORCH_WARN( "Constant folding - Invalid number of inputs found for opset opset >= 10 onnx::Slice op. " "Constant folding not applied."); - return c10::nullopt; + return std::nullopt; } // Checking validity of 'starts' and 'ends' input if (inputTensorValues[1].sizes().size() != 1 || @@ -122,12 +122,12 @@ std::optional runTorchSlice_opset10( TORCH_WARN( "Constant folding - Invalid 'starts' or 'ends' inputs found for opset >= 10 onnx::Slice op. 
" "Constant folding not applied."); - return c10::nullopt; + return std::nullopt; } if (inputTensorValues[1].sizes()[0] != inputTensorValues[2].sizes()[0]) { // Number of elements of 'starts' and 'ends' 1-D input tensors should be the // same - return c10::nullopt; + return std::nullopt; } // Checking 'axes' input, if available. std::vector axes; @@ -136,7 +136,7 @@ std::optional runTorchSlice_opset10( TORCH_WARN( "Constant folding - Invalid 'axes' input found for opset >= 10 onnx::Slice op. " "Constant folding not applied."); - return c10::nullopt; + return std::nullopt; } if (inputTensorValues[3].sizes()[0] != inputTensorValues[1].sizes()[0]) { // Number of elements of 'axes' and 'ends' 1-D input tensors should be the @@ -144,7 +144,7 @@ std::optional runTorchSlice_opset10( TORCH_WARN( "Constant folding - Invalid 'axes' or 'ends' inputs found for opset >= 10 onnx::Slice op. " "Constant folding not applied."); - return c10::nullopt; + return std::nullopt; } auto axes_a = inputTensorValues[3].accessor(); axes.resize(inputTensorValues[3].sizes()[0]); @@ -162,7 +162,7 @@ std::optional runTorchSlice_opset10( TORCH_WARN( "Constant folding - Invalid 'steps' input found for opset >= 10 onnx::Slice op. " "Constant folding not applied."); - return c10::nullopt; + return std::nullopt; } if (inputTensorValues[4].sizes()[0] != inputTensorValues[1].sizes()[0]) { // Number of elements of 'steps' and 'ends' 1-D input tensors should be @@ -170,7 +170,7 @@ std::optional runTorchSlice_opset10( TORCH_WARN( "Constant folding - Invalid 'steps' or 'ends' inputs found for opset >= 10 onnx::Slice op. " "Constant folding not applied."); - return c10::nullopt; + return std::nullopt; } auto steps_a = inputTensorValues[4].accessor(); for (const auto i : c10::irange(inputTensorValues[4].sizes()[0])) { @@ -179,7 +179,7 @@ std::optional runTorchSlice_opset10( TORCH_WARN( "Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. " "Constant folding not applied."); - return c10::nullopt; + return std::nullopt; } } } @@ -192,7 +192,7 @@ std::optional runTorchSlice_opset10( handleNegativeStartEndIndex(start, end, axis, updated_val.sizes()); int64_t length = end - start; if (length < 0 || start > updated_val.sizes()[axis] - length) - return c10::nullopt; + return std::nullopt; updated_val = at::narrow(updated_val, axis, start, length); } return std::optional(updated_val); @@ -272,11 +272,11 @@ std::optional runTorchBackendForOnnx( } else { TORCH_WARN( "Constant folding - unsupported opset version. Constant folding not applied."); - return c10::nullopt; + return std::nullopt; } } else if (node->kind() == onnx::Concat) { if (!node->hasAttributeS("axis")) { - return c10::nullopt; + return std::nullopt; } updated_val = at::cat(at::TensorList(inputTensorValues), node->i(attr::axis)); @@ -310,7 +310,7 @@ std::optional runTorchBackendForOnnx( TORCH_WARN( "Constant folding - Invalid 'axes' inputs found for opset 13 onnx::Unsqueeze op. " "Constant folding not applied."); - return c10::nullopt; + return std::nullopt; } auto axes_a = inputTensorValues[1].accessor(); std::vector axes; @@ -332,7 +332,7 @@ std::optional runTorchBackendForOnnx( } else if (opset_version >= ONNX_OPSET_9) { assert(inputTensorValues.size() == 1); if (!node->hasAttributeS("axes")) { - return c10::nullopt; + return std::nullopt; } updated_val = inputTensorValues[0]; std::vector axesAttr = node->is(attr::axes); @@ -345,7 +345,7 @@ std::optional runTorchBackendForOnnx( TORCH_WARN( "Constant folding - unsupported opset version. 
" "Constant folding not applied."); - return c10::nullopt; + return std::nullopt; } } else if (node->kind() == onnx::Squeeze) { assert(inputTensorValues.size() == 2 || inputTensorValues.size() == 1); @@ -359,7 +359,7 @@ std::optional runTorchBackendForOnnx( TORCH_WARN( "Constant folding - Invalid 'axes' inputs found for opset 13 onnx::Squeeze op. " "Constant folding not applied."); - return c10::nullopt; + return std::nullopt; } auto axes_a = inputTensorValues[1].accessor(); std::vector axes; @@ -389,12 +389,12 @@ std::optional runTorchBackendForOnnx( TORCH_WARN( "Constant folding - unsupported opset version. " "Constant folding not applied."); - return c10::nullopt; + return std::nullopt; } } else if (node->kind() == onnx::Transpose) { assert(inputTensorValues.size() == 1); if (!node->hasAttributeS("perm")) { - return c10::nullopt; + return std::nullopt; } updated_val = inputTensorValues[0].permute(node->is(attr::perm)); return std::optional(updated_val); @@ -405,7 +405,7 @@ std::optional runTorchBackendForOnnx( ONNXTypeToATenType(node->i(attr::to)).value()); return std::optional(updated_val); } - return c10::nullopt; + return std::nullopt; } else if (node->kind() == onnx::Reshape) { assert(inputTensorValues.size() == 2); updated_val = inputTensorValues[0]; @@ -441,10 +441,10 @@ std::optional runTorchBackendForOnnx( } else if (node->kind() == onnx::ReduceL1 || node->kind() == onnx::ReduceL2) { assert(inputTensorValues.size() == 1); if (!node->hasAttributeS("axes")) { - return c10::nullopt; + return std::nullopt; } if (!node->hasAttributeS("keepdims")) { - return c10::nullopt; + return std::nullopt; } int p = node->kind() == onnx::ReduceL1 ? 1 : 2; updated_val = at::norm( @@ -485,7 +485,7 @@ std::optional runTorchBackendForOnnx( // at::index_select only supports indices with rank <= 1. // See https://pytorch.org/docs/main/generated/torch.index_select.html if (q > 1) { - return c10::nullopt; + return std::nullopt; } // If the device of indices tensor is not the same with it of the input // tensor, move it to the device of the input tensor @@ -539,7 +539,7 @@ std::optional runTorchBackendForOnnx( updated_val = at::softmax(inputTensorValues[0], axis); return std::optional(updated_val); } else { - return c10::nullopt; + return std::nullopt; } } @@ -652,7 +652,7 @@ void ConstantFoldONNX(Block* b, ParamMap& paramsDict, int opset_version) { } auto updatedValWrapped = onnx_constant_fold::runTorchBackendForOnnx( node, inputTensorValues, opset_version); - if (updatedValWrapped == c10::nullopt) { + if (updatedValWrapped == std::nullopt) { // Constant folding is not supported for this op. Skip it. 
continue; } diff --git a/torch/csrc/jit/passes/onnx/constant_fold.h b/torch/csrc/jit/passes/onnx/constant_fold.h index 201c3def3268..d25ebee32a78 100644 --- a/torch/csrc/jit/passes/onnx/constant_fold.h +++ b/torch/csrc/jit/passes/onnx/constant_fold.h @@ -2,8 +2,8 @@ #include -#include #include +#include namespace torch { namespace jit { diff --git a/torch/csrc/jit/passes/onnx/constant_map.cpp b/torch/csrc/jit/passes/onnx/constant_map.cpp index f9c96d0430df..99c801dcf773 100644 --- a/torch/csrc/jit/passes/onnx/constant_map.cpp +++ b/torch/csrc/jit/passes/onnx/constant_map.cpp @@ -34,14 +34,14 @@ bool ConstantValueMap::HasRank(const std::string& tensorName) { std::optional ConstantValueMap::GetRank(const std::string& tensorName) { if (!HasRank(tensorName)) { - return c10::nullopt; + return std::nullopt; } return ConstantValueMap::getInstance().rankMap[tensorName]; } void ConstantValueMap::SetAllGraphInputsStatic(bool all_static) { ConstantValueMap::getInstance().allGraphInputsStatic = - c10::make_optional(all_static); + std::make_optional(all_static); } std::optional ConstantValueMap::GetAllGraphInputsStatic() { @@ -71,7 +71,7 @@ bool ConstantValueMap::HasShape(const std::string& tensorName) { std::optional ConstantValueMap::GetShape( const std::string& tensorName) { if (!HasShape(tensorName)) { - return c10::nullopt; + return std::nullopt; } return ConstantValueMap::getInstance().shapeMap[tensorName]; } @@ -90,7 +90,7 @@ bool ConstantValueMap::HasValue(const std::string& tensorName) { std::optional ConstantValueMap::GetValue( const std::string& tensorName) { if (!HasValue(tensorName)) { - return c10::nullopt; + return std::nullopt; } return ConstantValueMap::getInstance().tensorValueMap[tensorName]; } @@ -121,7 +121,7 @@ std::optional> ConstantValueMap::GetShapeInto1DInt64Vector( return shape_value; } } - return c10::nullopt; + return std::nullopt; } std::optional> ConstantValueMap:: @@ -152,7 +152,7 @@ std::optional> ConstantValueMap:: } } } - return c10::nullopt; + return std::nullopt; } // accessor for 1DInt64 case. 
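The ConstantValueMap getters above all share one shape: return std::nullopt when the key is absent, otherwise wrap the stored value. Simplified stand-in, with an illustrative map type and name:

#include <map>
#include <optional>
#include <string>

std::optional<size_t> get_rank_sketch(const std::map<std::string, size_t>& ranks,
                                      const std::string& name) {
  auto it = ranks.find(name);
  if (it == ranks.end()) {
    return std::nullopt;  // missing entry reads as an empty optional
  }
  return it->second;
}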
@@ -183,7 +183,7 @@ bool ConstantValueMap::HasTypeReliable(const std::string& tensorName) { std::optional ConstantValueMap::GetTypeReliable( const std::string& tensorName) { if (!HasTypeReliable(tensorName)) { - return c10::nullopt; + return std::nullopt; } return ConstantValueMap::getInstance().typeReliableMap[tensorName]; } @@ -202,7 +202,7 @@ bool ConstantValueMap::HasUseInferredType(const std::string& tensorName) { std::optional ConstantValueMap::GetUseInferredType( const std::string& tensorName) { if (!HasUseInferredType(tensorName)) { - return c10::nullopt; + return std::nullopt; } return ConstantValueMap::getInstance().useInferredTypeMap[tensorName]; } @@ -221,7 +221,7 @@ bool ConstantValueMap::HasShapeValue(const std::string& tensorName) { std::optional ConstantValueMap::GetShapeValue( const std::string& tensorName) { if (!HasShapeValue(tensorName)) { - return c10::nullopt; + return std::nullopt; } return ConstantValueMap::getInstance().shapeValueMap[tensorName]; } @@ -284,7 +284,7 @@ void ConstantValueMap::ClearMaps() { ConstantValueMap::getInstance().inferredShapeData.clear(); ConstantValueMap::getInstance().symbolDimMap.clear(); ConstantValueMap::getInstance().dimSymbolMap.clear(); - ConstantValueMap::getInstance().allGraphInputsStatic = c10::nullopt; + ConstantValueMap::getInstance().allGraphInputsStatic = std::nullopt; ConstantValueMap::getInstance().allGraphInputsReliableComputed = false; } diff --git a/torch/csrc/jit/passes/onnx/function_extraction.cpp b/torch/csrc/jit/passes/onnx/function_extraction.cpp index c545c7aba823..febf412e5d12 100644 --- a/torch/csrc/jit/passes/onnx/function_extraction.cpp +++ b/torch/csrc/jit/passes/onnx/function_extraction.cpp @@ -225,16 +225,16 @@ std::optional FunctionExtractor::FunctionContext::FindAttrName( auto v_it = scope_ctxs_[scope_key_]->env_to_subgraph_.find(ref_n->outputs().at(0)); if (v_it == scope_ctxs_[scope_key_]->env_to_subgraph_.end()) { - return c10::nullopt; + return std::nullopt; } auto* n_in_def = v_it->second->node(); auto n_attr_it = node_attr_to_name_.find(n_in_def); if (n_attr_it == node_attr_to_name_.end()) { - return c10::nullopt; + return std::nullopt; } auto name_it = n_attr_it->second.find(attr.toUnqualString()); if (name_it == n_attr_it->second.end()) { - return c10::nullopt; + return std::nullopt; } return name_it->second; } @@ -301,7 +301,7 @@ std::optional FunctionExtractor::FindCommonAncestor( ScopePtr a, ScopePtr b) { if (!IsValidScope(a) || !IsValidScope(b)) { - return c10::nullopt; + return std::nullopt; } auto diff = @@ -327,20 +327,20 @@ std::optional FunctionExtractor::FindCommonAncestor( } } - return c10::nullopt; + return std::nullopt; } std::optional FunctionExtractor::FindCommonAncestor( const scope_list& scopes) { if (scopes.empty()) { - return c10::nullopt; + return std::nullopt; } std::optional common_ancestor = scopes.at(0); for (const auto& scope : scopes) { common_ancestor = FindCommonAncestor(common_ancestor.value(), scope); if (!common_ancestor.has_value()) { - return c10::nullopt; + return std::nullopt; } } @@ -410,7 +410,7 @@ std::optional FunctionExtractor::InferScope(Node* n) { } } - return c10::nullopt; + return std::nullopt; } std::shared_ptr FunctionExtractor::ConstructFuncGraph( diff --git a/torch/csrc/jit/passes/onnx/list_model_parameters.cpp b/torch/csrc/jit/passes/onnx/list_model_parameters.cpp index b28de0fdee4c..6a1e3b08f3b9 100644 --- a/torch/csrc/jit/passes/onnx/list_model_parameters.cpp +++ b/torch/csrc/jit/passes/onnx/list_model_parameters.cpp @@ -52,7 +52,7 @@ std::deque 
findSubModuleAttr( Value* addParamAsArgument(Function* function, std::string& name, IValue& attr) { auto schema = function->getSchema(); auto args = schema.arguments(); - args.emplace_back(name, nullptr, c10::nullopt, attr); + args.emplace_back(name, nullptr, std::nullopt, attr); auto new_schema = FunctionSchema( schema.name(), schema.overload_name(), diff --git a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.cpp b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.cpp index 6c064b70ae61..cd975d0375fc 100644 --- a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.cpp +++ b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.cpp @@ -46,7 +46,7 @@ Value* ConvertSliceToIndex(Node* slice, Value* size, Node* insertBefore) { aten::slice, {index, graph->insertConstant( - scalar_to_tensor(at::Scalar(0)), c10::nullopt, slice->scope()), + scalar_to_tensor(at::Scalar(0)), std::nullopt, slice->scope()), start, end, step}); diff --git a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp index 611095499045..7a98567a529b 100644 --- a/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp +++ b/torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp @@ -84,7 +84,7 @@ std::optional EncapsulatePatternIntoSubblock(Node* n) { return EncapsulateInplaceIndexPutForONNX(n); } } - return c10::nullopt; + return std::nullopt; } } // namespace jit diff --git a/torch/csrc/jit/passes/onnx/peephole.cpp b/torch/csrc/jit/passes/onnx/peephole.cpp index b468e739a03f..18c31ea65661 100644 --- a/torch/csrc/jit/passes/onnx/peephole.cpp +++ b/torch/csrc/jit/passes/onnx/peephole.cpp @@ -16,7 +16,7 @@ #include #endif -#include +#include #if defined(_MSC_VER) #include @@ -105,14 +105,14 @@ std::optional fusibleExpandTo( at::IntArrayRef from, at::IntArrayRef to) { if (from.size() > to.size()) { - return c10::nullopt; + return std::nullopt; } for (const auto i : c10::irange(from.size())) { auto fdim = from[from.size() - 1 - i]; auto tdim = to[to.size() - 1 - i]; if (fdim != 1 && fdim != tdim) { - return c10::nullopt; + return std::nullopt; } } @@ -168,7 +168,7 @@ void fuseBroadcast(Block* b) { .sizes() .concrete_sizes() .value()); // to - if (axis == c10::nullopt) { + if (axis == std::nullopt) { continue; } diff --git a/torch/csrc/jit/passes/onnx/scalar_type_analysis.cpp b/torch/csrc/jit/passes/onnx/scalar_type_analysis.cpp index 427e5771a9f0..009566499275 100644 --- a/torch/csrc/jit/passes/onnx/scalar_type_analysis.cpp +++ b/torch/csrc/jit/passes/onnx/scalar_type_analysis.cpp @@ -100,7 +100,7 @@ static bool IsImplicitCastSupported(const NodeKind& nodeKind) { static std::optional PromoteScalarTypes( const std::vector& types) { if (types.empty()) { - return c10::nullopt; + return std::nullopt; } auto st = types[0]; for (const auto i : c10::irange(1, types.size())) { @@ -131,9 +131,9 @@ static std::optional PromoteScalarTypesWithCategory( return 0; }; - if (c10::nullopt == typeFromScalar) { + if (std::nullopt == typeFromScalar) { return typeFromTensor; - } else if (c10::nullopt == typeFromTensor) { + } else if (std::nullopt == typeFromTensor) { return typeFromScalar; } @@ -155,7 +155,7 @@ static std::optional InferExpectedScalarType(const Node* n) { if (auto* tensor_type = input->type()->castRaw()) { return tensor_type->scalarType(); } - return c10::nullopt; + return std::nullopt; }; auto emplace_type_from_scalar = [&typesFromTensors, 
&typesFromScalars](at::ScalarType scalar_type) { @@ -252,7 +252,7 @@ static std::optional InferExpectedScalarType(const Node* n) { } }); - std::optional st = c10::nullopt; + std::optional st = std::nullopt; const auto output_st = get_scalar_type(n->output()); if (IsComparisonOp(n->kind())) { @@ -313,7 +313,7 @@ static void UpdateScalarTypeForInputs( for (auto input : n->inputs()) { auto input_tensor_type = input->type()->cast(); auto input_scalar_type = - input_tensor_type ? input_tensor_type->scalarType() : c10::nullopt; + input_tensor_type ? input_tensor_type->scalarType() : std::nullopt; // We skip the 'condition' input (i.e., the first input) in case of // onnx:Where operator. @@ -393,7 +393,7 @@ static void RecoverScalarTypeForOutput( static void LowPrecisionCastNodeForStandardOps(Node* n, int opset_version) { TORCH_INTERNAL_ASSERT(n->outputs().size() == 1); if (n->output()->type()->cast() == nullptr || - n->output()->type()->cast()->scalarType() == c10::nullopt) { + n->output()->type()->cast()->scalarType() == std::nullopt) { // skip LowPrecisionCast if op output type is null. return; } @@ -401,7 +401,7 @@ static void LowPrecisionCastNodeForStandardOps(Node* n, int opset_version) { n->output()->type()->cast()->scalarType().value(); for (size_t i = 0; i < n->inputs().size(); ++i) { if (n->input(i)->type()->cast() == nullptr || - n->input(i)->type()->cast()->scalarType() == c10::nullopt) { + n->input(i)->type()->cast()->scalarType() == std::nullopt) { // skip LowPrecisionCast if any op input type node is null. return; } diff --git a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp index 65d065adeb2b..3691f0bf7b09 100644 --- a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp +++ b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp @@ -98,7 +98,7 @@ c10::ShapeSymbol ONNXDimToShapeSymbol( if (dim.has_dim_value()) { return c10::ShapeSymbol::fromStaticSize(dim.dim_value()); } - std::optional sym = c10::nullopt; + std::optional sym = std::nullopt; if (dim.has_dim_param()) { // If this param is already known, assign the same Symbol. GRAPH_UPDATE("Got dim_param:", dim.dim_param()); @@ -267,7 +267,7 @@ Value* CloneValueFromListConstruct( // is preserved. If the elemtype is Int, insert a onnx::Concat node into // the graph. TypePtr elem = v->type()->castRaw()->getElementType(); - std::optional scalar_type = c10::nullopt; + std::optional scalar_type = std::nullopt; if (elem->cast()) { scalar_type = at::kLong; if (isValidToTransformToONNXConcatNode(v->node())) { @@ -332,7 +332,7 @@ Node* CloneNodeToGraph( // Try to lookup input value and insert it into the graph. // If the input value is unknown, set it to graph input in the new // graph, and copy over metadata, such as datatype and shape. 
- ::std::optional val = ::c10::nullopt; + ::std::optional val = ::std::nullopt; auto v0 = params_dict.find(v->debugName()); if (v0 != params_dict.end()) { val = v0->second.toTensor(); @@ -420,13 +420,13 @@ void ConvertGraphToONNXProto( std::optional ComputeConstantFolding(Node* n, int opset_version) { if (n->inputs().empty()) { - return c10::nullopt; + return std::nullopt; } std::vector inputTensorValues; for (auto i : c10::irange(n->inputs().size())) { if (TensorTypePtr input_type = n->input(i)->type()->cast()) { if (!ConstantValueMap::HasValue(n->input(i)->debugName())) { - return c10::nullopt; + return std::nullopt; } auto tensor_value = ConstantValueMap::GetValue(n->input(i)->debugName()).value(); @@ -434,7 +434,7 @@ std::optional ComputeConstantFolding(Node* n, int opset_version) { } } if (inputTensorValues.size() < n->inputs().size()) { - return c10::nullopt; + return std::nullopt; } try { return onnx_constant_fold::runTorchBackendForOnnx( @@ -443,7 +443,7 @@ std::optional ComputeConstantFolding(Node* n, int opset_version) { auto ex_str = std::string(ex.what()); ex_str = ex_str.substr(0, ex_str.find('\n')); TORCH_WARN("Constant folding in symbolic shape inference fails: ", ex_str); - return c10::nullopt; + return std::nullopt; } } @@ -500,7 +500,7 @@ std::optional<::c10::SymbolicShape> ComputeShapeFromReshape( std::numeric_limits::max() / input_shape.static_size()) { TORCH_WARN( "ComputeShapeFromReshape(), shape_ratio overflows, skip shape inference."); - return c10::nullopt; + return std::nullopt; } else { shape_ratio *= static_cast(input_shape.static_size()); } @@ -523,7 +523,7 @@ std::optional<::c10::SymbolicShape> ComputeShapeFromReshape( } else { auto value = target_shape.value(); if (sym_map.find(value) == sym_map.end()) { - return c10::nullopt; + return std::nullopt; } sym_map[value]--; if (sym_map[value] == 0) { @@ -535,7 +535,7 @@ std::optional<::c10::SymbolicShape> ComputeShapeFromReshape( // sym_map is used to match shape symbols between the input and shape. // If there is a mismatch, the output shape cannot be estimated. if (!sym_map.empty()) { - return c10::nullopt; + return std::nullopt; } TORCH_INTERNAL_ASSERT( @@ -565,7 +565,7 @@ std::optional<::c10::SymbolicShape> ComputeShapeFromExpand( const std::vector& reshape) { for (const auto& it : reshape) { if (it < 0) { - return c10::nullopt; + return std::nullopt; } } std::vector<::c10::ShapeSymbol> final_shape; @@ -607,7 +607,7 @@ std::optional<::c10::SymbolicShape> ComputeShapeFromTile( "ONNX Tile input shapes do not match."); for (const auto& it : reshape) { if (it < 0) { - return c10::nullopt; + return std::nullopt; } } std::vector<::c10::ShapeSymbol> final_shape; @@ -688,7 +688,7 @@ std::optional> GetValueFromListConstructNode( } return lc_node->inputs().size() == shape_size.size() ? 
std::optional>(shape_size) - : c10::nullopt; + : std::nullopt; } void SetShapeValueFromListConstructNode(Node* lc_node) { diff --git a/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp b/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp index 7390bea56e77..d889295dca19 100644 --- a/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp +++ b/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp @@ -655,11 +655,11 @@ void UnpackQuantizedTensorInputs(std::shared_ptr& graph) { auto input_scale = graph->insertInput(index + 1, input_name + "_scale") ->setType(TensorType::create( - at::kDouble, at::kCPU, 0, /*requires_grad=*/c10::nullopt)); + at::kDouble, at::kCPU, 0, /*requires_grad=*/std::nullopt)); auto input_zero_point = graph->insertInput(index + 2, input_name + "_zero_point") ->setType(TensorType::create( - at::kLong, at::kCPU, 0, /*requires_grad=*/c10::nullopt)); + at::kLong, at::kCPU, 0, /*requires_grad=*/std::nullopt)); std::vector converted{input_value, input_scale, input_zero_point}; auto input_tuple = graph->prependNode(graph->createTuple(converted))->output(); diff --git a/torch/csrc/jit/passes/peephole_dict_idioms.cpp b/torch/csrc/jit/passes/peephole_dict_idioms.cpp index d3a5cfa36261..171b787d17b0 100644 --- a/torch/csrc/jit/passes/peephole_dict_idioms.cpp +++ b/torch/csrc/jit/passes/peephole_dict_idioms.cpp @@ -34,7 +34,7 @@ class DictNodeImpl : public DictNodeImplBase { auto key_opt = toIValue(dict_creation_node->input(i)); // Key is not constant if we cannot convert to IValue - if (key_opt == c10::nullopt) { + if (key_opt == std::nullopt) { has_non_const_key_ = true; continue; } @@ -129,7 +129,7 @@ class DictNode { if (impl_ && impl_->contains(key)) { return impl_->get(key); } - return c10::nullopt; + return std::nullopt; } private: @@ -185,14 +185,14 @@ class PeepholeOptimizeDictIdiomsImpl { const DictNode& dict_node = getDictNode(dict_creation_node); auto key_opt = toIValue(key); // Key is not constant if we cannot convert to IValue - if (key_opt == c10::nullopt) { - return c10::nullopt; + if (key_opt == std::nullopt) { + return std::nullopt; } IValue key_ival = *key_opt; if (dict_node.canOptimize()) { return dict_node.getOrNullopt(key_ival); } - return c10::nullopt; + return std::nullopt; } std::optional computeLen(Node* dict_creation_node) { @@ -200,13 +200,13 @@ class PeepholeOptimizeDictIdiomsImpl { if (dict_node.canOptimize()) { return static_cast(dict_node.size()); } - return c10::nullopt; + return std::nullopt; } bool optimizeLen(Node* len_node, Node* creation_node) { if (creation_node->kind() == prim::DictConstruct) { auto len = computeLen(creation_node); - if (len != c10::nullopt) { + if (len != std::nullopt) { WithInsertPoint guard(len_node); len_node->output()->replaceAllUsesWith(graph_->insertConstant(len)); return true; @@ -219,7 +219,7 @@ class PeepholeOptimizeDictIdiomsImpl { if (creation_node->kind() == prim::DictConstruct) { auto key = getitem_node->input(1); auto value = getValueFromDict(creation_node, key); - if (value != c10::nullopt) { + if (value != std::nullopt) { getitem_node->output()->replaceAllUsesWith(*value); return true; } diff --git a/torch/csrc/jit/passes/peephole_list_idioms.cpp b/torch/csrc/jit/passes/peephole_list_idioms.cpp index 9c106e13edf1..f644fe4f1de1 100644 --- a/torch/csrc/jit/passes/peephole_list_idioms.cpp +++ b/torch/csrc/jit/passes/peephole_list_idioms.cpp @@ -21,7 +21,7 @@ static std::optional normalizeIndex(int64_t index, size_t len) { if (index >= 0 && index < static_cast(len)) { return index; } else { - return 
c10::nullopt; + return std::nullopt; } } @@ -136,7 +136,7 @@ struct ListLenRefiner { return maybe_refinement->second; } } - return c10::nullopt; + return std::nullopt; } std::shared_ptr graph_; @@ -199,8 +199,8 @@ struct PeepholeOptimizeListIdiomsImpl { auto step_val = toIValue(slice_node->input(3)); // All args must be constant to apply this optimization. - if (start_val == c10::nullopt || end_val == c10::nullopt || - step_val == c10::nullopt) { + if (start_val == std::nullopt || end_val == std::nullopt || + step_val == std::nullopt) { return false; } diff --git a/torch/csrc/jit/passes/quantization/helper.cpp b/torch/csrc/jit/passes/quantization/helper.cpp index 8a74ec01086a..7eea68eb1065 100644 --- a/torch/csrc/jit/passes/quantization/helper.cpp +++ b/torch/csrc/jit/passes/quantization/helper.cpp @@ -325,7 +325,7 @@ std::optional getClampScalarInputUse(Value* v) { } } } - return c10::nullopt; + return std::nullopt; } void cloneMethod( @@ -503,7 +503,7 @@ std::optional> getFixedQParams(Node* n) { if (isAtenFunc(n, fixed_qparam_funcs)) { return _fixed_qparams_map.at(n->kind()); } - return c10::nullopt; + return std::nullopt; } bool userDefinedCallFunction(Node* n) { @@ -534,13 +534,13 @@ bool nodeQuantizable(Node* n, QuantType quant_type) { bool useQuantizable(const Use& use, QuantType quant_type) { if (quant_type == QuantType::STATIC) { for (const auto& func_input : _observe_inputs_aten_func) { - if (matchAtenFuncToUse(use, func_input.func_name, c10::nullopt)) { + if (matchAtenFuncToUse(use, func_input.func_name, std::nullopt)) { return use.offset == static_cast(func_input.arg_index); } } for (const auto& func_input : _observe_inputs_call_func) { - if (matchCallFuncToUse(use, func_input.func_name, c10::nullopt)) { + if (matchCallFuncToUse(use, func_input.func_name, std::nullopt)) { return use.offset == static_cast(func_input.arg_index); } } @@ -653,7 +653,7 @@ std::optional getInvokedModuleOpt( if (m.attr(p).isModule()) { m = m.attr(p).toModule(); } else { - return c10::nullopt; + return std::nullopt; } } return m; @@ -691,7 +691,7 @@ std::optional getModuleName(Value* value) { if (type && type->name()) { return removeTorchMangle(type->name()->qualifiedName()); } - return c10::nullopt; + return std::nullopt; } static bool is_module( diff --git a/torch/csrc/jit/passes/quantization/helper.h b/torch/csrc/jit/passes/quantization/helper.h index 680e3c7ca43d..21efbff7aa69 100644 --- a/torch/csrc/jit/passes/quantization/helper.h +++ b/torch/csrc/jit/passes/quantization/helper.h @@ -150,7 +150,7 @@ TORCH_API Module getInvokedModule(Module& module, Node* n, Value* self); // Given an CallMethod node, get the module instance corresponding // to the instance Value if the instance is a module, otherwise return -// c10::nullopt +// std::nullopt std::optional getInvokedModuleOpt( const Module& module, Node* n, diff --git a/torch/csrc/jit/passes/quantization/insert_observers.cpp b/torch/csrc/jit/passes/quantization/insert_observers.cpp index 145448210958..f906efacceca 100644 --- a/torch/csrc/jit/passes/quantization/insert_observers.cpp +++ b/torch/csrc/jit/passes/quantization/insert_observers.cpp @@ -49,7 +49,7 @@ void fillQConfigMap( const QConfigDict& qconfig_dict, ModuleQConfigMap& map, const std::string& key = "", - const std::optional& parent_qconfig = c10::nullopt) { + const std::optional& parent_qconfig = std::nullopt) { std::optional qconfig; if (qconfig_dict.find(key) != qconfig_dict.end()) { GRAPH_DEBUG("Got module config for key:", key); @@ -1414,7 +1414,7 @@ 
InsertObserversHelper::insertObserversFor( if (!isObserved(v, block_observed_values)) { block_output_observers.emplace_back(getObserverFor(v)); } else { - block_output_observers.emplace_back(c10::nullopt); + block_output_observers.emplace_back(std::nullopt); } } } diff --git a/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp b/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp index 92fb2fc79bcc..3d24834261d2 100644 --- a/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp +++ b/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp @@ -234,7 +234,7 @@ std::optional findObserverName(Value* v) { return module_instance->node()->s(attr::name); } } - return c10::nullopt; + return std::nullopt; } bool isPlaceholderObserver(Value* observer) { @@ -268,7 +268,7 @@ std::optional getEmbeddingBagObsName( auto op_name = observer_module.attr("custom_op").toStringRef(); return isPlaceholderObserver(observer) ? std::move(op_name) : ""; } - return c10::nullopt; + return std::nullopt; } bool isEmbeddingBagOp( @@ -792,7 +792,7 @@ class InsertQuantDeQuantHelper { const std::vector& inputs, bool is_scalar = false, const std::optional>& qparams_opt = - c10::nullopt); + std::nullopt); bool isQuantized(Value* v) { return quantized_values_.count(v) != 0; @@ -1269,7 +1269,7 @@ std::optional> getDequantizedInputs(Value* output) { return inputs; } } - return c10::nullopt; + return std::nullopt; } void InsertQuantDeQuantHelper::propagateQuantizationOps(Block* block) { diff --git a/torch/csrc/jit/passes/remove_mutation.h b/torch/csrc/jit/passes/remove_mutation.h index be8fc12b11f3..1242555358f7 100644 --- a/torch/csrc/jit/passes/remove_mutation.h +++ b/torch/csrc/jit/passes/remove_mutation.h @@ -11,7 +11,7 @@ namespace jit { struct TORCH_API MutationRemover { MutationRemover( std::shared_ptr graph, - std::optional> mutation_filter = c10::nullopt) + std::optional> mutation_filter = std::nullopt) : mutation_filter_(mutation_filter), aliasDb_(nullptr), graph_(std::move(graph)) {} @@ -71,7 +71,7 @@ TORCH_API bool RemoveListMutation(const std::shared_ptr& graph); // return true if graph is modified TORCH_API bool RemoveTensorMutation( const std::shared_ptr& graph, - std::optional> mutation_filter = c10::nullopt); + std::optional> mutation_filter = std::nullopt); // Replaces in-place aten activation ops with their functional equivalence TORCH_API bool InplaceToFunctionalActivation( diff --git a/torch/csrc/jit/passes/replacement_of_old_operators.cpp b/torch/csrc/jit/passes/replacement_of_old_operators.cpp index 38255ad14187..2d3b3a2aba7f 100644 --- a/torch/csrc/jit/passes/replacement_of_old_operators.cpp +++ b/torch/csrc/jit/passes/replacement_of_old_operators.cpp @@ -30,7 +30,7 @@ struct OldOpsReplacerWithUpgraders { Node* node = graph_it.next(); while (node) { // load the schema name for this op - std::optional schema_name = c10::nullopt; + std::optional schema_name = std::nullopt; if (auto op_schema = node->maybeSchema()) { schema_name = getFullSchemaName(*op_schema); } else { diff --git a/torch/csrc/jit/passes/shape_analysis.cpp b/torch/csrc/jit/passes/shape_analysis.cpp index abc7bb6411db..7290e1936128 100644 --- a/torch/csrc/jit/passes/shape_analysis.cpp +++ b/torch/csrc/jit/passes/shape_analysis.cpp @@ -151,7 +151,7 @@ bool containsTensorType(const TypePtr& t) { } // for each node in the schema with type Tensor, extract the T type -// returns c10::nullopt if any Tensor in the schema does not have a known +// returns std::nullopt if any Tensor in the schema does not have a known // 
shape ignores non-tensor in the list of inputs std::optional> gatherTensorTypes( Node* node, @@ -160,26 +160,26 @@ std::optional> gatherTensorTypes( auto schema_opt = node->maybeSchema(); if (!schema_opt) { - return c10::nullopt; + return std::nullopt; } auto& schema = *schema_opt; auto& args = schema.arguments(); // can't handle varargs primitives because we don't know what should be a // Tensor if (schema.is_vararg()) { - return c10::nullopt; + return std::nullopt; } for (const auto i : c10::irange(args.size())) { if (args[i].type()->isSubtypeOf(*ListType::ofTensors())) { - return c10::nullopt; + return std::nullopt; } else if (args[i].type()->isSubtypeOf(*TensorType::get())) { if (auto type = node->input(i)->type()->cast()) { if (complete && !type->isComplete()) { - return c10::nullopt; + return std::nullopt; } tensor_types.push_back(type); } else { - return c10::nullopt; + return std::nullopt; } } else /* non-tensor type */ { continue; @@ -217,7 +217,7 @@ std::optional getPromotedTypeForArithmeticOp(Node* node) { auto dtt = node->inputs()[i]->type()->expect(); auto inputDtype = dtt->scalarType(); if (!dtt || !inputDtype) { - return c10::nullopt; + return std::nullopt; } if (dtt->dim() && *dtt->dim() > 0) { dimmed = unionScalarTypes(dimmed, *inputDtype); @@ -552,7 +552,7 @@ class ShapePropagator : public PropertyPropBase { tryScalarTypeFromJitType(*input_base_type); if (auto grad_index = node->schema().argumentIndexWithName("dtype")) { auto inp = toIValue(node->inputs().at(*grad_index)); - if (inp == c10::nullopt) { + if (inp == std::nullopt) { return; } else if (!inp->isNone()) { default_type = inp->toScalarType(); @@ -562,14 +562,14 @@ class ShapePropagator : public PropertyPropBase { at::Device default_device = at::kCPU; if (auto device_index = node->schema().argumentIndexWithName("device")) { auto inp = toIValue(node->inputs().at(*device_index)); - if (inp == c10::nullopt) { + if (inp == std::nullopt) { return; } else if (!inp->isNone()) { default_device = inp->toDevice(); } } node->output()->setType(TensorType::create( - default_type, default_device, dims, /*requires_grad=*/c10::nullopt)); + default_type, default_device, dims, /*requires_grad=*/std::nullopt)); } // returns whether any such values were found @@ -612,10 +612,10 @@ class ShapePropagator : public PropertyPropBase { if (typ->isSubtypeOf(*IntType::get()) || typ->isSubtypeOf(*BoolType::get())) { node->output()->setType(TensorType::create( - at::kLong, at::kCPU, 0, /*requires_grad=*/c10::nullopt)); + at::kLong, at::kCPU, 0, /*requires_grad=*/std::nullopt)); } else if (node->input()->type()->isSubtypeOf(*FloatType::get())) { node->output()->setType(TensorType::create( - at::kDouble, at::kCPU, 0, /*requires_grad=*/c10::nullopt)); + at::kDouble, at::kCPU, 0, /*requires_grad=*/std::nullopt)); } return; } @@ -750,7 +750,7 @@ class ShapePropagator : public PropertyPropBase { if (input_node->kind() == prim::ListConstruct) { return input_node->inputs().size(); } - return c10::nullopt; + return std::nullopt; } // is it ok to try to run the op @@ -778,7 +778,7 @@ class ShapePropagator : public PropertyPropBase { auto max_dims = any_type->dim(); for (auto& type : tensor_types) { if (!max_dims || !type->dim()) { - max_dims = c10::nullopt; + max_dims = std::nullopt; } else { max_dims = std::max(*max_dims, *type->dim()); } @@ -787,7 +787,7 @@ class ShapePropagator : public PropertyPropBase { t, any_type->device(), max_dims, - /*requires_grad=*/c10::nullopt); + /*requires_grad=*/std::nullopt); }; using type_vec_t = std::vector; @@ 
-1245,7 +1245,7 @@ class ShapePropagator : public PropertyPropBase { int64_t num_reduced_dim = 0, bool upcast_integer = false, std::optional opt_dtype = - c10::nullopt) -> type_vec_t { + std::nullopt) -> type_vec_t { if (auto type = node->input(0)->type()->cast()) { if (!type->scalarType() || !type->dim()) { return {}; @@ -1418,7 +1418,7 @@ class ShapePropagator : public PropertyPropBase { : maybe_dtype_option->toScalarType()); return {TensorType::create( - dtype, device, dim, /*requires_grad=*/c10::nullopt)}; + dtype, device, dim, /*requires_grad=*/std::nullopt)}; }; static const auto factory_like_with_ndim = [](Node* node, @@ -1448,7 +1448,7 @@ class ShapePropagator : public PropertyPropBase { } return {TensorType::create( - in_type, in_dev, dim, /*requires_grad=*/c10::nullopt)}; + in_type, in_dev, dim, /*requires_grad=*/std::nullopt)}; }; // Requirements: @@ -1748,7 +1748,7 @@ class ShapePropagator : public PropertyPropBase { if (auto dtype_index = node->schema().argumentIndexWithName("dtype")) { auto inp = toIValue(node->inputs().at(*dtype_index)); - if (inp == c10::nullopt) { + if (inp == std::nullopt) { return nullptr; } if (!inp->isNone()) { @@ -1758,7 +1758,7 @@ class ShapePropagator : public PropertyPropBase { if (auto device_index = node->schema().argumentIndexWithName("device")) { auto inp = toIValue(node->inputs().at(*device_index)); - if (inp == c10::nullopt) { + if (inp == std::nullopt) { return nullptr; } if (!inp->isNone()) { @@ -1769,7 +1769,7 @@ class ShapePropagator : public PropertyPropBase { default_type, default_device, type->dim(), - /*requires_grad=*/c10::nullopt)); + /*requires_grad=*/std::nullopt)); } } return nullptr; diff --git a/torch/csrc/jit/passes/symbolic_shape_analysis.cpp b/torch/csrc/jit/passes/symbolic_shape_analysis.cpp index 951c093cefe5..6ac9576a8e2b 100644 --- a/torch/csrc/jit/passes/symbolic_shape_analysis.cpp +++ b/torch/csrc/jit/passes/symbolic_shape_analysis.cpp @@ -61,7 +61,7 @@ namespace jit { // %y.2: Tensor(5, SS(-1), (New Symbolic Shape)) = aten::view(%y, %2) // // x.view([5, y.size(0), inp]) -// will have inputs equal to [5, SS(-1), c10::nullopt] +// will have inputs equal to [5, SS(-1), std::nullopt] struct ShapeArg : public std:: @@ -73,17 +73,17 @@ struct ShapeArg } ShapeArg(int64_t int_value) { - this->first = c10::nullopt; + this->first = std::nullopt; this->second = int_value; } ShapeArg(c10::ShapeSymbol ss) { if (ss.is_static()) { - this->first = c10::nullopt; + this->first = std::nullopt; this->second = ss.value(); } else { this->first = ss; - this->second = c10::nullopt; + this->second = std::nullopt; } } @@ -97,8 +97,8 @@ struct ShapeArg private: ShapeArg() { - this->first = c10::nullopt; - this->second = c10::nullopt; + this->first = std::nullopt; + this->second = std::nullopt; } }; @@ -215,7 +215,7 @@ std::optional normIndex(int64_t index, size_t len) { if (index >= 0 && index < static_cast(len)) { return index; } else { - return c10::nullopt; + return std::nullopt; } } @@ -608,7 +608,7 @@ struct SymbolicShapeOpAnalyzer { std::optional> run( std::vector& inputs) { if (!shape_compute_graph_) { - return c10::nullopt; + return std::nullopt; } inputs_ = inputs; substituteConstantInputs(); @@ -788,7 +788,7 @@ c10::SymbolicShape combine_bounds( c10::SymbolicShape& upper_bound) { // TODO: At some point we might want to add support for dynamic dims TORCH_INTERNAL_ASSERT(lower_bound.rank() == upper_bound.rank()); - if (lower_bound.rank() == c10::nullopt) { + if (lower_bound.rank() == std::nullopt) { return c10::SymbolicShape(); } 
std::vector merged_shapes; @@ -837,14 +837,14 @@ struct SymbolicShapeGraphAnalyzer { return use.user->kind() == aten::cat; })) { GRAPH_DEBUG("Non cat list use ", getHeader(curr)); - return c10::nullopt; + return std::nullopt; } continue; } if (!partial_evaluated_graphs.count(curr)) { GRAPH_DEBUG("No graph ", getHeader(curr)); - return c10::nullopt; + return std::nullopt; } auto outputs = curr->outputs(); @@ -852,13 +852,13 @@ struct SymbolicShapeGraphAnalyzer { auto tt = v->type()->cast(); if (!tt) { GRAPH_DEBUG("Non tensor node", getHeader(curr)); - return c10::nullopt; + return std::nullopt; } auto symbolic_sizes = tt->symbolic_sizes(); // TODO: dont require # of dimensions of tensors set ? if (!symbolic_sizes.rank()) { GRAPH_DEBUG("No rank on output ", getHeader(curr)); - return c10::nullopt; + return std::nullopt; } } auto partial_eval_graph = partial_evaluated_graphs[curr]; @@ -1133,11 +1133,11 @@ calculateSymbolicShapesOnOp( const FunctionSchema* schema, const std::vector& inputs) { auto bounded_graphs = boundedGraphsForSchema(*schema); - auto has_shape_compute = shapeComputeGraphForSchema(*schema) != c10::nullopt; - if (!has_shape_compute && bounded_graphs == c10::nullopt) { + auto has_shape_compute = shapeComputeGraphForSchema(*schema) != std::nullopt; + if (!has_shape_compute && bounded_graphs == std::nullopt) { // Avoid doing all this work for functions that don't have a // supported schema - return c10::nullopt; + return std::nullopt; } if (auto cached_ret_vec = get_cached_shape_function(schema, inputs)) { @@ -1172,7 +1172,7 @@ calculateSymbolicShapesOnOp( cache_shape_function(schema, inputs, merged_res); return merged_res; } - return c10::nullopt; + return std::nullopt; } auto op_analyzer = SymbolicShapeOpAnalyzer(schema); diff --git a/torch/csrc/jit/passes/symbolic_shape_cache.cpp b/torch/csrc/jit/passes/symbolic_shape_cache.cpp index 4a742b3f5f63..d01d11983a62 100644 --- a/torch/csrc/jit/passes/symbolic_shape_cache.cpp +++ b/torch/csrc/jit/passes/symbolic_shape_cache.cpp @@ -120,7 +120,7 @@ get_cached_shape_function( get_cache_key(schema, arg_vec, ss_map, /* deep_copy */ false); auto cached_ret_vec = shapeCache.Get(cache_key); if (cached_ret_vec == nullptr) { - return c10::nullopt; + return std::nullopt; } // Decanonicalize the return values auto inverse_ss_map = std::unordered_map(); @@ -148,7 +148,7 @@ void CanonicalizedSymbolicShape::init( std::unordered_map& ss_map) { auto sizes = orig_shape.sizes(); if (!sizes) { - values_ = c10::nullopt; + values_ = std::nullopt; return; } values_ = std::vector(); diff --git a/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp b/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp index 9c213f2480d5..3cf23732a9ad 100644 --- a/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp +++ b/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp @@ -190,7 +190,7 @@ TryGeneralizeInputDimensionsToSymbolicShapes( } auto tt = v->type()->expectRef(); if (!tt.sizes().isComplete() || !tt.strides().isComplete()) { - return c10::nullopt; + return std::nullopt; } input_striding.push_back(summarizeInputStrides(tt)); std::vector shape_vec = *tt.symbolic_sizes().sizes(); diff --git a/torch/csrc/jit/passes/tensorexpr_fuser.cpp b/torch/csrc/jit/passes/tensorexpr_fuser.cpp index c9b9b974600d..684f47f4efb9 100644 --- a/torch/csrc/jit/passes/tensorexpr_fuser.cpp +++ b/torch/csrc/jit/passes/tensorexpr_fuser.cpp @@ -782,7 +782,7 @@ class TensorExprFuser { std::optional tryMerge(Node* fusion_group, Node* to_merge) { if (!canMerge(fusion_group, 
to_merge)) { - return c10::nullopt; + return std::nullopt; } std::vector nodes_to_merge = {to_merge}; @@ -799,7 +799,7 @@ class TensorExprFuser { GRAPH_UPDATE("Trying to move node next to fusion group: ", getHeader(n)); if (!aliasDb_->moveBeforeTopologicallyValid(n, move_point)) { GRAPH_UPDATE("Failed to move because of AliasDB checks!"); - return c10::nullopt; + return std::nullopt; } move_point = n; } diff --git a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp index 4c081200715a..6082058952ce 100644 --- a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp +++ b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp @@ -196,7 +196,7 @@ std::optional toIValueProp(const Value* v) { genericList.push_back(*elem); } else { // One of the list elements isn't constant. - return c10::nullopt; + return std::nullopt; } } @@ -213,7 +213,7 @@ std::optional toIValueProp(const Value* v) { return IValue( fmap(genericList, [](const IValue& v) { return v.toTensor(); })); } else { - return c10::nullopt; + return std::nullopt; } } @@ -222,7 +222,7 @@ std::optional toIValueProp(const Value* v) { return maybe_stack->at(0); } } - return c10::nullopt; + return std::nullopt; } // batch_norm and instance_norm have incorrect annotations, because diff --git a/torch/csrc/jit/passes/utils/memory_dag.h b/torch/csrc/jit/passes/utils/memory_dag.h index da5584f9d4bd..1d2292fe90c5 100644 --- a/torch/csrc/jit/passes/utils/memory_dag.h +++ b/torch/csrc/jit/passes/utils/memory_dag.h @@ -2,12 +2,12 @@ #include #include -#include #include #include #include #include #include +#include #include #include #include diff --git a/torch/csrc/jit/passes/utils/subgraph_utils.cpp b/torch/csrc/jit/passes/utils/subgraph_utils.cpp index 377621c04b6d..f4dfc4ce99c9 100644 --- a/torch/csrc/jit/passes/utils/subgraph_utils.cpp +++ b/torch/csrc/jit/passes/utils/subgraph_utils.cpp @@ -429,7 +429,7 @@ Node* createSingletonSubgraphAndUpdateAliasing( Symbol subgraphKind, AliasDb& db) { return executeSubgraphMergeAndUpdateAliasing( - to_merge, c10::nullopt, db, [&]() { + to_merge, std::nullopt, db, [&]() { return createSingletonSubgraph(to_merge, subgraphKind); }); } diff --git a/torch/csrc/jit/python/init.cpp b/torch/csrc/jit/python/init.cpp index 1bfc6c94a707..862aaba7d7dc 100644 --- a/torch/csrc/jit/python/init.cpp +++ b/torch/csrc/jit/python/init.cpp @@ -157,7 +157,7 @@ std::optional toTypeInferredIValueOptional(py::handle input) { try { return toTypeInferredIValue(input); } catch (const c10::Error& e) { - return c10::nullopt; + return std::nullopt; } } } // anonymous namespace @@ -219,7 +219,7 @@ void initJITBindings(PyObject* module) { "_jit_shape_compute_graph_for_node", [](Node* n) -> std::optional> { if (!n->maybeSchema()) { - return c10::nullopt; + return std::nullopt; } return shapeComputeGraphForSchema(n->schema()); }) @@ -227,7 +227,7 @@ void initJITBindings(PyObject* module) { "_jit_decomposition_graph_for_node", [](Node* n) -> std::optional> { if (!n->maybeSchema()) { - return c10::nullopt; + return std::nullopt; } return GetDecomposition(n->schema()); }) @@ -1165,7 +1165,7 @@ void initJITBindings(PyObject* module) { c10::kCPU, std::vector{1}, std::vector{1}, - c10::nullopt)); + std::nullopt)); } } }) @@ -1680,7 +1680,7 @@ void initJITBindings(PyObject* module) { [op, symbol, allow_numbers_as_tensors]( c10::DispatchKey dk_, py::args args, py::kwargs kwargs) { std::optional dk = - c10::make_optional(dk_); + std::make_optional(dk_); ToIValueAllowNumbersAsTensors 
g(allow_numbers_as_tensors); return _get_operation_for_overload_or_packet( {op}, symbol, args, kwargs, /*is_overload*/ true, dk); diff --git a/torch/csrc/jit/python/module_python.h b/torch/csrc/jit/python/module_python.h index 5c7fbbb42d6c..b1ddf6f37c67 100644 --- a/torch/csrc/jit/python/module_python.h +++ b/torch/csrc/jit/python/module_python.h @@ -14,7 +14,7 @@ inline std::optional as_module(py::handle obj) { if (py::isinstance(obj, ScriptModule)) { return py::cast(obj.attr("_c")); } - return c10::nullopt; + return std::nullopt; } inline std::optional as_object(py::handle obj) { @@ -29,7 +29,7 @@ inline std::optional as_object(py::handle obj) { if (py::isinstance(obj, RecursiveScriptClass)) { return py::cast(obj.attr("_c")); } - return c10::nullopt; + return std::nullopt; } } // namespace torch::jit diff --git a/torch/csrc/jit/python/pybind_utils.cpp b/torch/csrc/jit/python/pybind_utils.cpp index a731640223c0..2dbcfee423ae 100644 --- a/torch/csrc/jit/python/pybind_utils.cpp +++ b/torch/csrc/jit/python/pybind_utils.cpp @@ -754,7 +754,7 @@ std::pair, Stack> getOpWithStack( std::shared_ptr op = operations.at(0); // Create a stack full of the arguments and keyword arguments. stack = createStackForSchema( - op->schema(), std::move(args), kwargs, c10::nullopt); + op->schema(), std::move(args), kwargs, std::nullopt); return std::make_pair(std::move(op), std::move(stack)); } else { @@ -762,7 +762,7 @@ std::pair, Stack> getOpWithStack( std::shared_ptr found_op = nullptr; for (const auto& op : operations) { try { - stack = createStackForSchema(op->schema(), args, kwargs, c10::nullopt); + stack = createStackForSchema(op->schema(), args, kwargs, std::nullopt); found_op = op; break; } catch (schema_match_error& error) { diff --git a/torch/csrc/jit/python/pybind_utils.h b/torch/csrc/jit/python/pybind_utils.h index 23fda5b0d784..cd8a7335167d 100644 --- a/torch/csrc/jit/python/pybind_utils.h +++ b/torch/csrc/jit/python/pybind_utils.h @@ -36,8 +36,8 @@ #include #endif #include -#include #include +#include #include #include @@ -62,7 +62,7 @@ void clear_registered_instances(void* ptr); TORCH_PYTHON_API IValue toIValue( py::handle obj, const TypePtr& type, - std::optional N = c10::nullopt); + std::optional N = std::nullopt); TORCH_PYTHON_API py::object toPyObject(IValue ivalue); @@ -111,7 +111,7 @@ struct VISIBILITY_HIDDEN PythonFutureWrapper explicit PythonFutureWrapper( c10::intrusive_ptr fut, - std::optional unwrap_func = c10::nullopt) + std::optional unwrap_func = std::nullopt) : fut(std::move(fut)), unwrap_func(std::move(unwrap_func)) {} explicit PythonFutureWrapper(const PythonFutureWrapper&) = delete; @@ -1205,7 +1205,7 @@ inline std::optional maybeTorchFunctionDispatch( /*module_name=*/qualname.prefix().c_str())); } - return c10::nullopt; + return std::nullopt; } inline py::object invokeScriptFunctionFromPython( @@ -1219,7 +1219,7 @@ inline py::object invokeScriptFunctionFromPython( callee, args, kwargs, - /*self=*/c10::nullopt, + /*self=*/std::nullopt, [&](Graph& graph, const MatchedSchema& match) { return graph.insertFunctionCall(&callee, match); }); @@ -1255,7 +1255,7 @@ TORCH_PYTHON_API py::object invokeOperatorFromPython( const std::vector>& operations, py::args args, const py::kwargs& kwargs, - std::optional dk = c10::nullopt); + std::optional dk = std::nullopt); TORCH_PYTHON_API std::optional _maybe_handle_torch_function( const std::string& ns, @@ -1276,6 +1276,6 @@ TORCH_PYTHON_API py::object _get_operation_for_overload_or_packet( py::args args, const py::kwargs& kwargs, bool is_overload, - 
std::optional dk = c10::nullopt); + std::optional dk = std::nullopt); } // namespace torch::jit diff --git a/torch/csrc/jit/python/python_ir.cpp b/torch/csrc/jit/python/python_ir.cpp index 79957999f543..c80208b9d00d 100644 --- a/torch/csrc/jit/python/python_ir.cpp +++ b/torch/csrc/jit/python/python_ir.cpp @@ -138,17 +138,17 @@ std::optional ConcretePythonOp::autogradFunction() const { auto r = py::getattr(obj, "__self__", py::none()); if (r.is_none()) - return c10::nullopt; + return std::nullopt; auto apply = py::getattr(r, "apply", py::none()); if (apply.is_none()) - return c10::nullopt; + return std::nullopt; auto c = PyObject_RichCompareBool(apply.ptr(), obj.ptr(), Py_NE); if (PyErr_Occurred()) throw py::error_already_set(); if (c) - return c10::nullopt; + return std::nullopt; return THPObjectPtr(r.release().ptr()); } diff --git a/torch/csrc/jit/python/python_ivalue.h b/torch/csrc/jit/python/python_ivalue.h index 4cdc8e430b9a..6d0bf1afc3b0 100644 --- a/torch/csrc/jit/python/python_ivalue.h +++ b/torch/csrc/jit/python/python_ivalue.h @@ -31,7 +31,7 @@ struct C10_EXPORT ConcretePyObjectHolder final : PyObjectHolder { return torch::jit::tryToInferType(py_obj_); } - IValue toIValue(const TypePtr& type, std::optional N = c10::nullopt) + IValue toIValue(const TypePtr& type, std::optional N = std::nullopt) override { pybind11::gil_scoped_acquire ag; return torch::jit::toIValue(py_obj_, type, N); diff --git a/torch/csrc/jit/python/python_list.h b/torch/csrc/jit/python/python_list.h index b5bb88b3aeb2..f73cb5048529 100644 --- a/torch/csrc/jit/python/python_list.h +++ b/torch/csrc/jit/python/python_list.h @@ -4,10 +4,10 @@ #include #include #include -#include #include #include #include +#include #include namespace torch::jit { @@ -175,7 +175,7 @@ class ScriptList final { // Remove and return the element at the specified index from the list. If no // index is passed, the last element is removed and returned. - IValue pop(std::optional idx = c10::nullopt) { + IValue pop(std::optional idx = std::nullopt) { IValue ret; if (idx) { diff --git a/torch/csrc/jit/python/python_sugared_value.cpp b/torch/csrc/jit/python/python_sugared_value.cpp index d6f014759c05..c5d48f5cbe74 100644 --- a/torch/csrc/jit/python/python_sugared_value.cpp +++ b/torch/csrc/jit/python/python_sugared_value.cpp @@ -28,7 +28,7 @@ std::optional as_function(const py::object& obj) { if (py::isinstance(obj)) { return py::cast(obj); } - return c10::nullopt; + return std::nullopt; } FunctionSchema PythonValue::getSchema( @@ -66,8 +66,8 @@ FunctionSchema PythonValue::getSchema( args.emplace_back( /*name=*/*names_it, /*type=*/TensorType::get(), - /*N=*/c10::nullopt, - /*default_value=*/c10::nullopt, + /*N=*/std::nullopt, + /*default_value=*/std::nullopt, /*kwarg_only=*/false); } @@ -95,8 +95,8 @@ FunctionSchema PythonValue::getSchema( args.emplace_back( /*name=*/*names_it, /*type=*/std::move(*types_it), - /*N=*/c10::nullopt, - /*default_value=*/c10::nullopt, + /*N=*/std::nullopt, + /*default_value=*/std::nullopt, /*kwarg_only=*/false); } rets.push_back(Argument("0", std::move(ret_type), {}, {}, false)); @@ -240,10 +240,10 @@ std::shared_ptr CUDAPythonModuleValue::attr( // these APIs. 
if (field == "current_device" || field == "set_device") { return std::make_shared( - Symbol::cuda("_" + field), c10::nullopt); + Symbol::cuda("_" + field), std::nullopt); } else { return std::make_shared( - Symbol::cuda(field), c10::nullopt); + Symbol::cuda(field), std::nullopt); } } @@ -673,7 +673,7 @@ std::shared_ptr ModuleValue::tryGetAttr( if (const auto fnAttr = concreteType_->findFunctionAttribute(field)) { return std::make_shared(*fnAttr); } else if (const auto builtin = concreteType_->findBuiltinFunction(field)) { - return std::make_shared(*builtin, /*self=*/c10::nullopt); + return std::make_shared(*builtin, /*self=*/std::nullopt); } // 5. Check if it's an attribute of the original Python class that this @@ -1263,7 +1263,7 @@ std::shared_ptr toSugaredValue( py::module::import("torch.jit._builtins").attr("_find_builtin")(obj); if (!builtin_name.is_none()) { return std::make_shared( - Symbol::fromQualString(py::str(builtin_name)), c10::nullopt); + Symbol::fromQualString(py::str(builtin_name)), std::nullopt); } if (py::cast(py::module::import("torch._jit_internal") diff --git a/torch/csrc/jit/python/python_sugared_value.h b/torch/csrc/jit/python/python_sugared_value.h index cb397796c9f5..508d95c8c538 100644 --- a/torch/csrc/jit/python/python_sugared_value.h +++ b/torch/csrc/jit/python/python_sugared_value.h @@ -32,7 +32,7 @@ std::optional as_function(const py::object& obj); struct VISIBILITY_HIDDEN PythonValue : public SugaredValue { PythonValue( py::object the_self, - std::optional rcb = c10::nullopt, + std::optional rcb = std::nullopt, Value* module_self = nullptr) : self(std::move(the_self)), rcb(std::move(rcb)), diff --git a/torch/csrc/jit/python/python_tree_views.cpp b/torch/csrc/jit/python/python_tree_views.cpp index 50d18b908107..0cd93887471e 100644 --- a/torch/csrc/jit/python/python_tree_views.cpp +++ b/torch/csrc/jit/python/python_tree_views.cpp @@ -14,7 +14,7 @@ namespace torch::jit { std::optional maybeConvertToString(const py::object& obj) { if (obj.is_none()) { - return c10::nullopt; + return std::nullopt; } std::stringstream ss; ss << py::str(obj); @@ -180,7 +180,7 @@ void initTreeViewBindings(PyObject* module) { return std::optional(property.setter().get().name()); } - return std::optional(c10::nullopt); + return std::optional(std::nullopt); }); py::class_(m, "ClassDef") diff --git a/torch/csrc/jit/python/script_init.cpp b/torch/csrc/jit/python/script_init.cpp index c46762a88615..565f0b163638 100644 --- a/torch/csrc/jit/python/script_init.cpp +++ b/torch/csrc/jit/python/script_init.cpp @@ -220,7 +220,7 @@ std::optional tryCalculateDefaultParam( return toIValue(def_value, arg.type()); } } catch (...) 
{ - return c10::nullopt; + return std::nullopt; } } @@ -702,13 +702,13 @@ void pyCompilationUnitDefine( const ResolutionCallback* rcb, const uint32_t _frames_up) { if (rcb && *rcb) { - cu.define(c10::nullopt, src, pythonResolver(*rcb), nullptr); + cu.define(std::nullopt, src, pythonResolver(*rcb), nullptr); } else { py::object py_default_rcb = py::module::import("torch._jit_internal") .attr("createResolutionCallbackFromFrame")(_frames_up); auto default_rcb = py_default_rcb.cast(); - cu.define(c10::nullopt, src, pythonResolver(default_rcb), nullptr); + cu.define(std::nullopt, src, pythonResolver(default_rcb), nullptr); } } @@ -1315,7 +1315,7 @@ void initJitScriptBindings(PyObject* module) { "find_method", [](mobile::Module& m, const std::string& method_name) { auto method = m.find_method(method_name); - return method != c10::nullopt; + return method != std::nullopt; }, py::arg("method_name")) .def( @@ -1372,7 +1372,7 @@ void initJitScriptBindings(PyObject* module) { return std::optional( StrongFunctionPtr(std::move(self), fn)); } else { - return std::optional(c10::nullopt); + return std::optional(std::nullopt); } }) .def( @@ -2124,7 +2124,7 @@ void initJitScriptBindings(PyObject* module) { m.def( "_get_graph_executor_optimize", - [](std::optional new_setting = c10::nullopt) { + [](std::optional new_setting = std::nullopt) { bool old_value = getGraphExecutorOptimize(); if (new_setting) { setGraphExecutorOptimize(*new_setting); diff --git a/torch/csrc/jit/runtime/autodiff.cpp b/torch/csrc/jit/runtime/autodiff.cpp index 3987521f658f..047a35e417ff 100644 --- a/torch/csrc/jit/runtime/autodiff.cpp +++ b/torch/csrc/jit/runtime/autodiff.cpp @@ -134,11 +134,11 @@ static std::optional> build_script_grad( auto graph = node->owningGraph(); auto maybe_schema = node->maybeSchema(); if (!maybe_schema) { - return c10::nullopt; + return std::nullopt; } auto compiled_graphs = gradientInfoForSchema(*maybe_schema); if (!compiled_graphs) { - return c10::nullopt; + return std::nullopt; } // Use forward graph to replace node in grad_desc.f value_list new_outputs; diff --git a/torch/csrc/jit/runtime/decomposition_registry.cpp b/torch/csrc/jit/runtime/decomposition_registry.cpp index de205ed834c3..989a48bf06ab 100644 --- a/torch/csrc/jit/runtime/decomposition_registry.cpp +++ b/torch/csrc/jit/runtime/decomposition_registry.cpp @@ -63,7 +63,7 @@ void loadDecompositionFunctions() { [&](const std::string& name) -> std::shared_ptr { return src; }, 1); compilation_unit->define( - c10::nullopt, GetSerializedDecompositions(), resolver, nullptr); + std::nullopt, GetSerializedDecompositions(), resolver, nullptr); loadModule(*compilation_unit); } @@ -117,7 +117,7 @@ std::optional> GetDecomposition( } GRAPH_DEBUG("Could not find schema: ", schema); - return c10::nullopt; + return std::nullopt; } std::optional GetDecompositionFunction( @@ -127,7 +127,7 @@ std::optional GetDecompositionFunction( GRAPH_DEBUG("Trying to find schema: ", schema); if (cache_it == schema_to_function.end()) { GRAPH_DEBUG("Could not find schema: ", schema); - return c10::nullopt; + return std::nullopt; } auto& func = toGraphFunction(*cache_it->second); // Simple Executor: diff --git a/torch/csrc/jit/runtime/graph_executor.h b/torch/csrc/jit/runtime/graph_executor.h index fce8d4a02e66..971e45e818ca 100644 --- a/torch/csrc/jit/runtime/graph_executor.h +++ b/torch/csrc/jit/runtime/graph_executor.h @@ -87,7 +87,7 @@ struct TORCH_API GraphExecutor { // current global fusion strategy settings. 
const ExecutionPlan& getPlanFor( Stack& inputs, - std::optional remaining_bailout_depth = c10::nullopt); + std::optional remaining_bailout_depth = std::nullopt); GraphExecutorState getDebugState(); void debugFlushCompilationCache(); diff --git a/torch/csrc/jit/runtime/graph_executor_impl.h b/torch/csrc/jit/runtime/graph_executor_impl.h index 22a563f00be2..70069ac1907b 100644 --- a/torch/csrc/jit/runtime/graph_executor_impl.h +++ b/torch/csrc/jit/runtime/graph_executor_impl.h @@ -78,7 +78,7 @@ struct GraphExecutorImplBase { virtual const ExecutionPlan& getPlanFor( Stack& stack, - std::optional remaining_bailout_depth = c10::nullopt) = 0; + std::optional remaining_bailout_depth = std::nullopt) = 0; virtual GraphExecutorState getDebugState() = 0; virtual ~GraphExecutorImplBase() = default; diff --git a/torch/csrc/jit/runtime/interpreter.cpp b/torch/csrc/jit/runtime/interpreter.cpp index 18231173dd70..0f6eb900e361 100644 --- a/torch/csrc/jit/runtime/interpreter.cpp +++ b/torch/csrc/jit/runtime/interpreter.cpp @@ -169,7 +169,7 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target { } void enterFrame(const Code& code, size_t base_pointer) { - frames.emplace_back(Frame{code.pImpl, 0, base_pointer, c10::nullopt}); + frames.emplace_back(Frame{code.pImpl, 0, base_pointer, std::nullopt}); registers.resize(registers.size() + code.pImpl->register_size_); } @@ -181,7 +181,7 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target { void callFunction( Function& f, Stack& stack, - std::optional bailOut = c10::nullopt, + std::optional bailOut = std::nullopt, bool next = true) { bool newFrame = f.call(stack, bailOut, [&](const Code& code) { enterFrame(code, stack.size() - code.num_inputs()); @@ -1244,7 +1244,7 @@ void InterpreterContinuation::operator()() { auto prev_dist_id = DistAutogradContainer::currentContextId(); DistAutogradContainer::forceCurrentContextId(dist_autograd_context_id_); #endif - if (tls_state_ != c10::nullopt) { + if (tls_state_ != std::nullopt) { at::ThreadLocalStateGuard g(*tls_state_); state.runAsync(stack); } else { diff --git a/torch/csrc/jit/runtime/interpreter.h b/torch/csrc/jit/runtime/interpreter.h index a28b1eb93526..ffafd3ab096a 100644 --- a/torch/csrc/jit/runtime/interpreter.h +++ b/torch/csrc/jit/runtime/interpreter.h @@ -1,6 +1,6 @@ #pragma once -#include #include +#include #include #include @@ -124,7 +124,7 @@ struct InterpreterContinuation { InterpreterState state_, Stack stack_, int64_t dist_autograd_context_id = 0, - std::optional tls_state = c10::nullopt) + std::optional tls_state = std::nullopt) : state(std::move(state_)), stack(std::move(stack_)), tls_state_(std::move(tls_state)) @@ -140,7 +140,7 @@ struct InterpreterContinuation { private: InterpreterState state; Stack stack; - std::optional tls_state_ = c10::nullopt; + std::optional tls_state_ = std::nullopt; #ifdef USE_DISTRIBUTED int64_t dist_autograd_context_id_; #endif diff --git a/torch/csrc/jit/runtime/jit_exception.h b/torch/csrc/jit/runtime/jit_exception.h index 34c3ebd6fca8..cb4f572a8bd3 100644 --- a/torch/csrc/jit/runtime/jit_exception.h +++ b/torch/csrc/jit/runtime/jit_exception.h @@ -2,8 +2,8 @@ #include -#include #include +#include #include namespace torch::jit { @@ -11,8 +11,8 @@ namespace torch::jit { struct TORCH_API JITException : public std::runtime_error { explicit JITException( const std::string& msg, - std::optional python_class_name = c10::nullopt, - std::optional original_msg = c10::nullopt); + std::optional python_class_name = std::nullopt, + std::optional original_msg = 
std::nullopt); std::optional getPythonClassName() const { return python_class_name_; diff --git a/torch/csrc/jit/runtime/operator.h b/torch/csrc/jit/runtime/operator.h index dbc2638457c0..2e609f18ecc0 100644 --- a/torch/csrc/jit/runtime/operator.h +++ b/torch/csrc/jit/runtime/operator.h @@ -322,7 +322,7 @@ std::optional OperatorGenerator( torch::detail::SelectiveStr schema_str, Func&& op, AliasAnalysisKind alias_analysis) { - return c10::nullopt; + return std::nullopt; } template diff --git a/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp b/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp index 48c7a1959ab2..54ec8e8441fa 100644 --- a/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp +++ b/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp @@ -1,6 +1,5 @@ #include -#include #include #include #include @@ -37,6 +36,7 @@ #include #include #include +#include C10_DEFINE_bool( torch_jit_enable_new_executor, @@ -118,11 +118,11 @@ static FusionStrategy getInitialStrategy() { } // defer initial value so that we can load in gflags -static std::optional fusion_strategy = c10::nullopt; +static std::optional fusion_strategy = std::nullopt; FusionStrategy getFusionStrategy() { std::lock_guard guard(fusion_strategy_lock); - if (fusion_strategy == c10::nullopt) { + if (fusion_strategy == std::nullopt) { fusion_strategy = getInitialStrategy(); } return *fusion_strategy; @@ -130,7 +130,7 @@ FusionStrategy getFusionStrategy() { FusionStrategy setFusionStrategy(FusionStrategy& strategy) { std::lock_guard guard(fusion_strategy_lock); - if (fusion_strategy == c10::nullopt) { + if (fusion_strategy == std::nullopt) { fusion_strategy = getInitialStrategy(); } FusionStrategy old_strategy = *fusion_strategy; @@ -320,7 +320,7 @@ static bool guardDifferentiableGraph(Node* dnode) { // we inline the differentiable graph as a fallback // ideally we would set this up for re-profiling UpdateDifferentiableGraphRequiresGrad( - dnode->g(attr::Subgraph), c10::nullopt); + dnode->g(attr::Subgraph), std::nullopt); SubgraphUtils::unmergeSubgraph(dnode); return false; } diff --git a/torch/csrc/jit/runtime/register_ops_utils.h b/torch/csrc/jit/runtime/register_ops_utils.h index 3386bc3e4a49..ebdc5ba205cd 100644 --- a/torch/csrc/jit/runtime/register_ops_utils.h +++ b/torch/csrc/jit/runtime/register_ops_utils.h @@ -878,6 +878,6 @@ struct OperatorGeneratorArgs { TORCH_API at::Generator make_generator_for_device( c10::Device device, - std::optional seed = c10::nullopt); + std::optional seed = std::nullopt); } // namespace torch::jit diff --git a/torch/csrc/jit/runtime/register_prim_ops.cpp b/torch/csrc/jit/runtime/register_prim_ops.cpp index bb9c08465c0a..f6eccede28ba 100644 --- a/torch/csrc/jit/runtime/register_prim_ops.cpp +++ b/torch/csrc/jit/runtime/register_prim_ops.cpp @@ -1,6 +1,5 @@ #include #include -#include #include #include #include @@ -8,6 +7,7 @@ #include #include #include +#include #include #include @@ -1807,7 +1807,7 @@ static const std::vector stringOpGenArgs{ std::string::size_type prev_pos = 0; std::string::size_type pos = 0; c10::List splits; - if (ivalue == c10::nullopt) { + if (ivalue == std::nullopt) { // if separator is not specified, // a different splitting algorithm is applied as Python splits = splitNoneSeparator(string); @@ -2463,8 +2463,8 @@ static const std::vector opGenArgs1{ // NOLINTNEXTLINE(cppcoreguidelines-init-variables) bool copy; pop(stack, self, non_blocking, copy); - std::optional device = c10::nullopt; - std::optional scalarType = c10::nullopt; + std::optional 
device = std::nullopt; + std::optional scalarType = std::nullopt; push( stack, to_dispatch(self, device, scalarType, non_blocking, copy)); }, diff --git a/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp b/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp index 4359b852b6a3..035a5d35c463 100644 --- a/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp +++ b/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp @@ -430,13 +430,13 @@ at::Tensor interpolate( std::optional align_corners, std::optional recompute_scale_factor) { if ((mode == "nearest" || mode == "area")) { - if (align_corners != c10::nullopt) { + if (align_corners != std::nullopt) { throw std::runtime_error( "align_corners option can only be set with the " "interpolating modes: linear | bilinear | bicubic | trilinear"); } } else { - if (align_corners == c10::nullopt) { + if (align_corners == std::nullopt) { TORCH_WARN( "Default upsampling behavior when mode=", mode, @@ -451,7 +451,7 @@ at::Tensor interpolate( double scale_factors_2 = -1.0; double scale_factors_3 = -1.0; - if (!scale_factors.isNone() && recompute_scale_factor == c10::nullopt) { + if (!scale_factors.isNone() && recompute_scale_factor == std::nullopt) { recompute_scale_factor = true; bool warn_recompute_scale_factor = false; @@ -510,7 +510,7 @@ at::Tensor interpolate( return at::upsample_nearest1d( input, _output_size(input, 1, size, scale_factors), - c10::make_optional(scale_factors_1)); + std::make_optional(scale_factors_1)); if (input_dim == dim2d && mode == "nearest") return at::upsample_nearest2d( input, @@ -538,7 +538,7 @@ at::Tensor interpolate( input, _output_size(input, 1, size, scale_factors), *align_corners, - c10::make_optional(scale_factors_1)); + std::make_optional(scale_factors_1)); if (input_dim == dim1d && mode == "bilinear") throw std::runtime_error("Got 3D input, but bilinear mode needs 4D input"); if (input_dim == dim1d && mode == "bicubic") @@ -646,7 +646,7 @@ void upsample_nearest_op(Stack& stack) { pop(stack, input, size, scale_factor_int); IValue scale_factor_double = convert_scale_factor_to_double(scale_factor_int); at::Tensor res = interpolate( - input, size, scale_factor_double, "nearest", c10::nullopt, c10::nullopt); + input, size, scale_factor_double, "nearest", std::nullopt, std::nullopt); push(stack, std::move(res)); } @@ -664,7 +664,7 @@ void upsample_op(Stack& stack) { scale_factor_double, mode, align_corners.toOptional(), - c10::nullopt); + std::nullopt); push(stack, std::move(res)); } @@ -675,7 +675,7 @@ void upsample_bilinear_op(Stack& stack) { pop(stack, input, size, scale_factor_int); IValue scale_factor_double = convert_scale_factor_to_double(scale_factor_int); at::Tensor res = interpolate( - input, size, scale_factor_double, "bilinear", true, c10::nullopt); + input, size, scale_factor_double, "bilinear", true, std::nullopt); push(stack, std::move(res)); } diff --git a/torch/csrc/jit/runtime/register_special_ops.cpp b/torch/csrc/jit/runtime/register_special_ops.cpp index 5b8c70c404ae..63fdee6de804 100644 --- a/torch/csrc/jit/runtime/register_special_ops.cpp +++ b/torch/csrc/jit/runtime/register_special_ops.cpp @@ -301,9 +301,9 @@ RegisterOperators reg({ at::native::scalar_tensor( scalar_val, typeMetaToScalarType(c10::get_default_dtype()), - c10::nullopt /* layout */, + std::nullopt /* layout */, at::kCPU, - c10::nullopt /* pin_memory*/)) + std::nullopt /* pin_memory*/)) DEFINE_TORCH_TENSOR_OP( int, int64_t, @@ -314,9 +314,9 @@ RegisterOperators reg({ at::native::scalar_tensor( scalar_val, 
typeMetaToScalarType(c10::get_default_complex_dtype()), - c10::nullopt /* layout */, + std::nullopt /* layout */, at::kCPU, - c10::nullopt /* pin_memory */)) + std::nullopt /* pin_memory */)) // reference python implementation: internal_new_from_data in // tensor_new.cpp diff --git a/torch/csrc/jit/runtime/simple_graph_executor_impl.cpp b/torch/csrc/jit/runtime/simple_graph_executor_impl.cpp index c1dbbddc6d33..fd908b48ee04 100644 --- a/torch/csrc/jit/runtime/simple_graph_executor_impl.cpp +++ b/torch/csrc/jit/runtime/simple_graph_executor_impl.cpp @@ -1,8 +1,8 @@ #include -#include #include #include +#include namespace torch::jit { diff --git a/torch/csrc/jit/runtime/static/fusion.cpp b/torch/csrc/jit/runtime/static/fusion.cpp index ffac37efc9b7..86925200b7f4 100644 --- a/torch/csrc/jit/runtime/static/fusion.cpp +++ b/torch/csrc/jit/runtime/static/fusion.cpp @@ -173,7 +173,7 @@ static std::optional tryMerge( Node* to_merge, AliasDb* aliasDb) { if (!canMerge(fusion_group, to_merge, aliasDb)) { - return c10::nullopt; + return std::nullopt; } std::vector nodes_to_merge = {to_merge}; @@ -190,7 +190,7 @@ static std::optional tryMerge( GRAPH_UPDATE("Trying to move node next to fusion group: ", getHeader(n)); if (!aliasDb->moveBeforeTopologicallyValid(n, move_point)) { GRAPH_UPDATE("Failed to move because of AliasDb checks!"); - return c10::nullopt; + return std::nullopt; } move_point = n; } diff --git a/torch/csrc/jit/runtime/static/impl.cpp b/torch/csrc/jit/runtime/static/impl.cpp index 9dc31446d1e1..0c989efcad75 100644 --- a/torch/csrc/jit/runtime/static/impl.cpp +++ b/torch/csrc/jit/runtime/static/impl.cpp @@ -320,7 +320,7 @@ std::pair, std::optional> PrepareForStaticModule( const StaticModuleOptions& opts, std::vector sample_inputs) { PrepareGraphForStaticModule(graph, opts, std::move(sample_inputs)); - return std::make_pair(graph, c10::nullopt); + return std::make_pair(graph, std::nullopt); } } // namespace @@ -573,7 +573,7 @@ StaticModule::StaticModule( const auto num_schema_args = schema_->arguments().size(); DCHECK(num_schema_args > 0); if (removeSelfFromGraphInput(graph_)) { - module_ = c10::nullopt; + module_ = std::nullopt; num_inputs_ = num_schema_args - 1; } } @@ -1251,7 +1251,7 @@ bool BlockRunner::fast_check_and_correct_overlap_with( auto& tensor = tensor_ival.toTensor(); if (planner_->overlapWithInternalBuffer(tensor.data_ptr())) { DLOG(INFO) << "Detected alias for node: " << PrintNode(n.node()); - tensor_ival = at::native::clone(tensor, c10::nullopt); + tensor_ival = at::native::clone(tensor, std::nullopt); n.set_outputs_memory_overlap_detected(); return true; } @@ -2218,7 +2218,7 @@ bool ProcessedNode::check_and_correct_overlap_with( auto& tensor = output_ival.toTensor(); if (!checkNoMemoryOverlap(input, tensor)) { DLOG(INFO) << "Detected alias for node: " << PrintNode(node()); - output_ival = at::native::clone(tensor, c10::nullopt); + output_ival = at::native::clone(tensor, std::nullopt); set_outputs_memory_overlap_detected(); return true; } diff --git a/torch/csrc/jit/runtime/static/ops.cpp b/torch/csrc/jit/runtime/static/ops.cpp index b1b8a081c4ce..35a74c0bac08 100644 --- a/torch/csrc/jit/runtime/static/ops.cpp +++ b/torch/csrc/jit/runtime/static/ops.cpp @@ -75,7 +75,7 @@ static void repeat_out( } // return an empty tensor if one of the repeat dimensions is zero - at::native::resize_(result, target_size, c10::nullopt); + at::native::resize_(result, target_size, std::nullopt); if (zero_tensor) { return; } @@ -101,7 +101,7 @@ at::Tensor& reshape_copy_out( const auto& shape = 
infer_size ? at::infer_size_dv(proposed_shape, self.numel()) : proposed_shape; - at::native::resize_(out, shape, c10::nullopt); + at::native::resize_(out, shape, std::nullopt); auto self_contig = self.expect_contiguous(); @@ -214,7 +214,7 @@ at::Tensor& to_copy_out( at::native::resize_impl_cpu_( out.unsafeGetTensorImpl(), self.sizes(), self.strides()); } else { - at::native::resize_(out, self.sizes(), c10::nullopt); + at::native::resize_(out, self.sizes(), std::nullopt); } auto is_unsupported_dtype = [](ScalarType t) { #define TORCH_OPS_UNSUPPORTED_TYPE(_, type) \ @@ -233,7 +233,7 @@ at::Tensor& to_copy_out( // expensive. if (self.is_contiguous() && !non_blocking && // Did the user request us to make a copy that isn't contiguous? - (memory_format == c10::nullopt || + (memory_format == std::nullopt || memory_format == c10::MemoryFormat::Preserve || memory_format == c10::MemoryFormat::Contiguous) && // CopyKernel.cpp handles this case specially, so let's not mess @@ -303,7 +303,7 @@ static Tensor& c2_argmin_out( out_dims.push_back(in_dims[i]); next_size *= in_dims[i]; } - at::native::resize_(output, out_dims, c10::nullopt); + at::native::resize_(output, out_dims, std::nullopt); const auto n = in_dims[dim_]; @@ -370,7 +370,7 @@ static at::Tensor& dequantize_copy_out(Tensor& out, const Tensor& self) { if (C10_UNLIKELY(!self.is_quantized())) { // fallback to dequantize_cpu equivalent case: make sure out is at::kFloat DCHECK(out.scalar_type() == kFloat); - return at::native::to_copy_out(out, self, false, false, c10::nullopt); + return at::native::to_copy_out(out, self, false, false, std::nullopt); } return get_qtensorimpl(self)->quantizer()->dequantize_out(out, self); } @@ -658,11 +658,11 @@ REGISTER_OPERATOR_FUNCTOR( out_t, at::cpu::clamp(in0_t, clamp_min, clamp_max), in3_s, - c10::nullopt, - c10::nullopt); + std::nullopt, + std::nullopt); return; } - at::native::resize_(out_t, in0_t.sizes(), c10::nullopt); + at::native::resize_(out_t, in0_t.sizes(), std::nullopt); auto output_size = in0_t.numel(); @@ -700,7 +700,7 @@ REGISTER_OPERATOR_FUNCTOR(aten::clamp, aten_clamp, [](Node* n) -> SROperator { at::cpu::clamp_out(out_t, in0_t, in1_s, in2_s); return; } - at::native::resize_(out_t, in0_t.sizes(), c10::nullopt); + at::native::resize_(out_t, in0_t.sizes(), std::nullopt); auto output_size = in0_t.numel(); auto min = in1_s.has_value() ? in1_s->toFloat() : -std::numeric_limits::infinity(); @@ -830,7 +830,7 @@ void varStackFastOut( ? 
std::array{num_inputs, 1} : std::array{1, num_inputs}; - at::native::resize_(out, output_size, c10::nullopt); + at::native::resize_(out, output_size, std::nullopt); AT_DISPATCH_ALL_TYPES(out.scalar_type(), "varStackFastOut", [&]() { auto* out_data = out.mutable_data_ptr(); @@ -952,7 +952,7 @@ REGISTER_OPERATOR_FUNCTOR(aten::relu, aten_relu, [](Node* n) -> SROperator { at::cpu::threshold_out(out_t, in0_t, 0, 0); return; } - at::native::resize_(out_t, in0_t.sizes(), c10::nullopt); + at::native::resize_(out_t, in0_t.sizes(), std::nullopt); int64_t nn = in0_t.numel(); te->call({out_t.data_ptr(), in0_t.data_ptr(), &nn}); }; @@ -975,7 +975,7 @@ REGISTER_OPERATOR_FUNCTOR(aten::tanh, aten_tanh, [](Node* n) -> SROperator { at::cpu::tanh_out(out_t, in0_t); return; } - at::native::resize_(out_t, in0_t.sizes(), c10::nullopt); + at::native::resize_(out_t, in0_t.sizes(), std::nullopt); int64_t nn = in0_t.numel(); te->call({out_t.data_ptr(), in0_t.data_ptr(), &nn}); }; @@ -1036,7 +1036,7 @@ REGISTER_OPERATOR_FUNCTOR( at::cpu::sigmoid_out(out_t, in0_t); return; } - at::native::resize_(out_t, in0_t.sizes(), c10::nullopt); + at::native::resize_(out_t, in0_t.sizes(), std::nullopt); int64_t nn = in0_t.numel(); te->call({out_t.data_ptr(), in0_t.data_ptr(), &nn}); }; @@ -1048,12 +1048,12 @@ REGISTER_OPERATOR_FUNCTOR(aten::logit, aten_logit, [](Node* n) -> SROperator { LogAndDumpSchema(n); return nullptr; } - std::optional clamp = c10::nullopt; + std::optional clamp = std::nullopt; if (n->inputs()[1]->node()->kind() == prim::Constant) { auto clamp_d = toIValue(n->inputs()[1])->toOptional(); clamp = clamp_d - ? c10::make_optional(static_cast(clamp_d.value())) - : c10::nullopt; + ? std::make_optional(static_cast(clamp_d.value())) + : std::nullopt; } auto te = clamp ? createLogit() : nullptr; float clamp_value = clamp ? 
*clamp : 0.0f; @@ -1070,7 +1070,7 @@ REGISTER_OPERATOR_FUNCTOR(aten::logit, aten_logit, [](Node* n) -> SROperator { at::native::logit_out(in0_t, in1_d, out_t); return; } - at::native::resize_(out_t, in0_t.sizes(), c10::nullopt); + at::native::resize_(out_t, in0_t.sizes(), std::nullopt); int64_t nn = in0_t.numel(); float c = clamp_value; te->call({out_t.data_ptr(), in0_t.data_ptr(), &nn, &c}); @@ -1454,7 +1454,7 @@ C10_ALWAYS_INLINE void to_copy_functor_impl( if (memory_format == c10::MemoryFormat::Preserve) { if (self.is_non_overlapping_and_dense()) { - memory_format = c10::nullopt; + memory_format = std::nullopt; copy_strides = true; } else { memory_format = self.suggest_memory_format(); @@ -1485,7 +1485,7 @@ C10_ALWAYS_INLINE void to_copy_functor_impl( args->dtype, args->layout, self.device(), - c10::nullopt, + std::nullopt, memory_format); } else { if (has_memory_format) { @@ -1905,7 +1905,7 @@ REGISTER_OPERATOR_FUNCTOR(aten::div, aten_div, [](Node* n) -> SROperator { return [te = createDiv()](ProcessedNode* p_node) { const auto& in0_t = p_node->Input(0).toTensor(); - std::optional rounding_mode = c10::nullopt; + std::optional rounding_mode = std::nullopt; if (p_node->num_inputs() > 2) { rounding_mode = p_node->Input(2).toOptional(); } @@ -2112,14 +2112,14 @@ REGISTER_OPERATOR_FUNCTOR(aten::layer_norm, aten_layer_norm, [](Node* n) -> SROp if (p_node->Output(0).isNone()) { p_node->Output(0) = at::native::empty_like( *X, - c10::nullopt /* dtype */, - c10::nullopt /* layout */, - c10::nullopt /* device */, - c10::nullopt /* pin_memory */, + std::nullopt /* dtype */, + std::nullopt /* layout */, + std::nullopt /* device */, + std::nullopt /* pin_memory */, at::MemoryFormat::Contiguous); } else { at::native::resize_( - p_node->Output(0).toTensor(), X->sizes(), c10::nullopt); + p_node->Output(0).toTensor(), X->sizes(), std::nullopt); } at::Tensor& output = p_node->Output(0).toTensor(); at::native::layer_norm_cpu_out(output, *X, *gamma, *beta, eps, M, N); @@ -2231,12 +2231,12 @@ REGISTER_OPERATOR_FUNCTOR(quantized::linear, quantized_linear, [](Node* n) -> SR p_node->Output(0) = at::native::empty_affine_quantized( {0}, c10::kQUInt8, - c10::nullopt, + std::nullopt, c10::kCPU, false, output_scale, output_zero_point, - c10::nullopt); + std::nullopt); } auto& out_t = p_node->Output(0).toTensor(); fastResizeToZero(out_t); @@ -2277,12 +2277,12 @@ REGISTER_OPERATOR_FUNCTOR( p_node->Output(0) = at::native::empty_affine_quantized( {0}, c10::kQUInt8, - c10::nullopt, + std::nullopt, c10::kCPU, false, output_scale, output_zero_point, - c10::nullopt); + std::nullopt); } auto& out_t = p_node->Output(0).toTensor(); fastResizeToZero(out_t); @@ -2463,7 +2463,7 @@ REGISTER_OPERATOR_FUNCTOR(aten::full_like, aten_full_like, [](Node* n) -> SROper in0_t, dtype, layout, device, pin_memory, memory_format); } auto& out_t = p_node->Output(0).toTensor(); - at::native::resize_(out_t, in0_t.sizes(), c10::nullopt); + at::native::resize_(out_t, in0_t.sizes(), std::nullopt); at::native::fill_out(out_t, in1_s); }; }); @@ -2528,7 +2528,7 @@ REGISTER_OPERATOR_FUNCTOR(aten::zeros, aten_zeros, [](Node* n) -> SROperator { const auto layout = p_node->Input(2).toOptional(); if (!hasTensorWithOptions(p_node->Output(0), dtype, layout)) { p_node->Output(0) = at::compositeexplicitautograd::zeros( - size, dtype, layout, c10::nullopt, c10::nullopt); + size, dtype, layout, std::nullopt, std::nullopt); return; } auto& out_t = p_node->Output(0).toTensor(); @@ -2709,7 +2709,7 @@ unsigned char abs_if_signed(unsigned char val) { // Computes f(x) 
= sign(x) * ln(|1 + x|) for each x in the input tensor void signed_log1p_out(at::Tensor& out, const at::Tensor& input) { - at::native::resize_(out, input.sizes(), c10::nullopt); + at::native::resize_(out, input.sizes(), std::nullopt); const auto input_contig = input.expect_contiguous(); auto output_contig = out.expect_contiguous(); @@ -2750,7 +2750,7 @@ REGISTER_OPERATOR_FUNCTOR( signed_log1p_out(out, input); return; } - at::native::resize_(out, input.sizes(), c10::nullopt); + at::native::resize_(out, input.sizes(), std::nullopt); int64_t nn = input.numel(); te->call({out.data_ptr(), input.data_ptr(), &nn}); }; diff --git a/torch/csrc/jit/runtime/static/ops.h b/torch/csrc/jit/runtime/static/ops.h index 362837e7ce78..623340daec06 100644 --- a/torch/csrc/jit/runtime/static/ops.h +++ b/torch/csrc/jit/runtime/static/ops.h @@ -57,8 +57,8 @@ inline at::Tensor create_empty_from(const at::Tensor& t) { c10::typeMetaToScalarType(t.dtype()), t.layout(), t.device(), - c10::nullopt, - c10::nullopt); + std::nullopt, + std::nullopt); } inline at::Tensor create_empty_from( @@ -69,20 +69,20 @@ inline at::Tensor create_empty_from( c10::typeMetaToScalarType(t.dtype()), t.layout(), t.device(), - c10::nullopt, - c10::nullopt); + std::nullopt, + std::nullopt); } inline at::Tensor create_empty(c10::ScalarType dtype) { return at::detail::empty_cpu( - {0}, dtype, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt); + {0}, dtype, std::nullopt, std::nullopt, std::nullopt, std::nullopt); } inline at::Tensor create_empty_from( const at::Tensor& t, c10::ScalarType dtype) { return at::detail::empty_cpu( - {0}, dtype, t.layout(), t.device(), c10::nullopt, c10::nullopt); + {0}, dtype, t.layout(), t.device(), std::nullopt, std::nullopt); } inline at::Tensor create_empty_from(const at::Tensor& t, c10::Layout layout) { @@ -91,8 +91,8 @@ inline at::Tensor create_empty_from(const at::Tensor& t, c10::Layout layout) { c10::typeMetaToScalarType(t.dtype()), layout, t.device(), - c10::nullopt, - c10::nullopt); + std::nullopt, + std::nullopt); } inline at::Tensor create_empty_from(const at::Tensor& t, c10::Device device) { @@ -101,8 +101,8 @@ inline at::Tensor create_empty_from(const at::Tensor& t, c10::Device device) { c10::typeMetaToScalarType(t.dtype()), t.layout(), device, - c10::nullopt, - c10::nullopt); + std::nullopt, + std::nullopt); } inline at::Tensor create_empty_from( @@ -113,7 +113,7 @@ inline at::Tensor create_empty_from( c10::typeMetaToScalarType(t.dtype()), t.layout(), t.device(), - c10::nullopt, + std::nullopt, memory_format); } @@ -122,7 +122,7 @@ inline at::Tensor create_empty_from( c10::ScalarType dtype, c10::MemoryFormat memory_format) { return at::detail::empty_cpu( - {0}, dtype, t.layout(), t.device(), c10::nullopt, memory_format); + {0}, dtype, t.layout(), t.device(), std::nullopt, memory_format); } inline bool checkResizedDataPtr(at::Tensor& t) { diff --git a/torch/csrc/jit/runtime/symbolic_script.cpp b/torch/csrc/jit/runtime/symbolic_script.cpp index 6aa65c528a42..92d901e43a5d 100644 --- a/torch/csrc/jit/runtime/symbolic_script.cpp +++ b/torch/csrc/jit/runtime/symbolic_script.cpp @@ -1609,7 +1609,7 @@ static void loadModule(const CompilationUnit& module) { static void loadFunctions() { for (const std::string& str : functions) { - compilation_unit.define(c10::nullopt, str, nativeResolver(), nullptr); + compilation_unit.define(std::nullopt, str, nativeResolver(), nullptr); } loadModule(compilation_unit); } @@ -1635,7 +1635,7 @@ std::optional gradientInfoForSchema( return sym_script_it->second; } } - 
return c10::nullopt; + return std::nullopt; } bool hasGradientInfoForSchema(const FunctionSchema& schema) { diff --git a/torch/csrc/jit/runtime/symbolic_script.h b/torch/csrc/jit/runtime/symbolic_script.h index 271bf66916f3..0715f0deeb12 100644 --- a/torch/csrc/jit/runtime/symbolic_script.h +++ b/torch/csrc/jit/runtime/symbolic_script.h @@ -2,9 +2,9 @@ // This file is temporary until native_functions.yaml and derivatives.yaml are // merged. Ideally this should all go into native_functions.yaml -#include #include #include +#include namespace torch::jit { struct GradientPair { diff --git a/torch/csrc/jit/runtime/symbolic_shape_registry.cpp b/torch/csrc/jit/runtime/symbolic_shape_registry.cpp index ddea031aba73..f8cfca26c702 100644 --- a/torch/csrc/jit/runtime/symbolic_shape_registry.cpp +++ b/torch/csrc/jit/runtime/symbolic_shape_registry.cpp @@ -365,7 +365,7 @@ void loadFunctions() { [&](const std::string& name) -> std::shared_ptr { return src; }, 1); compilation_unit->define( - c10::nullopt, shape_compute_functions, resolver, nullptr); + std::nullopt, shape_compute_functions, resolver, nullptr); loadModule(*compilation_unit); } catch (...) { // Reset the cache and compilation unit so that we don't get weird errors @@ -391,7 +391,7 @@ std::optional> shapeComputeGraphForSchema( } GRAPH_DEBUG("Could not find schema: ", schema); - return c10::nullopt; + return std::nullopt; } TORCH_API std::optional boundedGraphsForSchema( @@ -406,7 +406,7 @@ TORCH_API std::optional boundedGraphsForSchema( return cache_it->second; } - return c10::nullopt; + return std::nullopt; } void RegisterShapeComputeGraphForSchema( diff --git a/torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp b/torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp index 4a326285b297..2bc464a0de17 100644 --- a/torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp +++ b/torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp @@ -173,7 +173,7 @@ std::optional InlinedCallStackDeserializer:: const c10::IValue& iv, const std::shared_ptr& cu) { if (iv.isNone()) { - return c10::nullopt; + return std::nullopt; } auto tup = iv.toTuple(); auto it = cached_module_instance_info_.find(tup); diff --git a/torch/csrc/jit/serialization/export.cpp b/torch/csrc/jit/serialization/export.cpp index 6ef9bdbf4abf..2cfe34cd4abd 100644 --- a/torch/csrc/jit/serialization/export.cpp +++ b/torch/csrc/jit/serialization/export.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include @@ -21,6 +20,7 @@ #include #include #include +#include C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wnewline-eof") #include diff --git a/torch/csrc/jit/serialization/export_bytecode.cpp b/torch/csrc/jit/serialization/export_bytecode.cpp index 9f194cd0ad31..4b895f9d657b 100644 --- a/torch/csrc/jit/serialization/export_bytecode.cpp +++ b/torch/csrc/jit/serialization/export_bytecode.cpp @@ -166,7 +166,7 @@ mobile::Code compileGraphToMobileCode( // and is not allowed. For an operator with num_args = -1, it means the // number of arguments is not available for this operator, we don't do any // backward compatibility adaptation at runtime. 
- std::optional num_args = c10::nullopt; + std::optional num_args = std::nullopt; auto it = op_to_specified_args.find(unique_name); if (it != op_to_specified_args.end()) { num_args = it->second; diff --git a/torch/csrc/jit/serialization/export_module.cpp b/torch/csrc/jit/serialization/export_module.cpp index 5bd7714c4e8d..779e63a84360 100644 --- a/torch/csrc/jit/serialization/export_module.cpp +++ b/torch/csrc/jit/serialization/export_module.cpp @@ -259,7 +259,7 @@ std::pair getFunctionTuple( if (namedType && namedType->name()) { return type_name_uniquer_.getUniqueName(namedType).qualifiedName(); } - return c10::nullopt; + return std::nullopt; }; auto makeArgTuple = [&](const std::vector& args) { @@ -765,7 +765,7 @@ std::optional type_printer( if (namedType && namedType->name()) { return type_name_uniquer.getUniqueName(namedType).qualifiedName(); } - return c10::nullopt; + return std::nullopt; } } // namespace diff --git a/torch/csrc/jit/serialization/flatbuffer_serializer.cpp b/torch/csrc/jit/serialization/flatbuffer_serializer.cpp index 5a47fe900f3f..e1ad60afa5c3 100644 --- a/torch/csrc/jit/serialization/flatbuffer_serializer.cpp +++ b/torch/csrc/jit/serialization/flatbuffer_serializer.cpp @@ -69,7 +69,7 @@ auto print_type(const c10::Type& t) -> std::optional { if (auto dyn = t.castRaw()) { return dyn->fallback()->annotation_str(); } - return c10::nullopt; + return std::nullopt; } class FlatbufferSerializer { @@ -306,7 +306,7 @@ flatbuffers::Offset FlatbufferSerializer:: if (auto dyn = t.castRaw()) { return dyn->fallback()->annotation_str(); } - return c10::nullopt; + return std::nullopt; }; flatbuffers::Offset schema_offset = 0; diff --git a/torch/csrc/jit/serialization/import.h b/torch/csrc/jit/serialization/import.h index b090a1c80a3c..2da1e639ee80 100644 --- a/torch/csrc/jit/serialization/import.h +++ b/torch/csrc/jit/serialization/import.h @@ -21,19 +21,19 @@ class DeserializationStorageContext; TORCH_API Module import_ir_module( std::shared_ptr cu, const std::string& filename, - std::optional device = c10::nullopt, + std::optional device = std::nullopt, bool load_debug_files = true); TORCH_API Module import_ir_module( std::shared_ptr cu, std::istream& in, - std::optional device = c10::nullopt, + std::optional device = std::nullopt, bool load_debug_files = true); TORCH_API Module import_ir_module( std::shared_ptr cu, std::unique_ptr rai, - std::optional device = c10::nullopt, + std::optional device = std::nullopt, bool load_debug_files = true); TORCH_API Module import_ir_module( @@ -80,7 +80,7 @@ TORCH_API Module import_ir_module( /// `torch::jit::ExportModule` in C++. TORCH_API Module load( std::istream& in, - std::optional device = c10::nullopt, + std::optional device = std::nullopt, bool load_debug_files = true); TORCH_API Module load( @@ -96,7 +96,7 @@ TORCH_API Module load( /// Python or `torch::jit::ExportModule` in C++. TORCH_API Module load( const std::string& filename, - std::optional device = c10::nullopt, + std::optional device = std::nullopt, bool load_debug_files = true); TORCH_API Module load( @@ -112,7 +112,7 @@ TORCH_API Module load( /// Python or `torch::jit::ExportModule` in C++. 
TORCH_API Module load( std::shared_ptr rai, - std::optional device = c10::nullopt, + std::optional device = std::nullopt, bool load_debug_files = true); TORCH_API Module load( @@ -131,17 +131,17 @@ TORCH_API Module parse_and_initialize_jit_module( std::shared_ptr data, size_t size, ExtraFilesMap& extra_files, - std::optional device = c10::nullopt); + std::optional device = std::nullopt); TORCH_API Module load_jit_module_from_file( const std::string& filename, ExtraFilesMap& extra_files, - std::optional device = c10::nullopt); + std::optional device = std::nullopt); TORCH_API Module load_jit_module_from_stream( std::istream& in, ExtraFilesMap& extra_files, - std::optional device = c10::nullopt); + std::optional device = std::nullopt); TORCH_API Module parse_and_initialize_jit_module( std::shared_ptr data, diff --git a/torch/csrc/jit/serialization/import_source.cpp b/torch/csrc/jit/serialization/import_source.cpp index f67c2a22e9eb..017ae5bd3da7 100644 --- a/torch/csrc/jit/serialization/import_source.cpp +++ b/torch/csrc/jit/serialization/import_source.cpp @@ -372,7 +372,7 @@ std::optional SourceImporterImpl:: if (replacements.count(demangled_classname)) { auto lhs = Var(assign.lhs()); if (!assign.type().present() || assign.type().get().kind() != TK_VAR) { - return c10::nullopt; + return std::nullopt; } auto type = Var(assign.type().get()); @@ -389,7 +389,7 @@ std::optional SourceImporterImpl:: assign.range(), assign.lhs_list(), assign.rhs(), maybe_typename); } } - return c10::nullopt; + return std::nullopt; } void SourceImporterImpl::importClass( diff --git a/torch/csrc/jit/serialization/import_source.h b/torch/csrc/jit/serialization/import_source.h index 9b364f379b40..a86a1f91926d 100644 --- a/torch/csrc/jit/serialization/import_source.h +++ b/torch/csrc/jit/serialization/import_source.h @@ -2,7 +2,6 @@ #include #include -#include #include #include #include @@ -13,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -66,7 +66,7 @@ struct SourceImporterImpl : public Resolver, std::shared_ptr cu_; std::unordered_map> env_; SourceLoader source_loader_; - std::optional version_ = c10::nullopt; + std::optional version_ = std::nullopt; std::unordered_set loaded_sources_; // named types and functions loaded from a file but not yet defined because // their type has not been requested yet. 
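For reference, the hunks above change default argument values and default member initializers from c10::nullopt to std::nullopt. A minimal standalone sketch of the resulting pattern is below; the names load_module and SourceState are hypothetical and are not the actual PyTorch declarations.

// Sketch only: std::nullopt as a default argument and as a default
// member initializer, the shape these declarations take after the migration.
#include <cstddef>
#include <optional>
#include <string>

// Hypothetical free function: the optional parameter defaults to "not set".
std::string load_module(
    const std::string& filename,
    std::optional<int> device = std::nullopt) {
  return device ? filename + ":" + std::to_string(*device) : filename;
}

// Hypothetical struct: the optional member starts out empty.
struct SourceState {
  std::optional<size_t> version_ = std::nullopt; // same as leaving it empty
};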
diff --git a/torch/csrc/jit/serialization/pickle.cpp b/torch/csrc/jit/serialization/pickle.cpp index be36a4e2d8dd..c05bf330e7af 100644 --- a/torch/csrc/jit/serialization/pickle.cpp +++ b/torch/csrc/jit/serialization/pickle.cpp @@ -92,9 +92,9 @@ IValue pickle_load(const std::vector& data) { "data", /*pickle_prefix=*/"", /*tensor_prefix=*/"", - /*type_resolver=*/c10::nullopt, - /*obj_loader=*/c10::nullopt, - /*device=*/c10::nullopt, + /*type_resolver=*/std::nullopt, + /*obj_loader=*/std::nullopt, + /*device=*/std::nullopt, reader); #else AT_ERROR( diff --git a/torch/csrc/jit/serialization/pickler.cpp b/torch/csrc/jit/serialization/pickler.cpp index 173ab5c13e5d..04d3fc9a4356 100644 --- a/torch/csrc/jit/serialization/pickler.cpp +++ b/torch/csrc/jit/serialization/pickler.cpp @@ -605,7 +605,7 @@ std::optional type_printer(const c10::Type& type) { if (auto dyn = type.castRaw()) { return dyn->fallback()->annotation_str(type_printer); } - return c10::nullopt; + return std::nullopt; } } // namespace diff --git a/torch/csrc/jit/serialization/python_print.cpp b/torch/csrc/jit/serialization/python_print.cpp index f1b0865032c3..2292f11fd555 100644 --- a/torch/csrc/jit/serialization/python_print.cpp +++ b/torch/csrc/jit/serialization/python_print.cpp @@ -1725,7 +1725,7 @@ static std::optional printType( if (namedType && namedType->name()) { return type_name_uniquer.getUniqueName(namedType).qualifiedName(); } - return c10::nullopt; + return std::nullopt; } void jitModuleToPythonCodeAndConstants( diff --git a/torch/csrc/jit/serialization/source_range_serialization.cpp b/torch/csrc/jit/serialization/source_range_serialization.cpp index 118becd20dc7..6892493312b0 100644 --- a/torch/csrc/jit/serialization/source_range_serialization.cpp +++ b/torch/csrc/jit/serialization/source_range_serialization.cpp @@ -68,7 +68,7 @@ std::shared_ptr SourceRangeDeserializer::deserialize_source( const auto& textIndex = tup_elems[0].toIntList(); int64_t fnameIndex = tup_elems[1].toInt(); int64_t starting_line_no_ = tup_elems[2].toInt(); - std::optional filename = c10::nullopt; + std::optional filename = std::nullopt; TORCH_CHECK( (uint64_t)fnameIndex < text_table_.size(), @@ -248,7 +248,7 @@ std::optional ConcreteSourceRangeUnpickler:: return (entry - 1)->range; } - return c10::nullopt; + return std::nullopt; } TORCH_API void setShouldUseFormatWithStringTable( diff --git a/torch/csrc/jit/tensorexpr/codegen.cpp b/torch/csrc/jit/tensorexpr/codegen.cpp index e1464d0efc3e..1ba4d54c4d29 100644 --- a/torch/csrc/jit/tensorexpr/codegen.cpp +++ b/torch/csrc/jit/tensorexpr/codegen.cpp @@ -99,7 +99,7 @@ static std::optional bufSize(BufPtr buf) { size_t size = elementSize(buf->dtype().scalar_type()) * buf->dtype().lanes(); for (auto& d : buf->dims()) { if (!d->isConstant()) { - return c10::nullopt; + return std::nullopt; } size = size * (*intValue(d)); } diff --git a/torch/csrc/jit/tensorexpr/eval.cpp b/torch/csrc/jit/tensorexpr/eval.cpp index 5666097f2dd4..ceab479dc879 100644 --- a/torch/csrc/jit/tensorexpr/eval.cpp +++ b/torch/csrc/jit/tensorexpr/eval.cpp @@ -1305,7 +1305,7 @@ std::optional evalInt(ExprPtr e) { return ExprEval(cast(ExprHandle(e))) .value(); } catch (std::runtime_error& err) { - return c10::nullopt; + return std::nullopt; } } diff --git a/torch/csrc/jit/tensorexpr/expr.h b/torch/csrc/jit/tensorexpr/expr.h index 8c8de8997575..c410c902ea4e 100644 --- a/torch/csrc/jit/tensorexpr/expr.h +++ b/torch/csrc/jit/tensorexpr/expr.h @@ -6,11 +6,11 @@ #pragma once #include -#include #include #include #include #include +#include 
#include @@ -207,10 +207,10 @@ class TORCH_API Buf : public ExprNode { const std::string& name_hint, const std::vector& dims, Dtype dtype, - std::optional initializer = c10::nullopt, - std::optional> strides = c10::nullopt, - std::optional qscale = c10::nullopt, - std::optional qzero = c10::nullopt); + std::optional initializer = std::nullopt, + std::optional> strides = std::nullopt, + std::optional qscale = std::nullopt, + std::optional qzero = std::nullopt); // TODO: unique_name VarPtr base_handle() const { @@ -232,7 +232,7 @@ class TORCH_API Buf : public ExprNode { const std::vector& dims, Dtype dtype, ExprPtr initializer = nullptr, - std::optional> strides = c10::nullopt, + std::optional> strides = std::nullopt, ExprPtr qscale = nullptr, ExprPtr qzero = nullptr) : Buf(alloc(name_hint, kHandle), @@ -248,7 +248,7 @@ class TORCH_API Buf : public ExprNode { std::vector dims, Dtype dtype, ExprPtr initializer = nullptr, - std::optional> strides = c10::nullopt, + std::optional> strides = std::nullopt, ExprPtr qscale = nullptr, ExprPtr qzero = nullptr); diff --git a/torch/csrc/jit/tensorexpr/external_functions.cpp b/torch/csrc/jit/tensorexpr/external_functions.cpp index a3146ccfaff5..decfe0bceb32 100644 --- a/torch/csrc/jit/tensorexpr/external_functions.cpp +++ b/torch/csrc/jit/tensorexpr/external_functions.cpp @@ -123,7 +123,7 @@ std::vector constructTensors( } } else { // handle quantized - std::vector> qdata(bufs_num, c10::nullopt); + std::vector> qdata(bufs_num, std::nullopt); for (const auto& qd : *qdataArg) { qdata[qd.first] = qd.second; } @@ -233,7 +233,7 @@ std::vector constructTensors2( } } else { // handle quantized - std::vector> qdata(bufs_in_num, c10::nullopt); + std::vector> qdata(bufs_in_num, std::nullopt); for (const auto& qd : *qdataArg) { qdata[qd.first - bufs_out_num] = qd.second; } @@ -993,10 +993,10 @@ void nnc_aten_upsample_nearest2d( x, (output_size_h != -1) ? std::optional({output_size_h, output_size_w}) - : c10::nullopt, + : std::nullopt, (scale_factor_h != -1.f) ? std::optional>( {scale_factor_h, scale_factor_w}) - : c10::nullopt); + : std::nullopt); memcpy(buf_data[0], r.const_data_ptr(), r.element_size() * r.numel()); } @@ -1043,10 +1043,10 @@ void nnc_aten_upsample_nearest2d_out( x, (output_size_h != -1) ? std::optional({output_size_h, output_size_w}) - : c10::nullopt, + : std::nullopt, (scale_factor_h != -1.f) ? 
std::optional>( {scale_factor_h, scale_factor_w}) - : c10::nullopt); + : std::nullopt); buf_data[0] = r.data_ptr(); c10::raw::intrusive_ptr::incref(r.getIntrusivePtr().get()); buf_data[bufs_in_num + bufs_out_num] = r.getIntrusivePtr().get(); @@ -1089,7 +1089,7 @@ void nnc_aten_quantize_per_tensor_out( buf_dims, buf_strides, buf_dtypes, - c10::nullopt, + std::nullopt, bufs_out_num); // NOLINTNEXTLINE(facebook-hte-LocalUncheckedArrayBounds) at::Tensor x = tensors[1]; @@ -1214,7 +1214,7 @@ void nnc_aten_conv1d_out( buf_dims, buf_strides, buf_dtypes, - c10::nullopt, + std::nullopt, bufs_out_num); at::Tensor r; diff --git a/torch/csrc/jit/tensorexpr/external_functions.h b/torch/csrc/jit/tensorexpr/external_functions.h index 1fd90a3f056b..9dc859d22471 100644 --- a/torch/csrc/jit/tensorexpr/external_functions.h +++ b/torch/csrc/jit/tensorexpr/external_functions.h @@ -75,7 +75,7 @@ std::vector constructTensors( int64_t* buf_strides, int8_t* buf_dtypes, std::optional>> qdataArg = - c10::nullopt); + std::nullopt); std::vector constructTensors2( int64_t bufs_in_num, @@ -85,7 +85,7 @@ std::vector constructTensors2( int64_t* buf_strides, int8_t* buf_dtypes, std::optional>> qdataArg = - c10::nullopt, + std::nullopt, size_t bufs_out_num = 0); #ifdef C10_MOBILE diff --git a/torch/csrc/jit/tensorexpr/graph_opt.cpp b/torch/csrc/jit/tensorexpr/graph_opt.cpp index 01511b2b4d8c..0699dfd63da5 100644 --- a/torch/csrc/jit/tensorexpr/graph_opt.cpp +++ b/torch/csrc/jit/tensorexpr/graph_opt.cpp @@ -351,7 +351,7 @@ static std::optional inferScalarType(Node* n) { if (tt->scalarType() && *tt->scalarType() != scalar_type) { GRAPH_DEBUG( "Inputs of ", n, " have different scalar types, cannot fixup!"); - return c10::nullopt; + return std::nullopt; } } } @@ -369,7 +369,7 @@ static std::optional inferDevice(Node* n) { } if (tt->device() && *tt->device() != device) { GRAPH_DEBUG("Inputs of ", n, " have different devices, cannot fixup!"); - return c10::nullopt; + return std::nullopt; } } } diff --git a/torch/csrc/jit/tensorexpr/ir.h b/torch/csrc/jit/tensorexpr/ir.h index 89c3f96aba6e..90c540047251 100644 --- a/torch/csrc/jit/tensorexpr/ir.h +++ b/torch/csrc/jit/tensorexpr/ir.h @@ -367,7 +367,7 @@ inline std::optional intValue(const ExprPtr& e) { } AT_FORALL_INT_TYPES(TYPE_CASE); #undef TYPE_CASE - return c10::nullopt; + return std::nullopt; } inline std::optional intValue(const ExprHandle& e) { diff --git a/torch/csrc/jit/tensorexpr/ir_simplifier.cpp b/torch/csrc/jit/tensorexpr/ir_simplifier.cpp index afb7aefdda65..b69d167dba53 100644 --- a/torch/csrc/jit/tensorexpr/ir_simplifier.cpp +++ b/torch/csrc/jit/tensorexpr/ir_simplifier.cpp @@ -1885,7 +1885,7 @@ static std::optional isModRound(TermPtr e) { if (!mod) { mod = to(m); } else { - return c10::nullopt; + return std::nullopt; } } else { // Take care of special cases before multiplying the scalar and variable. @@ -1911,14 +1911,14 @@ static std::optional isModRound(TermPtr e) { if (!mod) { // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) - return c10::nullopt; + return std::nullopt; } mod_divisor = IRSimplifier::simplify(mod->rhs()); other = mod->lhs(); if (!(div = to
(other))) { - return c10::nullopt; + return std::nullopt; } divisor = IRSimplifier::simplify(div->rhs()); @@ -1953,16 +1953,16 @@ static std::optional isModRound(TermPtr e) { // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) denom = IRSimplifier::simplify(alloc
(other, c)); } else { - return c10::nullopt; + return std::nullopt; } } else { - return c10::nullopt; + return std::nullopt; } } // Deny cases in which divisor=1. Such cases are considered as Mods. if (divisor->isConstant() && immediateEquals(divisor, 1)) { - return c10::nullopt; + return std::nullopt; } if (!scalar) { diff --git a/torch/csrc/jit/tensorexpr/kernel.cpp b/torch/csrc/jit/tensorexpr/kernel.cpp index d18a3d65f21e..81c171d56711 100644 --- a/torch/csrc/jit/tensorexpr/kernel.cpp +++ b/torch/csrc/jit/tensorexpr/kernel.cpp @@ -129,12 +129,12 @@ bool& getOptConditionals() { std::optional pickDeviceType( const at::ArrayRef& inputs) { - std::optional device = c10::nullopt; + std::optional device = std::nullopt; for (auto const& input : inputs) { auto tt = input->type()->cast(); if (tt && tt->device()) { if (device && *device != *tt->device()) { - return c10::nullopt; + return std::nullopt; } device = *tt->device(); } @@ -144,7 +144,7 @@ std::optional pickDeviceType( static std::optional pickDeviceType( const std::shared_ptr& graph) { - std::optional device = c10::nullopt; + std::optional device = std::nullopt; for (auto const& node : graph->nodes()) { for (auto const& input : node->inputs()) { if (auto tt = input->type()->cast()) { @@ -184,10 +184,10 @@ static std::optional getTensorInfoJit(torch::jit::Value* v) { c10::ScalarType dtype = c10::ScalarType::Float; if (!it) { - return c10::nullopt; + return std::nullopt; } if (!it->isComplete()) { - return c10::nullopt; + return std::nullopt; } if (it->scalarType()) { // TODO: ideally we should be strict here and return nullopt if the dtype is @@ -197,7 +197,7 @@ static std::optional getTensorInfoJit(torch::jit::Value* v) { } auto concrete_sizes = it->sizes().concrete_sizes(); if (!concrete_sizes) { - return c10::nullopt; + return std::nullopt; } return TensorInfo{*concrete_sizes, dtype}; } @@ -712,7 +712,7 @@ static std::optional tripCount(ForPtr loop) { if (auto val = to(tc.node())) { return val->value(); } - return c10::nullopt; + return std::nullopt; } // Prune innermost loops until iterations satisfies a minimum grain size. 
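Most of the replacements in this file are in helpers that return std::optional and bail out early; a minimal sketch of that bail-out pattern under the new spelling follows. The helper pick_unique is hypothetical and only mirrors the style of pickDeviceType above, it is not the real implementation.

// Sketch only: return std::nullopt when no single answer exists,
// otherwise return the picked value wrapped in the optional.
#include <optional>
#include <vector>

std::optional<int> pick_unique(const std::vector<int>& xs) {
  std::optional<int> picked = std::nullopt;
  for (int x : xs) {
    if (picked && *picked != x) {
      return std::nullopt; // conflicting values: no unique answer
    }
    picked = x;
  }
  return picked; // stays std::nullopt for empty input
}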
@@ -1314,7 +1314,7 @@ Tensor TensorExprKernel::convertSymbolicOutputToCorrectStrides( BufPtr buf = bufs_.at(v); TORCH_INTERNAL_ASSERT(buf != nullptr); TORCH_INTERNAL_ASSERT(tt != nullptr); - TORCH_INTERNAL_ASSERT(tt->symbolic_sizes().rank() != c10::nullopt); + TORCH_INTERNAL_ASSERT(tt->symbolic_sizes().rank() != std::nullopt); auto stride_desc = getSymbolicStrideDesc(v); TORCH_INTERNAL_ASSERT(stride_desc.size() == 1); diff --git a/torch/csrc/jit/tensorexpr/llvm_codegen.cpp b/torch/csrc/jit/tensorexpr/llvm_codegen.cpp index dec03637847e..1cae1fe9b2dc 100644 --- a/torch/csrc/jit/tensorexpr/llvm_codegen.cpp +++ b/torch/csrc/jit/tensorexpr/llvm_codegen.cpp @@ -85,15 +85,15 @@ C10_DEFINE_bool( namespace torch::jit::tensorexpr { std::optional& LLVMTargetTriple() { - static std::optional triple = c10::nullopt; + static std::optional triple = std::nullopt; return triple; } std::optional& LLVMTargetCPU() { - static std::optional cpu = c10::nullopt; + static std::optional cpu = std::nullopt; return cpu; } std::optional& LLVMTargetAttrs() { - static std::optional attrs = c10::nullopt; + static std::optional attrs = std::nullopt; return attrs; } bool& LLVMAOTWorkflow() { diff --git a/torch/csrc/jit/tensorexpr/llvm_codegen.h b/torch/csrc/jit/tensorexpr/llvm_codegen.h index 74271fa879f3..1d96b4dd0467 100644 --- a/torch/csrc/jit/tensorexpr/llvm_codegen.h +++ b/torch/csrc/jit/tensorexpr/llvm_codegen.h @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include @@ -27,9 +27,9 @@ class TORCH_API LLVMCodeGen : public CodeGen { at::Device device = at::kCPU, const std::string& kernel_func_name = "func", Dtype dtype = kInt, - std::optional triple = c10::nullopt, - std::optional cpu = c10::nullopt, - std::optional attrs = c10::nullopt); + std::optional triple = std::nullopt, + std::optional cpu = std::nullopt, + std::optional attrs = std::nullopt); explicit LLVMCodeGen(StmtPtr stmt); LLVMCodeGen() = delete; @@ -126,9 +126,9 @@ struct TORCH_API LLVMCodeGenBuilder { at::Device device_ = at::kCPU; std::string kernelFuncName_ = "func"; Dtype dtype_ = kInt; - std::optional triple_ = c10::nullopt; - std::optional cpu_ = c10::nullopt; - std::optional attrs_ = c10::nullopt; + std::optional triple_ = std::nullopt; + std::optional cpu_ = std::nullopt; + std::optional attrs_ = std::nullopt; }; TORCH_API std::optional& LLVMTargetTriple(); diff --git a/torch/csrc/jit/tensorexpr/llvm_jit.h b/torch/csrc/jit/tensorexpr/llvm_jit.h index 98238e004388..beadbdd5e537 100644 --- a/torch/csrc/jit/tensorexpr/llvm_jit.h +++ b/torch/csrc/jit/tensorexpr/llvm_jit.h @@ -3,8 +3,8 @@ #ifdef TORCH_ENABLE_LLVM #include #include -#include #include +#include C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override") #include diff --git a/torch/csrc/jit/tensorexpr/operators/conv2d.cpp b/torch/csrc/jit/tensorexpr/operators/conv2d.cpp index bdf313f0ad05..bfce006d5517 100644 --- a/torch/csrc/jit/tensorexpr/operators/conv2d.cpp +++ b/torch/csrc/jit/tensorexpr/operators/conv2d.cpp @@ -51,7 +51,7 @@ Tensor conv2d_depthwise_static( Tensor conv = Reduce( "conv2d_depthwise", {N, K, OH, OW}, - c10::nullopt, // TODO + std::nullopt, // TODO Sum(), [&](const std::vector& v) { return init_func(v); }, [&](const std::vector& v) { @@ -123,7 +123,7 @@ Tensor conv2d_depthwise_dynamic( return Reduce( "conv2d_depthwise", {N, K, OH, OW}, - c10::nullopt, // TODO + std::nullopt, // TODO Sum(), [&](const std::vector& v) { return init_func(v); }, [&](const std::vector& v) { diff --git a/torch/csrc/jit/tensorexpr/operators/misc.cpp 
b/torch/csrc/jit/tensorexpr/operators/misc.cpp index 938cab6ffd88..6ff6dd733885 100644 --- a/torch/csrc/jit/tensorexpr/operators/misc.cpp +++ b/torch/csrc/jit/tensorexpr/operators/misc.cpp @@ -165,7 +165,7 @@ std::optional getTensorInfo(BufHandle b) { for (auto dim : b.dims()) { auto val = intValue(dim.node()); if (!val) { - return c10::nullopt; + return std::nullopt; } dims.push_back(*val); } diff --git a/torch/csrc/jit/tensorexpr/operators/pointwise.h b/torch/csrc/jit/tensorexpr/operators/pointwise.h index 0ce10424b3d3..589674117c1a 100644 --- a/torch/csrc/jit/tensorexpr/operators/pointwise.h +++ b/torch/csrc/jit/tensorexpr/operators/pointwise.h @@ -9,7 +9,7 @@ namespace tensorexpr { TORCH_API Tensor computeSign( const std::vector& inputs, const std::vector& outputShape, - std::optional> outputStrides = c10::nullopt); + std::optional> outputStrides = std::nullopt); Tensor computeOneOperand( const std::string& name, diff --git a/torch/csrc/jit/tensorexpr/operators/quantization.cpp b/torch/csrc/jit/tensorexpr/operators/quantization.cpp index 66c0688538a1..204a4c2211f7 100644 --- a/torch/csrc/jit/tensorexpr/operators/quantization.cpp +++ b/torch/csrc/jit/tensorexpr/operators/quantization.cpp @@ -171,7 +171,7 @@ Tensor computeQuantizePerTensor( ExprHandleVectorToExprVector(outputShape), dtype, nullptr, - c10::nullopt, + std::nullopt, qscale.node(), qzero.node()); return Tensor(buf, vars, e.node()); @@ -731,7 +731,7 @@ Tensor computeUpsampleNearest2d( "upsample_nearest2d", outputShape, Dtype(*outputType), - c10::nullopt, // initializer + std::nullopt, // initializer fmap(strides, [&](ExprPtr stride) { return ExprHandle(stride); }), ExprHandle(A.node()->qscale()), ExprHandle(A.node()->qzero())); diff --git a/torch/csrc/jit/tensorexpr/operators/softmax.cpp b/torch/csrc/jit/tensorexpr/operators/softmax.cpp index 9bd82afd177d..f73e06086d3d 100644 --- a/torch/csrc/jit/tensorexpr/operators/softmax.cpp +++ b/torch/csrc/jit/tensorexpr/operators/softmax.cpp @@ -103,7 +103,7 @@ Tensor computeSoftmax( auto max = Reduce( "aten_softmax_max", non_softmax_dims, - c10::nullopt, + std::nullopt, Maximum(dtype), [&](ParameterList& indices) { return tensorOrConstant( @@ -113,7 +113,7 @@ Tensor computeSoftmax( auto e = Compute( "aten_softmax_exp", outputShape, - c10::nullopt, + std::nullopt, [&](ParameterList& indices) { auto inp = tensorOrConstant( inputs[0], convert_indices_to_expr_handle(indices)); @@ -122,7 +122,7 @@ Tensor computeSoftmax( auto sum = Reduce( "aten_softmax_sum", non_softmax_dims, - c10::nullopt, + std::nullopt, Sum(), [&](ParameterList& indices) { return e.load(move_softmax_dim_index_to_pos(indices)); @@ -130,7 +130,7 @@ Tensor computeSoftmax( {outputShape[softmax_dim]}); if (!log_softmax) { auto result = Compute( - "aten_softmax", outputShape, c10::nullopt, [&](ParameterList& indices) { + "aten_softmax", outputShape, std::nullopt, [&](ParameterList& indices) { return e.load(indices) / sum.load(remove_softmax_dim_index(indices)); }); return Tensor( @@ -142,12 +142,12 @@ Tensor computeSoftmax( auto log_sum = Compute( "aten_softmax_log_sum", non_softmax_dims, - c10::nullopt, + std::nullopt, [&](ParameterList& indices) { return log(sum.load(indices)); }); auto result = Compute( "aten_log_softmax", outputShape, - c10::nullopt, + std::nullopt, [&](ParameterList& indices) { auto inp = tensorOrConstant( inputs[0], convert_indices_to_expr_handle(indices)); diff --git a/torch/csrc/jit/tensorexpr/tensor.cpp b/torch/csrc/jit/tensorexpr/tensor.cpp index 5bc734bb80b8..5a9af09f9d87 100644 --- 
a/torch/csrc/jit/tensorexpr/tensor.cpp +++ b/torch/csrc/jit/tensorexpr/tensor.cpp @@ -103,14 +103,14 @@ Tensor Compute( const std::function&)>& body_func) { std::vector args = create_index_vars(dims); ExprHandle body = body_func(args); - BufHandle buf = Buf::make(name, dims, body.dtype(), c10::nullopt, strides); + BufHandle buf = Buf::make(name, dims, body.dtype(), std::nullopt, strides); return Tensor(buf, args, body); } Tensor Compute( const std::string& name, const std::vector& dims, const std::function&)>& body_func) { - return Compute(name, dims, c10::nullopt, body_func); + return Compute(name, dims, std::nullopt, body_func); } Tensor Compute( @@ -124,14 +124,14 @@ Tensor Compute( std::vector args = create_index_vars(dims); ExprHandle body = body_func(args[0]); - BufHandle buf = Buf::make(name, dims, body.dtype(), c10::nullopt, strides); + BufHandle buf = Buf::make(name, dims, body.dtype(), std::nullopt, strides); return Tensor(buf, args, body); } Tensor Compute( const std::string& name, const std::vector& dims, const std::function& body_func) { - return Compute(name, dims, c10::nullopt, body_func); + return Compute(name, dims, std::nullopt, body_func); } Tensor Compute( @@ -145,7 +145,7 @@ Tensor Compute( } std::vector args = create_index_vars(dims); ExprHandle body = body_func(args[0], args[1]); - BufHandle buf = Buf::make(name, dims, body.dtype(), c10::nullopt, strides); + BufHandle buf = Buf::make(name, dims, body.dtype(), std::nullopt, strides); return Tensor(buf, args, body); } Tensor Compute( @@ -153,7 +153,7 @@ Tensor Compute( const std::vector& dims, const std::function& body_func) { - return Compute(name, dims, c10::nullopt, body_func); + return Compute(name, dims, std::nullopt, body_func); } Tensor Compute( @@ -168,7 +168,7 @@ Tensor Compute( } std::vector args = create_index_vars(dims); ExprHandle body = body_func(args[0], args[1], args[2]); - BufHandle buf = Buf::make(name, dims, body.dtype(), c10::nullopt, strides); + BufHandle buf = Buf::make(name, dims, body.dtype(), std::nullopt, strides); return Tensor(buf, args, body); } Tensor Compute( @@ -177,7 +177,7 @@ Tensor Compute( const std::function< ExprHandle(const VarHandle&, const VarHandle&, const VarHandle&)>& body_func) { - return Compute(name, dims, c10::nullopt, body_func); + return Compute(name, dims, std::nullopt, body_func); } Tensor Compute( @@ -194,7 +194,7 @@ Tensor Compute( } std::vector args = create_index_vars(dims); ExprHandle body = body_func(args[0], args[1], args[2], args[3]); - BufHandle buf = Buf::make(name, dims, body.dtype(), c10::nullopt, strides); + BufHandle buf = Buf::make(name, dims, body.dtype(), std::nullopt, strides); return Tensor(buf, args, body); } Tensor Compute( @@ -205,7 +205,7 @@ Tensor Compute( const VarHandle&, const VarHandle&, const VarHandle&)>& body_func) { - return Compute(name, dims, c10::nullopt, body_func); + return Compute(name, dims, std::nullopt, body_func); } Tensor Reduce( @@ -229,7 +229,7 @@ Tensor Reduce( const Reducer& reducer, const BufHandle& buffer, const std::vector& reduce_dims) { - return Reduce(name, dims, c10::nullopt, reducer, buffer, reduce_dims); + return Reduce(name, dims, std::nullopt, reducer, buffer, reduce_dims); } Tensor Reduce( @@ -253,7 +253,7 @@ Tensor Reduce( const Reducer& reducer, Tensor tensor, const std::vector& reduce_dims) { - return Reduce(name, dims, c10::nullopt, reducer, tensor, reduce_dims); + return Reduce(name, dims, std::nullopt, reducer, tensor, reduce_dims); } } // namespace torch::jit::tensorexpr diff --git 
a/torch/csrc/jit/tensorexpr/tensor.h b/torch/csrc/jit/tensorexpr/tensor.h index 7b589d0974b3..3fb55152b70d 100644 --- a/torch/csrc/jit/tensorexpr/tensor.h +++ b/torch/csrc/jit/tensorexpr/tensor.h @@ -161,7 +161,7 @@ Tensor Reduce( if (reduce_vars.empty()) { ExprHandle body = Reducer::getReduceBody(body_func, vars); BufHandle func_result = Buf::make( - func_name, dims, body.dtype(), c10::nullopt, std::move(strides)); + func_name, dims, body.dtype(), std::nullopt, std::move(strides)); return Tensor(std::move(func_result), vars, std::move(body)); } @@ -206,7 +206,7 @@ Tensor Reduce( return Reduce( func_name, dims, - c10::nullopt, + std::nullopt, reducer, init_func, body_func, @@ -238,7 +238,7 @@ Tensor Reduce( const BodyFunc& body_func, const std::vector& reduce_dims) { return Reduce( - func_name, dims, c10::nullopt, reducer, body_func, reduce_dims); + func_name, dims, std::nullopt, reducer, body_func, reduce_dims); } // Overload which allows inline lambda functions for the body_func. @@ -259,7 +259,7 @@ Tensor Reduce( const Reducer& reducer, const BodyFunc&& body_func, const std::vector& reduce_dims) { - return Reduce(func_name, dims, c10::nullopt, reducer, body_func, reduce_dims); + return Reduce(func_name, dims, std::nullopt, reducer, body_func, reduce_dims); } TORCH_API Tensor Reduce( diff --git a/torch/csrc/jit/testing/file_check.cpp b/torch/csrc/jit/testing/file_check.cpp index ec0011f40d77..027eb2aa0acf 100644 --- a/torch/csrc/jit/testing/file_check.cpp +++ b/torch/csrc/jit/testing/file_check.cpp @@ -10,13 +10,13 @@ // API modified from llvm::FileCheck #include -#include #include #include #include #include #include #include +#include #include #include @@ -43,13 +43,13 @@ struct Check { Check( CheckType type, std::string str, - std::optional count = c10::nullopt) + std::optional count = std::nullopt) : type_(type), count_(count), search_str_(std::move(str)) {} Check( CheckType type, c10::string_view str, - std::optional count = c10::nullopt) + std::optional count = std::nullopt) : Check(type, std::string(str.begin(), str.end()), count) {} CheckType type_; @@ -234,7 +234,7 @@ struct FileCheckImpl { TORCH_API void addCheck( CheckType type, const std::string& s, - std::optional count = c10::nullopt) { + std::optional count = std::nullopt) { addCheck(Check(type, s, count)); } @@ -264,7 +264,7 @@ struct FileCheckImpl { } size_t end_check_string = suffix_pos + check_suffix.size(); CheckType type = check_pair.first; - std::optional count = c10::nullopt; + std::optional count = std::nullopt; auto end_line = source->text_str().find("\n", end_check_string); bool exactly = false; if (type == CHECK_COUNT) { diff --git a/torch/csrc/lazy/backend/backend_device.cpp b/torch/csrc/lazy/backend/backend_device.cpp index 6d146ca0881c..3eac703be175 100644 --- a/torch/csrc/lazy/backend/backend_device.cpp +++ b/torch/csrc/lazy/backend/backend_device.cpp @@ -2,10 +2,10 @@ #include #include -#include #include #include #include +#include namespace torch { namespace lazy { @@ -60,7 +60,7 @@ std::optional GetBackendDevice(at::ITensorListRef tensors) { return lt->GetDevice(); } } - return c10::nullopt; + return std::nullopt; } std::optional GetBackendDevice(at::TensorList tensors) { @@ -71,19 +71,19 @@ std::optional GetBackendDevice(const at::Tensor& tensor) { if (auto lt = TryGetLtcTensor(tensor)) { return lt->GetDevice(); } - return c10::nullopt; + return std::nullopt; } std::optional GetBackendDevice( const std::optional& device) { if (device) { - return c10::make_optional(atenDeviceToBackendDevice(*device)); + 
return std::make_optional(atenDeviceToBackendDevice(*device)); } - return c10::nullopt; + return std::nullopt; } std::optional GetBackendDevice() { - return c10::nullopt; + return std::nullopt; } } // namespace lazy diff --git a/torch/csrc/lazy/backend/backend_device.h b/torch/csrc/lazy/backend/backend_device.h index e80c800a2ece..fdfc2ac15d9a 100644 --- a/torch/csrc/lazy/backend/backend_device.h +++ b/torch/csrc/lazy/backend/backend_device.h @@ -7,7 +7,7 @@ #include #include #include -#include +#include namespace c10 { struct Device; diff --git a/torch/csrc/lazy/core/ir_builder.h b/torch/csrc/lazy/core/ir_builder.h index 981e16677729..570dc942e6a6 100644 --- a/torch/csrc/lazy/core/ir_builder.h +++ b/torch/csrc/lazy/core/ir_builder.h @@ -1,12 +1,12 @@ #pragma once #include -#include #include #include #include #include #include +#include #include // This file is part of the backend interface. So, ops shouldn't be added or @@ -61,7 +61,7 @@ struct IrBuilder { virtual NodePtr MakeCast( const Value& input0, const at::ScalarType& dtype, - const std::optional& stype = c10::nullopt) const = 0; + const std::optional& stype = std::nullopt) const = 0; virtual NodePtr MakeTensorList(const OpList& inputs) const = 0; virtual NodePtr MakeGeneric( const OpKind& op, @@ -96,7 +96,7 @@ static inline NodePtr MakeExpand( static inline NodePtr MakeCast( const Value& input0, const at::ScalarType& dtype, - const std::optional& stype = c10::nullopt) { + const std::optional& stype = std::nullopt) { return getIrBuilder()->MakeCast(input0, dtype, stype); } static inline NodePtr MakeTensorList(const OpList& inputs) { diff --git a/torch/csrc/lazy/core/ir_dump_util.cpp b/torch/csrc/lazy/core/ir_dump_util.cpp index a4fb11761a67..d81d810a54e9 100644 --- a/torch/csrc/lazy/core/ir_dump_util.cpp +++ b/torch/csrc/lazy/core/ir_dump_util.cpp @@ -1,10 +1,10 @@ #include -#include #include #include #include #include +#include #include #include @@ -37,7 +37,7 @@ std::optional ParseAttrTag( // @lint-ignore-every CLANGTIDY facebook-hte-StdRegexIsAwful if (!std::regex_search( node_string.begin() + pos, node_string.end(), match, tag_regex)) { - return c10::nullopt; + return std::nullopt; } std::string::size_type vpos = match[1].second - node_string.begin() + 1; @@ -102,7 +102,7 @@ std::optional GetRootNodeId( const std::unordered_map& roots_ids) { auto it = roots_ids.find(node); if (it == roots_ids.end()) { - return c10::nullopt; + return std::nullopt; } return it->second; } diff --git a/torch/csrc/lazy/core/lazy_graph_executor.cpp b/torch/csrc/lazy/core/lazy_graph_executor.cpp index 569cd5ee5e0a..b01b5ead3434 100644 --- a/torch/csrc/lazy/core/lazy_graph_executor.cpp +++ b/torch/csrc/lazy/core/lazy_graph_executor.cpp @@ -695,7 +695,7 @@ std::vector LazyGraphExecutor::SetTensorData( // resets the ir_value. We have already done the resetting as part // of ExtractIRAndPrepareTensorData to overlap with previous execution. 
tensor->data()->handle = handle; - tensor->data()->tensor_data = c10::nullopt; + tensor->data()->tensor_data = std::nullopt; } tensors_data.emplace_back(std::move(handle)); } diff --git a/torch/csrc/lazy/core/shape.cpp b/torch/csrc/lazy/core/shape.cpp index 939e2745ed39..bf49cfacb99f 100644 --- a/torch/csrc/lazy/core/shape.cpp +++ b/torch/csrc/lazy/core/shape.cpp @@ -78,7 +78,7 @@ static c10::SymbolicShape get_symbolic_shape(at::Tensor& tensor) { std::vector> symbolic_dims; for (size_t i = 0; i < sizes.size(); i++) { if (is_symbolic->at(i)) { - symbolic_dims.emplace_back(c10::nullopt); + symbolic_dims.emplace_back(std::nullopt); } else { symbolic_dims.emplace_back(sizes.at(i)); } @@ -114,7 +114,7 @@ void applySymbolicShapesOnLT( auto res_symbolic = jit::calculateSymbolicShapesOnOp(&schema, converted_args); if (!res_symbolic) { for (auto& result_shape : result_shapes) { - result_shape = result_shape.with_symbolic_dims(c10::nullopt); + result_shape = result_shape.with_symbolic_dims(std::nullopt); } } else { TORCH_INTERNAL_ASSERT( diff --git a/torch/csrc/lazy/core/shape.h b/torch/csrc/lazy/core/shape.h index 63566619fd14..99e4a892bc58 100644 --- a/torch/csrc/lazy/core/shape.h +++ b/torch/csrc/lazy/core/shape.h @@ -19,7 +19,7 @@ class TORCH_API Shape { Shape( at::ScalarType scalar_type, c10::ArrayRef sizes, - std::optional> is_symbolic = c10::nullopt); + std::optional> is_symbolic = std::nullopt); std::string to_string() const; @@ -64,7 +64,7 @@ class TORCH_API Shape { // Stores which dimmensions are symbolic // If nullopt, either it hasn't been initialized or the symbolic // dimmensions are not calculatable - std::optional> is_symbolic_ = c10::nullopt; + std::optional> is_symbolic_ = std::nullopt; }; TORCH_API std::ostream& operator<<(std::ostream& out, const Shape& shape); diff --git a/torch/csrc/lazy/core/shape_inference.h b/torch/csrc/lazy/core/shape_inference.h index 77eeaaa56318..76ddea597a78 100644 --- a/torch/csrc/lazy/core/shape_inference.h +++ b/torch/csrc/lazy/core/shape_inference.h @@ -6,11 +6,11 @@ #include #include #include -#include #include #include #include #include +#include #include namespace torch { diff --git a/torch/csrc/lazy/core/tensor.cpp b/torch/csrc/lazy/core/tensor.cpp index ba0571f87df4..972af7dafc8b 100644 --- a/torch/csrc/lazy/core/tensor.cpp +++ b/torch/csrc/lazy/core/tensor.cpp @@ -143,13 +143,13 @@ void LazyTensor::SetDataHandle(BackendDataPtr handle, bool sync) { // trimming. AssignIrValue(Value()); if (sync) { - data()->tensor_data = c10::nullopt; + data()->tensor_data = std::nullopt; } } void LazyTensor::SetIrValue(Value ir_value) { data()->handle = nullptr; - data()->tensor_data = c10::nullopt; + data()->tensor_data = std::nullopt; AssignIrValue(std::move(ir_value)); TryLimitGraphSize(); } @@ -158,7 +158,7 @@ void LazyTensor::SetInPlaceIrValue(Value ir_value) { auto tensor_shape = shape(); if (tensor_shape.Get().scalar_type() != ir_value.shape().scalar_type()) { ir_value = - MakeCast(ir_value, tensor_shape.Get().scalar_type(), c10::nullopt); + MakeCast(ir_value, tensor_shape.Get().scalar_type(), std::nullopt); } SetIrValue(std::move(ir_value)); } @@ -253,7 +253,7 @@ at::Tensor LazyTensor::ToTensor(bool detached) { if (data()->ir_value || data()->handle != nullptr) { // If we have other authoritive sources, just drop our reference and // transfer it to the caller. - data()->tensor_data = c10::nullopt; + data()->tensor_data = std::nullopt; } else { // Otherwise we need to make a copy to prevent the caller changing our // version. 
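The lazy-tensor hunks above clear cached state by assigning std::nullopt to an optional member. A minimal sketch of what that assignment does is below; the Cache struct and drop_cached_copy function are hypothetical, not the LazyTensor internals.

// Sketch only: assigning std::nullopt empties the optional in place,
// destroying any contained value -- the same effect as calling reset().
#include <optional>
#include <string>

struct Cache {
  std::optional<std::string> tensor_data;
};

void drop_cached_copy(Cache& c) {
  c.tensor_data = std::nullopt; // equivalent to c.tensor_data.reset()
  // c.tensor_data.has_value() is now false
}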
diff --git a/torch/csrc/lazy/core/unique.h b/torch/csrc/lazy/core/unique.h index fc09c8d71d7d..3088da160860 100644 --- a/torch/csrc/lazy/core/unique.h +++ b/torch/csrc/lazy/core/unique.h @@ -5,7 +5,7 @@ #pragma once -#include +#include #include #include diff --git a/torch/csrc/lazy/core/util.h b/torch/csrc/lazy/core/util.h index e535e5365f22..bfd68b73355d 100644 --- a/torch/csrc/lazy/core/util.h +++ b/torch/csrc/lazy/core/util.h @@ -9,8 +9,8 @@ #include #include -#include #include +#include namespace torch { namespace lazy { @@ -114,7 +114,7 @@ std::optional> ToOptionalVector( if (arrayRef) { return arrayRef->vec(); } - return c10::nullopt; + return std::nullopt; } template diff --git a/torch/csrc/lazy/python/python_util.cpp b/torch/csrc/lazy/python/python_util.cpp index 90d9797e3fd3..1ae663c519f5 100644 --- a/torch/csrc/lazy/python/python_util.cpp +++ b/torch/csrc/lazy/python/python_util.cpp @@ -13,12 +13,12 @@ namespace lazy { std::optional GetPythonFrameTop() { if (!Py_IsInitialized()) { - return c10::nullopt; + return std::nullopt; } pybind11::gil_scoped_acquire gil; PyFrameObject* frame = PyEval_GetFrame(); if (frame == nullptr) { - return c10::nullopt; + return std::nullopt; } SourceLocation loc; auto code = THPCodeObjectPtr(PyFrame_GetCode(frame)); diff --git a/torch/csrc/lazy/python/python_util.h b/torch/csrc/lazy/python/python_util.h index 456aafa88097..271c694ee35d 100644 --- a/torch/csrc/lazy/python/python_util.h +++ b/torch/csrc/lazy/python/python_util.h @@ -1,7 +1,7 @@ #pragma once -#include #include #include +#include #include namespace torch { diff --git a/torch/csrc/lazy/ts_backend/ir_builder.h b/torch/csrc/lazy/ts_backend/ir_builder.h index c53829237443..9fff33135a5c 100644 --- a/torch/csrc/lazy/ts_backend/ir_builder.h +++ b/torch/csrc/lazy/ts_backend/ir_builder.h @@ -34,7 +34,7 @@ struct TorchScriptIrBuilder : IrBuilder { const Value& input0, const at::ScalarType& dtype, const std::optional& stype = - c10::nullopt) const override { + std::nullopt) const override { return ReuseOrMakeNode(input0, dtype, stype); } NodePtr MakeTensorList(const OpList& inputs) const override { diff --git a/torch/csrc/lazy/ts_backend/ts_eager_fallback.cpp b/torch/csrc/lazy/ts_backend/ts_eager_fallback.cpp index 42acc2c5df10..a00ec260e5a1 100644 --- a/torch/csrc/lazy/ts_backend/ts_eager_fallback.cpp +++ b/torch/csrc/lazy/ts_backend/ts_eager_fallback.cpp @@ -137,7 +137,7 @@ std::optional compute_target_device( } } } - return c10::nullopt; + return std::nullopt; } } // namespace diff --git a/torch/csrc/lazy/ts_backend/ts_native_functions.cpp b/torch/csrc/lazy/ts_backend/ts_native_functions.cpp index 78ae6a6f6e2e..55d0b7f5a465 100644 --- a/torch/csrc/lazy/ts_backend/ts_native_functions.cpp +++ b/torch/csrc/lazy/ts_backend/ts_native_functions.cpp @@ -39,10 +39,10 @@ at::Tensor CreateLtcTensor( std::optional GetLtcDevice( const std::optional& device) { if (!device) { - return c10::nullopt; + return std::nullopt; } if (device->type() != at::kLazy) { - return c10::nullopt; + return std::nullopt; } return torch::lazy::atenDeviceToBackendDevice(*device); } @@ -235,7 +235,7 @@ at::Tensor LazyNativeFunctions::_to_copy( // captured IR, or we will try to convert an eager tensor back to a lazy one // inside the torchscript executor lazy:0 -> lazy:1 is handled in case3, so // we can safely drop the device argument - device = c10::nullopt; + device = std::nullopt; torch::lazy::NodePtr node = torch::lazy::ReuseNode( lazy_self->GetIrValue(), @@ -307,7 +307,7 @@ at::Tensor 
LazyNativeFunctions::empty_strided_symint( std::optional pin_memory) { TORCH_LAZY_FN_COUNTER("lazy::"); at::Tensor t = - empty_symint(sym_size, dtype, layout, device, pin_memory, c10::nullopt); + empty_symint(sym_size, dtype, layout, device, pin_memory, std::nullopt); auto size = C10_AS_INTARRAYREF_SLOW(sym_size); auto stride = C10_AS_INTARRAYREF_SLOW(sym_stride); return t.as_strided(size, stride, /*storage_offset=*/0); diff --git a/torch/csrc/profiler/collection.cpp b/torch/csrc/profiler/collection.cpp index e5daea953c57..687e8bf28787 100644 --- a/torch/csrc/profiler/collection.cpp +++ b/torch/csrc/profiler/collection.cpp @@ -200,7 +200,7 @@ auto InputOutputEncoder::getIValueGenerator(const IOType& io_type) { if (io_type == tagToIOType(tag)) { out.emplace_back(std::move(input)); } else { - out.emplace_back(c10::nullopt); + out.emplace_back(std::nullopt); } }; @@ -223,7 +223,7 @@ auto InputOutputEncoder::getIValueGenerator(const IOType& io_type) { arg.emplace_back(decode_tensor()); } if (found_undefined) { - push_value(*tag_it, c10::nullopt); + push_value(*tag_it, std::nullopt); } else { push_value(Tag::TensorListBegin, std::move(arg)); } @@ -236,7 +236,7 @@ auto InputOutputEncoder::getIValueGenerator(const IOType& io_type) { case Tag::UndefinedTensor: case Tag::Other: - push_value(*tag_it, c10::nullopt); + push_value(*tag_it, std::nullopt); break; case Tag::TERMINATOR: diff --git a/torch/csrc/profiler/collection.h b/torch/csrc/profiler/collection.h index 1c0a780370a9..71cb0c02bccc 100644 --- a/torch/csrc/profiler/collection.h +++ b/torch/csrc/profiler/collection.h @@ -91,7 +91,7 @@ using op_input_t = std::variant< TensorMetadata, std::vector, c10::IValue, - c10::nullopt_t>; + std::nullopt_t>; // ============================================================================ // == ExtraFields ============================================================= diff --git a/torch/csrc/profiler/python/init.cpp b/torch/csrc/profiler/python/init.cpp index e5cae40c84e3..25f93a2663df 100644 --- a/torch/csrc/profiler/python/init.cpp +++ b/torch/csrc/profiler/python/init.cpp @@ -458,7 +458,7 @@ void initPythonBindings(PyObject* module) { [&](const c10::IValue& v) { out.append(torch::jit::toPyObject(v)); }, - [&](const c10::nullopt_t&) { out.append(py::none()); }, + [&](const std::nullopt_t&) { out.append(py::none()); }, [&](const auto& v) { out.append(py::cast(v)); }), input); } diff --git a/torch/csrc/profiler/unwind/unwind.cpp b/torch/csrc/profiler/unwind/unwind.cpp index 74d7877edadf..8a3c4487ab77 100644 --- a/torch/csrc/profiler/unwind/unwind.cpp +++ b/torch/csrc/profiler/unwind/unwind.cpp @@ -290,12 +290,12 @@ std::vector unwind() { std::optional> libraryFor(void* addr) { if (!addr) { - return c10::nullopt; + return std::nullopt; } std::shared_lock lock(cache_mutex_); const LibraryInfo* library_info = unwind_cache.findLibraryFor((uint64_t)addr); if (!library_info) { - return c10::nullopt; + return std::nullopt; } return std::make_pair( library_info->name(), (uint64_t)addr - library_info->load_bias()); diff --git a/torch/csrc/profiler/unwind/unwind.h b/torch/csrc/profiler/unwind/unwind.h index 1c302dfca445..bf93b88fa63d 100644 --- a/torch/csrc/profiler/unwind/unwind.h +++ b/torch/csrc/profiler/unwind/unwind.h @@ -1,7 +1,7 @@ #pragma once #include -#include #include +#include #include #include diff --git a/torch/csrc/profiler/unwind/unwind_error.h b/torch/csrc/profiler/unwind/unwind_error.h index ae3630057f6a..cca8f8d12187 100644 --- a/torch/csrc/profiler/unwind/unwind_error.h +++ 
b/torch/csrc/profiler/unwind/unwind_error.h @@ -1,6 +1,6 @@ #pragma once -#include #include +#include #include namespace torch::unwind { diff --git a/torch/csrc/profiler/util.h b/torch/csrc/profiler/util.h index b06a479e70cc..1a607909c452 100644 --- a/torch/csrc/profiler/util.h +++ b/torch/csrc/profiler/util.h @@ -9,10 +9,10 @@ #include #include -#include #include #include #include +#include // TODO: replace with pytorch/rfcs#43 when it is ready. #define SOFT_ASSERT(cond, ...) \ diff --git a/torch/csrc/tensor/python_tensor.cpp b/torch/csrc/tensor/python_tensor.cpp index 8d18180ed919..6960034626d5 100644 --- a/torch/csrc/tensor/python_tensor.cpp +++ b/torch/csrc/tensor/python_tensor.cpp @@ -449,7 +449,7 @@ void py_set_default_dtype(PyObject* obj) { THPDtype_Check(obj), "invalid dtype object: only floating-point types are supported as the default type"); auto scalar_type = ((THPDtype*)obj)->scalar_type; - set_default_tensor_type(/*backend=*/c10::nullopt, scalar_type); + set_default_tensor_type(/*backend=*/std::nullopt, scalar_type); } c10::DispatchKey get_default_dispatch_key() { diff --git a/torch/csrc/utils/nested.cpp b/torch/csrc/utils/nested.cpp index 29ccf312851e..360abda078df 100644 --- a/torch/csrc/utils/nested.cpp +++ b/torch/csrc/utils/nested.cpp @@ -82,7 +82,7 @@ at::Tensor nested_tensor_ctor( final_device = new_list[0].device(); } auto out = at::_nested_tensor_from_tensor_list( - new_list, final_dtype, c10::nullopt, final_device, pin_memory); + new_list, final_dtype, std::nullopt, final_device, pin_memory); out.requires_grad_(args_requires_grad); return out; } diff --git a/torch/csrc/utils/python_arg_parser.cpp b/torch/csrc/utils/python_arg_parser.cpp index 9aa80427929d..a1a1638f9120 100644 --- a/torch/csrc/utils/python_arg_parser.cpp +++ b/torch/csrc/utils/python_arg_parser.cpp @@ -268,7 +268,7 @@ static py::object dispatch_on_subclass( bool is_torch_function, const char* torch_function_name_str, std::optional maybe_mode_key = - c10::nullopt) { + std::nullopt) { py::object ret; for (auto& arg : overloaded_args) { py::object torch_function = @@ -1005,11 +1005,11 @@ std::string FunctionParameter::type_name() const { static inline std::optional parse_as_integer(const std::string& s) { if (s.empty()) - return c10::nullopt; + return std::nullopt; char* str_end = nullptr; long ans = strtol(s.c_str(), &str_end, 0); // *str_end == 0 if the entire string was parsed as an integer. - return (*str_end == 0) ? std::optional(ans) : c10::nullopt; + return (*str_end == 0) ? 
std::optional(ans) : std::nullopt; } /* diff --git a/torch/csrc/utils/python_arg_parser.h b/torch/csrc/utils/python_arg_parser.h index 8966131f9825..85a4d52bc16d 100644 --- a/torch/csrc/utils/python_arg_parser.h +++ b/torch/csrc/utils/python_arg_parser.h @@ -399,7 +399,7 @@ inline std::optional PythonArgs::optionalTensor(int i) { if (t.defined()) { return t; } else { - return c10::nullopt; + return std::nullopt; } } @@ -435,7 +435,7 @@ inline at::Scalar PythonArgs::scalarWithDefault( inline std::optional PythonArgs::scalarOptional(int i) { if (!args[i]) - return c10::nullopt; + return std::nullopt; return scalar_slow(i); } @@ -771,7 +771,7 @@ inline at::ScalarType PythonArgs::scalartype(int i) { inline std::optional PythonArgs::scalartypeOptional(int i) { if (!args[i]) - return c10::nullopt; + return std::nullopt; return scalartype(i); } @@ -796,7 +796,7 @@ inline at::Layout PythonArgs::layoutWithDefault( inline std::optional PythonArgs::layoutOptional(int i) { if (!args[i]) - return c10::nullopt; + return std::nullopt; return layout(i); } @@ -837,7 +837,7 @@ inline at::Device PythonArgs::deviceWithDefault( inline std::optional PythonArgs::deviceOptional(int i) { if (!args[i]) - return c10::nullopt; + return std::nullopt; return device(i); } @@ -863,7 +863,7 @@ inline std::vector parseDimnameList(PyObject* arg) { inline std::optional> PythonArgs:: toDimnameListOptional(int i) { if (!args[i]) - return c10::nullopt; + return std::nullopt; return parseDimnameList(args[i]); } @@ -890,7 +890,7 @@ inline at::MemoryFormat PythonArgs::memoryformat(int i) { inline std::optional PythonArgs::memoryformatOptional(int i) { if (!args[i]) - return c10::nullopt; + return std::nullopt; return memoryformat(i); } @@ -918,7 +918,7 @@ inline std::string PythonArgs::stringWithDefault( inline std::optional PythonArgs::stringOptional(int i) { if (!args[i]) - return c10::nullopt; + return std::nullopt; return THPUtils_unpackString(args[i]); } @@ -936,7 +936,7 @@ inline c10::string_view PythonArgs::stringViewWithDefault( inline std::optional PythonArgs::stringViewOptional(int i) { if (!args[i]) - return c10::nullopt; + return std::nullopt; return THPUtils_unpackStringView(args[i]); } @@ -990,26 +990,26 @@ inline int64_t PythonArgs::toInt64WithDefault(int i, int64_t default_int) { inline std::optional PythonArgs::toInt64Optional(int i) { if (!args[i]) - return c10::nullopt; + return std::nullopt; return toInt64(i); } inline std::optional PythonArgs::toSymIntOptional(int i) { if (!args[i]) - return c10::nullopt; + return std::nullopt; return toSymInt(i); } inline std::optional PythonArgs::toBoolOptional(int i) { if (!args[i]) { - return c10::nullopt; + return std::nullopt; } return toBool(i); } inline std::optional PythonArgs::toDoubleOptional(int i) { if (!args[i]) { - return c10::nullopt; + return std::nullopt; } return toDouble(i); } @@ -1071,7 +1071,7 @@ inline bool PythonArgs::isNone(int i) { inline std::optional PythonArgs::generator(int i) { if (!args[i]) - return c10::nullopt; + return std::nullopt; return reinterpret_cast(args[i])->cdata; } diff --git a/torch/csrc/utils/python_dispatch.cpp b/torch/csrc/utils/python_dispatch.cpp index ec0af99842d2..2d18978018a1 100644 --- a/torch/csrc/utils/python_dispatch.cpp +++ b/torch/csrc/utils/python_dispatch.cpp @@ -65,8 +65,8 @@ static c10::AliasAnalysisKind parseAliasAnalysisKind(const std::string& k) { template inline torch::CppFunction dispatch_str(const char* key, Func&& raw_f) { auto mb_key = std::string(key).empty() - ? 
c10::nullopt - : c10::make_optional(c10::parseDispatchKey(key)); + ? std::nullopt + : std::make_optional(c10::parseDispatchKey(key)); if (mb_key) { return torch::dispatch(*mb_key, std::forward(raw_f)); } else { @@ -217,7 +217,7 @@ static py::object ophandle_call_boxed( handle.schema(), std::move(args), kwargs, - /*self=*/c10::nullopt); + /*self=*/std::nullopt); { pybind11::gil_scoped_release no_gil_guard; handle.callBoxed(stack); @@ -264,7 +264,7 @@ void initDispatchBindings(PyObject* module) { handle.schema(), std::move(args), kwargs, - /*self=*/c10::nullopt); + /*self=*/std::nullopt); { pybind11::gil_scoped_release no_gil_guard; handle.redispatchBoxed(keyset, &stack); @@ -477,8 +477,8 @@ void initDispatchBindings(PyObject* module) { parseKind(kind), std::move(name), std::string(dispatch).empty() - ? c10::nullopt - : c10::make_optional(c10::parseDispatchKey(dispatch)), + ? std::nullopt + : std::make_optional(c10::parseDispatchKey(dispatch)), "/dev/null", // temporary workaround linenum); END_HANDLE_TH_ERRORS_PYBIND @@ -814,8 +814,8 @@ void initDispatchBindings(PyObject* module) { "_dispatch_print_registrations_for_dispatch_key", [](const char* dispatch_key = "") { auto k = std::string(dispatch_key).empty() - ? c10::nullopt - : c10::make_optional(c10::parseDispatchKey(dispatch_key)); + ? std::nullopt + : std::make_optional(c10::parseDispatchKey(dispatch_key)); auto op_names = c10::Dispatcher::singleton().getRegistrationsForDispatchKey(k); for (auto& op : op_names) { @@ -830,7 +830,7 @@ void initDispatchBindings(PyObject* module) { try { return c10::parseDispatchKey(dispatch_key); } catch (const c10::Error& err) { - return c10::nullopt; + return std::nullopt; } }); @@ -838,8 +838,8 @@ void initDispatchBindings(PyObject* module) { "_dispatch_get_registrations_for_dispatch_key", [](const char* dispatch_key = "") { auto k = std::string(dispatch_key).empty() - ? c10::nullopt - : c10::make_optional(c10::parseDispatchKey(dispatch_key)); + ? 
std::nullopt + : std::make_optional(c10::parseDispatchKey(dispatch_key)); auto op_names = c10::Dispatcher::singleton().getRegistrationsForDispatchKey(k); std::vector names; @@ -888,7 +888,7 @@ void initDispatchBindings(PyObject* module) { "Expected device_type string to not have a device index; got ", device_type); return c10::toString( - c10::computeDispatchKey(c10::nullopt, c10::nullopt, device)); + c10::computeDispatchKey(std::nullopt, std::nullopt, device)); }); m.def("_are_functorch_transforms_active", []() { diff --git a/torch/csrc/utils/python_raii.h b/torch/csrc/utils/python_raii.h index bc7b5c263e0d..af63d1efba54 100644 --- a/torch/csrc/utils/python_raii.h +++ b/torch/csrc/utils/python_raii.h @@ -1,5 +1,5 @@ -#include #include +#include #include namespace torch::impl { @@ -17,7 +17,7 @@ struct RAIIContextManager { } void exit() { - guard_ = c10::nullopt; + guard_ = std::nullopt; } private: @@ -50,7 +50,7 @@ struct DeprecatedRAIIContextManager { void enter() {} void exit() { - guard_ = c10::nullopt; + guard_ = std::nullopt; } private: diff --git a/torch/csrc/utils/python_symnode.h b/torch/csrc/utils/python_symnode.h index 15738b1a67e1..e82c30a8c98f 100644 --- a/torch/csrc/utils/python_symnode.h +++ b/torch/csrc/utils/python_symnode.h @@ -144,7 +144,7 @@ class PythonSymNodeImpl : public c10::SymNodeImpl { py::gil_scoped_acquire acquire; const auto& r = getPyObj().attr("maybe_as_int")(); if (r.is_none()) { - return c10::nullopt; + return std::nullopt; } else { return r.cast(); } diff --git a/torch/csrc/utils/schema_info.cpp b/torch/csrc/utils/schema_info.cpp index 0caa5b254d27..61eecc7cf007 100644 --- a/torch/csrc/utils/schema_info.cpp +++ b/torch/csrc/utils/schema_info.cpp @@ -8,7 +8,7 @@ void SchemaInfo::addArgumentValue( const at::IValue& value) { std::optional index = schema_.argumentIndexWithName(name); TORCH_INTERNAL_ASSERT( - index != c10::nullopt, "Schema has no argument named ", name); + index != std::nullopt, "Schema has no argument named ", name); value_map_[name] = value; alias_maps_current_ = false; } @@ -102,7 +102,7 @@ bool SchemaInfo::is_mutable(const c10::SchemaArgument& argument) { } bool SchemaInfo::has_argument(c10::string_view name) { - return schema_.argumentIndexWithName(name) != c10::nullopt; + return schema_.argumentIndexWithName(name) != std::nullopt; } bool SchemaInfo::is_mutable(c10::string_view name) { diff --git a/torch/csrc/utils/tensor_new.cpp b/torch/csrc/utils/tensor_new.cpp index 4fd398d1a8fa..e66c99bc4d49 100644 --- a/torch/csrc/utils/tensor_new.cpp +++ b/torch/csrc/utils/tensor_new.cpp @@ -28,8 +28,8 @@ #include #include #include -#include #include +#include #include #include @@ -53,7 +53,7 @@ thread_local bool kOnlyLiftCPUTensors = false; TensorOptions build_options( c10::TensorOptions options, at::ScalarType scalar_type, - const std::optional& device = c10::nullopt) { + const std::optional& device = std::nullopt) { options = options.dtype(scalar_type); if (device.has_value()) { return options.device(device); @@ -1257,7 +1257,7 @@ void _validate_sparse_coo_tensor_args( Tensor values = internal_new_from_data( options, scalar_type, - c10::nullopt, + std::nullopt, r.pyobject(1), /*copy_variables=*/false, /*copy_numpy=*/true, @@ -1266,7 +1266,7 @@ void _validate_sparse_coo_tensor_args( Tensor indices = internal_new_from_data( values.options(), kLong, - c10::nullopt, + std::nullopt, r.pyobject(0), /*copy_variables=*/false, /*copy_numpy=*/true, @@ -1298,7 +1298,7 @@ void _validate_sparse_compressed_tensor_args( Tensor values = internal_new_from_data( 
options, scalar_type, - c10::nullopt, + std::nullopt, r.pyobject(ARG_VALUES), /*copy_variables=*/false, /*copy_numpy=*/true, @@ -1307,7 +1307,7 @@ void _validate_sparse_compressed_tensor_args( Tensor compressed_indices = internal_new_from_data( values.options(), kInt, - c10::nullopt, + std::nullopt, r.pyobject(ARG_COMPRESSED_INDICES), /*copy_variables=*/false, /*copy_numpy=*/true, @@ -1315,7 +1315,7 @@ void _validate_sparse_compressed_tensor_args( Tensor plain_indices = internal_new_from_data( values.options(), kInt, - c10::nullopt, + std::nullopt, r.pyobject(ARG_PLAIN_INDICES), /*copy_variables=*/false, /*copy_numpy=*/true, @@ -1369,7 +1369,7 @@ void _validate_sparse_compressed_tensor_args_template( Tensor values = internal_new_from_data( options, scalar_type, - c10::nullopt, + std::nullopt, r.pyobject(ARG_VALUES), /*copy_variables=*/false, /*copy_numpy=*/true, @@ -1378,7 +1378,7 @@ void _validate_sparse_compressed_tensor_args_template( Tensor compressed_indices = internal_new_from_data( values.options(), kInt, - c10::nullopt, + std::nullopt, r.pyobject(ARG_COMPRESSED_INDICES), /*copy_variables=*/false, /*copy_numpy=*/true, @@ -1386,7 +1386,7 @@ void _validate_sparse_compressed_tensor_args_template( Tensor plain_indices = internal_new_from_data( values.options(), kInt, - c10::nullopt, + std::nullopt, r.pyobject(ARG_PLAIN_INDICES), /*copy_variables=*/false, /*copy_numpy=*/true, diff --git a/torch/csrc/utils/torch_dispatch_mode.h b/torch/csrc/utils/torch_dispatch_mode.h index 8ca451143573..2eb8ba7a1cbb 100644 --- a/torch/csrc/utils/torch_dispatch_mode.h +++ b/torch/csrc/utils/torch_dispatch_mode.h @@ -19,7 +19,7 @@ struct StashTorchDispatchModeGuard { } ~StashTorchDispatchModeGuard() { - if (saved_mode_key_ != c10::nullopt) { + if (saved_mode_key_ != std::nullopt) { c10::impl::TorchDispatchModeTLS::set_mode( saved_mode_, saved_mode_key_.value()); } else { diff --git a/torch/custom_class_detail.h b/torch/custom_class_detail.h index e27721c34986..135c49ac76a9 100644 --- a/torch/custom_class_detail.h +++ b/torch/custom_class_detail.h @@ -47,7 +47,7 @@ struct arg { // Explicit constructor. explicit arg(std::string name) - : name_(std::move(name)), value_(c10::nullopt) {} + : name_(std::move(name)), value_(std::nullopt) {} // Assignment operator. This enables the pybind-like syntax of // torch::arg("name") = value. 
arg& operator=(const c10::IValue& rhs) { diff --git a/torch/library.h b/torch/library.h index c860f4c20344..d75e6b019821 100644 --- a/torch/library.h +++ b/torch/library.h @@ -215,7 +215,7 @@ class TORCH_API CppFunction final { static CppFunction makeFromBoxedKernel(c10::BoxedKernel kernel) { return CppFunction( c10::KernelFunction::makeFromBoxedKernel(std::move(kernel)), - /* cpp_signature */ c10::nullopt, // not known for boxed functions + /* cpp_signature */ std::nullopt, // not known for boxed functions /* schema */ nullptr); } @@ -337,7 +337,7 @@ template inline CppFunction dispatch(c10::DispatchKey k, Func&& raw_f) { CppFunction f(std::forward(raw_f)); if (k == c10::DispatchKey::CatchAll) { - f.dispatch_key_ = c10::nullopt; + f.dispatch_key_ = std::nullopt; } else { f.dispatch_key_ = k; } @@ -930,7 +930,7 @@ class TorchLibraryInit final { torch::Library::DEF, \ &TORCH_LIBRARY_init_##ns, \ #ns, \ - c10::nullopt, \ + std::nullopt, \ __FILE__, \ __LINE__); \ void TORCH_LIBRARY_init_##ns(torch::Library& m) @@ -960,7 +960,7 @@ class TorchLibraryInit final { torch::Library::FRAGMENT, \ &C10_CONCATENATE(TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid), \ #ns, \ - c10::nullopt, \ + std::nullopt, \ __FILE__, \ __LINE__); \ void C10_CONCATENATE( \ @@ -1024,7 +1024,7 @@ class TorchLibraryInit final { ? &C10_CONCATENATE(TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid) \ : [](torch::Library&) -> void {}), \ #ns, \ - c10::make_optional(c10::DispatchKey::k), \ + std::make_optional(c10::DispatchKey::k), \ __FILE__, \ __LINE__); \ void C10_CONCATENATE( \ @@ -1039,13 +1039,13 @@ class TorchLibraryInit final { /// \private #define MAKE_TORCH_LIBRARY(ns) \ - torch::Library(torch::Library::DEF, #ns, c10::nullopt, __FILE__, __LINE__) + torch::Library(torch::Library::DEF, #ns, std::nullopt, __FILE__, __LINE__) /// \private #define MAKE_TORCH_LIBRARY_IMPL(ns, k) \ torch::Library( \ torch::Library::IMPL, \ #ns, \ - c10::make_optional(c10::DispatchKey::k), \ + std::make_optional(c10::DispatchKey::k), \ __FILE__, \ __LINE__)
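
The hunks above all apply one mechanical substitution: c10::nullopt becomes std::nullopt, c10::make_optional becomes std::make_optional, and c10::nullopt_t becomes std::nullopt_t, relying on the C++17 standard <optional> facilities that the added include lines bring in. A minimal caller-side sketch of the resulting spelling, for orientation only; parse_flag and its values are hypothetical illustrations and are not part of this diff:

#include <optional>
#include <string>

// After the migration, only the std:: spellings are used.
std::optional<int> parse_flag(const std::string& s) {
  if (s.empty()) {
    return std::nullopt;           // was: return c10::nullopt;
  }
  return std::make_optional(1);    // was: return c10::make_optional(1);
}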