Mirror of https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
[1/N] Change #include <c10/util/Optional.h> to #include <optional> (#128301)
Fixes #ISSUE_NUMBER
Pull Request resolved: https://github.com/pytorch/pytorch/pull/128301
Approved by: https://github.com/ezyang
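The change is mechanical: includes of c10/util/Optional.h become the standard <optional> header, and c10::nullopt / c10::make_optional / c10::bad_optional_access become their std:: equivalents. This is safe only on the assumption (implied by this PR series) that c10::optional already aliases std::optional. A minimal standalone sketch of the post-migration idiom — the function and names below are hypothetical, not code from this diff:

#include <optional>
#include <string>

// Hypothetical helper showing the pattern applied throughout the diff below:
// spell the empty state as std::nullopt and construct with std::make_optional.
std::optional<std::string> parse_flag(bool enabled) {
  if (!enabled) {
    return std::nullopt;  // previously: return c10::nullopt;
  }
  return std::make_optional<std::string>("on");  // previously: c10::make_optional
}

The diff below applies this substitution across ATen, c10, CUDA/XPU, and torch C++ API headers.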
@@ -3,7 +3,7 @@
#include <ATen/core/Generator.h>
#include <ATen/core/MT19937RNGEngine.h>
#include <c10/core/GeneratorImpl.h>
-#include <c10/util/Optional.h>
+#include <optional>

namespace at {

@@ -4,7 +4,7 @@
#include <c10/core/ScalarType.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/util/DimVector.h>
-#include <c10/util/Optional.h>
+#include <optional>
#include <sstream>
#include <vector>

@@ -32,7 +32,7 @@ void SavedTensorDefaultHooks::disable(const std::string& message) {
}

void SavedTensorDefaultHooks::enable() {
-  tls.disabled_error_message = c10::nullopt;
+  tls.disabled_error_message = std::nullopt;
}

/* static */ bool SavedTensorDefaultHooks::set_tracing(bool is_tracing) {
@@ -1,8 +1,8 @@
#pragma once

#include <c10/macros/Export.h>
-#include <c10/util/Optional.h>
#include <c10/util/python_stub.h>
+#include <optional>
#include <stack>
#include <string>

@@ -5,8 +5,8 @@
#include <ATen/core/Tensor.h>
#include <ATen/core/TensorBody.h>
#include <c10/core/SymInt.h>
-#include <c10/util/Optional.h>
#include <c10/util/irange.h>
+#include <optional>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
@@ -1,6 +1,6 @@
#pragma once

-#include <c10/util/Optional.h>
+#include <optional>
#include <c10/util/string_view.h>
#include <ATen/Config.h>
#include <ATen/native/DispatchStub.h>

@@ -3,8 +3,8 @@
#include <ATen/core/ivalue.h>
#include <ATen/core/operator_name.h>
#include <c10/macros/Export.h>
-#include <c10/util/Optional.h>
#include <c10/util/SmallVector.h>
+#include <optional>

#include <array>
#include <functional>

@@ -3,8 +3,8 @@
#include <c10/core/SymNodeImpl.h>
#include <c10/macros/Export.h>
#include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
#include <cstdint>
+#include <optional>
#include <string>
#include <variant>

@@ -73,14 +73,14 @@ class C10_API ConstantSymNodeImpl : public SymNodeImpl {
    if constexpr (is_int_()) {
      return ::std::get<int64_t>(value_);
    } else {
-      return c10::nullopt;
+      return std::nullopt;
    }
  }
  std::optional<bool> constant_bool() override {
    if constexpr (is_bool_()) {
      return ::std::get<bool>(value_);
    } else {
-      return c10::nullopt;
+      return std::nullopt;
    }
  }
  bool is_constant() override {
@@ -30,7 +30,7 @@ inline ScalarType typeMetaToScalarType(caffe2::TypeMeta dtype) {
inline optional<at::ScalarType> optTypeMetaToScalarType(
    optional<caffe2::TypeMeta> type_meta) {
  if (!type_meta.has_value()) {
-    return c10::nullopt;
+    return std::nullopt;
  }
  return type_meta->toScalarType();
}
@@ -3,9 +3,9 @@
#include <c10/core/SymNodeImpl.h>
#include <c10/macros/Export.h>
#include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
#include <c10/util/intrusive_ptr.h>
#include <cstdint>
+#include <optional>
#include <ostream>
#include <utility>

@@ -68,7 +68,7 @@ class C10_API SymBool {

  std::optional<bool> maybe_as_bool() const {
    if (!is_heap_allocated()) {
-      return c10::make_optional(data_);
+      return std::make_optional(data_);
    }
    return toSymNodeImplUnowned()->constant_bool();
  }

@@ -5,7 +5,7 @@
#include <c10/macros/Export.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
+#include <optional>

#include <cstdint>
#include <iterator>
@@ -231,7 +231,7 @@ class C10_API SymInt {

  std::optional<int64_t> maybe_as_int() const {
    if (!is_heap_allocated()) {
-      return c10::make_optional(data_);
+      return std::make_optional(data_);
    }
    auto* node = toSymNodeImplUnowned();
    if (auto c = node->constant_int()) {

@@ -3,8 +3,8 @@
#include <c10/core/SymInt.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
#include <cstdint>
+#include <optional>

namespace c10 {
using SymIntArrayRef = ArrayRef<SymInt>;
@@ -23,7 +23,7 @@ inline std::optional<at::IntArrayRef> asIntArrayRefSlowOpt(
    c10::SymIntArrayRef ar) {
  for (const c10::SymInt& sci : ar) {
    if (sci.is_heap_allocated()) {
-      return c10::nullopt;
+      return std::nullopt;
    }
  }

@@ -3,9 +3,9 @@
#include <c10/macros/Export.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
#include <c10/util/intrusive_ptr.h>
#include <cstdint>
+#include <optional>
#include <ostream>
#include <string>

@@ -207,19 +207,19 @@ class C10_API SymNodeImpl : public c10::intrusive_ptr_target {
    TORCH_CHECK(false, "NYI");
  };
  virtual std::optional<int64_t> nested_int() {
-    return c10::nullopt;
+    return std::nullopt;
  }
  virtual std::optional<int64_t> nested_int_coeff() {
-    return c10::nullopt;
+    return std::nullopt;
  }
  virtual std::optional<int64_t> constant_int() {
-    return c10::nullopt;
+    return std::nullopt;
  }
  virtual std::optional<bool> constant_bool() {
-    return c10::nullopt;
+    return std::nullopt;
  }
  virtual std::optional<int64_t> maybe_as_int() {
-    return c10::nullopt;
+    return std::nullopt;
  }
  virtual bool is_constant() {
    return false;

@@ -56,7 +56,7 @@ normalize_sym_sizes_strides(SymIntArrayRef sizes, SymIntArrayRef strides) {
    // Couldn't find. Tell the caller to do the normal computation
    // Alternately, if everything is hinted, we want the normal computation
    // too
-    return c10::nullopt;
+    return std::nullopt;
  }
  // Populate the SymNode array
  std::vector<SymNode> size_nodes;
@@ -69,7 +69,7 @@ normalize_sym_sizes_strides(SymIntArrayRef sizes, SymIntArrayRef strides) {
  for (const auto& s : strides) {
    stride_nodes.emplace_back(s.wrap_node(base));
  }
-  return c10::make_optional(
+  return std::make_optional(
      std::tuple<SymNode, std::vector<SymNode>, std::vector<SymNode>>(
          std::move(base), std::move(size_nodes), std::move(stride_nodes)));
}

@@ -8,9 +8,9 @@
#include <c10/core/impl/PyInterpreter.h>
#include <c10/core/impl/TorchDispatchModeTLS.h>
#include <c10/util/Logging.h>
-#include <c10/util/Optional.h>
#include <c10/util/accumulate.h>
#include <c10/util/irange.h>
+#include <optional>

#include <utility>

@@ -24,12 +24,12 @@
#include <c10/util/DimVector.h>
#include <c10/util/Exception.h>
#include <c10/util/Flags.h>
-#include <c10/util/Optional.h>
#include <c10/util/accumulate.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/util/irange.h>
#include <c10/util/safe_numerics.h>
#include <c10/util/typeid.h>
+#include <optional>

#include <algorithm>
#include <atomic>
@@ -233,8 +233,8 @@ struct C10_API ExtraMeta {
  std::unique_ptr<c10::SymbolicShapeMeta> symbolic_shape_meta_ = nullptr;
  std::unique_ptr<c10::NamedTensorMetaInterface> named_tensor_meta_ = nullptr;
  intrusive_ptr<c10::BackendMeta> backend_meta_ = nullptr;
-  std::optional<std::string> custom_data_ptr_error_msg_ = c10::nullopt;
-  std::optional<std::string> custom_storage_error_msg_ = c10::nullopt;
+  std::optional<std::string> custom_data_ptr_error_msg_ = std::nullopt;
+  std::optional<std::string> custom_storage_error_msg_ = std::nullopt;

  ExtraMeta() = default;
  ExtraMeta(const ExtraMeta& other) {
@@ -260,8 +260,8 @@ struct C10_API ExtraMeta {
      std::unique_ptr<c10::SymbolicShapeMeta> symbolic_shape_meta,
      std::unique_ptr<c10::NamedTensorMetaInterface> named_tensor_meta,
      intrusive_ptr<c10::BackendMeta> backend_meta,
-      std::optional<std::string> custom_data_ptr_error_msg = c10::nullopt,
-      std::optional<std::string> custom_storage_access_error_msg = c10::nullopt)
+      std::optional<std::string> custom_data_ptr_error_msg = std::nullopt,
+      std::optional<std::string> custom_storage_access_error_msg = std::nullopt)
      : symbolic_shape_meta_(std::move(symbolic_shape_meta)),
        named_tensor_meta_(std::move(named_tensor_meta)),
        backend_meta_(std::move(backend_meta)),
@@ -1737,7 +1737,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
  void set_sizes_and_strides(
      c10::SymIntArrayRef sizes,
      c10::SymIntArrayRef strides,
-      std::optional<c10::SymInt> storage_offset = c10::nullopt);
+      std::optional<c10::SymInt> storage_offset = std::nullopt);
  // This is renamed to avoid breaking overload BC
  void generic_set_sizes_contiguous(c10::SymIntArrayRef sizes);
  void generic_set_sizes_contiguous(c10::IntArrayRef sizes) {
@@ -1834,7 +1834,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
  void set_sizes_and_strides(
      IntArrayRef new_size,
      IntArrayRef new_stride,
-      std::optional<int64_t> storage_offset = c10::nullopt) {
+      std::optional<int64_t> storage_offset = std::nullopt) {
    TORCH_CHECK(
        allow_tensor_metadata_change(),
        "set_sizes_and_strides ",

@@ -13,7 +13,7 @@
#include <c10/macros/Export.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
+#include <optional>

#include <cstdint>
#include <iosfwd>
@@ -284,10 +284,10 @@ struct C10_API TensorOptions {
    return has_device_;
  }

-  /// Returns the device of the `TensorOptions`, or `c10::nullopt` if
+  /// Returns the device of the `TensorOptions`, or `std::nullopt` if
  /// device is not specified.
  std::optional<Device> device_opt() const noexcept {
-    return has_device_ ? c10::make_optional(device_) : c10::nullopt;
+    return has_device_ ? std::make_optional(device_) : std::nullopt;
  }

  /// Returns the device index of the `TensorOptions`.
@@ -305,10 +305,10 @@ struct C10_API TensorOptions {
    return has_dtype_;
  }

-  /// Returns the dtype of the `TensorOptions`, or `c10::nullopt` if
+  /// Returns the dtype of the `TensorOptions`, or `std::nullopt` if
  /// device is not specified.
  std::optional<caffe2::TypeMeta> dtype_opt() const noexcept {
-    return has_dtype_ ? c10::make_optional(dtype_) : c10::nullopt;
+    return has_dtype_ ? std::make_optional(dtype_) : std::nullopt;
  }

  /// Returns the layout of the `TensorOptions`.
@@ -321,10 +321,10 @@ struct C10_API TensorOptions {
    return has_layout_;
  }

-  /// Returns the layout of the `TensorOptions`, or `c10::nullopt` if
+  /// Returns the layout of the `TensorOptions`, or `std::nullopt` if
  /// layout is not specified.
  std::optional<Layout> layout_opt() const noexcept {
-    return has_layout_ ? c10::make_optional(layout_) : c10::nullopt;
+    return has_layout_ ? std::make_optional(layout_) : std::nullopt;
  }

  /// Returns the `requires_grad` property of the `TensorOptions`.
@@ -338,10 +338,10 @@ struct C10_API TensorOptions {
  }

  /// Returns the `requires_grad` property of the `TensorOptions`, or
-  /// `c10::nullopt` if `requires_grad` is not specified.
+  /// `std::nullopt` if `requires_grad` is not specified.
  std::optional<bool> requires_grad_opt() const noexcept {
-    return has_requires_grad_ ? c10::make_optional(requires_grad_)
-                              : c10::nullopt;
+    return has_requires_grad_ ? std::make_optional(requires_grad_)
+                              : std::nullopt;
  }

  /// Returns the `pinned_memory` property of the `TensorOptions`.
@@ -378,10 +378,10 @@ struct C10_API TensorOptions {
  }

  /// Returns the `pinned_memory` property of the `TensorOptions`, or
-  /// `c10::nullopt` if `pinned_memory` is not specified.
+  /// `std::nullopt` if `pinned_memory` is not specified.
  std::optional<bool> pinned_memory_opt() const noexcept {
-    return has_pinned_memory_ ? c10::make_optional(pinned_memory_)
-                              : c10::nullopt;
+    return has_pinned_memory_ ? std::make_optional(pinned_memory_)
+                              : std::nullopt;
  }

  /// Returns whether the `memory_layout` is specified
@@ -393,10 +393,10 @@ struct C10_API TensorOptions {
  // behavior of memory_format varies from function to function.

  /// Returns the `memory_layout` property of `TensorOptions, or
-  /// `c10::nullopt` if `memory_format` is not specified.
+  /// `std::nullopt` if `memory_format` is not specified.
  std::optional<MemoryFormat> memory_format_opt() const noexcept {
-    return has_memory_format_ ? c10::make_optional(memory_format_)
-                              : c10::nullopt;
+    return has_memory_format_ ? std::make_optional(memory_format_)
+                              : std::nullopt;
  }

  // Resolves the ATen backend specified by the current construction axes.

@@ -5,7 +5,7 @@ namespace c10 {

// should this use the globalContext? Can it get a context passed in somehow?
UndefinedTensorImpl::UndefinedTensorImpl()
-    : TensorImpl(DispatchKey::Undefined, caffe2::TypeMeta(), c10::nullopt) {
+    : TensorImpl(DispatchKey::Undefined, caffe2::TypeMeta(), std::nullopt) {
  set_storage_access_should_throw();
  // TODO: accessing the sizes on an undefined tensor is not meaningful
  // and should error too, but empirically it does not!

@@ -404,7 +404,7 @@ class InlineOptionalDeviceGuard {
  /// Returns the device that was set immediately prior to initialization of
  /// the, guard, or nullopt if the guard is uninitialized.
  optional<Device> original_device() const {
-    return guard_.has_value() ? make_optional(guard_->original_device())
+    return guard_.has_value() ? std::make_optional(guard_->original_device())
                              : nullopt;
  }

@@ -412,7 +412,7 @@ class InlineOptionalDeviceGuard {
  /// either from construction, or via set_device, if the guard is initialized,
  /// or nullopt if the guard is uninitialized.
  optional<Device> current_device() const {
-    return guard_.has_value() ? make_optional(guard_->current_device())
+    return guard_.has_value() ? std::make_optional(guard_->current_device())
                              : nullopt;
  }

@@ -173,7 +173,7 @@ class InlineOptionalStreamGuard {
  /// Returns the stream that was set at the time the guard was most recently
  /// initialized, or nullopt if the guard is uninitialized.
  optional<Stream> original_stream() const {
-    return guard_.has_value() ? make_optional(guard_->original_stream())
+    return guard_.has_value() ? std::make_optional(guard_->original_stream())
                              : nullopt;
  }

@@ -181,7 +181,7 @@ class InlineOptionalStreamGuard {
  /// either from construction, or via reset_stream, if the guard is
  /// initialized, or nullopt if the guard is uninitialized.
  optional<Stream> current_stream() const {
-    return guard_.has_value() ? make_optional(guard_->current_stream())
+    return guard_.has_value() ? std::make_optional(guard_->current_stream())
                              : nullopt;
  }

@@ -2,8 +2,8 @@

#include <c10/core/impl/HermeticPyObjectTLS.h>
#include <c10/core/impl/PyInterpreter.h>
-#include <c10/util/Optional.h>
#include <c10/util/python_stub.h>
+#include <optional>

#include <atomic>

@@ -106,13 +106,13 @@ struct C10_API PyObjectSlot {
    // after we query here. The only time when we can conclude a tensor
    // is definitely uninitialized is when we have just allocated it and
    // it cannot have escaped to other threads yet
-    return c10::nullopt;
+    return std::nullopt;
  } else if (interpreter == self_interpreter) {
    // NB: pyobj_ could still be null!
    if (!ignore_hermetic_tls && c10::impl::HermeticPyObjectTLS::get_state()) {
-      return c10::nullopt;
+      return std::nullopt;
    } else {
-      return c10::make_optional(_unchecked_untagged_pyobj());
+      return std::make_optional(_unchecked_untagged_pyobj());
    }
  } else {
    TORCH_CHECK(

@@ -16,7 +16,7 @@ bool TorchDispatchModeTLS::any_modes_set(bool skip_infra_modes) {
  if (!skip_infra_modes) {
    for (const auto i : c10::irange(
             static_cast<size_t>(TorchDispatchModeKey::NUM_MODE_KEYS))) {
-      if (torchDispatchModeState.infra_modes_[i] != c10::nullopt) {
+      if (torchDispatchModeState.infra_modes_[i] != std::nullopt) {
        return true;
      }
    }
@@ -48,7 +48,7 @@ const std::shared_ptr<PyObject_TorchDispatchMode> TorchDispatchModeTLS::
    if (torchDispatchModeState.infra_modes_[i].has_value()) {
      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
      out = std::move(torchDispatchModeState.infra_modes_[i].value());
-      torchDispatchModeState.infra_modes_[i] = c10::nullopt;
+      torchDispatchModeState.infra_modes_[i] = std::nullopt;
      break;
    }
  }
@@ -70,7 +70,7 @@ const std::
    if (torchDispatchModeState.infra_modes_[i].has_value()) {
      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
      auto out_mode = torchDispatchModeState.infra_modes_[i].value();
-      torchDispatchModeState.infra_modes_[i] = c10::nullopt;
+      torchDispatchModeState.infra_modes_[i] = std::nullopt;
      if (!any_modes_set()) {
        c10::impl::tls_set_dispatch_key_included(DispatchKey::Python, false);
        c10::impl::tls_set_dispatch_key_included(
@@ -114,7 +114,7 @@ int64_t TorchDispatchModeTLS::stack_len() {
  int64_t infra_modes_len = 0;
  for (const auto i :
       c10::irange(static_cast<size_t>(TorchDispatchModeKey::NUM_MODE_KEYS))) {
-    if (torchDispatchModeState.infra_modes_[i] != c10::nullopt) {
+    if (torchDispatchModeState.infra_modes_[i] != std::nullopt) {
      infra_modes_len += 1;
    }
  }
@@ -131,7 +131,7 @@ void TorchDispatchModeTLS::set_mode(
    TorchDispatchModeKey mode_key) {
  TORCH_CHECK(
      torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)] ==
-          c10::nullopt,
+          std::nullopt,
      "trying to set the current ",
      to_string(mode_key),
      ", but one already exists");
@@ -149,7 +149,7 @@ const std::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
TorchDispatchModeTLS::unset_mode(TorchDispatchModeKey mode_key) {
  auto out = torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)];
  torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)] =
-      c10::nullopt;
+      std::nullopt;
  if (out.has_value() && !any_modes_set()) {
    c10::impl::tls_set_dispatch_key_included(DispatchKey::Python, false);
    c10::impl::tls_set_dispatch_key_included(

@@ -411,7 +411,7 @@ struct ExpandableSegment {
      return rangeFromHandles(begin, end);
    }
    while (end > handles_.size()) {
-      handles_.emplace_back(c10::nullopt);
+      handles_.emplace_back(std::nullopt);
    }
    for (auto i : c10::irange(begin, end)) {
      TORCH_INTERNAL_ASSERT(!handles_.at(i));
@@ -426,7 +426,7 @@ struct ExpandableSegment {
      if (status == CUDA_ERROR_OUT_OF_MEMORY) {
        for (auto j : c10::irange(begin, i)) {
          auto h = handles_.at(j).value();
-          handles_.at(j) = c10::nullopt;
+          handles_.at(j) = std::nullopt;
          C10_CUDA_DRIVER_CHECK(DriverAPI::get()->cuMemRelease_(h));
        }
        trimHandles();
@@ -507,7 +507,7 @@ struct ExpandableSegment {
    C10_CUDA_CHECK(cudaStreamSynchronize(stream_));
    for (auto i : c10::irange(begin, end)) {
      CUmemGenericAllocationHandle h = handles_.at(i).value();
-      handles_.at(i) = c10::nullopt;
+      handles_.at(i) = std::nullopt;
      C10_CUDA_DRIVER_CHECK(DriverAPI::get()->cuMemUnmap_(
          ptr_ + segment_size_ * i, segment_size_));
      C10_CUDA_DRIVER_CHECK(DriverAPI::get()->cuMemRelease_(h));

@@ -166,7 +166,7 @@ std::optional<DeviceIndex> getDeviceIndexWithPrimaryContext() {
      return device_index;
    }
  }
-  return c10::nullopt;
+  return std::nullopt;
}

namespace _internal {

@@ -242,7 +242,7 @@ struct OptionalCUDAStreamGuard {
  optional<CUDAStream> original_stream() const {
    auto r = guard_.original_stream();
    if (r.has_value()) {
-      return make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value()));
+      return std::make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value()));
    } else {
      return nullopt;
    }
@@ -254,7 +254,7 @@ struct OptionalCUDAStreamGuard {
  optional<CUDAStream> current_stream() const {
    auto r = guard_.current_stream();
    if (r.has_value()) {
-      return make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value()));
+      return std::make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value()));
    } else {
      return nullopt;
    }

@@ -14,9 +14,9 @@
#include <c10/core/DeviceType.h>
#include <c10/core/Stream.h>
#include <c10/core/impl/PyInterpreter.h>
-#include <c10/util/Optional.h>
#include <cuda_runtime_api.h>
#include <cstdint>
+#include <optional>

namespace c10::cuda::impl {

@@ -45,7 +45,7 @@ struct CUDAGuardImpl final : public c10::impl::DeviceGuardImplInterface {
    const auto err = C10_CUDA_ERROR_HANDLED(c10::cuda::GetDevice(&device));
    C10_CUDA_CHECK_WARN(err);
    if (err != cudaSuccess) {
-      return c10::nullopt;
+      return std::nullopt;
    }
    return Device(DeviceType::CUDA, device);
  }

@@ -36,6 +36,7 @@ TEST(OptionalDeviceGuard, ResetDeviceDifferentDeviceType) {
  g.reset_device(Device(DeviceType::HIP, 2), &hip_impl);
  ASSERT_EQ(FakeGuardImpl<DeviceType::CUDA>::getDeviceIndex(), 0);
  ASSERT_EQ(FakeGuardImpl<DeviceType::HIP>::getDeviceIndex(), 2);
-  ASSERT_EQ(g.current_device(), make_optional(Device(DeviceType::HIP, 2)));
-  ASSERT_EQ(g.original_device(), make_optional(Device(DeviceType::HIP, 0)));
+  ASSERT_EQ(g.current_device(), std::make_optional(Device(DeviceType::HIP, 2)));
+  ASSERT_EQ(
+      g.original_device(), std::make_optional(Device(DeviceType::HIP, 0)));
}

@@ -8,7 +8,7 @@ using namespace c10;
#ifndef C10_MOBILE
static void check(int64_t value) {
  const auto i = SymInt(value);
-  EXPECT_EQ(i.maybe_as_int(), c10::make_optional(value));
+  EXPECT_EQ(i.maybe_as_int(), std::make_optional(value));
}

TEST(SymIntTest, ConcreteInts) {

@@ -170,12 +170,12 @@ TEST(InlineOptionalDeviceGuard, SetDevice) {
  MaybeTestGuard g;
  DeviceIndex i = 1;
  g.set_device(dev(i));
-  ASSERT_EQ(g.original_device(), make_optional(dev(init_i)));
-  ASSERT_EQ(g.current_device(), make_optional(dev(i)));
+  ASSERT_EQ(g.original_device(), std::make_optional(dev(init_i)));
+  ASSERT_EQ(g.current_device(), std::make_optional(dev(i)));
  ASSERT_EQ(TestGuardImpl::getDeviceIndex(), i);
  g.set_device(dev(i));
-  ASSERT_EQ(g.original_device(), make_optional(dev(init_i)));
-  ASSERT_EQ(g.current_device(), make_optional(dev(i)));
+  ASSERT_EQ(g.original_device(), std::make_optional(dev(init_i)));
+  ASSERT_EQ(g.current_device(), std::make_optional(dev(i)));
  ASSERT_EQ(TestGuardImpl::getDeviceIndex(), i);
}

@@ -185,11 +185,11 @@ TEST(InlineOptionalDeviceGuard, SetIndex) {
  DeviceIndex i = 1;
  MaybeTestGuard g;
  g.set_index(i);
-  ASSERT_EQ(g.original_device(), make_optional(dev(init_i)));
-  ASSERT_EQ(g.current_device(), make_optional(dev(i)));
+  ASSERT_EQ(g.original_device(), std::make_optional(dev(init_i)));
+  ASSERT_EQ(g.current_device(), std::make_optional(dev(i)));
  ASSERT_EQ(TestGuardImpl::getDeviceIndex(), i);
  g.set_index(i);
-  ASSERT_EQ(g.original_device(), make_optional(dev(init_i)));
-  ASSERT_EQ(g.current_device(), make_optional(dev(i)));
+  ASSERT_EQ(g.original_device(), std::make_optional(dev(init_i)));
+  ASSERT_EQ(g.current_device(), std::make_optional(dev(i)));
  ASSERT_EQ(TestGuardImpl::getDeviceIndex(), i);
}

@@ -109,8 +109,8 @@ TEST(InlineOptionalStreamGuard, Constructor) {
    ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 1);
    ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 2);
    ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(0), 0);
-    ASSERT_EQ(g.original_stream(), make_optional(stream(0, 0)));
-    ASSERT_EQ(g.current_stream(), make_optional(stream(1, 2)));
+    ASSERT_EQ(g.original_stream(), std::make_optional(stream(0, 0)));
+    ASSERT_EQ(g.current_stream(), std::make_optional(stream(1, 2)));
  }
  ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 0);
  ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 0);
@@ -120,8 +120,8 @@ TEST(InlineOptionalStreamGuard, Constructor) {
    ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 1);
    ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 2);
    ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(0), 0);
-    ASSERT_EQ(g.original_stream(), make_optional(stream(0, 0)));
-    ASSERT_EQ(g.current_stream(), make_optional(stream(1, 2)));
+    ASSERT_EQ(g.original_stream(), std::make_optional(stream(0, 0)));
+    ASSERT_EQ(g.current_stream(), std::make_optional(stream(1, 2)));
  }
  ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 0);
  ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 0);
@@ -146,8 +146,8 @@ TEST(InlineOptionalStreamGuard, ResetStreamSameDevice) {
    ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 1);
    ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 3);
    ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(0), 0);
-    ASSERT_EQ(g.original_stream(), make_optional(stream(0, 0)));
-    ASSERT_EQ(g.current_stream(), make_optional(stream(1, 3)));
+    ASSERT_EQ(g.original_stream(), std::make_optional(stream(0, 0)));
+    ASSERT_EQ(g.current_stream(), std::make_optional(stream(1, 3)));
  }
  ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 0);
  ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 0);
@@ -164,8 +164,8 @@ TEST(InlineOptionalStreamGuard, ResetStreamDifferentDevice) {
    ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(2), 3);
    ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(1), 0);
    ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(0), 0);
-    ASSERT_EQ(g.original_stream(), make_optional(stream(0, 0)));
-    ASSERT_EQ(g.current_stream(), make_optional(stream(2, 3)));
+    ASSERT_EQ(g.original_stream(), std::make_optional(stream(0, 0)));
+    ASSERT_EQ(g.current_stream(), std::make_optional(stream(2, 3)));
  }
  ASSERT_EQ(TestGuardImpl::getDeviceIndex(), 0);
  ASSERT_EQ(TestGuardImpl::getCurrentStreamIdFor(2), 0);

@@ -1,4 +1,4 @@
-#include <c10/util/Optional.h>
+#include <optional>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
@@ -67,7 +67,7 @@ TYPED_TEST(OptionalTest, Empty) {
  EXPECT_FALSE(empty.has_value());

  // NOLINTNEXTLINE(bugprone-unchecked-optional-access,hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
-  EXPECT_THROW(empty.value(), c10::bad_optional_access);
+  EXPECT_THROW(empty.value(), std::bad_optional_access);
}

TYPED_TEST(OptionalTest, Initialized) {
@@ -111,32 +111,32 @@ TEST_P(SelfCompareTest, SelfCompare) {
INSTANTIATE_TEST_SUITE_P(
    nullopt,
    SelfCompareTest,
-    testing::Values(c10::nullopt));
+    testing::Values(std::nullopt));
INSTANTIATE_TEST_SUITE_P(
    int,
    SelfCompareTest,
-    testing::Values(c10::make_optional(2)));
+    testing::Values(std::make_optional(2)));

TEST(OptionalTest, Nullopt) {
  std::optional<int> x = 2;

-  EXPECT_THAT(c10::nullopt, Not(Eq(x)));
-  EXPECT_THAT(x, Not(Eq(c10::nullopt)));
+  EXPECT_THAT(std::nullopt, Not(Eq(x)));
+  EXPECT_THAT(x, Not(Eq(std::nullopt)));

-  EXPECT_THAT(x, Ne(c10::nullopt));
-  EXPECT_THAT(c10::nullopt, Ne(x));
+  EXPECT_THAT(x, Ne(std::nullopt));
+  EXPECT_THAT(std::nullopt, Ne(x));

-  EXPECT_THAT(x, Not(Lt(c10::nullopt)));
-  EXPECT_THAT(c10::nullopt, Lt(x));
+  EXPECT_THAT(x, Not(Lt(std::nullopt)));
+  EXPECT_THAT(std::nullopt, Lt(x));

-  EXPECT_THAT(x, Not(Le(c10::nullopt)));
-  EXPECT_THAT(c10::nullopt, Le(x));
+  EXPECT_THAT(x, Not(Le(std::nullopt)));
+  EXPECT_THAT(std::nullopt, Le(x));

-  EXPECT_THAT(x, Gt(c10::nullopt));
-  EXPECT_THAT(c10::nullopt, Not(Gt(x)));
+  EXPECT_THAT(x, Gt(std::nullopt));
+  EXPECT_THAT(std::nullopt, Not(Gt(x)));

-  EXPECT_THAT(x, Ge(c10::nullopt));
-  EXPECT_THAT(c10::nullopt, Not(Ge(x)));
+  EXPECT_THAT(x, Ge(std::nullopt));
+  EXPECT_THAT(std::nullopt, Not(Ge(x)));
}

// Ensure comparisons work...

@@ -1,7 +1,7 @@
#include <c10/util/Backtrace.h>
-#include <c10/util/Optional.h>
#include <c10/util/Type.h>
#include <c10/util/irange.h>
+#include <optional>

#include <functional>
#include <memory>
@@ -150,19 +150,19 @@ std::optional<FrameInformation> parse_frame_information(

  auto function_name_start = frame_string.find('(');
  if (function_name_start == std::string::npos) {
-    return c10::nullopt;
+    return std::nullopt;
  }
  function_name_start += 1;

  auto offset_start = frame_string.find('+', function_name_start);
  if (offset_start == std::string::npos) {
-    return c10::nullopt;
+    return std::nullopt;
  }
  offset_start += 1;

  const auto offset_end = frame_string.find(')', offset_start);
  if (offset_end == std::string::npos) {
-    return c10::nullopt;
+    return std::nullopt;
  }

  frame.object_file = frame_string.substr(0, function_name_start - 1);
@@ -186,7 +186,7 @@ std::optional<FrameInformation> parse_frame_information(
  skip >> frame.offset_into_function;
#else
#warning Unknown standard library, backtraces may have incomplete debug information
-  return c10::nullopt;
+  return std::nullopt;
#endif // defined(__GLIBCXX__)

  // Some system-level functions don't have sufficient debug information, so

@@ -12,9 +12,9 @@
#pragma once

#include <c10/util/ArrayRef.h>
-#include <c10/util/Optional.h>
#include <cstdint>
#include <initializer_list>
+#include <optional>
#include <type_traits>
#include <utility>

@@ -27,16 +27,16 @@ class OptionalArrayRef final {

  constexpr OptionalArrayRef() noexcept = default;

-  constexpr OptionalArrayRef(nullopt_t) noexcept {}
+  constexpr OptionalArrayRef(std::nullopt_t) noexcept {}

  OptionalArrayRef(const OptionalArrayRef& other) = default;

  OptionalArrayRef(OptionalArrayRef&& other) noexcept = default;

-  constexpr OptionalArrayRef(const optional<ArrayRef<T>>& other) noexcept
+  constexpr OptionalArrayRef(const std::optional<ArrayRef<T>>& other) noexcept
      : wrapped_opt_array_ref(other) {}

-  constexpr OptionalArrayRef(optional<ArrayRef<T>>&& other) noexcept
+  constexpr OptionalArrayRef(std::optional<ArrayRef<T>>&& other) noexcept
      : wrapped_opt_array_ref(std::move(other)) {}

  constexpr OptionalArrayRef(const T& value) noexcept
@@ -89,8 +89,8 @@ class OptionalArrayRef final {

  // Assignment

-  constexpr OptionalArrayRef& operator=(nullopt_t) noexcept {
-    wrapped_opt_array_ref = c10::nullopt;
+  constexpr OptionalArrayRef& operator=(std::nullopt_t) noexcept {
+    wrapped_opt_array_ref = std::nullopt;
    return *this;
  }

@@ -99,13 +99,13 @@ class OptionalArrayRef final {
  OptionalArrayRef& operator=(OptionalArrayRef&& other) noexcept = default;

  constexpr OptionalArrayRef& operator=(
-      const optional<ArrayRef<T>>& other) noexcept {
+      const std::optional<ArrayRef<T>>& other) noexcept {
    wrapped_opt_array_ref = other;
    return *this;
  }

  constexpr OptionalArrayRef& operator=(
-      optional<ArrayRef<T>>&& other) noexcept {
+      std::optional<ArrayRef<T>>&& other) noexcept {
    wrapped_opt_array_ref = std::move(other);
    return *this;
  }
@@ -213,7 +213,7 @@ class OptionalArrayRef final {
  }

 private:
-  optional<ArrayRef<T>> wrapped_opt_array_ref;
+  std::optional<ArrayRef<T>> wrapped_opt_array_ref;
};

using OptionalIntArrayRef = OptionalArrayRef<int64_t>;

@@ -1,9 +1,9 @@
#include <gtest/gtest.h>

-#include <c10/util/Optional.h>
#include <c10/util/irange.h>
#include <c10/xpu/XPUStream.h>
#include <c10/xpu/test/impl/XPUTest.h>
+#include <optional>

#include <thread>
#include <unordered_set>

@@ -1,8 +1,8 @@
#include <ATen/DeviceAccelerator.h>
-#include <c10/util/Optional.h>
#include <fmt/core.h>
#include <sys/types.h>
#include <torch/csrc/python_headers.h>
+#include <optional>

#ifndef _MSC_VER
#include <sys/socket.h>
@@ -1817,7 +1817,7 @@ Call this whenever a new thread is created in order to propagate values from
            transposed_,
            output_padding_,
            std::move(groups_),
-            c10::nullopt);
+            std::nullopt);
      },
      py::arg("input"),
      py::arg("weight"),
@@ -1842,7 +1842,7 @@ Call this whenever a new thread is created in order to propagate values from
         at::SymIntArrayRef output_padding_,
         c10::SymInt groups_,
         std::optional<std::vector<c10::SymInt>> bias_sizes_opt) {
-        c10::OptionalArrayRef<c10::SymInt> ref = c10::nullopt;
+        c10::OptionalArrayRef<c10::SymInt> ref = std::nullopt;
        if (bias_sizes_opt) {
          ref = (*bias_sizes_opt);
        }
@@ -2031,7 +2031,7 @@ Call this whenever a new thread is created in order to propagate values from

  py_module.def(
      "_get_accelerator",
-      [](std::optional<bool> check = c10::nullopt) {
+      [](std::optional<bool> check = std::nullopt) {
        return c10::Device(
            at::getAccelerator(check.value_or(false))
                .value_or(c10::DeviceType::CPU),

@@ -153,7 +153,7 @@ static bool THPStorage_isPreservable(THPStorage* self) {

  if (storage.unsafeGetStorageImpl()->pyobj_slot()->check_pyobj(
          getPyInterpreter(), /*ignore_hermetic_tls=*/true) !=
-      c10::make_optional((PyObject*)self)) {
+      std::make_optional((PyObject*)self)) {
    return false;
  }
  if (storage.use_count() <= 1) {

@@ -2,8 +2,8 @@

#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
#include <c10/util/irange.h>
+#include <optional>

#include <algorithm>
#include <array>

@@ -15,9 +15,9 @@ namespace fft {
/// ```
inline Tensor fft(
    const Tensor& self,
-    std::optional<SymInt> n = c10::nullopt,
+    std::optional<SymInt> n = std::nullopt,
    int64_t dim = -1,
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_fft_symint(self, n, dim, norm);
}

@@ -31,9 +31,9 @@ inline Tensor fft(
/// ```
inline Tensor ifft(
    const Tensor& self,
-    std::optional<SymInt> n = c10::nullopt,
+    std::optional<SymInt> n = std::nullopt,
    int64_t dim = -1,
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_ifft_symint(self, n, dim, norm);
}

@@ -47,9 +47,9 @@ inline Tensor ifft(
/// ```
inline Tensor fft2(
    const Tensor& self,
-    OptionalIntArrayRef s = c10::nullopt,
+    OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_fft2(self, s, dim, norm);
}

@@ -63,9 +63,9 @@ inline Tensor fft2(
/// ```
inline Tensor ifft2(
    const Tensor& self,
-    at::OptionalIntArrayRef s = c10::nullopt,
+    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_ifft2(self, s, dim, norm);
}

@@ -79,9 +79,9 @@ inline Tensor ifft2(
/// ```
inline Tensor fftn(
    const Tensor& self,
-    at::OptionalIntArrayRef s = c10::nullopt,
-    at::OptionalIntArrayRef dim = c10::nullopt,
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    at::OptionalIntArrayRef s = std::nullopt,
+    at::OptionalIntArrayRef dim = std::nullopt,
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_fftn(self, s, dim, norm);
}

@@ -95,9 +95,9 @@ inline Tensor fftn(
/// ```
inline Tensor ifftn(
    const Tensor& self,
-    at::OptionalIntArrayRef s = c10::nullopt,
-    at::OptionalIntArrayRef dim = c10::nullopt,
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    at::OptionalIntArrayRef s = std::nullopt,
+    at::OptionalIntArrayRef dim = std::nullopt,
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_ifftn(self, s, dim, norm);
}

@@ -112,9 +112,9 @@ inline Tensor ifftn(
/// ```
inline Tensor rfft(
    const Tensor& self,
-    std::optional<SymInt> n = c10::nullopt,
+    std::optional<SymInt> n = std::nullopt,
    int64_t dim = -1,
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_rfft_symint(self, n, dim, norm);
}

@@ -131,9 +131,9 @@ inline Tensor rfft(
/// ```
inline Tensor irfft(
    const Tensor& self,
-    std::optional<SymInt> n = c10::nullopt,
+    std::optional<SymInt> n = std::nullopt,
    int64_t dim = -1,
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_irfft_symint(self, n, dim, norm);
}

@@ -147,9 +147,9 @@ inline Tensor irfft(
/// ```
inline Tensor rfft2(
    const Tensor& self,
-    at::OptionalIntArrayRef s = c10::nullopt,
+    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_rfft2(self, s, dim, norm);
}

@@ -163,9 +163,9 @@ inline Tensor rfft2(
/// ```
inline Tensor irfft2(
    const Tensor& self,
-    at::OptionalIntArrayRef s = c10::nullopt,
+    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_irfft2(self, s, dim, norm);
}

@@ -179,9 +179,9 @@ inline Tensor irfft2(
/// ```
inline Tensor rfftn(
    const Tensor& self,
-    at::OptionalIntArrayRef s = c10::nullopt,
-    at::OptionalIntArrayRef dim = c10::nullopt,
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    at::OptionalIntArrayRef s = std::nullopt,
+    at::OptionalIntArrayRef dim = std::nullopt,
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_rfftn(self, s, dim, norm);
}

@@ -195,9 +195,9 @@ inline Tensor rfftn(
/// ```
inline Tensor irfftn(
    const Tensor& self,
-    at::OptionalIntArrayRef s = c10::nullopt,
-    at::OptionalIntArrayRef dim = c10::nullopt,
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    at::OptionalIntArrayRef s = std::nullopt,
+    at::OptionalIntArrayRef dim = std::nullopt,
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_irfftn(self, s, dim, norm);
}

@@ -215,9 +215,9 @@ inline Tensor irfftn(
/// ```
inline Tensor hfft(
    const Tensor& self,
-    std::optional<SymInt> n = c10::nullopt,
+    std::optional<SymInt> n = std::nullopt,
    int64_t dim = -1,
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_hfft_symint(self, n, dim, norm);
}

@@ -234,9 +234,9 @@ inline Tensor hfft(
/// ```
inline Tensor ihfft(
    const Tensor& self,
-    std::optional<SymInt> n = c10::nullopt,
+    std::optional<SymInt> n = std::nullopt,
    int64_t dim = -1,
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_ihfft_symint(self, n, dim, norm);
}

@@ -253,9 +253,9 @@ inline Tensor ihfft(
/// ```
inline Tensor hfft2(
    const Tensor& self,
-    at::OptionalIntArrayRef s = c10::nullopt,
+    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_hfft2(self, s, dim, norm);
}

@@ -273,9 +273,9 @@ inline Tensor hfft2(
/// ```
inline Tensor ihfft2(
    const Tensor& self,
-    at::OptionalIntArrayRef s = c10::nullopt,
+    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_ihfft2(self, s, dim, norm);
}

@@ -292,9 +292,9 @@ inline Tensor ihfft2(
/// ```
inline Tensor hfftn(
    const Tensor& self,
-    at::OptionalIntArrayRef s = c10::nullopt,
+    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_hfftn(self, s, dim, norm);
}

@@ -312,9 +312,9 @@ inline Tensor hfftn(
/// ```
inline Tensor ihfftn(
    const Tensor& self,
-    at::OptionalIntArrayRef s = c10::nullopt,
+    at::OptionalIntArrayRef s = std::nullopt,
    IntArrayRef dim = {-2, -1},
-    std::optional<c10::string_view> norm = c10::nullopt) {
+    std::optional<c10::string_view> norm = std::nullopt) {
  return torch::fft_ihfftn(self, s, dim, norm);
}

@@ -364,7 +364,7 @@ inline Tensor rfftfreq(int64_t n, const TensorOptions& options) {
/// ```
inline Tensor fftshift(
    const Tensor& x,
-    at::OptionalIntArrayRef dim = c10::nullopt) {
+    at::OptionalIntArrayRef dim = std::nullopt) {
  return torch::fft_fftshift(x, dim);
}

@@ -381,7 +381,7 @@ inline Tensor fftshift(
/// ```
inline Tensor ifftshift(
    const Tensor& x,
-    at::OptionalIntArrayRef dim = c10::nullopt) {
+    at::OptionalIntArrayRef dim = std::nullopt) {
  return torch::fft_ifftshift(x, dim);
}

|
@ -26,7 +26,7 @@ inline at::Tensor nested_tensor(
|
||||
auto out = at::_nested_tensor_from_tensor_list(
|
||||
nested_tensor_data,
|
||||
c10::typeMetaToScalarType(options.dtype()),
|
||||
c10::nullopt,
|
||||
std::nullopt,
|
||||
options.device(),
|
||||
options.pinned_memory());
|
||||
if (options.has_requires_grad() && options.requires_grad()) {
|
||||
@ -55,7 +55,7 @@ inline at::Tensor nested_tensor(
|
||||
auto out = at::_nested_tensor_from_tensor_list(
|
||||
tensor_list,
|
||||
c10::typeMetaToScalarType(options.dtype()),
|
||||
c10::nullopt,
|
||||
std::nullopt,
|
||||
options.device(),
|
||||
options.pinned_memory());
|
||||
if (options.has_requires_grad() && options.requires_grad()) {
|
||||
@ -72,10 +72,10 @@ inline at::Tensor nested_tensor(
|
||||
/// ```
|
||||
inline at::Tensor as_nested_tensor(
|
||||
at::TensorList list,
|
||||
std::optional<at::ScalarType> dtype = c10::nullopt,
|
||||
std::optional<at::Device> device = c10::nullopt) {
|
||||
std::optional<at::ScalarType> dtype = std::nullopt,
|
||||
std::optional<at::Device> device = std::nullopt) {
|
||||
return at::_nested_tensor_from_tensor_list(
|
||||
list, dtype, c10::nullopt, device, c10::nullopt);
|
||||
list, dtype, std::nullopt, device, std::nullopt);
|
||||
}
|
||||
|
||||
/// Nested to padded tensor
|
||||
@ -87,7 +87,7 @@ inline at::Tensor as_nested_tensor(
|
||||
inline at::Tensor to_padded_tensor(
|
||||
const at::Tensor& self,
|
||||
double padding,
|
||||
at::OptionalIntArrayRef output_size = c10::nullopt) {
|
||||
at::OptionalIntArrayRef output_size = std::nullopt) {
|
||||
return at::nested_to_padded_tensor(self, padding, output_size);
|
||||
}
|
||||
|
||||
|
@ -236,7 +236,7 @@ inline Tensor softmax(
|
||||
std::optional<torch::Dtype> dtype) {
|
||||
Tensor ret;
|
||||
|
||||
if (dtype == c10::nullopt) {
|
||||
if (dtype == std::nullopt) {
|
||||
ret = input.softmax(dim);
|
||||
} else {
|
||||
ret = input.softmax(dim, dtype);
|
||||
@ -273,7 +273,7 @@ inline Tensor softmin(
|
||||
std::optional<torch::Dtype> dtype) {
|
||||
Tensor ret;
|
||||
|
||||
if (dtype == c10::nullopt) {
|
||||
if (dtype == std::nullopt) {
|
||||
ret = (-input).softmax(dim);
|
||||
} else {
|
||||
ret = (-input).softmax(dim, dtype);
|
||||
@ -310,7 +310,7 @@ inline Tensor log_softmax(
|
||||
std::optional<torch::Dtype> dtype) {
|
||||
Tensor ret;
|
||||
|
||||
if (dtype == c10::nullopt) {
|
||||
if (dtype == std::nullopt) {
|
||||
ret = input.log_softmax(dim);
|
||||
} else {
|
||||
ret = input.log_softmax(dim, dtype);
|
||||
|
@ -31,7 +31,7 @@ inline Tensor embedding(
|
||||
bool sparse) {
|
||||
auto input_ = input;
|
||||
|
||||
if (padding_idx != c10::nullopt) {
|
||||
if (padding_idx != std::nullopt) {
|
||||
if (*padding_idx > 0) {
|
||||
TORCH_CHECK(
|
||||
*padding_idx < weight.size(0),
|
||||
@ -46,7 +46,7 @@ inline Tensor embedding(
|
||||
padding_idx = -1;
|
||||
}
|
||||
|
||||
if (max_norm != c10::nullopt) {
|
||||
if (max_norm != std::nullopt) {
|
||||
input_ = input_.contiguous();
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
|
||||
_no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type);
|
||||
@ -149,7 +149,7 @@ inline Tensor embedding_bag(
|
||||
TORCH_CHECK(false, "mode has to be one of sum, mean or max");
|
||||
}
|
||||
|
||||
if (max_norm != c10::nullopt) {
|
||||
if (max_norm != std::nullopt) {
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
|
||||
_no_grad_embedding_renorm_(weight, input_, *max_norm, norm_type);
|
||||
}
|
||||
|
@ -346,7 +346,7 @@ inline Tensor smooth_l1_loss(
|
||||
const Tensor& input,
|
||||
const Tensor& target,
|
||||
SmoothL1LossFuncOptions::reduction_t reduction,
|
||||
std::optional<double> beta_opt = c10::nullopt) {
|
||||
std::optional<double> beta_opt = std::nullopt) {
|
||||
if (target.sizes() != input.sizes()) {
|
||||
TORCH_WARN(
|
||||
"Using a target size (",
|
||||
@ -405,7 +405,7 @@ inline Tensor smooth_l1_loss(
|
||||
const SmoothL1LossFuncOptions& options,
|
||||
double beta) {
|
||||
TORCH_CHECK(
|
||||
options.beta() == c10::nullopt,
|
||||
options.beta() == std::nullopt,
|
||||
"expected beta not to be provided in 'options', but got ",
|
||||
options.beta().value());
|
||||
return detail::smooth_l1_loss(input, target, options.reduction(), beta);
|
||||
|
@@ -17,7 +17,7 @@ inline Tensor normalize(
    int64_t dim,
    double eps,
    std::optional<Tensor> out) {
-  if (out == c10::nullopt) {
+  if (out == std::nullopt) {
    auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input);
    return input / denom;
  } else {
@@ -115,7 +115,7 @@ inline Tensor local_response_norm(
                  /*padding=*/0,
                  /*ceil_mode=*/false,
                  /*count_include_pad=*/true,
-                  /*divisor_override=*/c10::nullopt)
+                  /*divisor_override=*/std::nullopt)
              .squeeze(1);
  } else {
    auto sizes = input.sizes();
@@ -132,7 +132,7 @@ inline Tensor local_response_norm(
                  /*padding=*/0,
                  /*ceil_mode=*/false,
                  /*count_include_pad=*/true,
-                  /*divisor_override=*/c10::nullopt)
+                  /*divisor_override=*/std::nullopt)
              .squeeze(1);
    div = div.view(sizes);
  }

@@ -820,15 +820,15 @@ inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices(
    const std::optional<ExpandingArray<2>>& output_size,
    const std::optional<ExpandingArray<2, double>>& output_ratio,
    const Tensor& _random_samples) {
-  if (output_size == c10::nullopt && output_ratio == c10::nullopt) {
+  if (output_size == std::nullopt && output_ratio == std::nullopt) {
    TORCH_CHECK(
        false,
        "fractional_max_pool2d requires specifying either ",
        "an output_size or an output_ratio");
  }
  std::optional<ExpandingArray<2>> output_size_ = output_size;
-  if (output_size_ == c10::nullopt) {
-    TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt);
+  if (output_size_ == std::nullopt) {
+    TORCH_INTERNAL_ASSERT(output_ratio != std::nullopt);
    output_size_ = {
        (int64_t)(static_cast<double>(input.size(-2)) *
                  (*output_ratio.value())[0]),
@@ -913,7 +913,7 @@ inline std::tuple<Tensor, Tensor> fractional_max_pool3d_with_indices(
    const std::optional<ExpandingArray<3>>& output_size,
    const std::optional<ExpandingArray<3, double>>& output_ratio,
    const Tensor& _random_samples) {
-  if (output_size == c10::nullopt && output_ratio == c10::nullopt) {
+  if (output_size == std::nullopt && output_ratio == std::nullopt) {
    TORCH_CHECK(
        false,
        "fractional_max_pool3d requires specifying either ",
@@ -921,8 +921,8 @@ inline std::tuple<Tensor, Tensor> fractional_max_pool3d_with_indices(
  }

  std::optional<ExpandingArray<3>> output_size_ = output_size;
-  if (output_size_ == c10::nullopt) {
-    TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt);
+  if (output_size_ == std::nullopt) {
+    TORCH_INTERNAL_ASSERT(output_ratio != std::nullopt);
    output_size_ = {
        (int64_t)(static_cast<double>(input.size(-3)) *
                  (*output_ratio.value())[0]),
@@ -1066,7 +1066,7 @@ inline Tensor lp_pool2d(
      /*padding=*/0,
      ceil_mode,
      /*count_include_pad=*/true,
-      /*divisor_override=*/c10::nullopt);
+      /*divisor_override=*/std::nullopt);

  return (torch::sign(out) * relu(torch::abs(out)))
      .mul(kw * kh)
@@ -1116,7 +1116,7 @@ inline Tensor lp_pool3d(
      /*padding=*/0,
      ceil_mode,
      /*count_include_pad=*/true,
-      /*divisor_override=*/c10::nullopt);
+      /*divisor_override=*/std::nullopt);

  return (torch::sign(out) * relu(torch::abs(out)))
      .mul(kd * kw * kh)

@@ -19,13 +19,13 @@ inline std::vector<int64_t> _interp_output_size(
    std::optional<std::vector<double>>,
    std::optional<bool>> closed_over_args) {
  auto [input, size, scale_factor, recompute_scale_factor] = closed_over_args;
-  if (size == c10::nullopt && scale_factor == c10::nullopt) {
+  if (size == std::nullopt && scale_factor == std::nullopt) {
    TORCH_CHECK(false, "either size or scale_factor should be defined");
  }
-  if (size != c10::nullopt && scale_factor != c10::nullopt) {
+  if (size != std::nullopt && scale_factor != std::nullopt) {
    TORCH_CHECK(false, "only one of size or scale_factor should be defined");
  }
-  if (scale_factor != c10::nullopt) {
+  if (scale_factor != std::nullopt) {
    if (static_cast<int64_t>(scale_factor.value().size()) != dim) {
      TORCH_CHECK(
          false,
@@ -36,14 +36,14 @@ inline std::vector<int64_t> _interp_output_size(
          torch::ArrayRef<double>(*scale_factor));
    }
  }
-  if (size != c10::nullopt) {
+  if (size != std::nullopt) {
    return *size;
  }

-  TORCH_INTERNAL_ASSERT(scale_factor != c10::nullopt);
+  TORCH_INTERNAL_ASSERT(scale_factor != std::nullopt);
  auto scale_factors = *scale_factor;

-  if (recompute_scale_factor == c10::nullopt) {
+  if (recompute_scale_factor == std::nullopt) {
    // only warn when the scales have floating values since
    // the result for ints is the same with/without recompute_scale_factor
    bool is_float_scale_factor = false;
@@ -83,14 +83,14 @@ inline Tensor interpolate(
    bool antialias) {
  if (std::holds_alternative<enumtype::kNearest>(mode) ||
      std::get_if<enumtype::kArea>(&mode)) {
-    if (align_corners != c10::nullopt) {
+    if (align_corners != std::nullopt) {
      TORCH_CHECK(
          false,
          "align_corners option can only be set with the "
          "interpolating modes: linear | bilinear | bicubic | trilinear");
    }
  } else {
-    if (align_corners == c10::nullopt) {
+    if (align_corners == std::nullopt) {
      TORCH_WARN(
          "Default upsampling behavior when mode=",
          enumtype::get_enum_name(mode),
@@ -114,8 +114,8 @@ inline Tensor interpolate(

  auto scale_factor_len = input.dim() - 2;
  std::vector<std::optional<double>> scale_factor_list(
-      scale_factor_len, c10::nullopt);
-  if (scale_factor != c10::nullopt && !recompute_scale_factor.value_or(false)) {
+      scale_factor_len, std::nullopt);
+  if (scale_factor != std::nullopt && !recompute_scale_factor.value_or(false)) {
    auto _scale_factor_repeated = *scale_factor;
    scale_factor_list = {};
    for (const auto& elem : _scale_factor_repeated) {
@@ -181,7 +181,7 @@ inline Tensor interpolate(
        input, _interp_output_size(3, std::move(closed_over_args)));
  } else if (input.dim() == 3 && std::get_if<enumtype::kLinear>(&mode)) {
    TORCH_CHECK(
-        align_corners != c10::nullopt, "align_corners should be specified.");
+        align_corners != std::nullopt, "align_corners should be specified.");
    return torch::upsample_linear1d(
        input,
        _interp_output_size(1, std::move(closed_over_args)),
@@ -195,7 +195,7 @@ inline Tensor interpolate(
    TORCH_CHECK(false, "Got 4D input, but linear mode needs 3D input");
  } else if (input.dim() == 4 && std::get_if<enumtype::kBilinear>(&mode)) {
    TORCH_CHECK(
-        align_corners != c10::nullopt, "align_corners should be specified.");
+        align_corners != std::nullopt, "align_corners should be specified.");
    if (antialias) {
      return torch::_upsample_bilinear2d_aa(
          input,
@@ -218,7 +218,7 @@ inline Tensor interpolate(
    TORCH_CHECK(false, "Got 5D input, but bilinear mode needs 4D input");
  } else if (input.dim() == 5 && std::get_if<enumtype::kTrilinear>(&mode)) {
    TORCH_CHECK(
-        align_corners != c10::nullopt, "align_corners should be specified.");
+        align_corners != std::nullopt, "align_corners should be specified.");
    return torch::upsample_trilinear3d(
        input,
        _interp_output_size(3, std::move(closed_over_args)),
@@ -228,7 +228,7 @@ inline Tensor interpolate(
        scale_factor_list.at(2));
  } else if (input.dim() == 4 && std::get_if<enumtype::kBicubic>(&mode)) {
    TORCH_CHECK(
-        align_corners != c10::nullopt, "align_corners should be specified.");
+        align_corners != std::nullopt, "align_corners should be specified.");
    if (antialias) {
      return torch::_upsample_bicubic2d_aa(
          input,

@ -106,7 +106,7 @@ class BatchNormImplBase : public NormImplBase<D, Derived, BatchNormOptions> {
|
||||
this->_check_input_dim(input);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
|
||||
double exponential_average_factor;
|
||||
if (this->options.momentum() == c10::nullopt) {
|
||||
if (this->options.momentum() == std::nullopt) {
|
||||
exponential_average_factor = 0.0;
|
||||
} else {
|
||||
exponential_average_factor = this->options.momentum().value();
|
||||
@ -116,7 +116,7 @@ class BatchNormImplBase : public NormImplBase<D, Derived, BatchNormOptions> {
|
||||
if (this->num_batches_tracked.defined()) {
|
||||
this->num_batches_tracked += 1;
|
||||
if (this->options.momentum() ==
|
||||
c10::nullopt) { // use cumulative moving average
|
||||
std::nullopt) { // use cumulative moving average
|
||||
exponential_average_factor =
|
||||
1.0 / this->num_batches_tracked.template item<double>();
|
||||
} else { // use exponential moving average
|
||||
|
@ -350,7 +350,7 @@ class TORCH_API ConvTranspose1dImpl
|
||||
explicit ConvTranspose1dImpl(ConvTranspose1dOptions options_);
|
||||
Tensor forward(
|
||||
const Tensor& input,
|
||||
const std::optional<at::IntArrayRef>& output_size = c10::nullopt);
|
||||
const std::optional<at::IntArrayRef>& output_size = std::nullopt);
|
||||
|
||||
protected:
|
||||
FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(std::optional<at::IntArrayRef>())})
|
||||
@ -392,7 +392,7 @@ class TORCH_API ConvTranspose2dImpl
|
||||
explicit ConvTranspose2dImpl(ConvTranspose2dOptions options_);
|
||||
Tensor forward(
|
||||
const Tensor& input,
|
||||
const std::optional<at::IntArrayRef>& output_size = c10::nullopt);
|
||||
const std::optional<at::IntArrayRef>& output_size = std::nullopt);
|
||||
|
||||
protected:
|
||||
FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(std::optional<at::IntArrayRef>())})
|
||||
@ -434,7 +434,7 @@ class TORCH_API ConvTranspose3dImpl
|
||||
explicit ConvTranspose3dImpl(ConvTranspose3dOptions options_);
|
||||
Tensor forward(
|
||||
const Tensor& input,
|
||||
const std::optional<at::IntArrayRef>& output_size = c10::nullopt);
|
||||
const std::optional<at::IntArrayRef>& output_size = std::nullopt);
|
||||
|
||||
protected:
|
||||
FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(std::optional<at::IntArrayRef>())})
|
||||
|
@ -507,7 +507,7 @@ class TORCH_API MaxUnpool1dImpl : public MaxUnpoolImpl<1, MaxUnpool1dImpl> {
|
||||
Tensor forward(
|
||||
const Tensor& input,
|
||||
const Tensor& indices,
|
||||
const std::optional<std::vector<int64_t>>& output_size = c10::nullopt);
|
||||
const std::optional<std::vector<int64_t>>& output_size = std::nullopt);
|
||||
|
||||
protected:
|
||||
FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(std::optional<std::vector<int64_t>>())})
|
||||
@ -539,7 +539,7 @@ class TORCH_API MaxUnpool2dImpl : public MaxUnpoolImpl<2, MaxUnpool2dImpl> {
|
||||
Tensor forward(
|
||||
const Tensor& input,
|
||||
const Tensor& indices,
|
||||
const std::optional<std::vector<int64_t>>& output_size = c10::nullopt);
|
||||
const std::optional<std::vector<int64_t>>& output_size = std::nullopt);
|
||||
|
||||
protected:
|
||||
FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(std::optional<std::vector<int64_t>>())})
|
||||
@ -571,7 +571,7 @@ class TORCH_API MaxUnpool3dImpl : public MaxUnpoolImpl<3, MaxUnpool3dImpl> {
|
||||
Tensor forward(
|
||||
const Tensor& input,
|
||||
const Tensor& indices,
|
||||
const std::optional<std::vector<int64_t>>& output_size = c10::nullopt);
|
||||
const std::optional<std::vector<int64_t>>& output_size = std::nullopt);
|
||||
|
||||
protected:
|
||||
FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(std::optional<std::vector<int64_t>>())})
|
||||
|
@ -1,8 +1,8 @@
|
||||
#pragma once
|
||||
|
||||
#include <c10/util/ArrayRef.h>
|
||||
#include <c10/util/Optional.h>
|
||||
#include <c10/util/irange.h>
|
||||
#include <optional>
|
||||
|
||||
#include <vector>
|
||||
|
||||
|
@ -252,7 +252,7 @@ struct TORCH_API SoftmaxFuncOptions {
|
||||
/// If specified, the input tensor is casted to `dtype` before the operation
|
||||
/// is performed. This is useful for preventing data type overflows. Default:
|
||||
/// None.
|
||||
TORCH_ARG(std::optional<torch::Dtype>, dtype) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<torch::Dtype>, dtype) = std::nullopt;
|
||||
};
|
||||
|
||||
} // namespace functional
|
||||
@ -293,7 +293,7 @@ struct TORCH_API SoftminFuncOptions {
|
||||
/// If specified, the input tensor is casted to `dtype` before the operation
|
||||
/// is performed. This is useful for preventing data type overflows. Default:
|
||||
/// None.
|
||||
TORCH_ARG(std::optional<torch::Dtype>, dtype) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<torch::Dtype>, dtype) = std::nullopt;
|
||||
};
|
||||
|
||||
} // namespace functional
|
||||
@ -334,7 +334,7 @@ struct TORCH_API LogSoftmaxFuncOptions {
|
||||
/// If specified, the input tensor is casted to `dtype` before the operation
|
||||
/// is performed. This is useful for preventing data type overflows. Default:
|
||||
/// None.
|
||||
TORCH_ARG(std::optional<torch::Dtype>, dtype) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<torch::Dtype>, dtype) = std::nullopt;
|
||||
};
|
||||
|
||||
} // namespace functional
|
||||
@ -640,10 +640,10 @@ struct TORCH_API MultiheadAttentionOptions {
|
||||
/// add a new batch of zeros to the key and value sequences at dim=1.
|
||||
TORCH_ARG(bool, add_zero_attn) = false;
|
||||
|
||||
/// total number of features in key. Default: c10::nullopt.
|
||||
/// total number of features in key. Default: std::nullopt.
|
||||
TORCH_ARG(int64_t, kdim);
|
||||
|
||||
/// total number of features in key. Default: c10::nullopt.
|
||||
/// total number of features in key. Default: std::nullopt.
|
||||
TORCH_ARG(int64_t, vdim);
|
||||
};
|
||||
|
||||
|
@ -28,10 +28,10 @@ struct TORCH_API EmbeddingOptions {
|
||||
/// Embedding, the embedding vector at `padding_idx` will default to all
|
||||
/// zeros, but can be updated to another value to be used as the padding
|
||||
/// vector.
|
||||
TORCH_ARG(std::optional<int64_t>, padding_idx) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<int64_t>, padding_idx) = std::nullopt;
|
||||
/// If given, each embedding vector with norm larger than `max_norm` is
|
||||
/// renormalized to have norm `max_norm`.
|
||||
TORCH_ARG(std::optional<double>, max_norm) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<double>, max_norm) = std::nullopt;
|
||||
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
|
||||
TORCH_ARG(double, norm_type) = 2.;
|
||||
/// If given, this will scale gradients by the inverse of frequency of the
|
||||
@ -55,10 +55,10 @@ struct TORCH_API EmbeddingFromPretrainedOptions {
|
||||
/// If specified, the entries at `padding_idx` do not contribute to the
|
||||
/// gradient; therefore, the embedding vector at `padding_idx` is not updated
|
||||
/// during training, i.e. it remains as a fixed "pad".
|
||||
TORCH_ARG(std::optional<int64_t>, padding_idx) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<int64_t>, padding_idx) = std::nullopt;
|
||||
/// If given, each embedding vector with norm larger than `max_norm` is
|
||||
/// renormalized to have norm `max_norm`.
|
||||
TORCH_ARG(std::optional<double>, max_norm) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<double>, max_norm) = std::nullopt;
|
||||
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
|
||||
TORCH_ARG(double, norm_type) = 2.;
|
||||
/// If given, this will scale gradients by the inverse of frequency of the
|
||||
@ -84,10 +84,10 @@ struct TORCH_API EmbeddingFuncOptions {
|
||||
/// If specified, the entries at `padding_idx` do not contribute to the
|
||||
/// gradient; therefore, the embedding vector at `padding_idx` is not updated
|
||||
/// during training, i.e. it remains as a fixed "pad".
|
||||
TORCH_ARG(std::optional<int64_t>, padding_idx) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<int64_t>, padding_idx) = std::nullopt;
|
||||
/// If given, each embedding vector with norm larger than `max_norm` is
|
||||
/// renormalized to have norm `max_norm`.
|
||||
TORCH_ARG(std::optional<double>, max_norm) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<double>, max_norm) = std::nullopt;
|
||||
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
|
||||
TORCH_ARG(double, norm_type) = 2.;
|
||||
/// If given, this will scale gradients by the inverse of frequency of the
|
||||
@ -120,7 +120,7 @@ struct TORCH_API EmbeddingBagOptions {
|
||||
TORCH_ARG(int64_t, embedding_dim);
|
||||
/// If given, each embedding vector with norm larger than `max_norm` is
|
||||
/// renormalized to have norm `max_norm`.
|
||||
TORCH_ARG(std::optional<double>, max_norm) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<double>, max_norm) = std::nullopt;
|
||||
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
|
||||
TORCH_ARG(double, norm_type) = 2.;
|
||||
/// If given, this will scale gradients by the inverse of frequency of the
|
||||
@ -148,7 +148,7 @@ struct TORCH_API EmbeddingBagOptions {
|
||||
/// zeros, but can be updated to another value to be used as the padding
|
||||
/// vector. Note that the embedding vector at `padding_idx` is excluded from
|
||||
/// the reduction.
|
||||
TORCH_ARG(std::optional<int64_t>, padding_idx) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<int64_t>, padding_idx) = std::nullopt;
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
@ -161,7 +161,7 @@ struct TORCH_API EmbeddingBagFromPretrainedOptions {
|
||||
TORCH_ARG(bool, freeze) = true;
|
||||
/// If given, each embedding vector with norm larger than `max_norm` is
|
||||
/// renormalized to have norm `max_norm`.
|
||||
TORCH_ARG(std::optional<double>, max_norm) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<double>, max_norm) = std::nullopt;
|
||||
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
|
||||
TORCH_ARG(double, norm_type) = 2.;
|
||||
/// If given, this will scale gradients by the inverse of frequency of the
|
||||
@ -184,7 +184,7 @@ struct TORCH_API EmbeddingBagFromPretrainedOptions {
|
||||
/// gradient; therefore, the embedding vector at padding_idx is not updated
|
||||
/// during training, i.e. it remains as a fixed "pad". Note that the embedding
|
||||
/// vector at `padding_idx` is excluded from the reduction.
|
||||
TORCH_ARG(std::optional<int64_t>, padding_idx) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<int64_t>, padding_idx) = std::nullopt;
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
@ -205,7 +205,7 @@ struct TORCH_API EmbeddingBagFuncOptions {
|
||||
TORCH_ARG(torch::Tensor, offsets) = Tensor();
|
||||
/// If given, each embedding vector with norm larger than `max_norm` is
|
||||
/// renormalized to have norm `max_norm`.
|
||||
TORCH_ARG(std::optional<double>, max_norm) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<double>, max_norm) = std::nullopt;
|
||||
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
|
||||
TORCH_ARG(double, norm_type) = 2.;
|
||||
/// If given, this will scale gradients by the inverse of frequency of the
|
||||
@ -233,7 +233,7 @@ struct TORCH_API EmbeddingBagFuncOptions {
|
||||
/// gradient; therefore, the embedding vector at padding_idx is not updated
|
||||
/// during training, i.e. it remains as a fixed "pad". Note that the embedding
|
||||
/// vector at `padding_idx` is excluded from the reduction.
|
||||
TORCH_ARG(std::optional<int64_t>, padding_idx) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<int64_t>, padding_idx) = std::nullopt;
|
||||
};
|
||||
|
||||
} // namespace functional
|
||||
|
@ -451,7 +451,7 @@ struct TORCH_API TripletMarginWithDistanceLossOptions {
|
||||
/// closeness of two tensors. If not specified, `F::pairwise_distance` will
|
||||
/// be used. Default: nullopt
|
||||
TORCH_ARG(std::optional<distance_function_t>, distance_function) =
|
||||
c10::nullopt;
|
||||
std::nullopt;
|
||||
/// Specifies a nonnegative margin representing the minimum difference
|
||||
/// between the positive and negative distances required for the loss to be 0.
|
||||
/// Larger margins penalize cases where the negative examples are not distance
|
||||
@ -548,7 +548,7 @@ struct TORCH_API SmoothL1LossOptions {
|
||||
/// Specifies the threshold at which to change between L1 and L2 loss.
|
||||
/// If beta is not specified, a value of 1.0 will be used.
|
||||
/// Default: nullopt
|
||||
TORCH_ARG(std::optional<double>, beta) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<double>, beta) = std::nullopt;
|
||||
};
|
||||
|
||||
namespace functional {
|
||||
|
@ -133,7 +133,7 @@ struct TORCH_API NormalizeFuncOptions {
|
||||
TORCH_ARG(double, eps) = 1e-12;
|
||||
/// the output tensor. If `out` is used, this
|
||||
/// operation won't be differentiable.
|
||||
TORCH_ARG(std::optional<Tensor>, out) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<Tensor>, out) = std::nullopt;
|
||||
};
|
||||
|
||||
} // namespace functional
|
||||
|
@ -32,7 +32,7 @@ struct AvgPoolOptions {
|
||||
/// if specified, it will be used as divisor, otherwise size of the pooling
|
||||
/// region will be used.
|
||||
|
||||
TORCH_ARG(std::optional<int64_t>, divisor_override) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<int64_t>, divisor_override) = std::nullopt;
|
||||
};
|
||||
|
||||
/// `AvgPoolOptions` specialized for the `AvgPool1d` module.
|
||||
@ -401,7 +401,7 @@ struct MaxUnpoolFuncOptions {
|
||||
TORCH_ARG(ExpandingArray<D>, padding) = 0;
|
||||
|
||||
/// the targeted output size
|
||||
TORCH_ARG(std::optional<std::vector<int64_t>>, output_size) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<std::vector<int64_t>>, output_size) = std::nullopt;
|
||||
};
|
||||
|
||||
/// `MaxUnpoolFuncOptions` specialized for
|
||||
@ -450,12 +450,12 @@ struct FractionalMaxPoolOptions {
|
||||
TORCH_ARG(ExpandingArray<D>, kernel_size);
|
||||
|
||||
/// the target output size of the image
|
||||
TORCH_ARG(std::optional<ExpandingArray<D>>, output_size) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<ExpandingArray<D>>, output_size) = std::nullopt;
|
||||
|
||||
/// If one wants to have an output size as a ratio of the input size, this
|
||||
/// option can be given. This has to be a number or tuple in the range (0, 1)
|
||||
using ExpandingArrayDouble = torch::ExpandingArray<D, double>;
|
||||
TORCH_ARG(std::optional<ExpandingArrayDouble>, output_ratio) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<ExpandingArrayDouble>, output_ratio) = std::nullopt;
|
||||
|
||||
TORCH_ARG(torch::Tensor, _random_samples) = Tensor();
|
||||
};
|
||||
|
@ -20,10 +20,10 @@ namespace nn {
|
||||
/// ```
|
||||
struct TORCH_API UpsampleOptions {
|
||||
/// output spatial sizes.
|
||||
TORCH_ARG(std::optional<std::vector<int64_t>>, size) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<std::vector<int64_t>>, size) = std::nullopt;
|
||||
|
||||
/// multiplier for spatial size.
|
||||
TORCH_ARG(std::optional<std::vector<double>>, scale_factor) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<std::vector<double>>, scale_factor) = std::nullopt;
|
||||
|
||||
/// the upsampling algorithm: one of "nearest", "linear", "bilinear",
|
||||
/// "bicubic" and "trilinear". Default: "nearest"
|
||||
@ -40,7 +40,7 @@ struct TORCH_API UpsampleOptions {
|
||||
/// aligned, and thus preserving the values at those pixels. This only has
|
||||
/// effect when :attr:`mode` is "linear", "bilinear", "bicubic", or
|
||||
/// "trilinear". Default: "False"
|
||||
TORCH_ARG(std::optional<bool>, align_corners) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<bool>, align_corners) = std::nullopt;
|
||||
};
|
||||
|
||||
namespace functional {
|
||||
@ -65,10 +65,10 @@ struct TORCH_API InterpolateFuncOptions {
|
||||
mode_t;
|
||||
|
||||
/// output spatial sizes.
|
||||
TORCH_ARG(std::optional<std::vector<int64_t>>, size) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<std::vector<int64_t>>, size) = std::nullopt;
|
||||
|
||||
/// multiplier for spatial size.
|
||||
TORCH_ARG(std::optional<std::vector<double>>, scale_factor) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<std::vector<double>>, scale_factor) = std::nullopt;
|
||||
|
||||
/// the upsampling algorithm: one of "nearest", "linear", "bilinear",
|
||||
/// "bicubic", "trilinear", "area", "nearest-exact". Default: "nearest"
|
||||
@ -83,7 +83,7 @@ struct TORCH_API InterpolateFuncOptions {
|
||||
/// this operation *independent* of input size when `scale_factor` is
|
||||
/// kept the same. It is *required* when interpolating mode is "linear",
|
||||
/// "bilinear", "bicubic" or "trilinear". Default: "False"
|
||||
TORCH_ARG(std::optional<bool>, align_corners) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<bool>, align_corners) = std::nullopt;
|
||||
|
||||
/// recompute the scale_factor for use in the
|
||||
/// interpolation calculation. When `scale_factor` is passed as a parameter,
|
||||
@ -95,7 +95,7 @@ struct TORCH_API InterpolateFuncOptions {
|
||||
/// used in the interpolation computation. Note that when `scale_factor` is
|
||||
/// floating-point, the recomputed scale_factor may differ from the one passed
|
||||
/// in due to rounding and precision issues.
|
||||
TORCH_ARG(std::optional<bool>, recompute_scale_factor) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<bool>, recompute_scale_factor) = std::nullopt;
|
||||
|
||||
/// flag to apply anti-aliasing. Using anti-alias
|
||||
/// option together with :attr:`align_corners` equals "False", interpolation
|
||||
|
@ -28,7 +28,7 @@ struct TORCH_API GridSampleFuncOptions {
|
||||
/// padding mode for outside grid values. Default: Zeros
|
||||
TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros;
|
||||
/// Specifies perspective to pixel as point. Default: false
|
||||
TORCH_ARG(std::optional<bool>, align_corners) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<bool>, align_corners) = std::nullopt;
|
||||
};
|
||||
|
||||
} // namespace functional
|
||||
|
@ -64,7 +64,7 @@ inline double clip_grad_norm_(
|
||||
// synchronizing the CPU and the gradients' device until the very end to
|
||||
// preserve async execution on the device. When checking for finite-ness, this
|
||||
// optional ensures we only sync once.
|
||||
std::optional<double> total_norm = c10::nullopt;
|
||||
std::optional<double> total_norm = std::nullopt;
|
||||
if (error_if_nonfinite) {
|
||||
total_norm = total_norm_tensor.item().toDouble();
|
||||
TORCH_CHECK(
|
||||
@ -79,7 +79,7 @@ inline double clip_grad_norm_(
|
||||
|
||||
auto clip_coef = max_norm / (total_norm_tensor + 1e-6);
|
||||
auto clip_coef_clamped =
|
||||
torch::clamp(clip_coef, c10::nullopt /* min */, 1.0 /* max */);
|
||||
torch::clamp(clip_coef, std::nullopt /* min */, 1.0 /* max */);
|
||||
for (auto& param : params_with_grad) {
|
||||
param.grad().data().mul_(clip_coef_clamped);
|
||||
}
|
||||
|
@ -15,7 +15,7 @@ inline std::optional<int64_t> _check_param_device(
|
||||
const torch::Tensor& param,
|
||||
std::optional<int64_t> old_param_device) {
|
||||
// Meet the first parameter
|
||||
if (old_param_device == c10::nullopt) {
|
||||
if (old_param_device == std::nullopt) {
|
||||
old_param_device = param.is_cuda() ? param.get_device() : -1;
|
||||
} else {
|
||||
bool warn = false;
|
||||
|
@ -17,11 +17,11 @@ struct TORCH_API LBFGSOptions : public OptimizerCloneableOptions<LBFGSOptions> {
|
||||
LBFGSOptions(double lr = 1);
|
||||
TORCH_ARG(double, lr) = 1;
|
||||
TORCH_ARG(int64_t, max_iter) = 20;
|
||||
TORCH_ARG(std::optional<int64_t>, max_eval) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<int64_t>, max_eval) = std::nullopt;
|
||||
TORCH_ARG(double, tolerance_grad) = 1e-7;
|
||||
TORCH_ARG(double, tolerance_change) = 1e-9;
|
||||
TORCH_ARG(int64_t, history_size) = 100;
|
||||
TORCH_ARG(std::optional<std::string>, line_search_fn) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<std::string>, line_search_fn) = std::nullopt;
|
||||
|
||||
public:
|
||||
void serialize(torch::serialize::InputArchive& archive) override;
|
||||
@ -45,7 +45,7 @@ struct TORCH_API LBFGSParamState
|
||||
TORCH_ARG(std::deque<Tensor>, old_dirs);
|
||||
TORCH_ARG(std::deque<Tensor>, old_stps);
|
||||
TORCH_ARG(std::deque<Tensor>, ro);
|
||||
TORCH_ARG(std::optional<std::vector<Tensor>>, al) = c10::nullopt;
|
||||
TORCH_ARG(std::optional<std::vector<Tensor>>, al) = std::nullopt;
|
||||
|
||||
public:
|
||||
void serialize(torch::serialize::InputArchive& archive) override;
|
||||
@ -66,13 +66,13 @@ class TORCH_API LBFGS : public Optimizer {
|
||||
TORCH_CHECK(
|
||||
param_groups_.size() == 1,
|
||||
"LBFGS doesn't support per-parameter options (parameter groups)");
|
||||
if (defaults.max_eval() == c10::nullopt) {
|
||||
if (defaults.max_eval() == std::nullopt) {
|
||||
auto max_eval_val = (defaults.max_iter() * 5) / 4;
|
||||
static_cast<LBFGSOptions&>(param_groups_[0].options())
|
||||
.max_eval(max_eval_val);
|
||||
static_cast<LBFGSOptions&>(*defaults_.get()).max_eval(max_eval_val);
|
||||
}
|
||||
_numel_cache = c10::nullopt;
|
||||
_numel_cache = std::nullopt;
|
||||
}
|
||||
explicit LBFGS(std::vector<Tensor> params, LBFGSOptions defaults = {})
|
||||
: LBFGS({OptimizerParamGroup(std::move(params))}, defaults) {}
|
||||
|
@ -186,22 +186,22 @@ class TORCH_API Optimizer {
|
||||
};
|
||||
|
||||
/* How do we decide whether to serialize undefined tensors or
|
||||
c10::nullopt values into the output archive?
|
||||
std::nullopt values into the output archive?
|
||||
Answer: we strictly follow the behavior of Python API. To be more specific:
|
||||
|
||||
For optimizer options:
|
||||
a) For undefined tensor: currently no tensor is used as an options argument in
|
||||
Python API, so we don't need to worry about it now. b) For c10::nullopt value:
|
||||
we serialize c10::nullopt values into the output archive, to follow the exact
|
||||
Python API, so we don't need to worry about it now. b) For std::nullopt value:
|
||||
we serialize std::nullopt values into the output archive, to follow the exact
|
||||
same behavior as Python API.
|
||||
|
||||
For optimizer param state:
|
||||
a) For undefined tensor: in param state, undefined tensor in C++ impl is
|
||||
equivalent to missing key in Python impl. Since we don't serialize missing keys
|
||||
in Python API, we skip undefined tensors when serializing the param state. b)
|
||||
For c10::nullopt value: in param state, c10::nullopt value in C++ impl is
|
||||
For std::nullopt value: in param state, std::nullopt value in C++ impl is
|
||||
equivalent to missing key in Python impl. Since we don't serialize missing keys
|
||||
in Python API, we skip c10::nullopt values when serializing the param state. */
|
||||
in Python API, we skip std::nullopt values when serializing the param state. */
|
||||
|
||||
/// Serializes an `Optimizer` into an `OutputArchive`.
|
||||
TORCH_API serialize::OutputArchive& operator<<(
|
||||
|
@ -1,10 +1,10 @@
|
||||
#pragma once
|
||||
|
||||
#include <c10/core/Device.h>
|
||||
#include <c10/util/Optional.h>
|
||||
#include <torch/csrc/Export.h>
|
||||
#include <torch/csrc/jit/api/module.h>
|
||||
#include <torch/types.h>
|
||||
#include <optional>
|
||||
|
||||
#include <iosfwd>
|
||||
#include <memory>
|
||||
@ -76,27 +76,27 @@ class TORCH_API InputArchive final {
|
||||
/// is not specified, the module is loaded to the original device.
|
||||
void load_from(
|
||||
const std::string& filename,
|
||||
std::optional<torch::Device> device = c10::nullopt);
|
||||
std::optional<torch::Device> device = std::nullopt);
|
||||
|
||||
/// Loads the `InputArchive` from a serialized representation stored in the
|
||||
/// given `stream`. Storage are remapped using device option. If device
|
||||
/// is not specified, the module is loaded to the original device.
|
||||
void load_from(
|
||||
std::istream& stream,
|
||||
std::optional<torch::Device> device = c10::nullopt);
|
||||
std::optional<torch::Device> device = std::nullopt);
|
||||
|
||||
// Loads given the specified flat array.
|
||||
void load_from(
|
||||
const char* data,
|
||||
size_t size,
|
||||
std::optional<torch::Device> device = c10::nullopt);
|
||||
std::optional<torch::Device> device = std::nullopt);
|
||||
|
||||
// Loads given the specified read and size functions.
|
||||
void load_from(
|
||||
const std::function<size_t(uint64_t pos, void* buf, size_t nbytes)>&
|
||||
read_func,
|
||||
const std::function<size_t(void)>& size_func,
|
||||
std::optional<torch::Device> device = c10::nullopt);
|
||||
std::optional<torch::Device> device = std::nullopt);
|
||||
|
||||
// Returns the vector of keys in the input archive.
|
||||
std::vector<std::string> keys();
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
#include <ATen/ATen.h>
|
||||
|
||||
#include <c10/util/Optional.h>
|
||||
#include <optional>
|
||||
|
||||
#include <torch/csrc/autograd/generated/variable_factories.h>
|
||||
#include <torch/csrc/autograd/variable.h>
|
||||
@ -38,7 +38,7 @@ namespace torch {
|
||||
// the `func()` function defined in `at::` namespace is always hidden.
|
||||
using namespace at; // NOLINT
|
||||
|
||||
using c10::nullopt;
|
||||
using std::nullopt;
|
||||
using std::optional;
|
||||
|
||||
using Dtype = at::ScalarType;
|
||||
|
@ -11,7 +11,7 @@ namespace jit {
|
||||
|
||||
std::shared_ptr<CompilationUnit> compile(const std::string& source) {
|
||||
auto module = std::make_shared<CompilationUnit>();
|
||||
module->define(c10::nullopt, source, nativeResolver(), nullptr);
|
||||
module->define(std::nullopt, source, nativeResolver(), nullptr);
|
||||
return module;
|
||||
}
|
||||
|
||||
|
@ -130,7 +130,7 @@ void SoftmaxImpl::pretty_print(std::ostream& stream) const {
|
||||
}
|
||||
|
||||
Tensor SoftmaxImpl::forward(const Tensor& input) {
|
||||
return F::detail::softmax(input, options.dim(), c10::nullopt);
|
||||
return F::detail::softmax(input, options.dim(), std::nullopt);
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
@ -144,7 +144,7 @@ void SoftminImpl::pretty_print(std::ostream& stream) const {
|
||||
}
|
||||
|
||||
Tensor SoftminImpl::forward(const Tensor& input) {
|
||||
return F::detail::softmin(input, options.dim(), c10::nullopt);
|
||||
return F::detail::softmin(input, options.dim(), std::nullopt);
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
@ -159,7 +159,7 @@ void LogSoftmaxImpl::pretty_print(std::ostream& stream) const {
|
||||
}
|
||||
|
||||
Tensor LogSoftmaxImpl::forward(const Tensor& input) {
|
||||
return F::detail::log_softmax(input, options.dim(), c10::nullopt);
|
||||
return F::detail::log_softmax(input, options.dim(), std::nullopt);
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
@ -174,7 +174,7 @@ Tensor Softmax2dImpl::forward(const Tensor& input) {
|
||||
TORCH_CHECK(
|
||||
input.dim() == 4 || input.dim() == 3,
|
||||
"Softmax2d requires a 3D or 4D tensor as input");
|
||||
return F::detail::softmax(input, /*dim=*/-3, c10::nullopt);
|
||||
return F::detail::softmax(input, /*dim=*/-3, std::nullopt);
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
|
@ -176,7 +176,7 @@ std::vector<int64_t> ConvTransposeNdImpl<D, Derived>::_output_padding(
|
||||
std::vector<int64_t> ret;
|
||||
std::optional<at::IntArrayRef> output_size_ = output_size;
|
||||
|
||||
if (output_size_ == c10::nullopt) {
|
||||
if (output_size_ == std::nullopt) {
|
||||
ret = at::IntArrayRef(this->options.output_padding()).vec();
|
||||
} else {
|
||||
auto k = input.dim() - 2;
|
||||
|
@ -20,7 +20,7 @@ EmbeddingImpl::EmbeddingImpl(EmbeddingOptions options_)
|
||||
}
|
||||
|
||||
void EmbeddingImpl::reset() {
|
||||
if (options.padding_idx() != c10::nullopt) {
|
||||
if (options.padding_idx() != std::nullopt) {
|
||||
if (*options.padding_idx() > 0) {
|
||||
TORCH_CHECK(
|
||||
*options.padding_idx() < options.num_embeddings(),
|
||||
@ -50,7 +50,7 @@ void EmbeddingImpl::reset() {
|
||||
|
||||
void EmbeddingImpl::reset_parameters() {
|
||||
torch::nn::init::normal_(weight);
|
||||
if (options.padding_idx() != c10::nullopt) {
|
||||
if (options.padding_idx() != std::nullopt) {
|
||||
torch::NoGradGuard no_grad;
|
||||
weight[*options.padding_idx()].fill_(0);
|
||||
}
|
||||
@ -59,10 +59,10 @@ void EmbeddingImpl::reset_parameters() {
|
||||
void EmbeddingImpl::pretty_print(std::ostream& stream) const {
|
||||
stream << "torch::nn::Embedding(num_embeddings=" << options.num_embeddings()
|
||||
<< ", embedding_dim=" << options.embedding_dim();
|
||||
if (options.padding_idx() != c10::nullopt) {
|
||||
if (options.padding_idx() != std::nullopt) {
|
||||
stream << ", padding_idx=" << *options.padding_idx();
|
||||
}
|
||||
if (options.max_norm() != c10::nullopt) {
|
||||
if (options.max_norm() != std::nullopt) {
|
||||
stream << ", max_norm=" << *options.max_norm();
|
||||
}
|
||||
if (options.norm_type() != 2) {
|
||||
@ -154,7 +154,7 @@ void EmbeddingBagImpl::pretty_print(std::ostream& stream) const {
|
||||
stream << "torch::nn::EmbeddingBag(num_embeddings="
|
||||
<< options.num_embeddings()
|
||||
<< ", embedding_dim=" << options.embedding_dim();
|
||||
if (options.max_norm() != c10::nullopt) {
|
||||
if (options.max_norm() != std::nullopt) {
|
||||
stream << ", max_norm=" << *options.max_norm();
|
||||
}
|
||||
if (options.norm_type() != 2) {
|
||||
|
@ -281,19 +281,19 @@ FractionalMaxPool2dImpl::FractionalMaxPool2dImpl(
|
||||
void FractionalMaxPool2dImpl::reset() {
|
||||
_random_samples =
|
||||
register_buffer("_random_samples", options._random_samples());
|
||||
if (options.output_size() == c10::nullopt &&
|
||||
options.output_ratio() == c10::nullopt) {
|
||||
if (options.output_size() == std::nullopt &&
|
||||
options.output_ratio() == std::nullopt) {
|
||||
TORCH_CHECK(
|
||||
false,
|
||||
"FractionalMaxPool2d requires specifying either ",
|
||||
"an output size, or a pooling ratio");
|
||||
}
|
||||
if (options.output_size() != c10::nullopt &&
|
||||
options.output_ratio() != c10::nullopt) {
|
||||
if (options.output_size() != std::nullopt &&
|
||||
options.output_ratio() != std::nullopt) {
|
||||
TORCH_CHECK(
|
||||
false, "only one of output_size and output_ratio may be specified");
|
||||
}
|
||||
if (options.output_ratio() != c10::nullopt) {
|
||||
if (options.output_ratio() != std::nullopt) {
|
||||
at::ArrayRef<double> output_ratio =
|
||||
at::ArrayRef<double>(options.output_ratio().value());
|
||||
if (!(0 < output_ratio[0] && output_ratio[0] < 1 && 0 < output_ratio[1] &&
|
||||
@ -340,19 +340,19 @@ FractionalMaxPool3dImpl::FractionalMaxPool3dImpl(
|
||||
void FractionalMaxPool3dImpl::reset() {
|
||||
_random_samples =
|
||||
register_buffer("_random_samples", options._random_samples());
|
||||
if (options.output_size() == c10::nullopt &&
|
||||
options.output_ratio() == c10::nullopt) {
|
||||
if (options.output_size() == std::nullopt &&
|
||||
options.output_ratio() == std::nullopt) {
|
||||
TORCH_CHECK(
|
||||
false,
|
||||
"FractionalMaxPool3d requires specifying either ",
|
||||
"an output size, or a pooling ratio");
|
||||
}
|
||||
if (options.output_size() != c10::nullopt &&
|
||||
options.output_ratio() != c10::nullopt) {
|
||||
if (options.output_size() != std::nullopt &&
|
||||
options.output_ratio() != std::nullopt) {
|
||||
TORCH_CHECK(
|
||||
false, "only one of output_size and output_ratio may be specified");
|
||||
}
|
||||
if (options.output_ratio() != c10::nullopt) {
|
||||
if (options.output_ratio() != std::nullopt) {
|
||||
at::ArrayRef<double> output_ratio =
|
||||
at::ArrayRef<double>(options.output_ratio().value());
|
||||
if (!(0 < output_ratio[0] && output_ratio[0] < 1 && 0 < output_ratio[1] &&
|
||||
|
@ -15,7 +15,7 @@ void UpsampleImpl::reset() {}
|
||||
|
||||
void UpsampleImpl::pretty_print(std::ostream& stream) const {
|
||||
stream << "torch::nn::Upsample(";
|
||||
if (options.scale_factor() != c10::nullopt) {
|
||||
if (options.scale_factor() != std::nullopt) {
|
||||
stream << "scale_factor=" << at::ArrayRef<double>(*options.scale_factor());
|
||||
} else {
|
||||
stream << "size=" << at::ArrayRef<int64_t>(*options.size());
|
||||
@ -43,7 +43,7 @@ Tensor UpsampleImpl::forward(const Tensor& input) {
|
||||
options.scale_factor(),
|
||||
mode,
|
||||
options.align_corners(),
|
||||
c10::nullopt,
|
||||
std::nullopt,
|
||||
false);
|
||||
}
|
||||
|
||||
|
@ -68,7 +68,7 @@ bool if_container_equal(T lhs, T rhs) {
|
||||
|
||||
bool operator==(const LBFGSParamState& lhs, const LBFGSParamState& rhs) {
|
||||
auto isNull = [](const std::optional<std::vector<Tensor>>& val) {
|
||||
return val == c10::nullopt;
|
||||
return val == std::nullopt;
|
||||
};
|
||||
return (lhs.func_evals() == rhs.func_evals()) &&
|
||||
(lhs.n_iter() == rhs.n_iter()) && (lhs.t() == rhs.t()) &&
|
||||
@ -97,7 +97,7 @@ void LBFGSParamState::serialize(
|
||||
_TORCH_OPTIM_SERIALIZE_TORCH_ARG_DEQUE(old_stps);
|
||||
_TORCH_OPTIM_SERIALIZE_TORCH_ARG_DEQUE(ro);
|
||||
// Python version only serializes state vars if explicitly defined
|
||||
if (al() != c10::nullopt) {
|
||||
if (al() != std::nullopt) {
|
||||
_TORCH_OPTIM_SERIALIZE_TORCH_ARG(al);
|
||||
}
|
||||
}
|
||||
@ -131,7 +131,7 @@ Tensor LBFGS::_gather_flat_grad() {
|
||||
}
|
||||
|
||||
int64_t LBFGS::_numel() {
|
||||
if (_numel_cache == c10::nullopt) {
|
||||
if (_numel_cache == std::nullopt) {
|
||||
auto res = 0;
|
||||
for (const auto& p : param_groups_.at(0).params()) {
|
||||
res += p.numel();
|
||||
@ -194,12 +194,12 @@ static double _cubic_interpolate(
|
||||
double x2,
|
||||
double f2,
|
||||
double g2,
|
||||
std::optional<std::tuple<double, double>> bounds = c10::nullopt) {
|
||||
std::optional<std::tuple<double, double>> bounds = std::nullopt) {
|
||||
// ported from https://github.com/torch/optim/blob/master/polyinterp.lua
|
||||
// Compute bounds of interpolation area
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
|
||||
double xmin_bound, xmax_bound;
|
||||
if (bounds != c10::nullopt) {
|
||||
if (bounds != std::nullopt) {
|
||||
std::tie(xmin_bound, xmax_bound) = *bounds;
|
||||
} else {
|
||||
std::tie(xmin_bound, xmax_bound) =
|
||||
@ -509,7 +509,7 @@ Tensor LBFGS::step(LossClosure closure) {
|
||||
// multiplied by the gradient
|
||||
int64_t num_old = static_cast<int64_t>(old_dirs.size());
|
||||
|
||||
if (state.al() == c10::nullopt) {
|
||||
if (state.al() == std::nullopt) {
|
||||
state.al(std::vector<Tensor>(history_size));
|
||||
}
|
||||
auto& al = state.al();
|
||||
@ -557,7 +557,7 @@ Tensor LBFGS::step(LossClosure closure) {
|
||||
|
||||
// optional line search: user function
|
||||
auto ls_func_evals = 0;
|
||||
if (line_search_fn != c10::nullopt) {
|
||||
if (line_search_fn != std::nullopt) {
|
||||
TORCH_CHECK(
|
||||
*line_search_fn == "strong_wolfe",
|
||||
"only 'strong_wolfe' is supported");
|
||||
@ -627,7 +627,7 @@ void LBFGS::load(serialize::InputArchive& archive) {
|
||||
TORCH_WARN(
|
||||
"Your serialized LBFGS optimizer is still using the old serialization format. "
|
||||
"The func_evals and n_iter value in state will be set to 0, ro will be set to an empty deque "
|
||||
"and al will be set to c10::nullopt because the old LBFGS optimizer didn't save these values."
|
||||
"and al will be set to std::nullopt because the old LBFGS optimizer didn't save these values."
|
||||
"You should re-save your LBFGS optimizer to use the new serialization format.");
|
||||
Tensor d, t, H_diag, prev_flat_grad, prev_loss;
|
||||
std::deque<Tensor> old_dirs, old_stps;
|
||||
|
@ -93,20 +93,20 @@ void InputArchive::read(const std::string& key, InputArchive& archive) {
|
||||
|
||||
void InputArchive::load_from(
|
||||
const std::string& filename,
|
||||
std::optional<torch::Device> device /*= c10::nullopt*/) {
|
||||
std::optional<torch::Device> device /*= std::nullopt*/) {
|
||||
module_ = torch::jit::load(filename, std::move(device));
|
||||
}
|
||||
|
||||
void InputArchive::load_from(
|
||||
std::istream& stream,
|
||||
std::optional<torch::Device> device /*= c10::nullopt*/) {
|
||||
std::optional<torch::Device> device /*= std::nullopt*/) {
|
||||
module_ = torch::jit::load(stream, std::move(device));
|
||||
}
|
||||
|
||||
void InputArchive::load_from(
|
||||
const char* data,
|
||||
size_t size,
|
||||
std::optional<torch::Device> device /*= c10::nullopt*/) {
|
||||
std::optional<torch::Device> device /*= std::nullopt*/) {
|
||||
using caffe2::serialize::ReadAdapterInterface;
|
||||
class OurAdapter : public ReadAdapterInterface {
|
||||
public:
|
||||
@ -136,7 +136,7 @@ void InputArchive::load_from(
|
||||
void InputArchive::load_from(
|
||||
const std::function<size_t(uint64_t, void*, size_t)>& read_func,
|
||||
const std::function<size_t(void)>& size_func,
|
||||
std::optional<torch::Device> device /*= c10::nullopt*/) {
|
||||
std::optional<torch::Device> device /*= std::nullopt*/) {
|
||||
using caffe2::serialize::ReadAdapterInterface;
|
||||
class OurAdapter : public ReadAdapterInterface {
|
||||
public:
|
||||
|
@ -630,7 +630,7 @@ Tensor div_tensor_self_backward(
|
||||
T other,
|
||||
ScalarType self_st) {
|
||||
return div_tensor_self_backward(
|
||||
grad, std::move(other), self_st, c10::nullopt);
|
||||
grad, std::move(other), self_st, std::nullopt);
|
||||
}
|
||||
template Tensor div_tensor_self_backward(const Tensor&, Tensor, ScalarType);
|
||||
template Tensor div_tensor_self_backward(const Tensor&, Scalar, ScalarType);
|
||||
@ -652,7 +652,7 @@ Tensor div_tensor_other_backward(
|
||||
const Tensor& grad,
|
||||
const Tensor& self,
|
||||
const Tensor& other) {
|
||||
return div_tensor_other_backward(grad, self, other, c10::nullopt);
|
||||
return div_tensor_other_backward(grad, self, other, std::nullopt);
|
||||
}
|
||||
|
||||
Tensor permute_backwards(const Tensor& grad, IntArrayRef fwd_dims) {
|
||||
@ -1282,12 +1282,12 @@ Tensor convolution_jvp(
|
||||
at::SymIntArrayRef output_padding,
|
||||
const c10::SymInt& groups) {
|
||||
auto bias_t_opt =
|
||||
bias_t.defined() ? std::optional<at::Tensor>(bias_t) : c10::nullopt;
|
||||
bias_t.defined() ? std::optional<at::Tensor>(bias_t) : std::nullopt;
|
||||
return (
|
||||
at::convolution_symint(
|
||||
input_t,
|
||||
weight_p,
|
||||
c10::nullopt,
|
||||
std::nullopt,
|
||||
stride,
|
||||
padding,
|
||||
dilation,
|
||||
@ -1324,12 +1324,12 @@ Tensor _convolution_jvp(
|
||||
bool cudnn_enabled,
|
||||
bool allow_tf32) {
|
||||
auto bias_t_opt =
|
||||
bias_t.defined() ? std::optional<at::Tensor>(bias_t) : c10::nullopt;
|
||||
bias_t.defined() ? std::optional<at::Tensor>(bias_t) : std::nullopt;
|
||||
return (
|
||||
at::_convolution_symint(
|
||||
input_t,
|
||||
weight_p,
|
||||
c10::nullopt,
|
||||
std::nullopt,
|
||||
stride,
|
||||
padding,
|
||||
dilation,
|
||||
@ -6193,7 +6193,7 @@ Tensor batch_norm_jvp(
|
||||
|
||||
std::optional<Tensor> result_p = weight_p.defined()
|
||||
? std::optional<Tensor>((input_p - mean_p) * invstd_p)
|
||||
: c10::nullopt;
|
||||
: std::nullopt;
|
||||
return _affine_jvp(
|
||||
result_p,
|
||||
result_t,
|
||||
@ -6232,7 +6232,7 @@ Tensor layer_norm_jvp(
|
||||
|
||||
std::optional<Tensor> result_p = weight_p.defined()
|
||||
? std::optional<Tensor>((input_p - mean_p) * invstd_p)
|
||||
: c10::nullopt;
|
||||
: std::nullopt;
|
||||
return _affine_jvp(
|
||||
result_p,
|
||||
result_t,
|
||||
@ -6273,7 +6273,7 @@ Tensor group_norm_jvp(
|
||||
/*eps=*/0)
|
||||
.view(input_shape);
|
||||
|
||||
std::optional<Tensor> result_p = c10::nullopt;
|
||||
std::optional<Tensor> result_p = std::nullopt;
|
||||
if (weight_p.defined()) {
|
||||
std::vector<int64_t> view_size(input_t_reshaped.dim(), 1);
|
||||
view_size[1] = input_t_reshaped.size(1);
|
||||
@ -6706,7 +6706,7 @@ std::tuple<Tensor, Tensor> _cudnn_convolution_backward(
|
||||
grad_output,
|
||||
self,
|
||||
weight,
|
||||
c10::nullopt,
|
||||
std::nullopt,
|
||||
stride,
|
||||
padding,
|
||||
dilation,
|
||||
@ -6956,7 +6956,7 @@ Tensor to_sparse_backward(
|
||||
if (self_layout == c10::kStrided) {
|
||||
return grad.to_dense();
|
||||
} else {
|
||||
OptionalIntArrayRef blocksize = c10::nullopt;
|
||||
OptionalIntArrayRef blocksize = std::nullopt;
|
||||
if (self_blocksize.has_value()) {
|
||||
blocksize = c10::asIntArrayRefSlowOpt(*self_blocksize);
|
||||
}
|
||||
|
@ -39,7 +39,7 @@ TORCH_API inline std::optional<Tensor> wrap_opt_if(
|
||||
const Tensor& t,
|
||||
const bool cond) {
|
||||
using OptTensor = std::optional<Tensor>;
|
||||
return cond ? OptTensor(t) : static_cast<OptTensor>(c10::nullopt);
|
||||
return cond ? OptTensor(t) : static_cast<OptTensor>(std::nullopt);
|
||||
}
|
||||
|
||||
TORCH_API Tensor
|
||||
|
@ -1,11 +1,11 @@
|
||||
#include <ATen/TracerMode.h>
|
||||
#include <ATen/core/op_registration/op_registration.h>
|
||||
#include <c10/core/ScalarType.h>
|
||||
#include <c10/util/Optional.h>
|
||||
#include <c10/util/irange.h>
|
||||
#include <torch/csrc/jit/frontend/tracer.h>
|
||||
#include <torch/csrc/jit/ir/ir.h>
|
||||
#include <torch/library.h>
|
||||
#include <optional>
|
||||
|
||||
using namespace at;
|
||||
|
||||
|
@ -2,7 +2,6 @@
|
||||
#include <ATen/TracerMode.h>
|
||||
#include <ATen/core/op_registration/op_registration.h>
|
||||
#include <c10/core/ScalarType.h>
|
||||
#include <c10/util/Optional.h>
|
||||
#include <c10/util/irange.h>
|
||||
#include <torch/csrc/autograd/FunctionsManual.h>
|
||||
#include <torch/csrc/autograd/VariableTypeUtils.h>
|
||||
@ -11,6 +10,7 @@
|
||||
#include <torch/csrc/autograd/generated/VariableType.h>
|
||||
#include <torch/csrc/autograd/generated/ViewFuncs.h>
|
||||
#include <torch/library.h>
|
||||
#include <optional>
|
||||
|
||||
#include <utility>
|
||||
|
||||
|
@ -217,7 +217,7 @@ inline at::Tensor as_view(
|
||||
tensor,
|
||||
diff_view_meta->get_backward_view().chain(
|
||||
base, tensor, std::move(view_func), std::move(rev_view_func)),
|
||||
c10::nullopt,
|
||||
std::nullopt,
|
||||
/*shared_view_info*/ true,
|
||||
creation_meta,
|
||||
allow_tensor_metadata_change);
|
||||
@ -225,7 +225,7 @@ inline at::Tensor as_view(
|
||||
return make_variable_differentiable_view(
|
||||
tensor,
|
||||
ViewInfo(base, std::move(view_func), std::move(rev_view_func)),
|
||||
c10::nullopt,
|
||||
std::nullopt,
|
||||
/*shared_view_info*/ true,
|
||||
creation_meta,
|
||||
allow_tensor_metadata_change);
|
||||
|
@ -47,7 +47,7 @@ namespace torch::autograd {
|
||||
TORCH_API void backward(
|
||||
const variable_list& tensors,
|
||||
const variable_list& grad_tensors = {},
|
||||
std::optional<bool> retain_graph = c10::nullopt,
|
||||
std::optional<bool> retain_graph = std::nullopt,
|
||||
bool create_graph = false,
|
||||
const variable_list& inputs = {});
|
||||
|
||||
@ -81,7 +81,7 @@ TORCH_API variable_list grad(
|
||||
const variable_list& outputs,
|
||||
const variable_list& inputs,
|
||||
const variable_list& grad_outputs = {},
|
||||
std::optional<bool> retain_graph = c10::nullopt,
|
||||
std::optional<bool> retain_graph = std::nullopt,
|
||||
bool create_graph = false,
|
||||
bool allow_unused = false);
|
||||
|
||||
|
@ -345,7 +345,7 @@ static void autogradNotImplementedFallbackImpl(
|
||||
[&](size_t idx, size_t _, const at::Tensor& t) {
|
||||
storage_saved.push_back(
|
||||
t.has_storage() ? std::optional<c10::Storage>(t.storage())
|
||||
: c10::nullopt);
|
||||
: std::nullopt);
|
||||
impl_saved.push_back(t.getIntrusivePtr());
|
||||
},
|
||||
&stack_args_copy,
|
||||
|
@ -735,10 +735,10 @@ void GraphTask::exec_post_processing() {
|
||||
for (const auto& leaf_stream : leaf_streams) {
|
||||
// stash_current_cuda/privateuse1_streams() stashed streams for all device
|
||||
// IDs that already had a CUDA/privateuse1 context before the GraphTask
|
||||
// executed. For inactive devices, it stashed a c10::nullopt. I don't
|
||||
// executed. For inactive devices, it stashed a std::nullopt. I don't
|
||||
// expect GraphTask's backward pass ran leaf nodes on any new devices, so
|
||||
// the stashed streams should be enough. If leaf_stream.device_index()
|
||||
// happens to be for a new device, operator* on the c10::nullopt should
|
||||
// happens to be for a new device, operator* on the std::nullopt should
|
||||
// throw an error.
|
||||
const auto caller_current_stream =
|
||||
// NOLINTNEXTLINE(bugprone-unchecked-optional-access)
|
||||
@ -1554,7 +1554,7 @@ void GraphTask::stash_current_streams() {
|
||||
idx)) {
|
||||
caller_current_streams_[idx] = guard.getStream({accelerator, idx});
|
||||
} else {
|
||||
caller_current_streams_[idx] = c10::nullopt;
|
||||
caller_current_streams_[idx] = std::nullopt;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -242,14 +242,14 @@ struct TORCH_API Node : std::enable_shared_from_this<Node> {
|
||||
std::optional<c10::Stream> stream() {
|
||||
auto opt_device_type = at::getAccelerator();
|
||||
if (!opt_device_type.has_value()) {
|
||||
return c10::nullopt;
|
||||
return std::nullopt;
|
||||
}
|
||||
for (const auto& metadata : input_metadata_) {
|
||||
if (metadata.device().type() == opt_device_type.value())
|
||||
return metadata.stream();
|
||||
}
|
||||
|
||||
return c10::nullopt;
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
void clear_input_metadata() {
|
||||
|
@ -224,7 +224,7 @@ struct TORCH_API AccumulateGrad : public Node {
|
||||
// variable_grad += new_grad;
|
||||
// } else {
|
||||
// result = at::empty_strided(variable.sizes(), variable.strides(),
|
||||
// variable.options().memory_format(c10::nullopt));
|
||||
// variable.options().memory_format(std::nullopt));
|
||||
// update_grad(at::native::add_out(result, variable_grad,
|
||||
// new_grad, 1.0);
|
||||
// }
|
||||
|
@ -105,7 +105,7 @@ variable_list Gather::apply(variable_list&& inputs) {
|
||||
std::move(source_devices),
|
||||
std::move(input_sizes),
|
||||
dim_,
|
||||
/*streams=*/c10::nullopt,
|
||||
/*streams=*/std::nullopt,
|
||||
/*unsqueeze_scalars=*/unsqueeze_scalars);
|
||||
grad_fn->set_next_edges(collect_next_edges(inputs));
|
||||
}
|
||||
|
@ -6,7 +6,7 @@
|
||||
|
||||
#include <ATen/ATen.h>
|
||||
#include <c10/cuda/CUDAStream.h>
|
||||
#include <c10/util/Optional.h>
|
||||
#include <optional>
|
||||
|
||||
#include <cstddef>
|
||||
#include <vector>
|
||||
@ -17,10 +17,10 @@ namespace autograd {
|
||||
struct TORCH_CUDA_CU_API Scatter : public Node {
|
||||
explicit Scatter(
|
||||
std::vector<at::Device> devices,
|
||||
std::optional<std::vector<int64_t>> chunk_sizes = c10::nullopt,
|
||||
std::optional<std::vector<int64_t>> chunk_sizes = std::nullopt,
|
||||
int64_t dim = 0,
|
||||
std::optional<std::vector<std::optional<at::cuda::CUDAStream>>> streams =
|
||||
c10::nullopt,
|
||||
std::nullopt,
|
||||
bool unsqueeze_scalars = false);
|
||||
~Scatter() override;
|
||||
|
||||
|
@ -1084,7 +1084,7 @@ static PyObject* push_on_torch_dispatch_stack(
|
||||
using c10::impl::TorchDispatchModeKey;
|
||||
// When we push a mode onto the mode stack, we need to
|
||||
// check if it's an "infra" mode, by checking its _mode_key attribute.
|
||||
std::optional<c10::impl::TorchDispatchModeKey> mode_key = c10::nullopt;
|
||||
std::optional<c10::impl::TorchDispatchModeKey> mode_key = std::nullopt;
|
||||
py::object maybe_mode_key_obj =
|
||||
PyObject_FastGetAttrString(arg, "_mode_key");
|
||||
if (maybe_mode_key_obj) {
|
||||
@ -1108,7 +1108,7 @@ static PyObject* pop_torch_dispatch_stack(
|
||||
PyObject* _unused,
|
||||
PyObject* maybe_mode_key) {
|
||||
HANDLE_TH_ERRORS
|
||||
std::optional<c10::impl::TorchDispatchModeKey> mode_key = c10::nullopt;
|
||||
std::optional<c10::impl::TorchDispatchModeKey> mode_key = std::nullopt;
|
||||
PyObject* r = nullptr;
|
||||
if (maybe_mode_key != Py_None) {
|
||||
mode_key = py::cast<c10::impl::TorchDispatchModeKey>(maybe_mode_key);
|
||||
@ -1174,7 +1174,7 @@ static PyObject* get_dispatch_mode(PyObject* _unused, PyObject* arg) {
|
||||
auto mode_key = py::cast<c10::impl::TorchDispatchModeKey>(arg);
|
||||
|
||||
auto maybe_mode = c10::impl::TorchDispatchModeTLS::get_mode(mode_key);
|
||||
if (maybe_mode == c10::nullopt) {
|
||||
if (maybe_mode == std::nullopt) {
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
// NOLINTNEXTLINE(bugprone-unchecked-optional-access)
|
||||
@ -1190,7 +1190,7 @@ static PyObject* unset_dispatch_mode(PyObject* _unused, PyObject* arg) {
|
||||
auto mode_key = py::cast<c10::impl::TorchDispatchModeKey>(arg);
|
||||
|
||||
const auto maybe_mode = c10::impl::TorchDispatchModeTLS::unset_mode(mode_key);
|
||||
if (maybe_mode == c10::nullopt) {
|
||||
if (maybe_mode == std::nullopt) {
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
// NOLINTNEXTLINE(bugprone-unchecked-optional-access)
|
||||
|
@ -11,7 +11,7 @@
|
||||
#include <c10/core/DeviceGuard.h>
|
||||
#include <c10/core/Event.h>
|
||||
#include <c10/core/StreamGuard.h>
|
||||
#include <c10/util/Optional.h>
|
||||
#include <optional>
|
||||
|
||||
#include <cstddef>
|
||||
#include <utility>
|
||||
@ -159,7 +159,7 @@ void InputBuffer::add(
|
||||
// Accumulation happens on the var device's default stream.
|
||||
|
||||
TORCH_INTERNAL_ASSERT(device_of(var));
|
||||
std::optional<c10::Stream> opt_accumulate_stream = c10::nullopt;
|
||||
std::optional<c10::Stream> opt_accumulate_stream = std::nullopt;
|
||||
const auto device_type = device_of(var).value().type();
|
||||
// NOLINTNEXTLINE(bugprone-unchecked-optional-access)
|
||||
if (device_of(var)->is_cuda() || device_of(var)->is_privateuseone()) {
|
||||
@ -179,7 +179,7 @@ void InputBuffer::add(
|
||||
record_stream_any_impl(var, *opt_accumulate_stream);
|
||||
}
|
||||
} else {
|
||||
std::optional<c10::Stream> opt_sync_stream = c10::nullopt;
|
||||
std::optional<c10::Stream> opt_sync_stream = std::nullopt;
|
||||
const auto guard = c10::impl::VirtualGuardImpl{device_type};
|
||||
if (on_consumer && !on_producer) {
|
||||
// (3a)
|
||||
|
@ -9,8 +9,8 @@
|
||||
#include <vector>
|
||||
|
||||
#include <c10/core/Stream.h>
|
||||
#include <c10/util/Optional.h>
|
||||
#include <torch/csrc/autograd/variable.h>
|
||||
#include <optional>
|
||||
|
||||
namespace torch::autograd {
|
||||
|
||||
|
@ -122,7 +122,7 @@ using torch::profiler::impl::ProfilerStateBase;
|
||||
struct ProfilerLegacyThreadLocalState : public ProfilerStateBase {
|
||||
explicit ProfilerLegacyThreadLocalState(
|
||||
const torch::profiler::impl::ProfilerConfig& config)
|
||||
: ProfilerStateBase(config), remoteProfiledEvents_{c10::nullopt} {}
|
||||
: ProfilerStateBase(config), remoteProfiledEvents_{std::nullopt} {}
|
||||
~ProfilerLegacyThreadLocalState() override = default;
|
||||
|
||||
static ProfilerLegacyThreadLocalState* getTLS() {
|
||||
|
@ -336,7 +336,7 @@ TORCH_API void enableProfilerLegacy(
|
||||
using thread_event_lists = std::vector<std::vector<LegacyEvent>>;
|
||||
TORCH_API thread_event_lists disableProfilerLegacy(
|
||||
std::optional<ProfilerDisableOptions> profilerDisableOptions =
|
||||
c10::nullopt);
|
||||
std::nullopt);
|
||||
|
||||
// adds profiledEvents to the current thread local recorded events. Each event
|
||||
// will be marked with node ID given by fromNodeId.
|
||||
@ -377,9 +377,9 @@ struct TORCH_API TLSLegacyProfilerGuard {
|
||||
explicit TLSLegacyProfilerGuard(
|
||||
const torch::profiler::impl::ProfilerConfig& cfg,
|
||||
std::optional<std::function<void(const thread_event_lists&)>>
|
||||
resultCallback = c10::nullopt,
|
||||
resultCallback = std::nullopt,
|
||||
std::optional<ProfilerDisableOptions> profilerDisableOptions =
|
||||
c10::nullopt)
|
||||
std::nullopt)
|
||||
: cb_(std::move(resultCallback)),
|
||||
profilerDisableOptions_(profilerDisableOptions) {
|
||||
enableProfilerLegacy(cfg);
|
||||
|
@ -18,7 +18,6 @@
|
||||
#include <c10/util/ApproximateClock.h>
|
||||
#include <c10/util/Exception.h>
|
||||
#include <c10/util/Logging.h>
|
||||
#include <c10/util/Optional.h>
|
||||
#include <c10/util/flat_hash_map.h>
|
||||
#include <c10/util/irange.h>
|
||||
#include <torch/csrc/autograd/python_variable.h>
|
||||
@ -29,6 +28,7 @@
|
||||
#include <torch/csrc/utils/pybind.h>
|
||||
#include <torch/csrc/utils/python_compat.h>
|
||||
#include <torch/csrc/utils/python_strings.h>
|
||||
#include <optional>
|
||||
|
||||
namespace py = pybind11;
|
||||
|
||||
@ -349,7 +349,7 @@ TensorMetadata toTensorMetadata(PyObject* self) {
|
||||
std::optional<TensorMetadata> ValueCache::recordIfTensor(py::handle p) {
|
||||
return THPVariable_CheckExact(p.ptr())
|
||||
? std::optional<TensorMetadata>{toTensorMetadata(p.ptr())}
|
||||
: c10::nullopt;
|
||||
: std::nullopt;
|
||||
}
|
||||
|
||||
std::vector<std::pair<std::string, TensorMetadata>> ValueCache::unpackTensorMap(
|
||||
@ -379,7 +379,7 @@ void ValueCache::store<CallType::PyCall>(const PyCallKey& key, no_ephemeral_t) {
|
||||
template <>
|
||||
ExtraFields<EventType::PyCall>::args_t ValueCache::load<CallType::PyCall>(
|
||||
const PyCallKey& key) const {
|
||||
return {std::get<CallType::PyCall>(state_).at(key), c10::nullopt};
|
||||
return {std::get<CallType::PyCall>(state_).at(key), std::nullopt};
|
||||
}
|
||||
|
||||
template <>
|
||||
@ -419,7 +419,7 @@ ExtraFields<EventType::PyCall>::args_t ValueCache::load<CallType::PyModuleCall>(
|
||||
return {
|
||||
/*frame_state_=*/std::get<CallType::PyCall>(state_).at(*cache.location_),
|
||||
/*module_info_=*/std::move(info),
|
||||
/*optimizer_info_=*/c10::nullopt};
|
||||
/*optimizer_info_=*/std::nullopt};
|
||||
}
|
||||
|
||||
template <>
|
||||
@ -465,7 +465,7 @@ ExtraFields<EventType::PyCall>::args_t ValueCache::load<
|
||||
return {
|
||||
// NOLINTNEXTLINE(bugprone-unchecked-optional-access)
|
||||
/*frame_state_=*/std::get<CallType::PyCall>(state_).at(*cache.location_),
|
||||
/*module_info_=*/c10::nullopt,
|
||||
/*module_info_=*/std::nullopt,
|
||||
/*optimizer_info_=*/std::move(info)};
|
||||
}
|
||||
|
||||
|
@ -778,7 +778,7 @@ static void _get_tensors_to_save(
|
||||
for (const auto i : c10::irange(num_saved)) {
|
||||
PyObject* obj = PyTuple_GET_ITEM(self->to_save, i);
|
||||
if (obj == Py_None) {
|
||||
tensors_to_save.emplace_back(c10::nullopt);
|
||||
tensors_to_save.emplace_back(std::nullopt);
|
||||
continue;
|
||||
} else if (THPVariable_Check(obj)) {
|
||||
const auto& tensor = THPVariable_Unpack(obj);
|
||||
|

@ -10,7 +10,7 @@
#include <torch/csrc/utils/object_ptr.h>

#include <c10/core/DeviceGuard.h>
#include <c10/util/Optional.h>
#include <optional>

#include <memory>
#include <optional>

@ -347,7 +347,7 @@ bool isResurrectable(THPVariable* self) {
// Check if this is hermetic. If it is, no resurrection.
if (tensor.unsafeGetTensorImpl()->pyobj_slot()->check_pyobj(
getPyInterpreter(), /*ignore_hermetic_tls=*/false) !=
c10::make_optional((PyObject*)self)) {
std::make_optional((PyObject*)self)) {
return false;
}
return true;

@ -455,7 +455,7 @@ static int THPVariable_clear(THPVariable* self) {
if (!self->cdata.unsafeIsBorrowed() &&
tensor.unsafeGetTensorImpl()->pyobj_slot()->check_pyobj(
getPyInterpreter(), /*ignore_hermetic_tls=*/false) ==
c10::make_optional((PyObject*)self)) {
std::make_optional((PyObject*)self)) {
// TODO: empirically, on OS X this assert appears to be untrue
// In test_py_tensors_multi_async_call - ProcessGroupRpcTestWithSpawn
// distributed/rpc/test_process_group_agent.py

@ -587,14 +587,14 @@ static PyObject* view_func_impl(
auto& view_func = view_info.view_fn();

// Determine new SymInt / tensor state as needed.
std::optional<std::vector<c10::SymInt>> new_symints = c10::nullopt;
std::optional<std::vector<c10::SymInt>> new_symints = std::nullopt;
if (symint_visitor_fn != Py_None) {
new_symints = map_py_func(
py::cast<py::function>(symint_visitor_fn),
view_func.get_symints());
}

std::optional<std::vector<at::Tensor>> new_tensors = c10::nullopt;
std::optional<std::vector<at::Tensor>> new_tensors = std::nullopt;
if (tensor_visitor_fn != Py_None) {
new_tensors = map_py_func(
py::cast<py::function>(tensor_visitor_fn),

@ -100,7 +100,7 @@ static inline Variable sequenceToVariable(
c10::TensorOptions options,
PyObject* seq) {
return torch::utils::indexing_tensor_from_data(
options, kLong, c10::nullopt, seq);
options, kLong, std::nullopt, seq);
}

inline Variable valueToTensor(

@ -201,7 +201,7 @@ static inline Variable applySlicing(
// as null may need to be changed after we reach a better solution for
// nested tensor size
std::optional<SymIntArrayRef> result_sizes = result.is_nested()
? std::optional<SymIntArrayRef>(c10::nullopt)
? std::optional<SymIntArrayRef>(std::nullopt)
: std::optional<SymIntArrayRef>(result.sym_sizes());
result = at::indexing::handleDimInMultiDimIndexing(
/*prev_dim_result=*/result,

@ -1,7 +1,7 @@
#pragma once
#include <ATen/record_function.h>
#include <c10/util/Optional.h>
#include <torch/custom_class.h>
#include <optional>

namespace torch::autograd::profiler {

@ -17,7 +17,7 @@ struct PythonRecordFunction : public torch::CustomClassHolder {
// callbacks.
TORCH_API c10::intrusive_ptr<PythonRecordFunction> record_function_enter_new(
const std::string& name,
const std::optional<std::string>& args = c10::nullopt);
const std::optional<std::string>& args = std::nullopt);

// Schedules RecordFunction's end callbacks to be run on completion of a future.
TORCH_API c10::intrusive_ptr<c10::ivalue::Future> _call_end_callbacks_on_fut_new(

@ -67,7 +67,7 @@ inline at::Tensor clone_obey_contract(
.new_empty_strided_symint(
variable.sym_sizes(),
variable.sym_strides(),
variable.options().memory_format(c10::nullopt))
variable.options().memory_format(std::nullopt))
.copy_(new_grad));
} else {
// (2)

@ -31,7 +31,7 @@ parse_to_conversion(PythonArgs& r, bool allow_copy) {
if (!allow_copy && !r.isNone(2))
throw std::runtime_error(".to() does not accept copy argument");
return std::make_tuple(
c10::nullopt,
std::nullopt,
r.scalartype(0),
r.toBool(1),
r.toBool(2),

@ -351,8 +351,8 @@ struct TORCH_API ViewFunc {
/// Returns a clone of this ViewFunc, optionally with the specified saved
/// state.
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = c10::nullopt,
std::optional<std::vector<at::Tensor>> = c10::nullopt) const = 0;
std::optional<std::vector<c10::SymInt>> = std::nullopt,
std::optional<std::vector<at::Tensor>> = std::nullopt) const = 0;

protected:
/// Sets the values of any SymInts in the saved state. The input vector size

@ -382,8 +382,8 @@ struct ChainedViewFunc : public ViewFunc {
}
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = c10::nullopt,
std::optional<std::vector<at::Tensor>> = c10::nullopt) const override;
std::optional<std::vector<c10::SymInt>> = std::nullopt,
std::optional<std::vector<at::Tensor>> = std::nullopt) const override;

private:
std::unique_ptr<ViewFunc> first;

@ -398,8 +398,8 @@ struct ErroringViewFunc : public ViewFunc {
TORCH_CHECK(false, error_msg);
}
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = c10::nullopt,
std::optional<std::vector<at::Tensor>> = c10::nullopt) const override {
std::optional<std::vector<c10::SymInt>> = std::nullopt,
std::optional<std::vector<at::Tensor>> = std::nullopt) const override {
return std::make_unique<ErroringViewFunc>(error_msg);
}

@ -11,9 +11,9 @@
#include <ATen/WrapDimUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <c10/util/Optional.h>
#include <c10/util/irange.h>
#include <torch/csrc/autograd/variable.h>
#include <optional>

#include <cstddef>
#include <vector>

@ -3,8 +3,8 @@
#include <ATen/ATen.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Optional.h>
#include <torch/csrc/Export.h>
#include <optional>

#include <cstddef>
#include <vector>

@ -29,15 +29,15 @@ TORCH_CUDA_CU_API std::vector<at::Tensor>& scatter_out(
std::vector<at::Tensor>& out_tensors,
int64_t dim = 0,
const std::optional<std::vector<std::optional<at::cuda::CUDAStream>>>&
streams = c10::nullopt);
streams = std::nullopt);

TORCH_CUDA_CU_API std::vector<at::Tensor> scatter(
const at::Tensor& tensor,
at::IntArrayRef devices,
const std::optional<std::vector<int64_t>>& chunk_sizes = c10::nullopt,
const std::optional<std::vector<int64_t>>& chunk_sizes = std::nullopt,
int64_t dim = 0,
const std::optional<std::vector<std::optional<at::cuda::CUDAStream>>>&
streams = c10::nullopt);
streams = std::nullopt);

TORCH_CUDA_CU_API at::Tensor& gather_out(
at::TensorList tensors,

@ -1,8 +1,8 @@
#pragma once

#include <c10/util/Optional.h>
#include <torch/csrc/Export.h>
#include <cstdint>
#include <optional>
#include <string>

namespace torch::cuda {

@ -2,9 +2,9 @@

#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Optional.h>

#include <cstddef>
#include <optional>
#include <vector>

// NCCL BFloat16 is enabled only for CUDA 11+ and NCCL versions 2.10+, or for
Some files were not shown because too many files have changed in this diff.
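Taken together, the hunks shown above apply one mechanical substitution: the <c10/util/Optional.h> include gives way to the standard <optional> header, and uses of c10::nullopt and c10::make_optional become std::nullopt and std::make_optional. The short sketch below illustrates that before/after pattern in isolation; the function maybe_tag and its body are hypothetical examples, not code from this change, which only touches the names visible in the diff.

// Minimal sketch of the substitution applied throughout this diff.
// Before: #include <c10/util/Optional.h>, c10::nullopt, c10::make_optional
// After:  the standard header and names shown here.
#include <optional>
#include <string>

std::optional<std::string> maybe_tag(bool enabled) {
  if (!enabled) {
    return std::nullopt; // was: c10::nullopt
  }
  return std::make_optional<std::string>("tag"); // was: c10::make_optional
}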