Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 12:54:11 +08:00
[3/N] Change #include <c10/util/Optional.h> to #include <optional> (#130300)
Follows #130236

Pull Request resolved: https://github.com/pytorch/pytorch/pull/130300
Approved by: https://github.com/ezyang
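The substitution is mechanical: the c10 wrapper header is dropped and the standard-library spellings are used directly. A minimal sketch of the before/after pattern (the helper below is hypothetical and not part of the diff; it assumes, as this PR series does, that c10::optional is already an alias of std::optional, so behavior does not change):

    #include <optional>            // replaces #include <c10/util/Optional.h>

    #include <ATen/core/Tensor.h>

    // Hypothetical helper showing the old vs. new spelling.
    inline std::optional<at::Tensor> maybe_contiguous(const std::optional<at::Tensor>& t) {
      if (!t.has_value()) {
        return std::nullopt;                       // was: return c10::nullopt;
      }
      return std::make_optional(t->contiguous());  // was: c10::make_optional(t->contiguous());
    }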
@@ -303,7 +303,7 @@ Tensor FunctionalInverses::_nested_view_from_buffer_inverse(const Tensor& base,
   return Tensor();
 }
 
-Tensor FunctionalInverses::_nested_view_from_jagged_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, const Tensor& offsets, const Tensor& dummy, const std::optional<Tensor>& lengths, int64_t ragged_idx, const c10::optional<Tensor>& min_seqlen, const c10::optional<Tensor>& max_seqlen) {
+Tensor FunctionalInverses::_nested_view_from_jagged_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, const Tensor& offsets, const Tensor& dummy, const std::optional<Tensor>& lengths, int64_t ragged_idx, const std::optional<Tensor>& min_seqlen, const std::optional<Tensor>& max_seqlen) {
   auto values = at::_nested_get_values(mutated_view);
   if (inverse_return_mode != InverseReturnMode::NeverView) {
     return values;
@@ -321,8 +321,8 @@ Tensor FunctionalInverses::_nested_get_values_inverse(const Tensor& base, const
   auto max_seqlen = at::_nested_get_max_seqlen(base);
   auto nt = at::_nested_view_from_jagged(
       mutated_view, offsets, dummy, lengths, ragged_idx,
-      (min_seqlen.defined() ? c10::optional<Tensor>(min_seqlen) : std::nullopt),
-      (max_seqlen.defined() ? c10::optional<Tensor>(max_seqlen) : std::nullopt));
+      (min_seqlen.defined() ? std::optional<Tensor>(min_seqlen) : std::nullopt),
+      (max_seqlen.defined() ? std::optional<Tensor>(max_seqlen) : std::nullopt));
 
   if (inverse_return_mode != InverseReturnMode::NeverView) {
     return nt;
@@ -512,7 +512,7 @@ static optional<int64_t> maximum_indexable_location(
     IntArrayRef sizes, IntArrayRef strides, int64_t storage_offset) {
   auto result = native::storage_size_for(sizes, strides);
   if (result == 0) {
-    return nullopt;
+    return std::nullopt;
   }
   return result + storage_offset;
 }
@@ -18,8 +18,8 @@ static std::vector<at::Tensor> get_tensor_vector() {
   return tensors;
 }
 
-static std::vector<optional<at::Tensor>> get_boxed_opt_tensor_vector() {
-  std::vector<optional<at::Tensor>> optional_tensors;
+static std::vector<std::optional<at::Tensor>> get_boxed_opt_tensor_vector() {
+  std::vector<std::optional<at::Tensor>> optional_tensors;
   const size_t SIZE = 5;
   for (size_t i = 0; i < SIZE * 2; i++) {
     auto opt_tensor = (i % 2 == 0) ? optional<at::Tensor>(at::empty({0})) : nullopt;
@@ -234,7 +234,7 @@ TEST(ITensorListRefIteratorTest, Unboxed_Iterate) {
 
 TEST(IOptTensorListRefTest, Boxed_Iterate) {
   auto vec = get_boxed_opt_tensor_vector();
-  const List<optional<at::Tensor>> boxed(vec);
+  const List<std::optional<at::Tensor>> boxed(vec);
   at::IOptTensorListRef list(boxed);
   size_t i = 0;
   for (const auto t : list) {
@@ -130,7 +130,7 @@ void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names,
 optional<DimnameList> get_opt_names(const TensorImpl* impl) {
   const auto* meta = get_named_tensor_meta(impl);
   if (meta == nullptr) {
-    return nullopt;
+    return std::nullopt;
   } else {
     return meta->names();
   }
@@ -120,14 +120,14 @@ struct OptionalArray {
 
   operator std::optional<c10::ArrayRef<T>>() {
     if (!list) {
-      return nullopt;
+      return std::nullopt;
     }
     return *list;
   }
 
   operator c10::OptionalArrayRef<T>() {
     if (!list) {
-      return nullopt;
+      return std::nullopt;
     }
     return *list;
   }
@@ -2392,7 +2392,7 @@ inline PyObject* IValue::toPyObject() const {
 template <typename T>
 inline optional<T> IValue::toOptional() {
   if (this->isNone()) {
-    return nullopt;
+    return std::nullopt;
   }
   return this->to<T>();
 }
@@ -2400,7 +2400,7 @@ inline optional<T> IValue::toOptional() {
 template <typename T>
 inline optional<T> IValue::toOptional() const {
   if (this->isNone()) {
-    return nullopt;
+    return std::nullopt;
   }
   return this->to<T>();
 }
@@ -455,7 +455,7 @@ struct TORCH_API Type {
   // this method.
   std::string annotation_str(const TypePrinter& printer) const {
     if (printer) {
-      // the printer can return nullopt to fall through to the default impl
+      // the printer can return std::nullopt to fall through to the default impl
       if (auto renamed = printer(*this)) {
         return *renamed;
       }
@@ -38,7 +38,7 @@ optional<int64_t> valIfNonempty(optional<int64_t> maybe_empty, int64_t new_val)
   if (maybe_empty.has_value()) {
     return new_val;
   }
-  return nullopt;
+  return std::nullopt;
 }
 
 int64_t getPhysicalDim(const Tensor& tensor, bool has_batch_dim, int64_t logical_dim) {
@@ -25,7 +25,7 @@ static optional<int64_t> compute_stat_bdim(
   if (input_bdim.has_value() && !is_empty_tensor(stat)) {
     return 0;
   }
-  return nullopt;
+  return std::nullopt;
 }
 
 static Tensor padRight(const Tensor& tensor, optional<int64_t> has_bdim, int64_t logical_rank) {
@@ -393,7 +393,7 @@ std::optional<size_t> findAliasedOutput(const FunctionSchema& schema, const int6
       return res_idx; // for everything currently in native_functions, each input aliases at most one output (tensor list counts as one output)
     }
   }
-  return nullopt;
+  return std::nullopt;
 }
 
 #ifdef HAS_TORCH_SHOW_DISPATCH_TRACE
@@ -289,7 +289,7 @@ static optional<c10::SymInt> maximum_indexable_location(
     c10::SymIntArrayRef sizes, c10::SymIntArrayRef strides, const c10::SymInt& storage_offset) {
   auto result = native::storage_size_for(sizes, strides);
   if (result == 0) {
-    return nullopt;
+    return std::nullopt;
   }
   return result + storage_offset;
 }
@@ -330,7 +330,7 @@ struct OptionalHIPStreamGuardMasqueradingAsCUDA {
     if (r.has_value()) {
       return make_optional(HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, r.value()));
     } else {
-      return nullopt;
+      return std::nullopt;
     }
   }
 
@@ -339,7 +339,7 @@ struct OptionalHIPStreamGuardMasqueradingAsCUDA {
     if (r.has_value()) {
       return make_optional(HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, r.value()));
     } else {
-      return nullopt;
+      return std::nullopt;
     }
   }
 
@@ -868,13 +868,13 @@ namespace at::native {
 // Pinned memory will be helpful on Apple Silicon Macs with Unified memory as we
 // will be able to use SharedStorageMode for MTLBuffer allocations. This will
 // avoid extra copies on DataLoading operations.
-bool is_pinned_mps(const Tensor& self, c10::optional<Device> device) {
+bool is_pinned_mps(const Tensor& self, std::optional<Device> device) {
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_mps());
   return at::mps::_getSharedAllocator().isSharedBuffer(self.storage().data());
 }
 
 // torch.pin_memory() implementation
-Tensor _pin_memory_mps(const Tensor& self, c10::optional<Device> device) {
+Tensor _pin_memory_mps(const Tensor& self, std::optional<Device> device) {
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_mps());
   auto* shared_allocator = at::mps::getIMPSAllocator(true);
   TORCH_CHECK(shared_allocator, "unable to pin memory on a non-unified memory device");
@@ -59,7 +59,7 @@ static void mps_error_fallback(const c10::OperatorHandle& op, torch::jit::Stack*
 static Tensor slow_conv2d_forward_mps(const Tensor& self,
                                       const Tensor& weight,
                                       IntArrayRef kernel_size,
-                                      const c10::optional<Tensor>& bias,
+                                      const std::optional<Tensor>& bias,
                                       IntArrayRef stride,
                                       IntArrayRef padding) {
   TORCH_CHECK(self.device() == weight.device(),
@@ -13,7 +13,7 @@
 #include <c10/core/Storage.h>
 #include <c10/core/TensorOptions.h>
 #include <c10/util/Deprecated.h>
-#include <c10/util/Optional.h>
+#include <optional>
 
 ${static_dispatch_ops_headers}
 
@@ -68,7 +68,7 @@
 #include <c10/core/Storage.h>
 #include <c10/core/TensorOptions.h>
 #include <c10/util/Deprecated.h>
-#include <c10/util/Optional.h>
+#include <optional>
 #include <c10/util/OptionalArrayRef.h>
 
 #include <ATen/ops/from_blob.h>
@@ -6,7 +6,7 @@
 #include <c10/core/Storage.h>
 #include <c10/core/TensorOptions.h>
 #include <c10/util/Deprecated.h>
-#include <c10/util/Optional.h>
+#include <optional>
 #include <c10/core/QScheme.h>
 #include <ATen/core/Reduction.h>
 #include <ATen/core/Tensor.h>
@@ -21,7 +21,7 @@
 #include <c10/core/Storage.h>
 #include <c10/core/TensorOptions.h>
 #include <c10/util/Deprecated.h>
-#include <c10/util/Optional.h>
+#include <optional>
 #include <c10/core/QScheme.h>
 #include <ATen/core/Reduction.h>
 #include <ATen/core/Tensor.h>
@@ -6,7 +6,7 @@
 #include <c10/core/Storage.h>
 #include <c10/core/TensorOptions.h>
 #include <c10/util/Deprecated.h>
-#include <c10/util/Optional.h>
+#include <optional>
 #include <c10/core/QScheme.h>
 #include <ATen/core/Reduction.h>
 #include <ATen/TensorIterator.h>
@@ -17,7 +17,7 @@
 #include <ATen/DeviceGuard.h>
 #include <c10/core/TensorOptions.h>
 #include <ATen/core/Reduction.h>
-#include <c10/util/Optional.h>
+#include <optional>
 #include <ATen/TensorUtils.h>
 #include <ATen/Context.h>
 #include <ATen/TracerMode.h>
@@ -29,13 +29,13 @@ bool is_pinned(const Tensor& self, std::optional<at::Device> device) {
     return false;
   }
   // TODO: fetch scalar type from Tensor? But it doesn't really matter...
-  DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(c10::nullopt, self.layout(), device.value_or(at::kCUDA)));
+  DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(std::nullopt, self.layout(), device.value_or(at::kCUDA)));
   return at::_ops::is_pinned::redispatch(_dk, self, device);
 }
 
 at::Tensor _pin_memory(const Tensor& self, std::optional<at::Device> device) {
   TORCH_CHECK(self.device().is_cpu(), "cannot pin '", self.toString(), "' only dense CPU tensors can be pinned");
-  DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(c10::nullopt, self.layout(), device.value_or(at::kCUDA)));
+  DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(std::nullopt, self.layout(), device.value_or(at::kCUDA)));
   if (self.is_nested()) {
     constexpr auto nested_key_set = c10::DispatchKeySet(
         {c10::DispatchKey::NestedTensor, c10::DispatchKey::AutogradNestedTensor});
@@ -33,7 +33,7 @@
 #include <c10/util/ExclusivelyOwned.h>
 #include <c10/util/Half.h>
 #include <c10/core/UndefinedTensorImpl.h>
-#include <c10/util/Optional.h>
+#include <optional>
 #include <ATen/Tensor.h>
 #include <ATen/native/Resize.h>
 
@@ -56,15 +56,15 @@ inline bool has_internal_overlap_helper(const at::Tensor t) {
 inline Tensor to_meta(const Tensor& t) {
   if (!t.defined()) return t;
   return at::native::empty_strided_meta_symint(t.sym_sizes(), t.sym_strides(),
-      /*dtype=*/c10::make_optional(t.scalar_type()), /*layout=*/c10::make_optional(t.layout()),
-      /*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt);
+      /*dtype=*/std::make_optional(t.scalar_type()), /*layout=*/std::make_optional(t.layout()),
+      /*device=*/std::make_optional(c10::Device(kMeta)), /*pin_memory=*/std::nullopt);
 }
 
 inline std::optional<Tensor> to_meta(const std::optional<Tensor>& t) {
   if (t.has_value()) {
-    return c10::make_optional<Tensor>(to_meta(*t));
+    return std::make_optional<Tensor>(to_meta(*t));
   }
-  return c10::nullopt;
+  return std::nullopt;
 }
 
 inline std::vector<Tensor> to_meta(at::ITensorListRef t_list) {
@@ -24,7 +24,7 @@
 #include <c10/util/ExclusivelyOwned.h>
 #include <c10/util/Deprecated.h>
 #include <c10/util/MaybeOwned.h>
-#include <c10/util/Optional.h>
+#include <optional>
 #include <c10/util/OptionalArrayRef.h>
 #include <c10/util/intrusive_ptr.h>
 #include <c10/macros/Export.h>
@@ -398,7 +398,7 @@ class TORCH_API Tensor: public TensorBase {
   /// // f requires grad, has no operation creating it
   /// @endcode
 
-  /// \fn void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, std::optional<TensorList> inputs=c10::nullopt) const;
+  /// \fn void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=std::nullopt, bool create_graph=false, std::optional<TensorList> inputs=std::nullopt) const;
   ///
   /// Computes the gradient of current tensor with respect to graph leaves.
   ///
@@ -433,7 +433,7 @@ class TORCH_API Tensor: public TensorBase {
   /// the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients).
   /// It is an implementation detail on which the user should not rely.
   /// See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
-  void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, std::optional<TensorList> inputs=c10::nullopt) const {
+  void backward(const Tensor & gradient={}, std::optional<bool> retain_graph=std::nullopt, bool create_graph=false, std::optional<TensorList> inputs=std::nullopt) const {
     // NB: Adding this wrapper to _backward here because we'd like our
     // 'backwards' api to accept the 'inputs' argument optionally. Since code gen
     // currently does not support optional of TensorList our approach is to replace
@@ -4,7 +4,7 @@
 #include <torch/types.h>
 
 #include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
+#include <optional>
 
 #include <chrono>
 #include <utility>
@@ -28,7 +28,6 @@
 #include <c10/core/StreamGuard.h>
 #include <c10/util/AbortHandler.h>
 #include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
 #include <c10/util/ThreadLocal.h>
 #include <c10/util/irange.h>
 #include <c10/util/thread_name.h>
@@ -41,6 +40,7 @@
 #include <iostream>
 #include <memory>
 #include <mutex>
+#include <optional>
 #include <queue>
 #include <sstream>
 #include <string>
@@ -6,7 +6,7 @@
 
 #include <ATen/TensorGeometry.h>
 #include <ATen/core/DeprecatedTypeProperties.h>
-#include <c10/util/Optional.h>
+#include <optional>
 
 #include <cstdint>
 #include <memory>
@@ -7,11 +7,11 @@
 #include <c10/core/ScalarType.h>
 #include <c10/core/TensorOptions.h>
 #include <c10/util/Exception.h>
-#include <c10/util/Optional.h>
 #include <c10/util/intrusive_ptr.h>
 #include <c10/util/irange.h>
 #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
 #include <torch/csrc/distributed/c10d/ProcessGroupGloo.hpp>
+#include <optional>
 #include <stdexcept>
 #include <utility>
 
@@ -1,6 +1,5 @@
 #pragma once
 
-#include <c10/util/Optional.h>
 #include <torch/csrc/distributed/rpc/message.h>
 #include <torch/csrc/distributed/rpc/rpc_agent.h>
 #include <torch/csrc/distributed/rpc/rref_impl.h>
@@ -8,6 +7,7 @@
 #include <torch/csrc/distributed/rpc/utils.h>
 
 #include <atomic>
+#include <optional>
 
 namespace torch {
 namespace distributed {
@@ -1,12 +1,12 @@
 #pragma once
 
-#include <c10/util/Optional.h>
 #include <torch/csrc/Export.h>
 #include <torch/csrc/jit/codegen/fuser/kernel_spec.h>
 #include <torch/csrc/jit/ir/ir.h>
 
 #include <cstdint>
 #include <functional>
+#include <optional>
 
 namespace torch {
 namespace jit {
@@ -1,6 +1,5 @@
 #pragma once
 
-#include <c10/util/Optional.h>
 #include <torch/csrc/jit/frontend/tree.h>
 
 namespace torch {
@@ -1,10 +1,9 @@
 #pragma once
 
+#include <torch/csrc/Export.h>
+#include <optional>
 #include <string>
 #include <unordered_map>
 
-#include <c10/util/Optional.h>
-#include <torch/csrc/Export.h>
 
 namespace torch {
@@ -1,9 +1,6 @@
 #include <torch/csrc/jit/passes/onnx/eliminate_unused_items.h>
 #include <torch/csrc/jit/passes/onnx/helper.h>
 
-#include <c10/util/Optional.h>
-#include <algorithm>
-
 namespace torch {
 namespace jit {
 
@@ -3,7 +3,6 @@
 #include <torch/csrc/jit/passes/onnx/helper.h>
 #include <torch/torch.h>
 
-#include <c10/util/Optional.h>
 #include <c10/util/irange.h>
 #include <algorithm>
 
@@ -3,13 +3,13 @@
 #include <c10/core/Scalar.h>
 #include <c10/util/BFloat16.h>
 #include <c10/util/Half.h>
-#include <c10/util/Optional.h>
 #include <torch/csrc/lazy/core/permutation_util.h>
 #include <torch/csrc/lazy/core/shape.h>
 #include <torch/csrc/lazy/core/util.h>
 
 #include <complex>
 #include <functional>
+#include <optional>
 #include <tuple>
 #include <vector>
 
@@ -1,7 +1,6 @@
 #include <torch/csrc/lazy/ts_backend/tensor_aten_ops.h>
 
 #include <ATen/InferSize.h>
-#include <c10/util/Optional.h>
 #include <torch/csrc/autograd/variable.h>
 #include <torch/csrc/lazy/core/helpers.h>
 #include <torch/csrc/lazy/core/ir_builder.h>
@@ -15,6 +14,7 @@
 #include <torch/csrc/lazy/generated/LazyIr.h>
 #include <algorithm>
 #include <functional>
+#include <optional>
 
 namespace torch {
 namespace lazy {
@@ -532,10 +532,10 @@ def run_gen_lazy_tensor(
         for path in [
             "ATen/core/Formatting.h",
             "c10/core/ScalarType.h",
-            "c10/util/Optional.h",
             "torch/csrc/lazy/core/hash.h",
             "torch/csrc/lazy/core/ir.h",
             "torch/csrc/lazy/core/shape.h",
+            "optional",
             "vector",
         ]
     ],