From 03e35efbf733da28d9e1c5a4b1b203fe335b5f94 Mon Sep 17 00:00:00 2001
From: cyy
Date: Thu, 14 Sep 2023 20:52:21 +0000
Subject: [PATCH] replace torch::make_unique with std::make_unique (#108866)

It should be safe to remove the old torch::make_unique functions.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/108866
Approved by: https://github.com/albanD
---
 .../src/main/cpp/pytorch_jni_jit.cpp | 2 +-
 .../src/main/cpp/pytorch_jni_lite.cpp | 2 +-
 build_variables.bzl | 1 -
 test/cpp/api/dataloader.cpp | 4 +-
 test/cpp/api/memory.cpp | 8 ++--
 test/cpp/jit/test_alias_analysis.cpp | 3 +-
 test/cpp/jit/test_misc.cpp | 4 +-
 third_party/nvfuser/csrc/graph_fuser.cpp | 2 +-
 .../csrc/api/include/torch/data/dataloader.h | 5 +--
 .../api/include/torch/data/dataloader/base.h | 10 ++---
 .../include/torch/data/dataloader/stateful.h | 2 +-
 .../include/torch/data/dataloader/stateless.h | 4 +-
 .../api/include/torch/data/datasets/chunk.h | 3 +-
 .../include/torch/nn/modules/container/any.h | 3 +-
 .../nn/modules/container/any_module_holder.h | 4 +-
 .../torch/nn/modules/container/any_value.h | 7 ++--
 .../torch/nn/modules/container/named_any.h | 1 -
 torch/csrc/autograd/TraceTypeManual.cpp | 1 -
 torch/csrc/autograd/VariableTypeManual.cpp | 1 -
 torch/csrc/autograd/engine.cpp | 3 +-
 torch/csrc/distributed/c10d/reducer.cpp | 3 +-
 torch/csrc/jit/api/compilation_unit.h | 3 +-
 torch/csrc/jit/api/function_impl.h | 3 +-
 torch/csrc/jit/api/module.h | 1 -
 .../jit/codegen/fuser/cpu/fused_kernel.cpp | 3 +-
 torch/csrc/jit/frontend/error_report.cpp | 1 -
 torch/csrc/jit/frontend/ir_emitter.cpp | 2 +-
 torch/csrc/jit/ir/alias_analysis.cpp | 1 -
 torch/csrc/jit/mobile/import.cpp | 4 +-
 torch/csrc/jit/mobile/import_data.cpp | 2 +-
 .../csrc/jit/passes/constant_propagation.cpp | 1 -
 .../jit/passes/create_functional_graphs.cpp | 3 +-
 .../csrc/jit/passes/dead_code_elimination.cpp | 1 -
 torch/csrc/jit/passes/freeze_module.cpp | 2 +-
 .../csrc/jit/passes/frozen_concat_linear.cpp | 1 -
 .../jit/passes/frozen_graph_optimizations.cpp | 1 -
 .../csrc/jit/passes/frozen_ops_to_mkldnn.cpp | 2 +-
 .../jit/passes/integer_value_refinement.cpp | 1 -
 torch/csrc/jit/passes/peephole.cpp | 1 -
 .../jit/passes/peephole_alias_sensitive.cpp | 3 +-
 .../csrc/jit/passes/peephole_list_idioms.cpp | 3 +-
 .../quantization/insert_quant_dequant.cpp | 2 +-
 torch/csrc/jit/passes/remove_mutation.h | 1 -
 .../jit/passes/remove_redundant_profiles.cpp | 1 -
 torch/csrc/jit/passes/restore_mutation.h | 1 -
 .../jit/passes/symbolic_shape_analysis.cpp | 1 -
 torch/csrc/jit/passes/tensorexpr_fuser.cpp | 3 +-
 torch/csrc/jit/passes/utils/memory_dag.cpp | 1 -
 .../csrc/jit/passes/value_refinement_utils.h | 1 -
 .../csrc/jit/runtime/interpreter/code_impl.h | 2 +-
 torch/csrc/jit/runtime/static/fusion.cpp | 2 +-
 torch/csrc/jit/serialization/import.cpp | 6 +--
 torch/csrc/utils/invalid_arguments.cpp | 13 +++---
 torch/csrc/utils/memory.h | 41 -------------------
 54 files changed, 53 insertions(+), 134 deletions(-)
 delete mode 100644 torch/csrc/utils/memory.h

diff --git a/android/pytorch_android/src/main/cpp/pytorch_jni_jit.cpp b/android/pytorch_android/src/main/cpp/pytorch_jni_jit.cpp
index ccb13572601f..c550d0261625 100644
--- a/android/pytorch_android/src/main/cpp/pytorch_jni_jit.cpp
+++ b/android/pytorch_android/src/main/cpp/pytorch_jni_jit.cpp
@@ -167,7 +167,7 @@ class PytorchJni : public facebook::jni::HybridClass {
         assetName->toStdString().c_str());
   }
   JITCallGuard guard;
-  module_ = torch::jit::load(torch::make_unique(
+  module_ = torch::jit::load(std::make_unique(
      assetBuffer,
AAsset_getLength(asset))); AAsset_close(asset); module_.eval(); diff --git a/android/pytorch_android/src/main/cpp/pytorch_jni_lite.cpp b/android/pytorch_android/src/main/cpp/pytorch_jni_lite.cpp index e103e5b309ae..db16d2347153 100644 --- a/android/pytorch_android/src/main/cpp/pytorch_jni_lite.cpp +++ b/android/pytorch_android/src/main/cpp/pytorch_jni_lite.cpp @@ -132,7 +132,7 @@ class PytorchJni : public facebook::jni::HybridClass { } LiteJITCallGuard guard; module_ = - torch::jit::_load_for_mobile(torch::make_unique( + torch::jit::_load_for_mobile(std::make_unique( assetBuffer, AAsset_getLength(asset))); AAsset_close(asset); deviceType_ = deviceJniCodeToDeviceType(device); diff --git a/build_variables.bzl b/build_variables.bzl index ad2dc1debe22..00895cdfddcb 100644 --- a/build_variables.bzl +++ b/build_variables.bzl @@ -71,7 +71,6 @@ def libtorch_generated_sources(gencode_pattern): # copied from https://github.com/pytorch/pytorch/blob/f99a693cd9ff7a9b5fdc71357dac66b8192786d3/aten/src/ATen/core/CMakeLists.txt jit_core_headers = [ - "torch/csrc/utils/memory.h", "torch/csrc/Export.h", "torch/csrc/jit/frontend/source_range.h", "torch/csrc/jit/serialization/callstack_debug_info_serialization.h", diff --git a/test/cpp/api/dataloader.cpp b/test/cpp/api/dataloader.cpp index 09a842f9323a..a374351b25a7 100644 --- a/test/cpp/api/dataloader.cpp +++ b/test/cpp/api/dataloader.cpp @@ -872,7 +872,7 @@ TEST(DataTest, DistributedRandomSamplerMultiReplicaProduceCorrectSamples) { for (const auto i : c10::irange(num_replicas)) { samplers.emplace_back( - torch::make_unique( + std::make_unique( sample_count, num_replicas, i, allow_duplicates)); } @@ -969,7 +969,7 @@ TEST(DataTest, DistributedSequentialSamplerMultiReplicaProduceCorrectSamples) { for (const auto i : c10::irange(num_replicas)) { samplers.emplace_back( - torch::make_unique( + std::make_unique( sample_count, num_replicas, i, allow_duplicates)); } diff --git a/test/cpp/api/memory.cpp b/test/cpp/api/memory.cpp index e80d632354e8..d9f44ea3f7a4 100644 --- a/test/cpp/api/memory.cpp +++ b/test/cpp/api/memory.cpp @@ -1,7 +1,5 @@ #include -#include - #include struct TestValue { @@ -13,7 +11,7 @@ struct TestValue { }; TEST(MakeUniqueTest, ForwardRvaluesCorrectly) { - auto ptr = torch::make_unique(123); + auto ptr = std::make_unique(123); ASSERT_FALSE(ptr->lvalue_.has_value()); ASSERT_TRUE(ptr->rvalue_.has_value()); ASSERT_EQ(*ptr->rvalue_, 123); @@ -21,7 +19,7 @@ TEST(MakeUniqueTest, ForwardRvaluesCorrectly) { TEST(MakeUniqueTest, ForwardLvaluesCorrectly) { int x = 5; - auto ptr = torch::make_unique(x); + auto ptr = std::make_unique(x); ASSERT_TRUE(ptr->lvalue_.has_value()); ASSERT_EQ(*ptr->lvalue_, 5); ASSERT_FALSE(ptr->rvalue_.has_value()); @@ -29,7 +27,7 @@ TEST(MakeUniqueTest, ForwardLvaluesCorrectly) { TEST(MakeUniqueTest, CanConstructUniquePtrOfArray) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) - auto ptr = torch::make_unique(3); + auto ptr = std::make_unique(3); // Value initialization is required by the standard. 
ASSERT_EQ(ptr[0], 0); ASSERT_EQ(ptr[1], 0); diff --git a/test/cpp/jit/test_alias_analysis.cpp b/test/cpp/jit/test_alias_analysis.cpp index 13955108bfbd..deb0e6cc1afc 100644 --- a/test/cpp/jit/test_alias_analysis.cpp +++ b/test/cpp/jit/test_alias_analysis.cpp @@ -7,7 +7,6 @@ #include #include #include -#include #include @@ -23,7 +22,7 @@ class TopologicalMoveTest : public ::testing::Test { protected: TopologicalMoveTest() { createGraph(); - aliasDb = torch::make_unique(graph); + aliasDb = std::make_unique(graph); } // Nodes are named after their output. diff --git a/test/cpp/jit/test_misc.cpp b/test/cpp/jit/test_misc.cpp index dd3c8f688748..ad91de0a6a54 100644 --- a/test/cpp/jit/test_misc.cpp +++ b/test/cpp/jit/test_misc.cpp @@ -3007,7 +3007,7 @@ graph(%x.1 : Tensor): return (%y))IR", &*graph); { - auto func = torch::make_unique( + auto func = std::make_unique( "name", graph, [](GraphFunction&) {}, ExecutorExecutionMode::PROFILING); auto a = at::rand({2, 2, 2}, TensorOptions(kCPU).dtype(at::kFloat)); Stack stack = {a}; @@ -3020,7 +3020,7 @@ graph(%x.1 : Tensor): ->run(*g); } { - auto func = torch::make_unique( + auto func = std::make_unique( "name", graph, [](GraphFunction&) {}, ExecutorExecutionMode::SIMPLE); auto a = at::rand({2, 2, 2}, TensorOptions(kCPU).dtype(at::kFloat)); Stack stack = {a}; diff --git a/third_party/nvfuser/csrc/graph_fuser.cpp b/third_party/nvfuser/csrc/graph_fuser.cpp index 6e486d05b7c2..f74cff84eab7 100644 --- a/third_party/nvfuser/csrc/graph_fuser.cpp +++ b/third_party/nvfuser/csrc/graph_fuser.cpp @@ -1147,7 +1147,7 @@ struct CudaGraphFuser { } void refreshAliasDb() { - aliasDb_ = torch::make_unique(graph_); + aliasDb_ = std::make_unique(graph_); } void removeNoopBinaryOps(Block* block) { diff --git a/torch/csrc/api/include/torch/data/dataloader.h b/torch/csrc/api/include/torch/data/dataloader.h index 61b220ad4819..06ea83d8a232 100644 --- a/torch/csrc/api/include/torch/data/dataloader.h +++ b/torch/csrc/api/include/torch/data/dataloader.h @@ -3,7 +3,6 @@ #include #include -#include #include #include @@ -23,7 +22,7 @@ torch::disable_if_t< Dataset::is_stateful, std::unique_ptr>> make_data_loader(Dataset dataset, Sampler sampler, DataLoaderOptions options) { - return torch::make_unique>( + return std::make_unique>( std::move(dataset), std::move(sampler), std::move(options)); } @@ -51,7 +50,7 @@ template > std::unique_ptr> make_data_loader( Dataset dataset, DataLoaderOptions options = DataLoaderOptions()) { - return torch::make_unique>( + return std::make_unique>( std::move(dataset), std::move(options)); } } // namespace data diff --git a/torch/csrc/api/include/torch/data/dataloader/base.h b/torch/csrc/api/include/torch/data/dataloader/base.h index 47d2db48e556..72c4f337fbf4 100644 --- a/torch/csrc/api/include/torch/data/dataloader/base.h +++ b/torch/csrc/api/include/torch/data/dataloader/base.h @@ -8,7 +8,6 @@ #include #include -#include #include #include @@ -62,15 +61,14 @@ class DataLoaderBase { "Attempted to get a new DataLoader iterator " "while another iterator is not yet exhausted"); reset(); - return Iterator(torch::make_unique>( + return Iterator(std::make_unique>( [this] { return this->next(); })); } /// Returns a special "sentinel" iterator that compares equal with a /// non-sentinel iterator once the DataLoader is exhausted. Iterator end() { - return Iterator( - torch::make_unique>()); + return Iterator(std::make_unique>()); } /// Joins the DataLoader's worker threads and drains internal queues. 
@@ -215,10 +213,10 @@ class DataLoaderBase { /// `enforce_ordering` option. std::unique_ptr> new_sequencer() { if (options_.enforce_ordering) { - return torch::make_unique>( + return std::make_unique>( options_.max_jobs); } - return torch::make_unique>(); + return std::make_unique>(); } /// The options the DataLoader was configured with. diff --git a/torch/csrc/api/include/torch/data/dataloader/stateful.h b/torch/csrc/api/include/torch/data/dataloader/stateful.h index 8cd98c83d8e0..e8eb85861f77 100644 --- a/torch/csrc/api/include/torch/data/dataloader/stateful.h +++ b/torch/csrc/api/include/torch/data/dataloader/stateful.h @@ -38,7 +38,7 @@ class StatefulDataLoader : public DataLoaderBase< StatefulDataLoader(Dataset dataset, DataLoaderOptions options) : super( std::move(options), - torch::make_unique(std::move(dataset))) { + std::make_unique(std::move(dataset))) { for (const auto w : c10::irange(this->options_.workers)) { // As opposed to the stateless case, here all worker threads access the // same underlying dataset. diff --git a/torch/csrc/api/include/torch/data/dataloader/stateless.h b/torch/csrc/api/include/torch/data/dataloader/stateless.h index 1965adc44064..9c2612bb86c3 100644 --- a/torch/csrc/api/include/torch/data/dataloader/stateless.h +++ b/torch/csrc/api/include/torch/data/dataloader/stateless.h @@ -3,8 +3,6 @@ #include #include -#include - #include #include @@ -52,7 +50,7 @@ class StatelessDataLoader : public DataLoaderBase< } if (this->options_.workers == 0) { this->main_thread_dataset_ = - torch::make_unique(std::move(dataset)); + std::make_unique(std::move(dataset)); } } diff --git a/torch/csrc/api/include/torch/data/datasets/chunk.h b/torch/csrc/api/include/torch/data/datasets/chunk.h index e3ce0500a9ec..1f211be9ab6f 100644 --- a/torch/csrc/api/include/torch/data/datasets/chunk.h +++ b/torch/csrc/api/include/torch/data/datasets/chunk.h @@ -2,7 +2,6 @@ #include #include -#include #include #include #include @@ -391,7 +390,7 @@ class ChunkDataset final // Throw out any existing cached batch in the buffer and re-creates a new // chunk buffer. 
- batch_buffer_ = torch::make_unique< + batch_buffer_ = std::make_unique< detail::BatchDataBuffer>( options_.batch_size(), example_sampler_, options_.cache_size()); diff --git a/torch/csrc/api/include/torch/nn/modules/container/any.h b/torch/csrc/api/include/torch/nn/modules/container/any.h index b96efc4812c8..05983b1ea106 100644 --- a/torch/csrc/api/include/torch/nn/modules/container/any.h +++ b/torch/csrc/api/include/torch/nn/modules/container/any.h @@ -8,7 +8,6 @@ #include #include -#include #include #include @@ -340,7 +339,7 @@ std::unique_ptr AnyModule::make_holder( !std::is_void::value, "AnyModule cannot store modules that return void " "(you can return a dummy value)."); - return torch::make_unique< + return std::make_unique< AnyModuleHolder, ArgumentTypes...>>( std::move(module)); } diff --git a/torch/csrc/api/include/torch/nn/modules/container/any_module_holder.h b/torch/csrc/api/include/torch/nn/modules/container/any_module_holder.h index ef9d483178c8..cd1dca9ff7a0 100644 --- a/torch/csrc/api/include/torch/nn/modules/container/any_module_holder.h +++ b/torch/csrc/api/include/torch/nn/modules/container/any_module_holder.h @@ -116,12 +116,12 @@ struct AnyModuleHolder : public AnyModulePlaceholder { } std::unique_ptr copy() const override { - return torch::make_unique(*this); + return std::make_unique(*this); } std::unique_ptr clone_module( optional device) const override { - return torch::make_unique( + return std::make_unique( std::dynamic_pointer_cast(module->clone(device))); } diff --git a/torch/csrc/api/include/torch/nn/modules/container/any_value.h b/torch/csrc/api/include/torch/nn/modules/container/any_value.h index 3a836f759230..1f8cc55f5928 100644 --- a/torch/csrc/api/include/torch/nn/modules/container/any_value.h +++ b/torch/csrc/api/include/torch/nn/modules/container/any_value.h @@ -6,7 +6,6 @@ #include #include -#include #include #include @@ -41,8 +40,8 @@ class AnyValue { template // NOLINTNEXTLINE(bugprone-forwarding-reference-overload) explicit AnyValue(T&& value) - : content_( - torch::make_unique>>(std::forward(value))) {} + : content_(std::make_unique>>(std::forward(value))) { + } /// Returns a pointer to the value contained in the `AnyValue` if the type /// passed as template parameter matches the type of the value stored, and @@ -110,7 +109,7 @@ class AnyValue { explicit Holder(U&& value_) noexcept : Placeholder(typeid(T)), value(std::forward(value_)) {} std::unique_ptr clone() const override { - return torch::make_unique>(value); + return std::make_unique>(value); } T value; }; diff --git a/torch/csrc/api/include/torch/nn/modules/container/named_any.h b/torch/csrc/api/include/torch/nn/modules/container/named_any.h index 5342ba39cd21..00d39de17f40 100644 --- a/torch/csrc/api/include/torch/nn/modules/container/named_any.h +++ b/torch/csrc/api/include/torch/nn/modules/container/named_any.h @@ -7,7 +7,6 @@ #include #include -#include #include #include diff --git a/torch/csrc/autograd/TraceTypeManual.cpp b/torch/csrc/autograd/TraceTypeManual.cpp index d08de86b82c0..729963d02299 100644 --- a/torch/csrc/autograd/TraceTypeManual.cpp +++ b/torch/csrc/autograd/TraceTypeManual.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include using namespace at; diff --git a/torch/csrc/autograd/VariableTypeManual.cpp b/torch/csrc/autograd/VariableTypeManual.cpp index b42d22d0fa95..c9d99d23eeb7 100644 --- a/torch/csrc/autograd/VariableTypeManual.cpp +++ b/torch/csrc/autograd/VariableTypeManual.cpp @@ -9,7 +9,6 @@ #include #include #include -#include #include #include diff 
--git a/torch/csrc/autograd/engine.cpp b/torch/csrc/autograd/engine.cpp index 9e6a04dec6b7..0e67b363db43 100644 --- a/torch/csrc/autograd/engine.cpp +++ b/torch/csrc/autograd/engine.cpp @@ -7,7 +7,6 @@ #include #include #include -#include #include #include @@ -1586,7 +1585,7 @@ void GraphTask::init_to_execute( // In terms of populating the rest of exec_info though, you can basically // think of this as the same as setting `needed_` is true directly. if (!info.captures_) { - info.captures_ = make_unique>(); + info.captures_ = std::make_unique>(); } info.captures_->emplace_back(output_edge.input_nr, output_idx++); } diff --git a/torch/csrc/distributed/c10d/reducer.cpp b/torch/csrc/distributed/c10d/reducer.cpp index e2b9637682ab..ca4aa8b7b248 100644 --- a/torch/csrc/distributed/c10d/reducer.cpp +++ b/torch/csrc/distributed/c10d/reducer.cpp @@ -20,7 +20,6 @@ #include #include #include -#include namespace c10d { namespace { @@ -185,7 +184,7 @@ Reducer::Reducer( // Hook to execute after the gradient accumulator has executed. hooks_.emplace_back( grad_accumulator->add_post_hook( - torch::make_unique( + std::make_unique( [=](const torch::autograd::variable_list& outputs, const torch::autograd::variable_list& /* unused */) { #ifndef _WIN32 diff --git a/torch/csrc/jit/api/compilation_unit.h b/torch/csrc/jit/api/compilation_unit.h index 6313923e2deb..7eca228286c8 100644 --- a/torch/csrc/jit/api/compilation_unit.h +++ b/torch/csrc/jit/api/compilation_unit.h @@ -8,7 +8,6 @@ #include #include -#include #include #include @@ -132,7 +131,7 @@ struct TORCH_API CompilationUnit { if (shouldMangle) { name = mangle(name); } - auto fn = torch::make_unique( + auto fn = std::make_unique( std::move(name), std::move(graph), nullptr); auto ret = fn.get(); register_function(std::move(fn)); diff --git a/torch/csrc/jit/api/function_impl.h b/torch/csrc/jit/api/function_impl.h index 4a46c90df5c4..fec9dbbf2496 100644 --- a/torch/csrc/jit/api/function_impl.h +++ b/torch/csrc/jit/api/function_impl.h @@ -3,7 +3,6 @@ #include #include #include -#include namespace torch { namespace jit { @@ -77,7 +76,7 @@ struct TORCH_API GraphFunction : public Function { } Function& setSchema(FunctionSchema schema) override { - schema_ = make_unique(std::move(schema)); + schema_ = std::make_unique(std::move(schema)); return *this; } diff --git a/torch/csrc/jit/api/module.h b/torch/csrc/jit/api/module.h index 570ce12d1fa9..efd57621b675 100644 --- a/torch/csrc/jit/api/module.h +++ b/torch/csrc/jit/api/module.h @@ -11,7 +11,6 @@ #include #include #include -#include #include #include diff --git a/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp b/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp index 8da7e63a6935..57aa22a69bec 100644 --- a/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp +++ b/torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include @@ -333,7 +332,7 @@ FusedKernelCPU::FusedKernelCPU( runCompiler(cpp_file.name(), so_file.name()); if (debugFuser() >= 2) disas(so_file.name()); - so_lib = make_unique(so_file.name().c_str()); + so_lib = std::make_unique(so_file.name().c_str()); #pragma GCC diagnostic ignored "-Wpedantic" kernel = reinterpret_cast(so_lib->sym(name_.c_str())); diff --git a/torch/csrc/jit/frontend/error_report.cpp b/torch/csrc/jit/frontend/error_report.cpp index f1f4f43ab6da..e50c688311f9 100644 --- a/torch/csrc/jit/frontend/error_report.cpp +++ b/torch/csrc/jit/frontend/error_report.cpp @@ -2,7 +2,6 @@ #include #include -#include namespace torch::jit { 
diff --git a/torch/csrc/jit/frontend/ir_emitter.cpp b/torch/csrc/jit/frontend/ir_emitter.cpp index e9966397d098..cd0df41aac7c 100644 --- a/torch/csrc/jit/frontend/ir_emitter.cpp +++ b/torch/csrc/jit/frontend/ir_emitter.cpp @@ -5432,7 +5432,7 @@ std::unique_ptr CompilationUnit::define( auto graph = std::make_shared(); graph->set_op_version(operator_set_version); - auto fn = torch::make_unique(std::move(name), graph, creator); + auto fn = std::make_unique(std::move(name), graph, creator); if (self) { // Register this as a method on `self`'s type if (type == CompilationUnit::FunctionType::Hook) { diff --git a/torch/csrc/jit/ir/alias_analysis.cpp b/torch/csrc/jit/ir/alias_analysis.cpp index 87031ec5867f..efdf287b9d86 100644 --- a/torch/csrc/jit/ir/alias_analysis.cpp +++ b/torch/csrc/jit/ir/alias_analysis.cpp @@ -8,7 +8,6 @@ #include #include #include -#include #include namespace torch::jit { diff --git a/torch/csrc/jit/mobile/import.cpp b/torch/csrc/jit/mobile/import.cpp index 2de664b2b2e6..55081c5adee7 100644 --- a/torch/csrc/jit/mobile/import.cpp +++ b/torch/csrc/jit/mobile/import.cpp @@ -525,7 +525,7 @@ mobile::Module _load_for_mobile_impl( } const size_t model_size = rai != nullptr ? rai->size() : 0; - auto reader = torch::make_unique(std::move(rai)); + auto reader = std::make_unique(std::move(rai)); if (module_load_options & MobileModuleLoadOptions::PARSE_ALL_EXTRA_FILE_MAPS) { // ExtraFilesMap is serialized with a "extra/", hence it is necessary to @@ -694,7 +694,7 @@ void _load_extra_only_for_mobile( case FileFormat::ZipFileFormat: { std::unique_ptr rai = std::make_unique(filename); - auto reader = torch::make_unique(std::move(rai)); + auto reader = std::make_unique(std::move(rai)); BytecodeDeserializer deserializer(std::move(reader)); deserializer.deserialize_only_extra(device, extra_files); break; diff --git a/torch/csrc/jit/mobile/import_data.cpp b/torch/csrc/jit/mobile/import_data.cpp index 960caf439365..e9380bbf97f0 100644 --- a/torch/csrc/jit/mobile/import_data.cpp +++ b/torch/csrc/jit/mobile/import_data.cpp @@ -170,7 +170,7 @@ c10::IValue IValueUnpickler::readArchive( std::map load_parameters_from_zip( std::unique_ptr rai, c10::optional device) { - auto reader = torch::make_unique(std::move(rai)); + auto reader = std::make_unique(std::move(rai)); IValueUnpickler unpickler(std::move(reader)); auto result = unpickler.deserialize(device).toGenericDict(); std::map map; diff --git a/torch/csrc/jit/passes/constant_propagation.cpp b/torch/csrc/jit/passes/constant_propagation.cpp index 4bd656788d77..cd3fb6b1e2b0 100644 --- a/torch/csrc/jit/passes/constant_propagation.cpp +++ b/torch/csrc/jit/passes/constant_propagation.cpp @@ -13,7 +13,6 @@ #include #include #include -#include #include diff --git a/torch/csrc/jit/passes/create_functional_graphs.cpp b/torch/csrc/jit/passes/create_functional_graphs.cpp index c929e311b376..b339a93603de 100644 --- a/torch/csrc/jit/passes/create_functional_graphs.cpp +++ b/torch/csrc/jit/passes/create_functional_graphs.cpp @@ -4,7 +4,6 @@ #include #include #include -#include #include #include @@ -28,7 +27,7 @@ struct FunctionalGraphSlicer { // subgraphs, invalidating the AliasDb, so we need to do our analysis // first. 
for (size_t i = 0; i < MAX_NUM_ITERATIONS && changed; ++i) { - aliasDb_ = torch::make_unique(graph_); + aliasDb_ = std::make_unique(graph_); AnalyzeFunctionalSubset(graph_->block()); changed = CreateFunctionalGraphsImpl(graph_->block()); } diff --git a/torch/csrc/jit/passes/dead_code_elimination.cpp b/torch/csrc/jit/passes/dead_code_elimination.cpp index 2f6a6de86dbf..7c52551a3775 100644 --- a/torch/csrc/jit/passes/dead_code_elimination.cpp +++ b/torch/csrc/jit/passes/dead_code_elimination.cpp @@ -4,7 +4,6 @@ #include #include #include -#include #include diff --git a/torch/csrc/jit/passes/freeze_module.cpp b/torch/csrc/jit/passes/freeze_module.cpp index 434488d69ce5..6fb388470611 100644 --- a/torch/csrc/jit/passes/freeze_module.cpp +++ b/torch/csrc/jit/passes/freeze_module.cpp @@ -344,7 +344,7 @@ class AttributePropagator { void recordMutableAttrs(std::shared_ptr& graph) { std::stack blocks({graph->block()}); std::unique_ptr aliasDb = - torch::make_unique(graph, /* isFrozen */ true); + std::make_unique(graph, /* isFrozen */ true); while (!blocks.empty()) { Block* block = blocks.top(); blocks.pop(); diff --git a/torch/csrc/jit/passes/frozen_concat_linear.cpp b/torch/csrc/jit/passes/frozen_concat_linear.cpp index 6984d486be62..2cad8353b54e 100644 --- a/torch/csrc/jit/passes/frozen_concat_linear.cpp +++ b/torch/csrc/jit/passes/frozen_concat_linear.cpp @@ -9,7 +9,6 @@ #include #include #include -#include #ifndef AT_PER_OPERATOR_HEADERS #include diff --git a/torch/csrc/jit/passes/frozen_graph_optimizations.cpp b/torch/csrc/jit/passes/frozen_graph_optimizations.cpp index b807aaa551a6..a370e1d5ec82 100644 --- a/torch/csrc/jit/passes/frozen_graph_optimizations.cpp +++ b/torch/csrc/jit/passes/frozen_graph_optimizations.cpp @@ -7,7 +7,6 @@ #include #include #include -#include namespace torch { namespace jit { diff --git a/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp b/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp index ef6ee1e81477..6ae259b207f4 100644 --- a/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp +++ b/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp @@ -110,7 +110,7 @@ void InplaceMKLDNNSubgraph(std::shared_ptr graph) { // CALCULATE ALIASING SETS - auto aliasDb = torch::make_unique(graph); + auto aliasDb = std::make_unique(graph); // map from Value to its Aliasing Set std::unordered_map alias_mapping; diff --git a/torch/csrc/jit/passes/integer_value_refinement.cpp b/torch/csrc/jit/passes/integer_value_refinement.cpp index 8a6fe1ebe8ee..e3a339efe6d7 100644 --- a/torch/csrc/jit/passes/integer_value_refinement.cpp +++ b/torch/csrc/jit/passes/integer_value_refinement.cpp @@ -3,7 +3,6 @@ #include #include #include -#include #include diff --git a/torch/csrc/jit/passes/peephole.cpp b/torch/csrc/jit/passes/peephole.cpp index 7e5271f072eb..b1e38697ef59 100644 --- a/torch/csrc/jit/passes/peephole.cpp +++ b/torch/csrc/jit/passes/peephole.cpp @@ -12,7 +12,6 @@ #include #include #include -#include namespace torch { namespace jit { diff --git a/torch/csrc/jit/passes/peephole_alias_sensitive.cpp b/torch/csrc/jit/passes/peephole_alias_sensitive.cpp index 8e35bd8e601c..65a4c26554d3 100644 --- a/torch/csrc/jit/passes/peephole_alias_sensitive.cpp +++ b/torch/csrc/jit/passes/peephole_alias_sensitive.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include namespace torch { @@ -20,7 +19,7 @@ struct PeepholeOptimizeAliasSensitiveImpl { std::shared_ptr graph, bool shape_peepholes) : graph_(std::move(graph)), - aliasDb_(torch::make_unique(graph_)), + aliasDb_(std::make_unique(graph_)), 
shape_peepholes_(shape_peepholes) {} bool run() { diff --git a/torch/csrc/jit/passes/peephole_list_idioms.cpp b/torch/csrc/jit/passes/peephole_list_idioms.cpp index 4464bbd83588..15f4c807335f 100644 --- a/torch/csrc/jit/passes/peephole_list_idioms.cpp +++ b/torch/csrc/jit/passes/peephole_list_idioms.cpp @@ -8,7 +8,6 @@ #include #include #include -#include #include #include @@ -161,7 +160,7 @@ struct PeepholeOptimizeListIdiomsImpl { std::shared_ptr graph, bool refine_list_len) : graph_(std::move(graph)), - aliasDb_(torch::make_unique(graph_)), + aliasDb_(std::make_unique(graph_)), refine_list_len_(refine_list_len) {} bool run() { diff --git a/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp b/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp index a5cafa8ef353..93683a308dc8 100644 --- a/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp +++ b/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp @@ -971,7 +971,7 @@ std::unique_ptr SubGraphCloneHelper::buildGraphFromNodes( auto build_observer_graph = [&](GraphFunction& func) { buildObserverSubgraph(nodes, func.graph()); }; - return torch::make_unique( + return std::make_unique( name, observer_subgraph, build_observer_graph); } diff --git a/torch/csrc/jit/passes/remove_mutation.h b/torch/csrc/jit/passes/remove_mutation.h index 0bbab73588a5..eb8cf195ee4c 100644 --- a/torch/csrc/jit/passes/remove_mutation.h +++ b/torch/csrc/jit/passes/remove_mutation.h @@ -4,7 +4,6 @@ #include #include #include -#include namespace torch { namespace jit { diff --git a/torch/csrc/jit/passes/remove_redundant_profiles.cpp b/torch/csrc/jit/passes/remove_redundant_profiles.cpp index 026aae97ab84..15fd593371ae 100644 --- a/torch/csrc/jit/passes/remove_redundant_profiles.cpp +++ b/torch/csrc/jit/passes/remove_redundant_profiles.cpp @@ -4,7 +4,6 @@ #include #include #include -#include namespace torch { namespace jit { diff --git a/torch/csrc/jit/passes/restore_mutation.h b/torch/csrc/jit/passes/restore_mutation.h index cf128fbd7b08..48ce9fdb9ed2 100644 --- a/torch/csrc/jit/passes/restore_mutation.h +++ b/torch/csrc/jit/passes/restore_mutation.h @@ -5,7 +5,6 @@ #include #include #include -#include namespace torch { namespace jit { diff --git a/torch/csrc/jit/passes/symbolic_shape_analysis.cpp b/torch/csrc/jit/passes/symbolic_shape_analysis.cpp index 530cc8ce3f1c..a35f1ab0f3f8 100644 --- a/torch/csrc/jit/passes/symbolic_shape_analysis.cpp +++ b/torch/csrc/jit/passes/symbolic_shape_analysis.cpp @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/torch/csrc/jit/passes/tensorexpr_fuser.cpp b/torch/csrc/jit/passes/tensorexpr_fuser.cpp index 360b9f4b8c3f..3ebaaf5d1687 100644 --- a/torch/csrc/jit/passes/tensorexpr_fuser.cpp +++ b/torch/csrc/jit/passes/tensorexpr_fuser.cpp @@ -23,7 +23,6 @@ #include #include #include -#include #include @@ -550,7 +549,7 @@ class TensorExprFuser { } void run() { - aliasDb_ = torch::make_unique(graph_); + aliasDb_ = std::make_unique(graph_); RemoveRedundantProfiles(graph_); GRAPH_DUMP("After removing redundant profile nodes: ", graph_); createFusionGroups(graph_->block()); diff --git a/torch/csrc/jit/passes/utils/memory_dag.cpp b/torch/csrc/jit/passes/utils/memory_dag.cpp index 9d57f49bfe3d..3ecbbb8273a4 100644 --- a/torch/csrc/jit/passes/utils/memory_dag.cpp +++ b/torch/csrc/jit/passes/utils/memory_dag.cpp @@ -1,7 +1,6 @@ #include #include -#include #include #include diff --git a/torch/csrc/jit/passes/value_refinement_utils.h 
b/torch/csrc/jit/passes/value_refinement_utils.h index 3d6f705d3d61..aa2ab4ea421f 100644 --- a/torch/csrc/jit/passes/value_refinement_utils.h +++ b/torch/csrc/jit/passes/value_refinement_utils.h @@ -8,7 +8,6 @@ #include #include #include -#include namespace torch { namespace jit { diff --git a/torch/csrc/jit/runtime/interpreter/code_impl.h b/torch/csrc/jit/runtime/interpreter/code_impl.h index 24e5ac3fa7b4..e6fcc4ec04ec 100644 --- a/torch/csrc/jit/runtime/interpreter/code_impl.h +++ b/torch/csrc/jit/runtime/interpreter/code_impl.h @@ -562,7 +562,7 @@ struct CodeImpl { }; auto empty_graph = std::make_shared(); - auto func = torch::make_unique( + auto func = std::make_unique( "bailout", empty_graph, build_bailout_graph); function_table_.emplace_back(func.get()); bailout_functions_.emplace_back(std::move(func)); diff --git a/torch/csrc/jit/runtime/static/fusion.cpp b/torch/csrc/jit/runtime/static/fusion.cpp index b4d4f7f7fa28..2c24e1206e87 100644 --- a/torch/csrc/jit/runtime/static/fusion.cpp +++ b/torch/csrc/jit/runtime/static/fusion.cpp @@ -32,7 +32,7 @@ void fuseStaticSubgraphs(std::shared_ptr graph, size_t min_size) { RemoveTensorMutation(graph); ConstantPropagation(graph); EliminateDeadCode(graph); - auto aliasDb = torch::make_unique(graph); + auto aliasDb = std::make_unique(graph); createFusionGroups(graph->block(), aliasDb.get(), min_size); ConstantPooling(graph); ConstantPropagation(graph); diff --git a/torch/csrc/jit/serialization/import.cpp b/torch/csrc/jit/serialization/import.cpp index c0f6e3aa1551..bcefa512bab3 100644 --- a/torch/csrc/jit/serialization/import.cpp +++ b/torch/csrc/jit/serialization/import.cpp @@ -383,7 +383,7 @@ Module import_ir_module( // NOTE: Zipformat can be large files. So using stream version directly // instead of reading the file all at once. if (getFileFormat(in) != FileFormat::FlatbufferFileFormat) { - auto reader = torch::make_unique(&in); + auto reader = std::make_unique(&in); reader->setShouldLoadDebugSymbol(load_debug_files); ScriptModuleDeserializer deserializer(std::move(cu), std::move(reader)); return deserializer.deserialize(device, extra_files, restore_shapes); @@ -432,7 +432,7 @@ Module import_ir_module( // NOTE: Zipformat can be large files. So using stream version directly // instead of reading the file all at once. 
   if (getFileFormat(filename) != FileFormat::FlatbufferFileFormat) {
-    auto reader = torch::make_unique(filename);
+    auto reader = std::make_unique(filename);
     reader->setShouldLoadDebugSymbol(load_debug_files);
     ScriptModuleDeserializer deserializer(std::move(cu), std::move(reader));
     return deserializer.deserialize(device, extra_files, restore_shapes);
@@ -548,7 +548,7 @@ Module _load_jit_module_from_bytes(
     }
     case FileFormat::ZipFileFormat: {
       auto rai = std::make_unique(data.get(), size);
-      auto reader = torch::make_unique(std::move(rai));
+      auto reader = std::make_unique(std::move(rai));
       ScriptModuleDeserializer deserializer(std::move(cu), std::move(reader));
       return deserializer.deserialize(device, extra_files, restore_shapes);
     }
diff --git a/torch/csrc/utils/invalid_arguments.cpp b/torch/csrc/utils/invalid_arguments.cpp
index 0967611cf2e1..cd85931c3c70 100644
--- a/torch/csrc/utils/invalid_arguments.cpp
+++ b/torch/csrc/utils/invalid_arguments.cpp
@@ -1,6 +1,5 @@
 #include
-#include
 #include
 #include
@@ -136,25 +135,25 @@ std::vector _splitString(
 std::unique_ptr _buildType(std::string type_name, bool is_nullable) {
   std::unique_ptr result;
   if (type_name == "float") {
-    result = torch::make_unique(MultiType{"float", "int", "long"});
+    result = std::make_unique(MultiType{"float", "int", "long"});
   } else if (type_name == "int") {
-    result = torch::make_unique(MultiType{"int", "long"});
+    result = std::make_unique(MultiType{"int", "long"});
   } else if (type_name.find("tuple[") == 0) {
     auto type_list = type_name.substr(6);
     type_list.pop_back();
     std::vector> types;
     for (auto& type : _splitString(type_list, ","))
       types.emplace_back(_buildType(type, false));
-    result = torch::make_unique(std::move(types));
+    result = std::make_unique(std::move(types));
   } else if (type_name.find("sequence[") == 0) {
     auto subtype = type_name.substr(9);
     subtype.pop_back();
-    result = torch::make_unique(_buildType(subtype, false));
+    result = std::make_unique(_buildType(subtype, false));
   } else {
-    result = torch::make_unique(type_name);
+    result = std::make_unique(type_name);
   }
   if (is_nullable)
-    result = torch::make_unique(std::move(result));
+    result = std::make_unique(std::move(result));
   return result;
 }
diff --git a/torch/csrc/utils/memory.h b/torch/csrc/utils/memory.h
deleted file mode 100644
index 16cd95e0cdb5..000000000000
--- a/torch/csrc/utils/memory.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#pragma once
-
-#include <memory>
-
-namespace torch {
-
-// Reference:
-// https://github.com/llvm-mirror/libcxx/blob/master/include/memory#L3091
-
-template <typename T>
-struct unique_type_for {
-  using value = std::unique_ptr<T>;
-};
-
-template <typename T>
-// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
-struct unique_type_for<T[]> {
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
-  using unbounded_array = std::unique_ptr<T[]>;
-};
-
-template <typename T, size_t N>
-// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
-struct unique_type_for<T[N]> {
-  using bounded_array = void;
-};
-
-template <typename T, typename... Args>
-typename unique_type_for<T>::value make_unique(Args&&... args) {
-  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
-}
-
-template <typename T>
-typename unique_type_for<T>::unbounded_array make_unique(size_t size) {
-  using U = typename std::remove_extent<T>::type;
-  return std::unique_ptr<T>(new U[size]());
-}
-
-template <typename T, typename... Args>
-typename unique_type_for<T>::bounded_array make_unique(Args&&...) = delete;
-} // namespace torch
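
For downstream C++ code the migration is mechanical: the removed torch/csrc/utils/memory.h helper was a pre-C++14 stand-in modeled on libc++'s make_unique, so call sites only swap the torch:: namespace for std::. The following is an illustrative sketch, not part of the patch; the Widget type is a made-up placeholder used only for the example.

    // Hypothetical example showing the one-to-one replacement; build with
    // -std=c++14 or later.
    #include <cassert>
    #include <memory>

    struct Widget { // placeholder type for illustration, not from PyTorch
      explicit Widget(int id) : id(id) {}
      int id;
    };

    int main() {
      // Before this patch: auto w = torch::make_unique<Widget>(7);
      // After, using the standard library:
      auto w = std::make_unique<Widget>(7);  // single object
      auto a = std::make_unique<int[]>(3);   // array form, value-initialized
      assert(w->id == 7);
      assert(a[0] == 0 && a[1] == 0 && a[2] == 0);
      return 0;
    }

The array overload carries over as well: std::make_unique<int[]>(n) value-initializes its elements, which is what the updated CanConstructUniquePtrOfArray test in test/cpp/api/memory.cpp checks.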