diff --git a/aten/src/ATen/native/cpu/SoftMaxKernel.cpp b/aten/src/ATen/native/cpu/SoftMaxKernel.cpp index 317647123d4c..979e41985075 100644 --- a/aten/src/ATen/native/cpu/SoftMaxKernel.cpp +++ b/aten/src/ATen/native/cpu/SoftMaxKernel.cpp @@ -647,8 +647,8 @@ _vec_softmax( parallel_for( 0, outer_size * inner_size, 0, [&](int64_t begin, int64_t end) { int64_t idx = begin; - std::unique_ptr<float[]> temp_vec_input(new float[dim_size*vectorized_step]()); - std::unique_ptr<float[]> temp_vec_output(new float[dim_size*vectorized_step]()); + auto temp_vec_input = std::make_unique<float[]>(dim_size * vectorized_step); + auto temp_vec_output = std::make_unique<float[]>(dim_size * vectorized_step); float* temp_vec_input_data = temp_vec_input.get(); float* temp_vec_output_data = temp_vec_output.get(); while (idx < end) { diff --git a/aten/src/ATen/native/cudnn/Conv_v7.cpp b/aten/src/ATen/native/cudnn/Conv_v7.cpp index 4d869e5679f8..7e64af0c6636 100644 --- a/aten/src/ATen/native/cudnn/Conv_v7.cpp +++ b/aten/src/ATen/native/cudnn/Conv_v7.cpp @@ -285,7 +285,7 @@ struct algorithm_search<cudnnConvolutionFwdAlgoPerf_t> { sizeof(algos) / sizeof(algos[0]) == num_algos, "Missing cuDNN convolution forward algorithms"); int perf_count; - std::unique_ptr<perf_t[]> perf_results(new perf_t[num_algos]); + auto perf_results = std::make_unique<perf_t[]>(num_algos); if (!benchmark) { AT_CUDNN_CHECK_WITH_SHAPES( cudnnGetConvolutionForwardAlgorithm_v7( @@ -369,7 +369,7 @@ struct algorithm_search<cudnnConvolutionBwdDataAlgoPerf_t> { sizeof(algos) / sizeof(algos[0]) == num_algos, "Missing cuDNN convolution backward data algorithms."); int perf_count; - std::unique_ptr<perf_t[]> perf_results(new perf_t[num_algos]); + auto perf_results = std::make_unique<perf_t[]>(num_algos); if (!benchmark) { AT_CUDNN_CHECK_WITH_SHAPES( cudnnGetConvolutionBackwardDataAlgorithm_v7( @@ -456,7 +456,7 @@ struct algorithm_search<cudnnConvolutionBwdFilterAlgoPerf_t> { static_assert( sizeof(algos) / sizeof(algos[0]) == num_algos, "Missing cuDNN convolution backward filter algorithms."); - std::unique_ptr<perf_t[]> perf_results(new perf_t[num_algos]); + auto perf_results = std::make_unique<perf_t[]>(num_algos); int 
perf_count; if (!benchmark) { AT_CUDNN_CHECK_WITH_SHAPES( diff --git a/aten/src/ATen/native/quantized/cpu/UpSampleNearest2d.cpp b/aten/src/ATen/native/quantized/cpu/UpSampleNearest2d.cpp index e9e32e43ae02..a8f1c8a7d00f 100644 --- a/aten/src/ATen/native/quantized/cpu/UpSampleNearest2d.cpp +++ b/aten/src/ATen/native/quantized/cpu/UpSampleNearest2d.cpp @@ -53,7 +53,7 @@ static void upsample_nearest2d_out_frame( return; } - std::unique_ptr<int64_t[]> input_offset_arr(new int64_t[output_width]); + auto input_offset_arr = std::make_unique<int64_t[]>(output_width); int64_t* input_offset = input_offset_arr.get(); for (const auto w2 : c10::irange(output_width)) { diff --git a/aten/src/ATen/native/sparse/cuda/SparseCUDATensorMath.cu b/aten/src/ATen/native/sparse/cuda/SparseCUDATensorMath.cu index fe0ddd087dd3..8013dc989189 100644 --- a/aten/src/ATen/native/sparse/cuda/SparseCUDATensorMath.cu +++ b/aten/src/ATen/native/sparse/cuda/SparseCUDATensorMath.cu @@ -800,7 +800,7 @@ Tensor& bmm_out_sparse_cuda(const SparseTensor& self, const Tensor& mat2, Tensor Tensor indices_dim1 = indices[1].to(ScalarType::Int); Tensor indices_dim2 = indices[2].to(ScalarType::Int); - std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]); + auto mat_el_end_indices_host = std::make_unique<int64_t[]>(num_matrices); { auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); diff --git a/functorch/csrc/dim/minpybind.h b/functorch/csrc/dim/minpybind.h index e1ac428864a9..ceced399b40d 100644 --- a/functorch/csrc/dim/minpybind.h +++ b/functorch/csrc/dim/minpybind.h @@ -602,7 +602,7 @@ struct vector_args { _PyArg_ParseStackAndKeywords((PyObject*const*)args, nargs, kwnames.ptr(), _parser, &dummy, &dummy, &dummy, &dummy, &dummy); #else _PyArg_Parser* _parser = new _PyArg_Parser{NULL, &names_buf[0], fname_cstr, 0}; - std::unique_ptr<PyObject*[]> buf(new PyObject*[names.size()]); + auto buf = std::make_unique<PyObject*[]>(names.size()); _PyArg_UnpackKeywords((PyObject*const*)args, nargs, NULL, kwnames.ptr(), _parser, required, 
(Py_ssize_t)values.size() - kwonly, 0, &buf[0]); #endif throw exception_set(); diff --git a/torch/_inductor/codegen/cpp_micro_gemm.py b/torch/_inductor/codegen/cpp_micro_gemm.py index 13d946863425..67c725b3a53d 100644 --- a/torch/_inductor/codegen/cpp_micro_gemm.py +++ b/torch/_inductor/codegen/cpp_micro_gemm.py @@ -195,7 +195,7 @@ inline void {{kernel_name}}( ALLOCATE_WEIGHT_BUFFER = r""" {%- if is_msvc_compiler %} // MSVC doesn't support stack-allocated dynamic-sized arrays, so using heap memory here. - std::unique_ptr<{{buffer_dtype}}[]> heap_deq_b_buf_ptr(new {{buffer_dtype}}[{{buffer_size}}]); + auto heap_deq_b_buf_ptr = std::make_unique<{{buffer_dtype}}[]>({{buffer_size}}); {{buffer_dtype}}* {{buffer_name}} = heap_deq_b_buf_ptr.get(); {%- else %} // It's safe to use a stack-allocated array since the blocking strategy would diff --git a/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp b/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp index 9625bbfdde35..1391067c7ec4 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp @@ -528,7 +528,7 @@ std::shared_ptr<::gloo::transport::Device> ProcessGroupGloo:: // use. Note: if the hostname does not resolve to an address (e.g. // because of misconfigured /etc/hosts file), this will not work. 
const auto hostNameMax = sysconf(_SC_HOST_NAME_MAX); - auto hostname = std::unique_ptr<char[]>(new char[hostNameMax]); + auto hostname = std::make_unique<char[]>(hostNameMax); auto rv = gethostname(hostname.get(), hostNameMax); if (rv != 0) { C10_THROW_ERROR(DistBackendError, c10::utils::str_error(errno)); diff --git a/torch/csrc/serialization.cpp b/torch/csrc/serialization.cpp index b2bcabc363c1..539106ec02a3 100644 --- a/torch/csrc/serialization.cpp +++ b/torch/csrc/serialization.cpp @@ -359,7 +359,7 @@ c10::intrusive_ptr<c10::StorageImpl> THPStorage_readFileRaw( data = static_cast<uint8_t*>(storage->mutable_data()); } else { // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) - cpu_data = std::unique_ptr<char[]>(new char[nbytes]); + cpu_data = std::make_unique<char[]>(nbytes); data = (uint8_t*)cpu_data.get(); } diff --git a/torch/library.h b/torch/library.h index ea3f2183a23d..f906e04ddecf 100644 --- a/torch/library.h +++ b/torch/library.h @@ -926,7 +926,7 @@ class TorchLibraryInit final { } void initialize() { - lib = std::unique_ptr<Library>(new Library(kind, ns, key, file, line)); + lib = std::make_unique<Library>(kind, ns, key, file, line); init_function(*lib); } };