Using std::make_unique<T>() instead of std::unique_ptr<T>(new T()) (#160723)

Replaces hand-rolled std::unique_ptr<T>(new T(...)) and std::unique_ptr<T[]>(new T[n]) constructions with std::make_unique, as the title states.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/160723
Approved by: https://github.com/Skylion007
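For readers unfamiliar with the pattern, here is a minimal standalone sketch of the before/after (the names are illustrative, not taken from the PR):

    #include <memory>

    struct Widget {
      Widget(int a, int b) : a_(a), b_(b) {}
      int a_, b_;
    };

    int main() {
      // Before: the element type is spelled twice and a raw new appears.
      std::unique_ptr<Widget> w1(new Widget(1, 2));
      std::unique_ptr<int[]> a1(new int[8]);  // elements left uninitialized

      // After: make_unique forwards constructor arguments (scalar form) and
      // value-initializes all n elements (array form), so a2's ints are zeroed
      // where a1's were indeterminate; for buffers that are fully overwritten
      // anyway, the only cost is a cheap zeroing pass.
      auto w2 = std::make_unique<Widget>(1, 2);
      auto a2 = std::make_unique<int[]>(8);
      return 0;
    }

Besides being shorter, make_unique keeps raw new out of the code and avoids the pre-C++17 leak that could occur when a unique_ptr(new T) expression was interleaved with another throwing argument in the same function call.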
@@ -647,8 +647,8 @@ _vec_softmax(
   parallel_for(
       0, outer_size * inner_size, 0, [&](int64_t begin, int64_t end) {
         int64_t idx = begin;
-        std::unique_ptr<float[]> temp_vec_input(new float[dim_size*vectorized_step]());
-        std::unique_ptr<float[]> temp_vec_output(new float[dim_size*vectorized_step]());
+        auto temp_vec_input = std::make_unique<float[]>(dim_size * vectorized_step);
+        auto temp_vec_output = std::make_unique<float[]>(dim_size * vectorized_step);
         float* temp_vec_input_data = temp_vec_input.get();
         float* temp_vec_output_data = temp_vec_output.get();
         while (idx < end) {
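Note that this hunk preserves behavior exactly: the old code wrote new float[n]() with trailing parentheses, which value-initializes (zeroes) the buffer, and std::make_unique<float[]>(n) is specified to perform new float[n]() as well. A short sketch of the distinction (n is a placeholder):

    std::unique_ptr<float[]> a(new float[n]());           // zeroed
    auto b = std::make_unique<float[]>(n);                // also new float[n](): zeroed
    auto c = std::make_unique_for_overwrite<float[]>(n);  // C++20: left uninitialized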
@@ -285,7 +285,7 @@ struct algorithm_search<cudnnConvolutionFwdAlgoPerf_t> {
         sizeof(algos) / sizeof(algos[0]) == num_algos,
         "Missing cuDNN convolution forward algorithms");
     int perf_count;
-    std::unique_ptr<perf_t[]> perf_results(new perf_t[num_algos]);
+    auto perf_results = std::make_unique<perf_t[]>(num_algos);
     if (!benchmark) {
       AT_CUDNN_CHECK_WITH_SHAPES(
           cudnnGetConvolutionForwardAlgorithm_v7(
@@ -369,7 +369,7 @@ struct algorithm_search<cudnnConvolutionBwdDataAlgoPerf_t> {
         sizeof(algos) / sizeof(algos[0]) == num_algos,
         "Missing cuDNN convolution backward data algorithms.");
     int perf_count;
-    std::unique_ptr<perf_t[]> perf_results(new perf_t[num_algos]);
+    auto perf_results = std::make_unique<perf_t[]>(num_algos);
     if (!benchmark) {
       AT_CUDNN_CHECK_WITH_SHAPES(
           cudnnGetConvolutionBackwardDataAlgorithm_v7(
@@ -456,7 +456,7 @@ struct algorithm_search<cudnnConvolutionBwdFilterAlgoPerf_t> {
     static_assert(
         sizeof(algos) / sizeof(algos[0]) == num_algos,
         "Missing cuDNN convolution backward filter algorithms.");
-    std::unique_ptr<perf_t[]> perf_results(new perf_t[num_algos]);
+    auto perf_results = std::make_unique<perf_t[]>(num_algos);
     int perf_count;
     if (!benchmark) {
       AT_CUDNN_CHECK_WITH_SHAPES(
@@ -53,7 +53,7 @@ static void upsample_nearest2d_out_frame(
     return;
   }

-  std::unique_ptr<int64_t []> input_offset_arr(new int64_t[output_width]);
+  auto input_offset_arr = std::make_unique<int64_t[]>(output_width);
   int64_t* input_offset = input_offset_arr.get();

   for (const auto w2 : c10::irange(output_width)) {
@@ -800,7 +800,7 @@ Tensor& bmm_out_sparse_cuda(const SparseTensor& self, const Tensor& mat2, Tensor
   Tensor indices_dim1 = indices[1].to(ScalarType::Int);
   Tensor indices_dim2 = indices[2].to(ScalarType::Int);

-  std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]);
+  auto mat_el_end_indices_host = std::make_unique<int64_t[]>(num_matrices);

   {
     auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
@@ -602,7 +602,7 @@ struct vector_args {
     _PyArg_ParseStackAndKeywords((PyObject*const*)args, nargs, kwnames.ptr(), _parser, &dummy, &dummy, &dummy, &dummy, &dummy);
 #else
     _PyArg_Parser* _parser = new _PyArg_Parser{NULL, &names_buf[0], fname_cstr, 0};
-    std::unique_ptr<PyObject*[]> buf(new PyObject*[names.size()]);
+    auto buf = std::make_unique<PyObject*[]>(names.size());
     _PyArg_UnpackKeywords((PyObject*const*)args, nargs, NULL, kwnames.ptr(), _parser, required, (Py_ssize_t)values.size() - kwonly, 0, &buf[0]);
 #endif
     throw exception_set();
@@ -195,7 +195,7 @@ inline void {{kernel_name}}(
 ALLOCATE_WEIGHT_BUFFER = r"""
 {%- if is_msvc_compiler %}
     // MSVC doesn't support stack-allocated dynamic-sized arrays, so using heap memory here.
-    std::unique_ptr<{{buffer_dtype}}[]> heap_deq_b_buf_ptr(new {{buffer_dtype}}[{{buffer_size}}]);
+    auto heap_deq_b_buf_ptr = std::make_unique<{{buffer_dtype}}[]>({{buffer_size}});
     {{buffer_dtype}}* {{buffer_name}} = heap_deq_b_buf_ptr.get();
 {%- else %}
     // It's safe to use a stack-allocated array since the blocking strategy would
@@ -528,7 +528,7 @@ std::shared_ptr<::gloo::transport::Device> ProcessGroupGloo::
   // use. Note: if the hostname does not resolve to an address (e.g.
   // because of misconfigured /etc/hosts file), this will not work.
   const auto hostNameMax = sysconf(_SC_HOST_NAME_MAX);
-  auto hostname = std::unique_ptr<char[]>(new char[hostNameMax]);
+  auto hostname = std::make_unique<char[]>(hostNameMax);
   auto rv = gethostname(hostname.get(), hostNameMax);
   if (rv != 0) {
     C10_THROW_ERROR(DistBackendError, c10::utils::str_error(errno));
@@ -359,7 +359,7 @@ c10::intrusive_ptr<c10::StorageImpl> THPStorage_readFileRaw(
     data = static_cast<uint8_t*>(storage->mutable_data());
   } else {
     // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
-    cpu_data = std::unique_ptr<char[]>(new char[nbytes]);
+    cpu_data = std::make_unique<char[]>(nbytes);
     data = (uint8_t*)cpu_data.get();
   }

@@ -926,7 +926,7 @@ class TorchLibraryInit final {
   }

   void initialize() {
-    lib = std::unique_ptr<Library>(new Library(kind, ns, key, file, line));
+    lib = std::make_unique<Library>(kind, ns, key, file, line);
     init_function(*lib);
   }
 };