diff --git a/aten/src/ATen/native/AdaptiveMaxPooling3d.cpp b/aten/src/ATen/native/AdaptiveMaxPooling3d.cpp
index 001e3c7d2d56..1c037c31c6f6 100644
--- a/aten/src/ATen/native/AdaptiveMaxPooling3d.cpp
+++ b/aten/src/ATen/native/AdaptiveMaxPooling3d.cpp
@@ -100,7 +100,7 @@ static void adaptive_max_pool3d_single_out_frame(
   at::parallel_for(0, sizeD, 0, [&](int64_t start, int64_t end) {
     for (const auto d : c10::irange(start, end)) {
       /* loop over output */
-      int64_t ot, oh, ow;
+      int64_t ot = 0, oh = 0, ow = 0;
       for(ot = 0; ot < osizeT; ot++)
       {
         int64_t istartT = start_index(ot, osizeT, isizeT);
@@ -209,7 +209,7 @@ static void adaptive_max_pool3d_backward_single_out_frame(
       const int64_t *ind_p_d = ind_p + d*osizeT*osizeH*osizeW;
 
       /* calculate max points */
-      int64_t ot, oh, ow;
+      int64_t ot = 0, oh = 0, ow = 0;
       for(ot = 0; ot < osizeT; ot++)
       {
         for(oh = 0; oh < osizeH; oh++)
@@ -357,13 +357,13 @@ TORCH_IMPL_FUNC(adaptive_max_pool3d_backward_out_cpu)
   int dimH = 2;
   int dimW = 3;
   int64_t sizeB = 1;
-  int64_t sizeD;
-  int64_t isizeT;
-  int64_t isizeH;
-  int64_t isizeW;
-  int64_t osizeT;
-  int64_t osizeH;
-  int64_t osizeW;
+  int64_t sizeD = 0;
+  int64_t isizeT = 0;
+  int64_t isizeH = 0;
+  int64_t isizeW = 0;
+  int64_t osizeT = 0;
+  int64_t osizeH = 0;
+  int64_t osizeW = 0;
 
   /* get contiguous gradOutput */
   auto gradOutput_ = gradOutput.contiguous();
diff --git a/aten/src/ATen/native/BatchLinearAlgebra.cpp b/aten/src/ATen/native/BatchLinearAlgebra.cpp
index 036b7b40c20a..71011ab92d58 100644
--- a/aten/src/ATen/native/BatchLinearAlgebra.cpp
+++ b/aten/src/ATen/native/BatchLinearAlgebra.cpp
@@ -1508,7 +1508,7 @@ void _linalg_check_errors(
     return;
   }
 
-  int32_t info;
+  int32_t info = 0;
   std::string batch_str;
   if (is_matrix) {
     info = infos.item<int>();
@@ -2079,7 +2079,7 @@ TORCH_IMPL_FUNC(lu_unpack_out)(const Tensor& LU,
       .set_check_mem_overlap(false)
       .check_all_same_dtype(false)
       .resize_outputs(false)
-      .declare_static_shape(pivots.sizes(), /*squash_dim=*/pivots.dim() - 1)
+      .declare_static_shape(pivots.sizes(), /*squash_dims=*/pivots.dim() - 1)
       .add_output(perm)
       .add_owned_const_input(pivots.contiguous())
       .build();
diff --git a/aten/src/ATen/native/BatchLinearAlgebraKernel.cpp b/aten/src/ATen/native/BatchLinearAlgebraKernel.cpp
index 79e7b8b04938..f16457f067bb 100644
--- a/aten/src/ATen/native/BatchLinearAlgebraKernel.cpp
+++ b/aten/src/ATen/native/BatchLinearAlgebraKernel.cpp
@@ -500,8 +500,8 @@ void apply_lstsq(const Tensor& A, Tensor& B, Tensor& rank, Tensor& singular_valu
   auto infos_data = infos.data_ptr<int>();
 
   // only 'gels' driver does not compute the rank
-  int rank_32;
-  int64_t* rank_data;
+  int rank_32 = 0;
+  int64_t* rank_data = nullptr;
   int64_t* rank_working_ptr = nullptr;
   if (driver_t::Gels != driver_type) {
     rank_data = rank.data_ptr<int64_t>();
@@ -510,9 +510,9 @@ void apply_lstsq(const Tensor& A, Tensor& B, Tensor& rank, Tensor& singular_valu
 
   // 'gelsd' and 'gelss' are SVD-based algorithms
   // so we can get singular values
-  value_t* s_data;
+  value_t* s_data = nullptr;
   value_t* s_working_ptr = nullptr;
-  int64_t s_stride;
+  int64_t s_stride = 0;
   if (driver_t::Gelsd == driver_type || driver_t::Gelss == driver_type) {
     s_data = singular_values.data_ptr<value_t>();
     s_working_ptr = s_data;
@@ -531,7 +531,7 @@ void apply_lstsq(const Tensor& A, Tensor& B, Tensor& rank, Tensor& singular_valu
   int lwork = -1; // default value to decide the opt size for workspace arrays
   scalar_t work_opt;
   value_t rwork_opt;
-  int iwork_opt;
+  int iwork_opt = 0;
   lapack_func(trans, m, n, nrhs, A_data, lda, B_data, ldb,
@@ -550,9 +550,9 @@ void apply_lstsq(const Tensor& A, Tensor& B, Tensor& rank, Tensor& singular_valu
 
   // 'rwork' only used for complex inputs and 'gelsy', 'gelsd' and 'gelss' drivers
   Tensor rwork;
-  value_t* rwork_data;
+  value_t* rwork_data = nullptr;
   if (A.is_complex() && driver_t::Gels != driver_type) {
-    int64_t rwork_len;
+    int64_t rwork_len = 0;
     switch (driver_type) {
       case driver_t::Gelsy:
         rwork_len = std::max<int64_t>(1, 2 * n);
@@ -570,7 +570,7 @@ void apply_lstsq(const Tensor& A, Tensor& B, Tensor& rank, Tensor& singular_valu
 
   // 'iwork' workspace array is relevant only for 'gelsd'
   Tensor iwork;
-  int* iwork_data;
+  int* iwork_data = nullptr;
   if (driver_t::Gelsd == driver_type) {
     iwork = at::empty({std::max(1, iwork_opt)}, A.options().dtype(at::kInt));
     iwork_data = iwork.mutable_data_ptr<int>();
diff --git a/aten/src/ATen/native/BinaryOps.cpp b/aten/src/ATen/native/BinaryOps.cpp
index 3fe3ac2b4a25..f2e45442d189 100644
--- a/aten/src/ATen/native/BinaryOps.cpp
+++ b/aten/src/ATen/native/BinaryOps.cpp
@@ -1417,7 +1417,7 @@ Tensor& comparison_op_(Tensor& self, const Scalar& other, OutImpl& out_impl) {
 
 // We need explicit cast to OutFunc because each *_out func is overloaded twice. Without An explicit cast, merely
 // referring to *_out function is ambiguous.
-using OutFunc = std::add_const<Tensor& (&)(Tensor&, const Tensor&, const Tensor&)>::type;
+using OutFunc = std::add_const_t<Tensor& (&)(Tensor&, const Tensor&, const Tensor&)>;
 
 // less, alias for torch.lt
 Tensor& less_out(const Tensor& self, const Tensor& other, Tensor& result) { return at::lt_out(result, self, other); }
diff --git a/aten/src/ATen/native/Normalization.cpp b/aten/src/ATen/native/Normalization.cpp
index d9637f365c9b..1fb64c323705 100644
--- a/aten/src/ATen/native/Normalization.cpp
+++ b/aten/src/ATen/native/Normalization.cpp
@@ -87,12 +87,12 @@ DEFINE_DISPATCH(batch_norm_cpu_backward_stub);
 DEFINE_DISPATCH(renorm_scale_factor_stub);
 
 namespace {
-  void check_dims_match_num_input_features(const char* arg_name, SymInt expected, SymInt actual){
+  void check_dims_match_num_input_features(const char* arg_name, const SymInt& expected, const SymInt& actual){
     TORCH_CHECK(actual == expected,
              arg_name, " should contain ", expected, " elements not ", actual);
   }
 
-  static inline Tensor repeat_if_defined(const Tensor& t, SymInt repeat) {
+  static inline Tensor repeat_if_defined(const Tensor& t, const SymInt& repeat) {
     if (t.defined()) {
       return t.repeat_symint(repeat);
     }
@@ -173,7 +173,7 @@ std::tuple<Tensor, Tensor, Tensor> batch_norm_cpu_transform_input_template(
       return 1 / at::sqrt(running_var + eps);
     }
   }());
-  constexpr bool mixed_type = !std::is_same<scalar_t, param_t>::value;
+  constexpr bool mixed_type = !std::is_same_v<scalar_t, param_t>;
   const auto dtype = mixed_type ? kFloat : input.scalar_type();
   auto w = weight.defined() ? as_nd(weight) :
       at::detail::scalar_tensor_static(1, dtype, kCPU);
@@ -208,7 +208,7 @@ std::tuple<Tensor, Tensor> batch_norm_cpu_update_stats_template(
   int64_t n = input.numel() / n_input;
 
   bool all_contiguous = is_contiguous(input);
-  constexpr bool mixed_type = !std::is_same<scalar_t, param_t>::value;
+  constexpr bool mixed_type = !std::is_same_v<scalar_t, param_t>;
   const auto dtype = mixed_type ? kFloat : input.scalar_type();
 
   auto save_mean_a = save_mean.accessor<param_t, 1>();
@@ -292,7 +292,7 @@ std::tuple<Tensor, Tensor> batch_norm_cpu_update_stats_template(
     reduce_dims[i - 1] = i;
   }
 
-  constexpr bool mixed_type = !std::is_same<scalar_t, param_t>::value;
+  constexpr bool mixed_type = !std::is_same_v<scalar_t, param_t>;
   const auto dtype = mixed_type ? kFloat : input.scalar_type();
   Tensor save_mean = is_contiguous(input) ? at::empty({n_input}, input.options().dtype(dtype)) : at::mean(input, /*dim=*/reduce_dims, /*keepdim=*/false, dtype);
   Tensor save_var_transform = at::empty({n_input}, input.options().dtype(dtype));
@@ -307,7 +307,7 @@ std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cpu_template(
 
   using accscalar_t = at::acc_type<scalar_t, false>;
 
-  constexpr bool mixed_type = !std::is_same<scalar_t, param_t>::value;
+  constexpr bool mixed_type = !std::is_same_v<scalar_t, param_t>;
   const auto dtype = mixed_type ? kFloat : input.scalar_type();
 
   Tensor grad_input;
@@ -360,7 +360,7 @@ std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cpu_template(
     reduce_dims[i - 1] = i;
   }
 
-  auto sum = at::sum(grad_out_, /*dims=*/reduce_dims);
+  auto sum = at::sum(grad_out_, /*dim=*/reduce_dims);
   auto sum_a = sum.accessor<scalar_t, 1>();
 
   auto reduce_iter = TensorIteratorConfig()
@@ -406,7 +406,7 @@ std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cpu_template(
     for (const auto f : c10::irange(b_begin, b_end)) {
       param_t w = weight.defined() ? weight_a[f] : param_t(1);
 
-      param_t mean, invstd;
+      param_t mean{}, invstd{};
       if (train) {
         mean = save_mean_a[f];
         invstd = save_invstd_a[f];
diff --git a/aten/src/ATen/native/RNN.cpp b/aten/src/ATen/native/RNN.cpp
index 345503d5d759..bf6c1740cc52 100644
--- a/aten/src/ATen/native/RNN.cpp
+++ b/aten/src/ATen/native/RNN.cpp
@@ -209,7 +209,7 @@ struct CellParams : public CellParamsBase {
     TORCH_INTERNAL_ASSERT(false, "Not yet implemented");
   }
   static c10::intrusive_ptr<CellParamsBase> __setstate__(
-      CellParamsSerializationType state) {
+      const CellParamsSerializationType& state) {
     TORCH_INTERNAL_ASSERT(false, "Not yet implemented");
   }
 };
@@ -289,9 +289,9 @@ struct QuantizedCellParams : public CellParamsBase {
         zero_point_hh.toLong()};
     return CellParamsSerializationType(
         "quantized",
-        std::move(tensors_to_serialize),
-        std::move(doubles_to_serialize),
-        std::move(longs_to_serialize),
+        tensors_to_serialize,
+        doubles_to_serialize,
+        longs_to_serialize,
         {});
   }
   static c10::intrusive_ptr<CellParamsBase> __setstate__(
@@ -355,10 +355,10 @@ c10::intrusive_ptr<CellParamsBase> make_quantized_cell_params(
       /*packed_hh=*/std::move(packed_hh),
       /*col_offsets_ih=*/std::move(col_offsets_ih),
       /*col_offsets_hh=*/std::move(col_offsets_hh),
-      /*scale_ih=*/std::move(scale_ih),
-      /*scale_hh=*/std::move(scale_hh),
-      /*zero_point_ih=*/std::move(zero_point_ih),
-      /*zero_point_hh=*/std::move(zero_point_hh));
+      /*scale_ih=*/scale_ih,
+      /*scale_hh=*/scale_hh,
+      /*zero_point_ih=*/zero_point_ih,
+      /*zero_point_hh=*/zero_point_hh);
 }
 
 // QuantizedCellParams vs. QuantizedCellParamsDynamic
@@ -431,10 +431,10 @@ struct QuantizedCellParamsDynamic : public CellParamsBase {
     // reduce_range parameter is serialized along with the int field values.
     return CellParamsSerializationType(
         "quantized_dynamic",
-        std::move(tensors_to_serialize),
+        tensors_to_serialize,
         {},
         {reduce_range_},
-        std::move(packed_params_to_serialize));
+        packed_params_to_serialize);
   }
   static c10::intrusive_ptr<CellParamsBase> __setstate__(
       CellParamsSerializationType state) {
@@ -507,7 +507,7 @@ struct QuantizedCellParamsFP16 : public CellParamsBase {
         packed_params_to_serialize{packed_ih, packed_hh};
 
     return CellParamsSerializationType(
-        "quantized_fp16", {}, {}, {}, std::move(packed_params_to_serialize));
+        "quantized_fp16", {}, {}, {}, packed_params_to_serialize);
   }
   static c10::intrusive_ptr<CellParamsBase> __setstate__(
       CellParamsSerializationType state) {
@@ -667,13 +667,13 @@ tpair_of<Tensor> hidden_slice(const tpair_of<Tensor>& t, int64_t start, int64_t
 // It's a struct only because functional programming in C++ is a pain, and it's easier
 // to pass around "vtable pointers" than actual function pointers.
 
-void check_rnn_cell_forward_input(const Tensor& input, c10::SymInt input_size) {
+void check_rnn_cell_forward_input(const Tensor& input, const c10::SymInt& input_size) {
   TORCH_CHECK(
     input.sym_size(1) == input_size,
     "input has inconsistent input_size: got ", input.sym_size(1), " expected ", input_size);
 }
 
-void check_rnn_cell_forward_hidden(const Tensor& input, const Tensor& hx, c10::SymInt hidden_size, c10::SymInt hidden_label) {
+void check_rnn_cell_forward_hidden(const Tensor& input, const Tensor& hx, const c10::SymInt& hidden_size, const c10::SymInt& hidden_label) {
   TORCH_CHECK(
     input.sym_size(0) == hx.sym_size(0),
     "Input batch size ", input.sym_size(0), " doesn't match hidden", hidden_label, " batch size ", hx.sym_size(0));
diff --git a/aten/src/ATen/native/ReduceOps.cpp b/aten/src/ATen/native/ReduceOps.cpp
index e6270e43aa8c..b4dbfbb2d1f9 100644
--- a/aten/src/ATen/native/ReduceOps.cpp
+++ b/aten/src/ATen/native/ReduceOps.cpp
@@ -250,7 +250,7 @@ static void meta_func_cum_ops(
   maybe_wrap_dim(dim, self.dim());
 
   const auto& result = meta.maybe_get_output();
-  ScalarType out_dtype;
+  ScalarType out_dtype{};
 
   if (result.defined()) {
     out_dtype = dtype.value_or(result.scalar_type());
@@ -1639,7 +1639,7 @@ Tensor allany_dims_default(const Tensor &self, OptionalIntArrayRef dim, bool kee
     return out;
   }
 
-  if (dim->size() == 0) {
+  if (dim->empty()) {
     if (self.scalar_type() == kByte) {
       // Convert to a 1 or 0 mask
       auto out = at::empty_like(self);
diff --git a/aten/src/ATen/native/ReduceOpsUtils.h b/aten/src/ATen/native/ReduceOpsUtils.h
index 4f0e9f9f8632..928853ed44ca 100644
--- a/aten/src/ATen/native/ReduceOpsUtils.h
+++ b/aten/src/ATen/native/ReduceOpsUtils.h
@@ -370,7 +370,7 @@ inline void resize_reduction(
   auto shape = get_reduction_shape(self, dims_, keepdim, allow_empty_dims);
   if (self.layout() == kStrided) {
     meta.set_output_raw_strided(0, shape, {}, self.options().dtype(out_dtype));
-  } else if (shape.size() == 0) {
+  } else if (shape.empty()) {
     meta.set_output_raw_strided(0, shape, {}, self.options().dtype(out_dtype).layout(kStrided));
   } else {
     TORCH_CHECK(false, "resize_reduction: support for output with ", self.layout(), " layout is not implemented yet");
diff --git a/aten/src/ATen/native/Repeat.cpp b/aten/src/ATen/native/Repeat.cpp
index 8bd253134b7a..2a62a74103c4 100644
--- a/aten/src/ATen/native/Repeat.cpp
+++ b/aten/src/ATen/native/Repeat.cpp
@@ -87,7 +87,7 @@ Tensor repeat_interleave_symint(
   }
 
   auto ret = input.index_select(
-      dim.value(), at::repeat_interleave_symint(repeats_, output_size));
+      dim.value(), at::repeat_interleave_symint(repeats_, std::move(output_size)));
   // Restore conj and neg bits
   if (conj) {
     ret = ret.conj();
diff --git a/aten/src/ATen/native/Repeat.h b/aten/src/ATen/native/Repeat.h
index 879a09bddd99..b8d6f92553a4 100644
--- a/aten/src/ATen/native/Repeat.h
+++ b/aten/src/ATen/native/Repeat.h
@@ -28,7 +28,7 @@ static inline Tensor repeat_interleave_common(
   }
   Tensor repeats_ = repeats.contiguous();
   Tensor cumsum = repeats.cumsum(0);
-  int64_t total;
+  int64_t total = 0;
   if (output_size.has_value()) {
     total = output_size.value();
   } else {
diff --git a/aten/src/ATen/native/Resize.cpp b/aten/src/ATen/native/Resize.cpp
index 01d11e652928..f4d895c2c585 100644
--- a/aten/src/ATen/native/Resize.cpp
+++ b/aten/src/ATen/native/Resize.cpp
@@ -274,7 +274,7 @@ const Tensor& resize__symint(
   return _resize_(self, size, optional_memory_format);
 }
 
-void resize_bytes_nocuda(const Storage& storage, c10::SymInt newsize) {
+void resize_bytes_nocuda(const Storage& storage, const c10::SymInt& newsize) {
   // handles all devices except cuda (which needs to be in a different .so)
   c10::DeviceType device_type = storage.device_type();
   if (device_type == at::kCPU) {
diff --git a/aten/src/ATen/native/Resize.h b/aten/src/ATen/native/Resize.h
index fb6a7ecb6634..951a08e84c77 100644
--- a/aten/src/ATen/native/Resize.h
+++ b/aten/src/ATen/native/Resize.h
@@ -38,7 +38,7 @@ TORCH_API bool resize_output_check_symint(const Tensor& output, SymIntArrayRef s
 
 TORCH_API void resize_bytes_cpu(StorageImpl* storage, size_t size_bytes);
 TORCH_API void resize_bytes_meta(StorageImpl* storage, c10::SymInt size_bytes);
-TORCH_API void resize_bytes_nocuda(const Storage& storage, c10::SymInt size_bytes);
+TORCH_API void resize_bytes_nocuda(const Storage& storage, const c10::SymInt& size_bytes);
 
 inline void maybe_resize_storage_cpu(TensorImpl* self, size_t new_size_bytes) {
   // It does not make sense to try to resize a storage
diff --git a/aten/src/ATen/native/cpu/AmpGradScalerKernels.cpp b/aten/src/ATen/native/cpu/AmpGradScalerKernels.cpp
index 005b9c15060c..f4d2cb35c986 100644
--- a/aten/src/ATen/native/cpu/AmpGradScalerKernels.cpp
+++ b/aten/src/ATen/native/cpu/AmpGradScalerKernels.cpp
@@ -1,7 +1,7 @@
 #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
 #include
 
-#include
+#include
 #include
 #include
 #include
@@ -32,7 +32,7 @@ void _amp_foreach_non_finite_check_and_unscale_cpu_kernel(
     TensorList scaled_grads,
     at::Tensor& found_inf,
     const at::Tensor& inv_scale) {
-  if (scaled_grads.size() == 0) {
+  if (scaled_grads.empty()) {
     return;
   }
 
@@ -55,7 +55,7 @@ void _amp_foreach_non_finite_check_and_unscale_cpu_kernel(
         t.layout() == at::kStrided,
         "one of scaled_grads was not a strided tensor.");
     auto iter = at::TensorIterator::unary_op(
-        const_cast<at::Tensor&>(t), const_cast<at::Tensor&>(t));
+        const_cast<at::Tensor&>(t), t);
     if (at::isReducedFloatingType(iter.dtype())) {
       AT_DISPATCH_REDUCED_FLOATING_TYPES(
           iter.dtype(),
diff --git a/aten/src/ATen/native/cpu/AvgPoolKernel.cpp b/aten/src/ATen/native/cpu/AvgPoolKernel.cpp
index ab7586d21d40..5a8597d413a2 100644
--- a/aten/src/ATen/native/cpu/AvgPoolKernel.cpp
+++ b/aten/src/ATen/native/cpu/AvgPoolKernel.cpp
@@ -71,7 +71,7 @@ void cpu_avg_pool2d(
 
       acc_t sum = 0;
 
-      int64_t divide_factor;
+      int64_t divide_factor = 0;
       if (divisor_override.has_value()) {
        divide_factor = divisor_override.value();
       } else {
@@ -100,7 +100,7 @@
 }
 
 template <typename scalar_t,
-          typename std::enable_if<std::is_same<scalar_t, at::opmath_type<scalar_t>>::value, int>::type = 0>
+          std::enable_if_t<std::is_same<scalar_t, at::opmath_type<scalar_t>>::value, int> = 0>
 void cpu_avg_pool2d_channels_last(
     const Tensor& output_,
     const Tensor& input_,
@@ -147,7 +147,7 @@ void cpu_avg_pool2d_channels_last(
       ih1 = std::min(ih1, input_height);
       iw1 = std::min(iw1, input_width);
 
-      int64_t divide_factor;
+      int64_t divide_factor = 0;
       if (divisor_override.has_value()) {
         divide_factor = divisor_override.value();
       } else {
@@ -214,7 +214,7 @@
 }
 
 template <typename scalar_t,
-          typename std::enable_if<!std::is_same<scalar_t, at::opmath_type<scalar_t>>::value, int>::type = 0>
+          std::enable_if_t<!std::is_same<scalar_t, at::opmath_type<scalar_t>>::value, int> = 0>
 void cpu_avg_pool2d_channels_last(
     const Tensor& output_,
     const Tensor& input_,
@@ -266,7 +266,7 @@ void cpu_avg_pool2d_channels_last(
       ih1 = std::min(ih1, input_height);
       iw1 = std::min(iw1, input_width);
 
-      int64_t divide_factor;
+      int64_t divide_factor = 0;
       if (divisor_override.has_value()) {
         divide_factor = divisor_override.value();
       } else {
@@ -386,7 +386,7 @@ void cpu_avg_pool2d_backward(
       ih1 = std::min(ih1, input_height);
       iw1 = std::min(iw1, input_width);
 
-      int64_t divide_factor;
+      int64_t divide_factor = 0;
       if (divisor_override.has_value()) {
         divide_factor = divisor_override.value();
       } else {
@@ -455,7 +455,7 @@ void cpu_avg_pool2d_backward_channels_last(
       ih1 = std::min(ih1, input_height);
       iw1 = std::min(iw1, input_width);
 
-      int64_t divide_factor;
+      int64_t divide_factor = 0;
       if (divisor_override.has_value()) {
         divide_factor = divisor_override.value();
       } else {
@@ -611,7 +611,7 @@ void cpu_avg_pool3d(
 
      acc_t sum = 0;
 
-      int64_t divide_factor;
+      int64_t divide_factor = 0;
       if (divisor_override.has_value()) {
         divide_factor = divisor_override.value();
       } else {
@@ -642,7 +642,7 @@
 }
 
 template <typename scalar_t,
-          typename std::enable_if<std::is_same<scalar_t, at::opmath_type<scalar_t>>::value, int>::type = 0>
+          std::enable_if_t<std::is_same<scalar_t, at::opmath_type<scalar_t>>::value, int> = 0>
 void cpu_avg_pool3d_channels_last(
     const Tensor& output_,
     const Tensor& input_,
@@ -696,7 +696,7 @@ void cpu_avg_pool3d_channels_last(
       ih1 = std::min(ih1, input_height);
       iw1 = std::min(iw1, input_width);
 
-      int64_t divide_factor;
+      int64_t divide_factor = 0;
       if (divisor_override.has_value()) {
         divide_factor = divisor_override.value();
       } else {
@@ -765,7 +765,7 @@
 }
 
 template <typename scalar_t,
-          typename std::enable_if<!std::is_same<scalar_t, at::opmath_type<scalar_t>>::value, int>::type = 0>
+          std::enable_if_t<!std::is_same<scalar_t, at::opmath_type<scalar_t>>::value, int> = 0>
 void cpu_avg_pool3d_channels_last(
     const Tensor& output_,
     const Tensor& input_,
@@ -824,7 +824,7 @@ void cpu_avg_pool3d_channels_last(
       ih1 = std::min(ih1, input_height);
       iw1 = std::min(iw1, input_width);
 
-      int64_t divide_factor;
+      int64_t divide_factor = 0;
       if (divisor_override.has_value()) {
         divide_factor = divisor_override.value();
       } else {
@@ -952,7 +952,7 @@ void cpu_avg_pool3d_backward(
       ih1 = std::min(ih1, input_height);
       iw1 = std::min(iw1, input_width);
 
-      int64_t divide_factor;
+      int64_t divide_factor = 0;
       if (divisor_override.has_value()) {
         divide_factor = divisor_override.value();
       } else {
@@ -1031,7 +1031,7 @@ void cpu_avg_pool3d_backward_channels_last(
       ih1 = std::min(ih1, input_height);
       iw1 = std::min(iw1, input_width);
 
-      int64_t divide_factor;
+      int64_t divide_factor = 0;
       if (divisor_override.has_value()) {
         divide_factor = divisor_override.value();
       } else {