Remove unnecessary "static" for definitions in anonymous namespace (#165035)

This PR removes the unnecessary "static" qualifier from C++ functions and variables defined in anonymous namespaces, as detected by clang-tidy. Everything in an anonymous namespace already has internal linkage, so the keyword is redundant, and dropping it improves code readability. The related clang-tidy rules are planned to be enabled in follow-up PRs.
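For context, here is a minimal standalone sketch (not part of the diff below) of the pattern being cleaned up. It reuses a helper from the diff; the main() driver is added here purely for illustration, and the clang-tidy check involved is presumably readability-static-definition-in-anonymous-namespace, which the commit message does not name explicitly.

#include <cstdint>

namespace {

// Previously written as:
//   static bool is_allowed_dim_on_scalar_tensor(std::int64_t dim) { ... }
// The `static` was redundant: the anonymous namespace already gives the
// function internal linkage, so the keyword only added noise.
bool is_allowed_dim_on_scalar_tensor(std::int64_t dim) {
  return dim == 0 || dim == -1;
}

}  // namespace

int main() {
  // Both dim 0 and dim -1 are allowed on a scalar tensor; anything else is not.
  return (is_allowed_dim_on_scalar_tensor(0) &&
          is_allowed_dim_on_scalar_tensor(-1) &&
          !is_allowed_dim_on_scalar_tensor(1))
             ? 0
             : 1;
}

The linkage of is_allowed_dim_on_scalar_tensor is identical with or without `static`, which is why the change is purely a readability cleanup.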

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165035
Approved by: https://github.com/Skylion007
Yuanyuan Chen
2025-10-11 00:04:23 +00:00
committed by PyTorch MergeBot
parent 2d9f3f57f1
commit ef50c9b557
82 changed files with 262 additions and 262 deletions

View File

@ -58,7 +58,7 @@ namespace at {
namespace{
// PyTorch allows operations to specify dim 0 and dim -1 on a scalar tensor.
static bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
return dim == 0 || dim == -1;
}
@ -365,7 +365,7 @@ Tensor select_batching_rule(const Tensor& self, int64_t dim, int64_t index) {
return self_physical.getPhysicalToLogicalMap().apply(result);
}
static int64_t getGradInputPhysicalDim(int64_t dim, IntArrayRef input_sizes, int64_t num_batch_dims) {
int64_t getGradInputPhysicalDim(int64_t dim, IntArrayRef input_sizes, int64_t num_batch_dims) {
return maybe_wrap_dim(dim, static_cast<int64_t>(input_sizes.size())) + num_batch_dims;
}
@ -488,7 +488,7 @@ Tensor view_as_complex_batching_rule(const Tensor& self) {
// Checks that the smallest batch stride is greater than the largest example
// stride. This is something we can support but we choose not to because it's
// potentially error prone.
static void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_batch_dims) {
void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_batch_dims) {
auto smallest_batch_stride = std::min_element(
physical_strides.begin(), physical_strides.begin() + num_batch_dims);
auto largest_example_stride = std::max_element(
@ -508,7 +508,7 @@ static void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t
// given (sizes, strides, storage_offset) returns the maximum location that
// can be indexed (or nullopt if such a location doesn't exist, e.g., tensors
// with zero-size dims).
static std::optional<int64_t> maximum_indexable_location(
std::optional<int64_t> maximum_indexable_location(
IntArrayRef sizes, IntArrayRef strides, int64_t storage_offset) {
auto result = native::storage_size_for(sizes, strides);
if (result == 0) {
@ -521,7 +521,7 @@ static std::optional<int64_t> maximum_indexable_location(
// This checks that the range of possible memory locations accessible by
// x.as_strided(sizes, strides, maybe_storage_offset)
// are within the bounds of possible memory locations accessible by x.
static void checkBasicAsStridedValidForSlice(
void checkBasicAsStridedValidForSlice(
const Tensor& physical_tensor,
int64_t num_batch_dims,
IntArrayRef sizes,

View File

@ -13,7 +13,7 @@ namespace {
// and left at true for the rest of the execution.
// It's an optimization so that users who never use default hooks don't need to
// read the thread_local variables pack_hook_ and unpack_hook_.
static bool is_initialized(false);
bool is_initialized(false);
}
static void assertSavedTensorHooksNotDisabled() {

View File

@ -56,7 +56,7 @@ inline void get_strides(int64_t* strides, ArrayRef<OperandInfo> operands, int64_
}
}
static OptionalTensorRef make_otr(const TensorBase &tensor) {
OptionalTensorRef make_otr(const TensorBase &tensor) {
if (tensor.defined()) {
return OptionalTensorRef(tensor);
} else {

View File

@ -36,7 +36,7 @@ namespace {
using weakref_type = c10::weak_intrusive_ptr<TensorImpl, UndefinedTensorImpl>;
using val_type = std::tuple<weakref_type, Tensor>;
static ska::flat_hash_map<TensorImpl*, val_type>& get_cached_casts() {
ska::flat_hash_map<TensorImpl*, val_type>& get_cached_casts() {
static ska::flat_hash_map<TensorImpl*, val_type> cached_casts;
return cached_casts;
}

View File

@ -6,9 +6,9 @@ namespace at {
namespace {
static std::array<HostAllocator*, at::COMPILE_TIME_MAX_DEVICE_TYPES>
std::array<HostAllocator*, at::COMPILE_TIME_MAX_DEVICE_TYPES>
allocator_array{};
static std::array<uint8_t, at::COMPILE_TIME_MAX_DEVICE_TYPES>
std::array<uint8_t, at::COMPILE_TIME_MAX_DEVICE_TYPES>
allocator_priority{};
} // anonymous namespace

View File

@ -108,7 +108,7 @@ static hipblasStatus_t rocBLASStatusToHIPStatus(rocblas_status error)
namespace {
static cublasOperation_t _cublasOpFromChar(char op) {
cublasOperation_t _cublasOpFromChar(char op) {
// NOLINTNEXTLINE(bugprone-switch-missing-default-case)
switch (op) {
case 'n':
@ -128,7 +128,7 @@ static cublasOperation_t _cublasOpFromChar(char op) {
"_cublasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`");
}
static void _cublasAdjustLdLevel2(int64_t m, int64_t n, int64_t* lda) {
void _cublasAdjustLdLevel2(int64_t m, int64_t n, int64_t* lda) {
// Note: leading dimensions generally are checked that they are > 0
// and at least as big the result requires (even if the value won't
// be used).
@ -142,7 +142,7 @@ static void _cublasAdjustLdLevel2(int64_t m, int64_t n, int64_t* lda) {
*lda = std::max<int64_t>(m, 1);
}
static void _cublasAdjustLdLevel3(
void _cublasAdjustLdLevel3(
char transa,
char transb,
int64_t m,

View File

@ -15,19 +15,19 @@ namespace cuda::detail {
namespace {
// Total number of gpus in the system.
static int64_t num_gpus;
int64_t num_gpus;
// Ensures default_gens_cuda is initialized once.
static std::deque<c10::once_flag> cuda_gens_init_flag;
std::deque<c10::once_flag> cuda_gens_init_flag;
// Default, global CUDA generators, one per GPU.
static std::vector<Generator> default_gens_cuda;
std::vector<Generator> default_gens_cuda;
/*
* Populates the global variables related to CUDA generators
* Warning: this function must only be called once!
*/
static void initCUDAGenVector() {
void initCUDAGenVector() {
// Ensures we only call cudaGetDeviceCount only once.
static bool num_gpu_init_flag [[maybe_unused]] = []() {
num_gpus = static_cast<int32_t>(c10::cuda::device_count());

View File

@ -39,7 +39,7 @@ Tensor vdot_decomp(const Tensor& A, const Tensor& B) {
// NB: I wrote this like this because we *might* want its for a future matmul
// batch rule that isn't decomposed...
// "tv" = tensor @ vector
static std::tuple<Tensor, std::optional<int64_t>> tv_batch_rule(
std::tuple<Tensor, std::optional<int64_t>> tv_batch_rule(
const Tensor& self, std::optional<int64_t> self_bdim,
const Tensor& other, std::optional<int64_t> other_bdim) {
if (self_bdim && other_bdim) {
@ -66,7 +66,7 @@ static std::tuple<Tensor, std::optional<int64_t>> tv_batch_rule(
TORCH_INTERNAL_ASSERT(false, "can't get here");
}
static std::tuple<Tensor, std::optional<int64_t>> mv_batch_rule(
std::tuple<Tensor, std::optional<int64_t>> mv_batch_rule(
const Tensor& self, std::optional<int64_t> self_bdim,
const Tensor& other, std::optional<int64_t> other_bdim) {
auto self_logical_rank = rankWithoutBatchDim(self, self_bdim);
@ -79,7 +79,7 @@ static std::tuple<Tensor, std::optional<int64_t>> mv_batch_rule(
return tv_batch_rule(self, self_bdim, other, other_bdim);
}
static std::tuple<Tensor, std::optional<int64_t>> mm_batch_rule(
std::tuple<Tensor, std::optional<int64_t>> mm_batch_rule(
const Tensor& self, std::optional<int64_t> self_bdim,
const Tensor& other, std::optional<int64_t> other_bdim) {
auto self_logical_rank = rankWithoutBatchDim(self, self_bdim);
@ -94,7 +94,7 @@ static std::tuple<Tensor, std::optional<int64_t>> mm_batch_rule(
return std::make_tuple( at::matmul(self_, other_), 0 );
}
static std::tuple<Tensor, std::optional<int64_t>> bmm_batch_rule(
std::tuple<Tensor, std::optional<int64_t>> bmm_batch_rule(
const Tensor& self, std::optional<int64_t> self_bdim,
const Tensor& other, std::optional<int64_t> other_bdim) {
auto self_logical_rank = rankWithoutBatchDim(self, self_bdim);
@ -250,7 +250,7 @@ struct LinalgCheckMatrixBinaryRuleHelper<op_name, F, Func, typelist<A, B, T...>>
}
};
static void expect_at_least_rank(
void expect_at_least_rank(
const Tensor& tensor,
std::optional<int64_t> tensor_bdim,
int64_t expected_rank,
@ -472,7 +472,7 @@ atol_rtol_tensor_batch_rule(
return std::make_tuple(Func(input_, atol_, rtol_, hermitian), 0);
}
static std::tuple<Tensor, std::optional<int64_t>>
std::tuple<Tensor, std::optional<int64_t>>
pinv_batch_rule(
const Tensor& input, std::optional<int64_t> input_bdim, const std::optional<Tensor>& atol,
const std::optional<int64_t> atol_bdim, const std::optional<Tensor>& rtol,

View File

@ -19,7 +19,7 @@
namespace at::functorch {
namespace {
static bool any_has_value(ArrayRef<std::optional<int64_t>> bdims) {
bool any_has_value(ArrayRef<std::optional<int64_t>> bdims) {
for (const auto& bdim : bdims) {
if (bdim.has_value()) {
return true;
@ -28,7 +28,7 @@ static bool any_has_value(ArrayRef<std::optional<int64_t>> bdims) {
return false;
}
static int64_t get_num_leading_nones(ArrayRef<std::optional<Tensor>> indices) {
int64_t get_num_leading_nones(ArrayRef<std::optional<Tensor>> indices) {
int64_t result = 0;
for (const auto& idx : indices) {
if (!idx.has_value() || !idx->defined()) {
@ -40,7 +40,7 @@ static int64_t get_num_leading_nones(ArrayRef<std::optional<Tensor>> indices) {
return result;
}
static int64_t get_max_index_logical_dim(
int64_t get_max_index_logical_dim(
ArrayRef<std::optional<Tensor>> indices,
ArrayRef<std::optional<int64_t>> indices_bdims) {
int64_t max_logical_dim = -1;
@ -57,7 +57,7 @@ static int64_t get_max_index_logical_dim(
return max_logical_dim;
}
static std::vector<std::optional<Tensor>> batchIndices(
std::vector<std::optional<Tensor>> batchIndices(
at::TensorOptions options,
ArrayRef<std::optional<Tensor>> indices,
ArrayRef<std::optional<int64_t>> indices_bdims,
@ -126,7 +126,7 @@ static std::vector<std::optional<Tensor>> batchIndices(
// Define an "advanced index" to be a selection object that is
// a non-trivial Tensor (i.e. it does not represent :).
static bool is_advanced_index(const std::optional<Tensor>& idx) {
bool is_advanced_index(const std::optional<Tensor>& idx) {
if (!idx.has_value()) {
return false;
}
@ -137,7 +137,7 @@ static bool is_advanced_index(const std::optional<Tensor>& idx) {
}
// See NOTE: [advanced indices adjacent] for definition
static bool are_advanced_indices_adjacent(ArrayRef<std::optional<Tensor>> indices) {
bool are_advanced_indices_adjacent(ArrayRef<std::optional<Tensor>> indices) {
int64_t num_advanced_indices_regions = 0;
bool in_advanced_indices_region = false;
for (const auto& idx : indices) {
@ -165,7 +165,7 @@ static bool are_advanced_indices_adjacent(ArrayRef<std::optional<Tensor>> indice
// - result: Tensor[B, 4, 5, 6, 2, 3, 7, 8]
// ------- ----
// region2 region1
static Tensor swap_regions(const Tensor& tensor, int64_t first_region_size, int64_t second_region_size) {
Tensor swap_regions(const Tensor& tensor, int64_t first_region_size, int64_t second_region_size) {
VmapDimVector permutation(tensor.dim(), 0);
std::iota(permutation.begin(), permutation.end(), 0);
std::rotate(
@ -553,7 +553,7 @@ Tensor &_index_put_impl__plumbing(Tensor &self, const List<std::optional<Tensor>
return self;
}
static Tensor maybe_permute_values(
Tensor maybe_permute_values(
const Tensor& values,
ArrayRef<std::optional<Tensor>> orig_indices,
ArrayRef<std::optional<int64_t>> orig_indices_bdims) {
@ -1052,7 +1052,7 @@ std::tuple<Tensor, std::optional<int64_t>> index_add_batch_rule(
other, other_bdim, alpha, false);
}
static std::tuple<Tensor,Tensor> binary_pointwise_align(
std::tuple<Tensor,Tensor> binary_pointwise_align(
const Tensor & self,
std::optional<int64_t> self_bdim,
const Tensor & mask,

View File

@ -346,7 +346,7 @@ std::tuple<Tensor, std::optional<int64_t>> slice_batch_rule(
return std::make_tuple(std::move(result), 0);
}
static bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
return dim == 0 || dim == -1;
}

View File

@ -68,18 +68,18 @@ namespace at::functorch {
namespace{
// PyTorch allows operations to specify dim 0 and dim -1 on a scalar tensor.
static bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
return dim == 0 || dim == -1;
}
static int64_t get_current_level() {
int64_t get_current_level() {
auto maybe_level = maybeCurrentDynamicLayer();
TORCH_INTERNAL_ASSERT(maybe_level.has_value());
return maybe_level->layerId();
}
// This check should probably go into the dispatcher...
static bool participatesInCurrentLevel(const Tensor& self) {
bool participatesInCurrentLevel(const Tensor& self) {
auto current_level = get_current_level();
auto* maybe_batched_impl = maybeGetBatchedImpl(self);
if (!maybe_batched_impl) {
@ -90,7 +90,7 @@ static bool participatesInCurrentLevel(const Tensor& self) {
return self_level == current_level;
}
static bool participatesInCurrentLevel(ITensorListRef self) {
bool participatesInCurrentLevel(ITensorListRef self) {
for (const Tensor& tensor : self) {
if (participatesInCurrentLevel(tensor)) {
return true;
@ -285,7 +285,7 @@ std::vector<Tensor> unbind_batching_rule(const Tensor& self, int64_t dim) {
// given (sizes, strides, storage_offset) returns the maximum location that
// can be indexed (or nullopt if such a location doesn't exist, e.g., tensors
// with zero-size dims).
static std::optional<c10::SymInt> maximum_indexable_location(
std::optional<c10::SymInt> maximum_indexable_location(
c10::SymIntArrayRef sizes, c10::SymIntArrayRef strides, const c10::SymInt& storage_offset) {
auto result = native::storage_size_for(sizes, strides);
if (result == 0) {
@ -298,7 +298,7 @@ static std::optional<c10::SymInt> maximum_indexable_location(
// This checks that the range of possible memory locations accessible by
// x.as_strided(sizes, strides, maybe_storage_offset)
// are within the bounds of possible memory locations accessible by x.
static void checkBasicAsStridedValidForSlice(
void checkBasicAsStridedValidForSlice(
const Tensor& physical_tensor,
int64_t num_batch_dims,
c10::SymIntArrayRef sizes,

View File

@ -71,7 +71,7 @@ Tensor linear_hack(const Tensor& input, const Tensor& weight, const std::optiona
return output;
}
static inline at::Tensor apply_loss_reduction(const at::Tensor& unreduced, int64_t reduction) {
inline at::Tensor apply_loss_reduction(const at::Tensor& unreduced, int64_t reduction) {
if (reduction == at::Reduction::Mean) {
return unreduced.mean();
} else if (reduction == at::Reduction::Sum) {
@ -127,7 +127,7 @@ namespace {
template<bool inplace>
using Ctype = std::conditional_t<inplace, Tensor&, Tensor>;
static Tensor make_feature_noise(const Tensor& input) {
Tensor make_feature_noise(const Tensor& input) {
auto input_sizes = input.sizes();
TORCH_CHECK(input.dim() >= 2, "Feature dropout requires at least 2 dimensions in the input");
std::vector<int64_t> sizes;
@ -141,7 +141,7 @@ static Tensor make_feature_noise(const Tensor& input) {
return at::empty(sizes, input.options());
}
static bool is_fused_kernel_acceptable(const Tensor& input, double p) {
bool is_fused_kernel_acceptable(const Tensor& input, double p) {
return (input.is_cuda() || input.is_xpu() || input.is_lazy() || input.is_privateuseone()) && p > 0 && p < 1 && input.numel() > 0;
}
@ -210,7 +210,7 @@ ALIAS_SPECIALIZATION(_feature_dropout, true, false)
ALIAS_SPECIALIZATION(_alpha_dropout, false, true )
ALIAS_SPECIALIZATION(_feature_alpha_dropout, true, true )
static Tensor dropout(const Tensor& input, double p, bool train) {
Tensor dropout(const Tensor& input, double p, bool train) {
auto result = [&]() {
NoNamesGuard guard;
if (train && is_fused_kernel_acceptable(input, p)) {

View File

@ -24,7 +24,7 @@ namespace at::native {
namespace {
template <typename scalar_t>
static void adaptive_avg_pool3d_out_frame(
void adaptive_avg_pool3d_out_frame(
const scalar_t* input_p,
scalar_t* output_p,
int64_t sizeD,
@ -176,7 +176,7 @@ void adaptive_avg_pool3d_out_cpu_template(
}
template <typename scalar_t>
static void adaptive_avg_pool3d_backward_out_frame(
void adaptive_avg_pool3d_backward_out_frame(
scalar_t* gradInput_p,
const scalar_t* gradOutput_p,
int64_t sizeD,

View File

@ -93,7 +93,7 @@ namespace {
// 5d tensor B x D x T x H x W
template <typename scalar_t>
static void adaptive_max_pool3d_single_out_frame(
void adaptive_max_pool3d_single_out_frame(
const scalar_t *input_p,
scalar_t *output_p,
int64_t *ind_p,
@ -170,7 +170,7 @@ static void adaptive_max_pool3d_single_out_frame(
}
template <typename scalar_t>
static void adaptive_max_pool3d_out_frame(
void adaptive_max_pool3d_out_frame(
const scalar_t *input_data,
scalar_t *output_data,
int64_t *indices_data,
@ -202,7 +202,7 @@ static void adaptive_max_pool3d_out_frame(
}
template <typename scalar_t>
static void adaptive_max_pool3d_backward_single_out_frame(
void adaptive_max_pool3d_backward_single_out_frame(
scalar_t *gradInput_p,
const scalar_t *gradOutput_p,
const int64_t *ind_p,
@ -241,7 +241,7 @@ static void adaptive_max_pool3d_backward_single_out_frame(
}
template <typename scalar_t>
static void adaptive_max_pool3d_backward_out_frame(
void adaptive_max_pool3d_backward_out_frame(
scalar_t *gradInput_data,
const scalar_t *gradOutput_data,
const int64_t *indices_data,

View File

@ -153,7 +153,7 @@ namespace at::native {
namespace {
template <typename scalar_t>
static void avg_pool3d_out_frame(
void avg_pool3d_out_frame(
const scalar_t *input_p,
scalar_t *output_p,
int64_t nslices,
@ -333,7 +333,7 @@ TORCH_IMPL_FUNC(avg_pool3d_out_cpu) (
namespace {
template <typename scalar_t>
static void avg_pool3d_backward_out_frame(
void avg_pool3d_backward_out_frame(
scalar_t *gradInput_p,
const scalar_t *gradOutput_p,
int64_t nslices,

View File

@ -143,13 +143,13 @@ Tensor& cholesky_inverse_kernel_impl(Tensor& result, Tensor& infos, bool upper)
For more info see https://github.com/pytorch/pytorch/issues/145801#issuecomment-2631781776
*/
template <typename T>
static inline
inline
std::enable_if_t<std::is_floating_point_v<T>, int> lapack_work_to_int(const T val) {
const auto next_after = std::nextafter(val, std::numeric_limits<T>::infinity());
return std::max<int>(1, std::ceil(next_after));
}
template <typename T>
static inline
inline
std::enable_if_t<c10::is_complex<T>::value, int> lapack_work_to_int(const T val) {
return lapack_work_to_int(val.real());
}
@ -343,7 +343,7 @@ void linalg_eigh_kernel(const Tensor& eigenvalues, const Tensor& eigenvectors, c
For further details, please see the LAPACK documentation for GEQRF.
*/
template <typename scalar_t>
static void apply_geqrf(const Tensor& input, const Tensor& tau) {
void apply_geqrf(const Tensor& input, const Tensor& tau) {
#if !AT_BUILD_WITH_LAPACK()
TORCH_CHECK(
false,
@ -1039,7 +1039,7 @@ void lu_solve_kernel(const Tensor& LU, const Tensor& pivots, const Tensor& B, Tr
}
template <typename scalar_t>
static void apply_svd(const Tensor& A,
void apply_svd(const Tensor& A,
const bool full_matrices,
const bool compute_uv,
const Tensor& U,

View File

@ -71,7 +71,7 @@
namespace at::native {
namespace {
static void col2im_out_cpu_template(
void col2im_out_cpu_template(
Tensor& output,
const Tensor& input_,
IntArrayRef output_size,

View File

@ -25,7 +25,7 @@ namespace at::native {
namespace {
static Tensor compute_columns2d(
Tensor compute_columns2d(
const Tensor& input,
IntArrayRef padding,
IntArrayRef stride,
@ -93,7 +93,7 @@ static Tensor compute_columns2d(
return columns.contiguous();
}
static inline void slow_conv2d_shape_check(
inline void slow_conv2d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
@ -205,7 +205,7 @@ static inline void slow_conv2d_shape_check(
}
}
static inline Tensor view_weight_2d(const Tensor& weight_,
inline Tensor view_weight_2d(const Tensor& weight_,
at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) {
Tensor weight = weight_.contiguous(memory_format);
if (weight.dim() == 4) {
@ -220,7 +220,7 @@ static inline Tensor view_weight_2d(const Tensor& weight_,
}
template <typename scalar_t>
static void slow_conv2d_update_output_frame(
void slow_conv2d_update_output_frame(
TensorAccessor<const scalar_t, 3> input,
TensorAccessor<scalar_t, 3> output,
TensorAccessor<const scalar_t, 2> weight,
@ -480,7 +480,7 @@ void slow_conv2d_backward_weight_frame(
}
}
static void slow_conv2d_backward_weight_out_cpu_template(
void slow_conv2d_backward_weight_out_cpu_template(
Tensor& grad_weight,
const Tensor& input,
const Tensor& grad_output_,

View File

@ -28,7 +28,7 @@ namespace at::native {
namespace {
static Tensor compute_columns3d(
Tensor compute_columns3d(
const Tensor& input_,
IntArrayRef stride,
IntArrayRef padding,
@ -108,7 +108,7 @@ static Tensor compute_columns3d(
return columns;
}
static inline void slow_conv3d_shape_check(
inline void slow_conv3d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
@ -273,7 +273,7 @@ static inline void slow_conv3d_shape_check(
}
}
static Tensor view_weight_2d(const Tensor& weight_) {
Tensor view_weight_2d(const Tensor& weight_) {
Tensor weight = weight_.contiguous();
if (weight.dim() == 5) {
const int64_t s1 = weight.size(0);
@ -286,7 +286,7 @@ static Tensor view_weight_2d(const Tensor& weight_) {
}
template <typename scalar_t>
static void slow_conv3d_update_output_frame(
void slow_conv3d_update_output_frame(
TensorAccessor<const scalar_t, 4> input,
TensorAccessor<scalar_t, 4> output,
TensorAccessor<const scalar_t, 2> weight,
@ -515,7 +515,7 @@ void slow_conv3d_backward_weight_frame(
grad_weight.data(), ldc, grad_weight.stride(0) * n);
}
static void slow_conv3d_backward_parameters_out_cpu_template(
void slow_conv3d_backward_parameters_out_cpu_template(
Tensor& grad_weight,
const Tensor& input,
const Tensor& grad_output,

View File

@ -108,7 +108,7 @@ bool is_fast_path(const Tensor& src, const std::optional<Tensor>& scale, Tensor&
// index_add (using add_indices as the index), without creating an intermediary
// tensor to hold the selected embeddings
template <typename data_t, typename index_t>
static std::enable_if_t<std::is_same_v<data_t, double>, void>
std::enable_if_t<std::is_same_v<data_t, double>, void>
index_select_add(
const Tensor& select_indices,
const Tensor& add_indices,
@ -494,7 +494,7 @@ index_select_add(const Tensor &select_indices,
// mul (scaling by per_sample_weights)
// index_add (using add_indices as the index)
template <typename data_t, typename index_t>
static std::enable_if_t<std::is_same_v<data_t, double>, void>
std::enable_if_t<std::is_same_v<data_t, double>, void>
index_select_scale_add(
const Tensor& select_indices,
const Tensor& add_indices,

View File

@ -130,7 +130,7 @@ namespace native {
namespace {
template <typename scalar_t>
static void fractional_max_pool2d_out_single_batch_frame(
void fractional_max_pool2d_out_single_batch_frame(
const scalar_t* input,
scalar_t* output,
int64_t* indices,
@ -188,7 +188,7 @@ static void fractional_max_pool2d_out_single_batch_frame(
}
template <typename scalar_t>
static void fractional_max_pool2d_out_frame(
void fractional_max_pool2d_out_frame(
const scalar_t* input,
scalar_t* output,
int64_t* indices,
@ -220,7 +220,7 @@ static void fractional_max_pool2d_out_frame(
}
template <typename scalar_t>
static void fractional_max_pool2d_backward_out_single_batch_frame(
void fractional_max_pool2d_backward_out_single_batch_frame(
scalar_t* gradInput,
const scalar_t* gradOutput,
const int64_t* indices,
@ -247,7 +247,7 @@ static void fractional_max_pool2d_backward_out_single_batch_frame(
}
template <typename scalar_t>
static void fractional_max_pool2d_backward_out_frame(
void fractional_max_pool2d_backward_out_frame(
scalar_t* gradInput,
const scalar_t* gradOutput,
const int64_t* indices,

View File

@ -99,7 +99,7 @@ namespace at::native {
namespace {
template<typename scalar_t>
static void fractional_max_pool3d_out_single_batch_frame(
void fractional_max_pool3d_out_single_batch_frame(
const scalar_t* input,
scalar_t* output,
int64_t* indices,
@ -169,7 +169,7 @@ static void fractional_max_pool3d_out_single_batch_frame(
}
template<typename scalar_t>
static void fractional_max_pool3d_out_frame(
void fractional_max_pool3d_out_frame(
const scalar_t* input,
scalar_t* output,
int64_t* indices,
@ -257,7 +257,7 @@ TORCH_IMPL_FUNC(fractional_max_pool3d_out_cpu)(
namespace {
template<typename scalar_t>
static void fractional_max_pool3d_backward_out_single_batch_frame(
void fractional_max_pool3d_backward_out_single_batch_frame(
scalar_t* gradInput,
const scalar_t* gradOutput,
const int64_t* indices,
@ -287,7 +287,7 @@ static void fractional_max_pool3d_backward_out_single_batch_frame(
}
template<typename scalar_t>
static void fractional_max_pool3d_backward_out_frame(
void fractional_max_pool3d_backward_out_frame(
scalar_t* gradInput,
const scalar_t* gradOutput,
const int64_t* indices,

View File

@ -19,7 +19,7 @@
namespace at::native {
namespace {
static void im2col_out_cpu_template(
void im2col_out_cpu_template(
Tensor& output,
const Tensor& input_,
IntArrayRef kernel_size,

View File

@ -61,7 +61,7 @@
constexpr float EPSILON = 1e-12;
namespace {
static inline at::Tensor apply_loss_reduction(const at::Tensor& unreduced, int64_t reduction) {
inline at::Tensor apply_loss_reduction(const at::Tensor& unreduced, int64_t reduction) {
if (reduction == at::Reduction::Mean) {
return unreduced.mean();
} else if (reduction == at::Reduction::Sum) {

View File

@ -44,7 +44,7 @@ namespace {
// this ad-hoc converts from targets (l in [1]) to augmented targets (l' in [1]) note that no bound-checking is done
template<typename target_t>
static inline int64_t get_target_prime(target_t* target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK) {
inline int64_t get_target_prime(target_t* target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK) {
if (idx % 2 == 0) {
return BLANK;
} else {

View File

@ -58,7 +58,7 @@ inline scalar_t multilabel_margin_loss_forward_inner_sum_cpu(
}
template <typename scalar_t>
static void multilabel_margin_loss_forward_out_frame(
void multilabel_margin_loss_forward_out_frame(
const Tensor& input_contiguous,
const Tensor& target_contiguous,
Tensor& output,
@ -108,7 +108,7 @@ static void multilabel_margin_loss_forward_out_frame(
}
}
static void multilabel_margin_loss_forward_out_cpu_template(
void multilabel_margin_loss_forward_out_cpu_template(
const Tensor& input,
const Tensor& target,
Tensor& output,
@ -153,7 +153,7 @@ static void multilabel_margin_loss_forward_out_cpu_template(
}
template <typename scalar_t>
static void multilabel_margin_loss_backward_out_frame(
void multilabel_margin_loss_backward_out_frame(
Tensor& grad_input,
const Tensor& grad_output,
const Tensor& input_contiguous,
@ -222,7 +222,7 @@ static void multilabel_margin_loss_backward_out_frame(
}
}
static void multilabel_margin_loss_backward_out_cpu_template(
void multilabel_margin_loss_backward_out_cpu_template(
Tensor& grad_input,
const Tensor& grad_output,
const Tensor& input,

View File

@ -57,7 +57,7 @@ inline int64_t target_index_checked(
}
template <typename scalar_t>
static inline void multi_margin_loss_cpu_kernel(
inline void multi_margin_loss_cpu_kernel(
Tensor& output,
const scalar_t* input_data,
const int64_t* target_data,
@ -148,7 +148,7 @@ void multi_margin_loss_out_cpu_template(
}
template <typename scalar_t>
static void multi_margin_loss_backward_cpu_kernel(
void multi_margin_loss_backward_cpu_kernel(
scalar_t* grad_input_data,
const Tensor& grad_output,
const scalar_t* input_data,

View File

@ -159,7 +159,7 @@ inline scalar_t* optional_data(const Tensor& source) {
}
template <typename scalar_t, typename target_t>
static void nll_loss_out_frame(
void nll_loss_out_frame(
const Tensor& output,
const Tensor& total_weight,
const Tensor& input,
@ -338,7 +338,7 @@ void nll_loss_forward_out_cpu_template(
}
template <typename scalar_t, typename target_t>
static void nll_loss_backward_out_frame(
void nll_loss_backward_out_frame(
const Tensor& grad_input,
const Tensor& grad_output,
const Tensor& input,

View File

@ -99,7 +99,7 @@ inline void check_gradout_shape_nll_loss2d(
template <typename scalar_t>
static void nll_loss2d_forward_out_frame(
void nll_loss2d_forward_out_frame(
Tensor& output,
Tensor& total_weight,
const Tensor& input,
@ -280,7 +280,7 @@ void nll_loss2d_forward_out_cpu_template(
}
template <typename scalar_t>
static void nll_loss2d_backward_out_frame(
void nll_loss2d_backward_out_frame(
Tensor& grad_input,
const Tensor& grad_output,
const Tensor& input,

View File

@ -24,7 +24,7 @@
namespace at {
namespace {
static inline void slow_conv_transpose2d_shape_check(
inline void slow_conv_transpose2d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
@ -386,7 +386,7 @@ void slow_conv_transpose2d_out_cpu_template(
}
}
static void slow_conv_transpose2d_backward_out_cpu_template(
void slow_conv_transpose2d_backward_out_cpu_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_input,

View File

@ -22,7 +22,7 @@ namespace at::native {
namespace {
static inline void slow_conv_transpose3d_shape_check(
inline void slow_conv_transpose3d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,

View File

@ -92,7 +92,7 @@ namespace {
arg_name, " should contain ", expected, " elements not ", actual);
}
static inline Tensor repeat_if_defined(const Tensor& t, const SymInt& repeat) {
inline Tensor repeat_if_defined(const Tensor& t, const SymInt& repeat) {
if (t.defined()) {
return t.repeat_symint(repeat);
}

View File

@ -538,7 +538,7 @@ c10::intrusive_ptr<CellParamsBase> make_quantized_cell_params_fp16(
std::move(w_ih_packed), std::move(w_hh_packed));
}
static std::unordered_map<
std::unordered_map<
std::string,
c10::intrusive_ptr<CellParamsBase> (*)(CellParamsSerializationType)>
cell_params_deserializers = {
@ -578,7 +578,7 @@ struct QRNNCellParamsWrapper {
// Gathers every two elements of a vector in a vector of pairs
template<typename T>
static std::vector<pair_of<T>> pair_vec(const std::vector<T>& vals) {
std::vector<pair_of<T>> pair_vec(const std::vector<T>& vals) {
TORCH_CHECK(vals.size() % 2 == 0, "Odd number of params or hiddens given to a bidirectional RNN");
std::vector<pair_of<T>> result;
result.reserve(vals.size() / 2);
@ -590,7 +590,7 @@ static std::vector<pair_of<T>> pair_vec(const std::vector<T>& vals) {
// Flattens a vector of pairs
template<typename T>
static std::vector<T> unpair_vec(std::vector<pair_of<T>>&& vals) {
std::vector<T> unpair_vec(std::vector<pair_of<T>>&& vals) {
std::vector<T> result;
result.reserve(vals.size() * 2);
for (const auto i : c10::irange(vals.size())) {
@ -601,7 +601,7 @@ static std::vector<T> unpair_vec(std::vector<pair_of<T>>&& vals) {
}
// Parses a flat list of parameter tensors into a list of CellParams
static std::vector<CellParams> gather_params(TensorList params, bool has_biases, bool has_projections = false) {
std::vector<CellParams> gather_params(TensorList params, bool has_biases, bool has_projections = false) {
static at::Tensor undefined;
std::vector<CellParams> result;
if (has_biases) {
@ -1894,10 +1894,10 @@ static DEFINE_QUANTIZED_RNN_CELL_DYNAMIC(quantized_rnn_tanh_cell_dynamic, simple
namespace {
[[maybe_unused]] static auto ensure_linear_params_registered =
[[maybe_unused]] auto ensure_linear_params_registered =
register_linear_params();
static auto cell_params_base_registry =
auto cell_params_base_registry =
torch::selective_class_<CellParamsBase>("rnn", TORCH_SELECTIVE_CLASS("CellParamsBase"))
.def_pickle(
[](const c10::intrusive_ptr<CellParamsBase>& self)

View File

@ -2676,7 +2676,7 @@ inline std::tuple<Tensor, Tensor, int64_t> _take_along_dim_helper(
std::move(dim));
}
static inline void checkDevice(CheckedFrom c, const Tensor& t, Device device) {
inline void checkDevice(CheckedFrom c, const Tensor& t, Device device) {
TORCH_CHECK(
!t.defined() || t.device() == device,
"Expected tensor to have ",
@ -2689,7 +2689,7 @@ static inline void checkDevice(CheckedFrom c, const Tensor& t, Device device) {
")");
}
static inline void checkDevice(
inline void checkDevice(
CheckedFrom c,
at::ArrayRef<Tensor> tensors,
Device device) {

View File

@ -3641,7 +3641,7 @@ Tensor& transpose_(Tensor& self, int64_t dim0, int64_t dim1) {
namespace {
// Transpose implementation for sparse compressed layouts
// NB: We assume that dim1,dim0 have already been wrapped
static inline Tensor sparse_compressed_transpose(
inline Tensor sparse_compressed_transpose(
const Tensor& self,
int64_t dim0,
int64_t dim1) {

View File

@ -29,7 +29,7 @@ namespace {
// grad_in does not mean that it is a gradient wrt to input,
// grad_in/grad_out is just an input/output of unfold_backward kernel.
[[maybe_unused]] static TensorIterator _make_unfold_backward_iter_over_grad_out(
[[maybe_unused]] TensorIterator _make_unfold_backward_iter_over_grad_out(
Tensor& grad_out,
const Tensor& grad_in,
int64_t dim,

View File

@ -105,7 +105,7 @@ namespace at::native {
namespace {
template <typename scalar_t>
static void upsample_bicubic2d_backward_out_frame(
void upsample_bicubic2d_backward_out_frame(
const scalar_t* odata,
scalar_t* idata,
int64_t input_height,
@ -177,7 +177,7 @@ static void upsample_bicubic2d_backward_out_frame(
});
}
static void upsample_bicubic2d_backward_kernel(
void upsample_bicubic2d_backward_kernel(
const Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,

View File

@ -39,6 +39,6 @@ int register_linear_params() {
}
namespace {
[[maybe_unused]] static auto linear_params = register_linear_params();
[[maybe_unused]] auto linear_params = register_linear_params();
} // namespace
} // namespace ao::sparse

View File

@ -30,7 +30,7 @@ namespace {
// Workaround for gcc-14.2.0 ICE during RTL pass: expand when compiling for NEON
__attribute__((optimize("no-tree-vectorize")))
#endif
static void log_sigmoid_cpu_kernel(TensorBase &output, TensorBase &buffer, const TensorBase &input) {
void log_sigmoid_cpu_kernel(TensorBase &output, TensorBase &buffer, const TensorBase &input) {
if (at::isReducedFloatingType(input.scalar_type())) {
AT_DISPATCH_REDUCED_FLOATING_TYPES(input.scalar_type(), "log_sigmoid_cpu", [&]() {
using Vec = Vectorized<scalar_t>;
@ -96,7 +96,7 @@ static void log_sigmoid_cpu_kernel(TensorBase &output, TensorBase &buffer, const
}
}
static void log_sigmoid_backward_cpu_kernel(TensorIterator& iter) {
void log_sigmoid_backward_cpu_kernel(TensorIterator& iter) {
if (at::isReducedFloatingType(iter.dtype())) {
AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "log_sigmoid_backward_cpu", [&]() {
using Vec = Vectorized<scalar_t>;
@ -150,7 +150,7 @@ static void log_sigmoid_backward_cpu_kernel(TensorIterator& iter) {
}
}
static void threshold_kernel(
void threshold_kernel(
TensorIteratorBase& iter,
const Scalar& threshold_scalar,
const Scalar& value_scalar) {
@ -868,7 +868,7 @@ void hardswish_backward_kernel(TensorIterator& iter) {
}
}
static void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
if (at::isReducedFloatingType(iter.dtype())) {
AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "leaky_relu_cpu", [&]() {
auto zero_vec = Vectorized<float>((float)(0));
@ -907,7 +907,7 @@ static void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
}
}
static void leaky_relu_backward_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
void leaky_relu_backward_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
if (at::isReducedFloatingType(iter.dtype())) {
AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "leaky_relu_backward_cpu", [&]() {
auto zero_vec = Vectorized<float>((float)(0));

View File

@ -369,7 +369,7 @@ void gemm_notrans_(
#endif // defined(__aarch64__) && !defined(C10_MOBILE)
#if !defined(C10_MOBILE)
static float compute_dot(const at::Half* a, const at::Half* b, int64_t len) {
float compute_dot(const at::Half* a, const at::Half* b, int64_t len) {
return at::native::CPU_CAPABILITY::fp16_dot_with_fp32_arith(
a, b, len);
}
@ -406,7 +406,7 @@ void gemm_transa_(
});
}
static float compute_dot(const at::BFloat16* a, const at::BFloat16* b, int64_t len) {
float compute_dot(const at::BFloat16* a, const at::BFloat16* b, int64_t len) {
return at::native::CPU_CAPABILITY::bf16_dot_with_fp32_arith(a, b, len);
}

View File

@ -15,12 +15,12 @@ namespace at::native {
inline namespace CPU_CAPABILITY {
namespace {
static bool reduced_input(ScalarType input_t, ScalarType output_t) {
bool reduced_input(ScalarType input_t, ScalarType output_t) {
return !at::isFloat8Type(input_t) && at::isReducedFloatingType(input_t) &&
output_t == kFloat;
}
static bool reduced_output(ScalarType input_t, ScalarType output_t) {
bool reduced_output(ScalarType input_t, ScalarType output_t) {
return !at::isFloat8Type(output_t) && at::isReducedFloatingType(output_t) &&
input_t == kFloat;
}

View File

@ -15,7 +15,7 @@ namespace at::native {
namespace {
template<typename scalar_t>
static void apply_cross(const Tensor& result, const Tensor& a, const Tensor& b, const int64_t dim) {
void apply_cross(const Tensor& result, const Tensor& a, const Tensor& b, const int64_t dim) {
int64_t total = a.numel() / 3;
int64_t a_stride = a.stride(dim);
int64_t b_stride = b.stride(dim);
@ -68,7 +68,7 @@ static void apply_cross(const Tensor& result, const Tensor& a, const Tensor& b,
});
}
static void cross_kernel_impl(const Tensor& result, const Tensor& a, const Tensor& b, const int64_t dim) {
void cross_kernel_impl(const Tensor& result, const Tensor& a, const Tensor& b, const int64_t dim) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kBFloat16, kHalf, result.scalar_type(), "cross", [&]() {
apply_cross<scalar_t>(result, a, b, dim);
});

View File

@ -422,19 +422,19 @@ void pdist_forward_kernel_impl(Tensor& result, const Tensor& self, const double
});
}
static void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) {
void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) {
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "pdist_backward", [&] {
Dist<scalar_t>::apply_backward_pdist(result, grad, self, p, dist);
});
}
static void cdist_kernel_impl(Tensor& result, const Tensor& x1, const Tensor& x2, const double p) {
void cdist_kernel_impl(Tensor& result, const Tensor& x1, const Tensor& x2, const double p) {
AT_DISPATCH_FLOATING_TYPES(result.scalar_type(), "cdist", [&] {
Dist<scalar_t>::apply_cdist(result, x1, x2, p);
});
}
static void cdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& x1, const Tensor& x2, const double p, const Tensor& dist) {
void cdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& x1, const Tensor& x2, const double p, const Tensor& dist) {
AT_DISPATCH_FLOATING_TYPES(result.scalar_type(), "cdist_backward", [&] {
Dist<scalar_t>::apply_backward_cdist(result, grad, x1, x2, p, dist);
});

View File

@ -27,7 +27,7 @@
namespace at::native {
namespace {
static void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, std::optional<Generator> gen) {
void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, std::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::cauchy_kernel(iter, median, sigma, generator);
}
@ -101,7 +101,7 @@ void bernoulli_scalar_kernel(const TensorBase &self, double p, std::optional<Gen
}
#endif
static void exponential_kernel_default(TensorIteratorBase& iter, double lambda, std::optional<Generator> gen) {
void exponential_kernel_default(TensorIteratorBase& iter, double lambda, std::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::exponential_kernel(iter, lambda, generator);
}
@ -198,12 +198,12 @@ void exponential_kernel(TensorIteratorBase &iter, double lambda, std::optional<G
}
#endif
static void geometric_kernel(TensorIteratorBase& iter, double p, std::optional<Generator> gen) {
void geometric_kernel(TensorIteratorBase& iter, double p, std::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::geometric_kernel(iter, p, generator);
}
static void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, std::optional<Generator> gen) {
void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, std::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::log_normal_kernel(iter, mean, std, generator);
}
@ -218,12 +218,12 @@ void normal_kernel(const TensorBase &self, double mean, double std, std::optiona
templates::cpu::normal_kernel(self, mean, std, generator);
}
static void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, std::optional<Generator> gen) {
void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, std::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::random_from_to_kernel(iter, range, base, generator);
}
static void random_kernel(TensorIteratorBase& iter, std::optional<Generator> gen) {
void random_kernel(TensorIteratorBase& iter, std::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::random_kernel(iter, generator);
}
@ -231,7 +231,7 @@ static void random_kernel(TensorIteratorBase& iter, std::optional<Generator> gen
// This is the special kernel to handle single specific case:
// from(inclusive) = std::numeric_limits<int64_t>::lowest()
// to(exclusive) = None (= std::numeric_limits<int64_t>::max() + 1)
static void random_full_64_bits_range_kernel(TensorIteratorBase& iter, std::optional<Generator> gen) {
void random_full_64_bits_range_kernel(TensorIteratorBase& iter, std::optional<Generator> gen) {
CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
templates::cpu::random_full_64_bits_range_kernel(iter, generator);
}

View File

@ -85,7 +85,7 @@ struct RandomKernel {
// ==================================================== Normal ========================================================
#ifdef CPU_CAPABILITY_AVX2
static void normal_fill_16_AVX2(float *data,
void normal_fill_16_AVX2(float *data,
const __m256* two_pi,
const __m256* one,
const __m256* minus_two,
@ -136,7 +136,7 @@ void normal_fill_AVX2(const TensorBase &self, const float mean, const float std,
#endif
template <typename scalar_t>
static void normal_fill_16(scalar_t *data, const scalar_t mean, const scalar_t std) {
void normal_fill_16(scalar_t *data, const scalar_t mean, const scalar_t std) {
for (const auto j : c10::irange(8)) {
const scalar_t u1 = 1 - data[j]; // [0, 1) -> (0, 1] for log.
const scalar_t u2 = data[j + 8];

View File

@ -158,14 +158,14 @@ inline void _mul_reduce_max_fusion_kernel(
}
template <typename scalar_t>
static inline scalar_t* conditional_data_ptr(scalar_t* ptr, scalar_t* ptr2) {
inline scalar_t* conditional_data_ptr(scalar_t* ptr, scalar_t* ptr2) {
TORCH_CHECK(ptr2 == nullptr);
return ptr;
}
template <typename scalar_t,
typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
static inline scalar_t* conditional_data_ptr(float* ptr, scalar_t* ptr2) {
inline scalar_t* conditional_data_ptr(float* ptr, scalar_t* ptr2) {
return ptr2;
}

View File

@ -441,7 +441,7 @@ struct ComputeLocation<scalar_t, GridSamplerPadding::Reflection, align_corners>
// See NOTE [ Grid Sample CPU Kernels ] for details.
template<typename scalar_t>
static inline void
inline void
mask_scatter_add(const scalar_t *src, scalar_t* base_addr,
const int_same_size_t<scalar_t> *offsets,
const int_same_size_t<scalar_t> *mask, int64_t len) {
@ -1030,7 +1030,7 @@ struct ApplyGridSample<scalar_t, 2, GridSamplerInterpolation::Bicubic,
// See NOTE [ Grid Sample CPU Kernels ] for details.
template<typename scalar_t, typename ApplyFn>
static inline void grid_sample_2d_grid_slice_iterator(
inline void grid_sample_2d_grid_slice_iterator(
const TensorAccessor<const scalar_t, 3>& grid_slice, const ApplyFn &apply_fn) {
int64_t out_H = grid_slice.size(0);
int64_t out_W = grid_slice.size(1);

View File

@ -259,7 +259,7 @@ void histogramdd_out_cpu_template(const Tensor& self, const std::optional<Tensor
*
* Refer to histogramdd_out_cpu_template for more details.
*/
static void histogramdd_kernel_impl(const Tensor& self, const std::optional<Tensor>& weight, bool density,
void histogramdd_kernel_impl(const Tensor& self, const std::optional<Tensor>& weight, bool density,
Tensor& hist, const TensorList& bin_edges) {
histogramdd_out_cpu_template<BINARY_SEARCH>(self, weight, density, hist, bin_edges);
}
@ -269,7 +269,7 @@ static void histogramdd_kernel_impl(const Tensor& self, const std::optional<Tens
*
* Refer to histogramdd_out_cpu_template for more details.
*/
static void histogramdd_linear_kernel_impl(const Tensor& self, const std::optional<Tensor>& weight,
void histogramdd_linear_kernel_impl(const Tensor& self, const std::optional<Tensor>& weight,
bool density, Tensor& hist, const TensorList& bin_edges, bool local_search) {
if (local_search) {
// histogramdd codepath: both hist and bin_edges are eventually returned as output,
@ -298,7 +298,7 @@ void infer_bin_edges_from_input(const Tensor& input, const int64_t N,
std::copy(max_data, max_data + N, rightmost_edges.begin());
}
static void histogram_select_outer_bin_edges_impl(const Tensor& input, const int64_t N,
void histogram_select_outer_bin_edges_impl(const Tensor& input, const int64_t N,
std::vector<double> &leftmost_edges, std::vector<double> &rightmost_edges) {
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "histogramdd", [&]() {
infer_bin_edges_from_input<scalar_t>(input, N, leftmost_edges, rightmost_edges);

View File

@ -210,7 +210,7 @@ multinomial_with_replacement_apply(
}
}
static void multinomial_with_replacement_kernel_impl(
void multinomial_with_replacement_kernel_impl(
Tensor& result,
const Tensor& self,
const int64_t n_sample,

View File

@ -96,7 +96,7 @@ struct ReplicationPad {
};
template <typename scalar_t>
static inline void copy_stub(scalar_t* out, const scalar_t* in, int64_t size) {
inline void copy_stub(scalar_t* out, const scalar_t* in, int64_t size) {
using Vec = Vectorized<scalar_t>;
int64_t d = 0;
for (; d < size - (size % Vec::size()); d += Vec::size()) {
@ -112,7 +112,7 @@ static inline void copy_stub(scalar_t* out, const scalar_t* in, int64_t size) {
}
template <typename scalar_t>
static inline void add_stub(scalar_t* grad_in, const scalar_t* grad_out, int64_t size) {
inline void add_stub(scalar_t* grad_in, const scalar_t* grad_out, int64_t size) {
using Vec = Vectorized<scalar_t>;
int64_t d = 0;
for (; d < size - (size % Vec::size()); d += Vec::size()) {

View File

@ -9,7 +9,7 @@
namespace at::native {
namespace {
static void addcmul_cpu_kernel(TensorIteratorBase& iter, const Scalar& value) {
void addcmul_cpu_kernel(TensorIteratorBase& iter, const Scalar& value) {
ScalarType dtype = iter.common_dtype();
if (at::isReducedFloatingType(dtype)) {
AT_DISPATCH_REDUCED_FLOATING_TYPES(dtype, "addcmul_cpu_out", [&]() {
@ -50,7 +50,7 @@ static void addcmul_cpu_kernel(TensorIteratorBase& iter, const Scalar& value) {
}
}
static void addcdiv_cpu_kernel(TensorIteratorBase& iter, const Scalar& value) {
void addcdiv_cpu_kernel(TensorIteratorBase& iter, const Scalar& value) {
ScalarType dtype = iter.common_dtype();
if (at::isReducedFloatingType(dtype)) {
AT_DISPATCH_REDUCED_FLOATING_TYPES(dtype, "addcdiv_cpu_out", [&]() {
@ -90,7 +90,7 @@ static void addcdiv_cpu_kernel(TensorIteratorBase& iter, const Scalar& value) {
}
}
static void smooth_l1_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, double beta) {
void smooth_l1_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, double beta) {
ScalarType dtype = iter.dtype(0);
if (dtype == kBFloat16) {
auto norm_val = norm.to<float>();
@ -176,7 +176,7 @@ static void smooth_l1_backward_cpu_kernel(TensorIterator& iter, const Scalar& no
}
}
static void huber_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, double delta) {
void huber_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, double delta) {
ScalarType dtype = iter.dtype(0);
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, dtype, "huber_backward_cpu_out", [&] {
auto norm_val = norm.to<scalar_t>();
@ -215,7 +215,7 @@ static void huber_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm,
});
}
static void mse_backward_cpu_kernel(TensorIterator& iter, const Scalar& value) {
void mse_backward_cpu_kernel(TensorIterator& iter, const Scalar& value) {
ScalarType dtype = iter.dtype(0);
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, dtype, "mse_backward_cpu_out", [&] {
scalar_t scalar_val = value.to<scalar_t>();

View File

@ -18,7 +18,7 @@ namespace {
using namespace vec;
static void arange_kernel(TensorIterator& iter, const Scalar& scalar_start, const Scalar& scalar_steps, const Scalar& scalar_step) {
void arange_kernel(TensorIterator& iter, const Scalar& scalar_start, const Scalar& scalar_steps, const Scalar& scalar_step) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "arange_cpu", [&]() {
using accscalar_t = at::acc_type<scalar_t, false>;
auto start = scalar_start.to<accscalar_t>();
@ -42,7 +42,7 @@ static void arange_kernel(TensorIterator& iter, const Scalar& scalar_start, cons
});
}
static void linspace_kernel(TensorIterator& iter, const Scalar& scalar_start, const Scalar& scalar_end, int64_t steps) {
void linspace_kernel(TensorIterator& iter, const Scalar& scalar_start, const Scalar& scalar_end, int64_t steps) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, iter.dtype(), "linspace_cpu", [&]() {
// step should be of double type for all integral types
using step_t = std::conditional_t<std::is_integral_v<scalar_t>, double, scalar_t>;

View File

@ -62,7 +62,7 @@ inline void reduce_all_impl(
output.fill_(result);
}
static void min_all_kernel_impl(Tensor& result, const Tensor& input) {
void min_all_kernel_impl(Tensor& result, const Tensor& input) {
if (input.scalar_type() == ScalarType::Bool) {
TensorIterator iter = TensorIteratorConfig()
.add_input(input)
@ -87,7 +87,7 @@ static void min_all_kernel_impl(Tensor& result, const Tensor& input) {
}
}
static void max_all_kernel_impl(Tensor& result, const Tensor& input) {
void max_all_kernel_impl(Tensor& result, const Tensor& input) {
if (input.scalar_type() == ScalarType::Bool) {
TensorIterator iter = TensorIteratorConfig()
.add_input(input)
@ -167,7 +167,7 @@ inline void reduce_all_impl_vec_two_outputs(
output2.fill_(result.second);
}
static void aminmax_allreduce_kernel(
void aminmax_allreduce_kernel(
const Tensor& input,
Tensor& min_result,
Tensor& max_result) {

View File

@ -28,7 +28,7 @@ namespace at::native { namespace {
using namespace vec;
template <typename scalar_t, typename func_t>
static inline void cpu_cum_base_kernel(const Tensor& result,
inline void cpu_cum_base_kernel(const Tensor& result,
const Tensor& self,
int64_t dim,
const func_t& f,
@ -76,7 +76,7 @@ static inline void cpu_cum_base_kernel(const Tensor& result,
iter.for_each(loop, grain_size);
}
static void cumsum_cpu_kernel(const Tensor& result, const Tensor& self, int64_t dim) {
void cumsum_cpu_kernel(const Tensor& result, const Tensor& self, int64_t dim) {
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
int64_t self_dim_size = ensure_nonempty_size(self, wrap_dim);
@ -95,7 +95,7 @@ static void cumsum_cpu_kernel(const Tensor& result, const Tensor& self, int64_t
});
}
static void cumprod_cpu_kernel(const Tensor& result, const Tensor& self, int64_t dim) {
void cumprod_cpu_kernel(const Tensor& result, const Tensor& self, int64_t dim) {
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
int64_t self_dim_size = ensure_nonempty_size(self, wrap_dim);
@ -114,7 +114,7 @@ static void cumprod_cpu_kernel(const Tensor& result, const Tensor& self, int64_t
});
}
static void logcumsumexp_cpu_kernel(Tensor& result, const Tensor& self, int64_t dim) {
void logcumsumexp_cpu_kernel(Tensor& result, const Tensor& self, int64_t dim) {
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
int64_t self_dim_size = ensure_nonempty_size(self, wrap_dim);
@ -135,7 +135,7 @@ static void logcumsumexp_cpu_kernel(Tensor& result, const Tensor& self, int64_t
});
}
static void std_var_kernel_impl(TensorIterator& iter, double correction, bool take_sqrt) {
void std_var_kernel_impl(TensorIterator& iter, double correction, bool take_sqrt) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "std_cpu", [&] {
binary_kernel_reduce(
iter,
@ -148,7 +148,7 @@ static void std_var_kernel_impl(TensorIterator& iter, double correction, bool ta
});
}
static void prod_kernel_impl(TensorIterator& iter) {
void prod_kernel_impl(TensorIterator& iter) {
// Workaround for the error: '*' in boolean context, suggest '&&' instead
if (iter.dtype() == ScalarType::Bool) {
using scalar_t = bool;
@ -203,7 +203,7 @@ void norm_kernel_cpu_impl(TensorIterator& iter, const double& val) {
}
}
static void norm_kernel_tensor_iterator_impl(
void norm_kernel_tensor_iterator_impl(
TensorIterator& iter,
const Scalar& p) {
double val = 0;
@ -274,7 +274,7 @@ static void norm_kernel_tensor_iterator_impl(
}
}
static void and_kernel_impl(TensorIterator& iter) {
void and_kernel_impl(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Byte) {
// Refer [all, any : uint8 compatibility]
binary_kernel_reduce_vec(
@ -312,7 +312,7 @@ static void and_kernel_impl(TensorIterator& iter) {
}
}
static void or_kernel_impl(TensorIterator& iter) {
void or_kernel_impl(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Byte) {
// Refer [all, any : uint8 compatibility]
binary_kernel_reduce_vec(
@ -346,7 +346,7 @@ struct MinValuesOps: public at::native::MinOps<scalar_t> {
}
};
static void min_values_kernel_impl(TensorIterator& iter) {
void min_values_kernel_impl(TensorIterator& iter) {
if (iter.dtype() == kLong) {
// This case is special because of Vectorized<int64_t> does not
// handle upper_bound<int64_t>().
@ -367,7 +367,7 @@ static void min_values_kernel_impl(TensorIterator& iter) {
});
}
static void max_values_kernel_impl(TensorIterator& iter) {
void max_values_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "max_values_cpu", [&iter] {
binary_kernel_reduce_vec(
iter,
@ -377,7 +377,7 @@ static void max_values_kernel_impl(TensorIterator& iter) {
});
}
static void argmax_kernel_impl(TensorIterator &iter) {
void argmax_kernel_impl(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(1), "argmax_cpu", [&] {
if (is_reduce_lastdim(iter)) {
using arg_t = std::pair<scalar_t, int64_t>;
@ -401,7 +401,7 @@ static void argmax_kernel_impl(TensorIterator &iter) {
});
}
static void argmin_kernel_impl(TensorIterator &iter) {
void argmin_kernel_impl(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(1), "argmin_cpu", [&] {
if (is_reduce_lastdim(iter)) {
using arg_t = std::pair<scalar_t, int64_t>;
@ -459,7 +459,7 @@ struct XorSumOps {
}
};
static void xor_sum_kernel_impl(TensorIterator& iter) {
void xor_sum_kernel_impl(TensorIterator& iter) {
// Use iter.dtype(1) to dispatch based on the type of the input tensor
AT_DISPATCH_ALL_TYPES_AND3(
kBFloat16, kHalf, kBool, iter.dtype(1), "xor_sum_cpu", [&] {

View File

@ -41,7 +41,7 @@ public:
*self_data = c10::load(self_data) && c10::load(src_data);
}
};
static ReduceMultiply reduce_multiply;
ReduceMultiply reduce_multiply;
class ReduceAdd {
public:
@ -51,7 +51,7 @@ public:
*self_data += opmath_t(c10::load(src_data));
}
};
static ReduceAdd reduce_add;
ReduceAdd reduce_add;
class ReduceMean {
public:
@ -61,7 +61,7 @@ public:
*self_data += opmath_t(c10::load(src_data));
}
};
static ReduceMean reduce_mean;
ReduceMean reduce_mean;
class ReduceMaximum {
public:
@ -73,7 +73,7 @@ public:
*self_data = at::_isnan<scalar_t>(src_value) ? opmath_t(src_value) : std::max(self_value, opmath_t(src_value));
}
};
static ReduceMaximum reduce_maximum;
ReduceMaximum reduce_maximum;
class ReduceMinimum {
public:
@ -85,7 +85,7 @@ public:
*self_data = at::_isnan<scalar_t>(src_value) ? opmath_t(src_value) : std::min(self_value, opmath_t(src_value));
}
};
static ReduceMinimum reduce_minimum;
ReduceMinimum reduce_minimum;
class TensorAssign {
public:
@ -95,7 +95,7 @@ public:
*self_data = opmath_t(c10::load(src_data));
}
};
static TensorAssign tensor_assign;
TensorAssign tensor_assign;
template <bool is_scatter_like = true>
struct _cpu_scatter_gather_dim_loop {

View File

@ -968,7 +968,7 @@ struct vec_host_softmax_backward {
}
};
static void softmax_lastdim_kernel_impl(
void softmax_lastdim_kernel_impl(
const Tensor& result,
const Tensor& self) {
AT_DISPATCH_FLOATING_TYPES_AND2(
@ -977,13 +977,13 @@ static void softmax_lastdim_kernel_impl(
[&] { vec_host_softmax_lastdim<scalar_t, false>::apply(result, self); });
}
static void softmax_kernel_impl(const Tensor& result, const Tensor& self, int64_t dim) {
void softmax_kernel_impl(const Tensor& result, const Tensor& self, int64_t dim) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::BFloat16, at::ScalarType::Half, self.scalar_type(),
"softmax_kernel_impl",
[&] { vec_softmax<scalar_t, false>::apply(result, self, dim); });
}
static void log_softmax_lastdim_kernel_impl(
void log_softmax_lastdim_kernel_impl(
const Tensor& result,
const Tensor& self) {
AT_DISPATCH_FLOATING_TYPES_AND2(
@ -992,13 +992,13 @@ static void log_softmax_lastdim_kernel_impl(
[&] { vec_host_softmax_lastdim<scalar_t, true>::apply(result, self); });
}
static void log_softmax_kernel_impl(const Tensor& result, const Tensor& self, int64_t dim) {
void log_softmax_kernel_impl(const Tensor& result, const Tensor& self, int64_t dim) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::BFloat16, at::ScalarType::Half, self.scalar_type(),
"softmax_kernel_impl",
[&] { vec_softmax<scalar_t, true>::apply(result, self, dim); });
}
static void softmax_backward_lastdim_kernel_impl(
void softmax_backward_lastdim_kernel_impl(
const Tensor& grad_input,
const Tensor& grad,
const Tensor& output) {
@ -1010,7 +1010,7 @@ static void softmax_backward_lastdim_kernel_impl(
});
}
static void log_softmax_backward_lastdim_kernel_impl(
void log_softmax_backward_lastdim_kernel_impl(
const Tensor& grad_input,
const Tensor& grad,
const Tensor& output) {
@ -1022,7 +1022,7 @@ static void log_softmax_backward_lastdim_kernel_impl(
});
}
static void softmax_backward_kernel_impl(
void softmax_backward_kernel_impl(
const Tensor& grad_input,
const Tensor& grad,
const Tensor& output,
@ -1038,7 +1038,7 @@ static void softmax_backward_kernel_impl(
});
}
static void log_softmax_backward_kernel_impl(
void log_softmax_backward_kernel_impl(
const Tensor& grad_input,
const Tensor& grad,
const Tensor& output,


@ -90,7 +90,7 @@ struct KeyValueCompDesc {
};
#ifdef USE_FBGEMM
static bool can_use_radix_sort(const TensorBase& values, const bool descending) {
bool can_use_radix_sort(const TensorBase& values, const bool descending) {
// radix_sort can be used only for 1D data
if (values.dim() != 1) return false;
// radix_sort sorts in ascending order
@ -106,7 +106,7 @@ static bool can_use_radix_sort(const TensorBase& values, const bool descending)
return true;
}
static void parallel_sort1d_kernel(
void parallel_sort1d_kernel(
const TensorBase& values,
const TensorBase& indices) {
AT_DISPATCH_INTEGRAL_TYPES(values.scalar_type(), "parallel_sort1d_kernel", [&] {
@ -140,7 +140,7 @@ static void parallel_sort1d_kernel(
#endif
template <typename scalar_t, typename value_accessor_t, typename indices_accessor_t>
static inline void sort_kernel_impl(const value_accessor_t& value_accessor,
inline void sort_kernel_impl(const value_accessor_t& value_accessor,
const indices_accessor_t& indices_accessor,
int64_t dim_size, bool descending, bool stable) {
auto composite_accessor = CompositeRandomAccessorCPU<
@ -165,7 +165,7 @@ static inline void sort_kernel_impl(const value_accessor_t& value_accessor,
}
}
static void sort_kernel(
void sort_kernel(
const TensorBase& self,
const TensorBase& values,
const TensorBase& indices,
@ -222,7 +222,7 @@ static void sort_kernel(
);
}
static void topk_kernel(
void topk_kernel(
const TensorBase &values,
const TensorBase &indices,
const TensorBase &self,


@ -286,12 +286,12 @@ struct CastStoreAccumulate {
};
template <typename StorePolicy, typename scalar_t>
static void store(char * C10_RESTRICT data, int64_t stride, int64_t index, scalar_t value) {
void store(char * C10_RESTRICT data, int64_t stride, int64_t index, scalar_t value) {
StorePolicy::store(data, stride, index, value);
}
template <typename StorePolicy, typename scalar_t, size_t numel>
static void store(char * C10_RESTRICT data, int64_t stride, int64_t index,
void store(char * C10_RESTRICT data, int64_t stride, int64_t index,
const std::array<scalar_t, numel> &values) {
auto *base_ptr = data + stride * index;
for (const auto k : c10::irange(numel)) {
@ -301,7 +301,7 @@ static void store(char * C10_RESTRICT data, int64_t stride, int64_t index,
}
template <typename StorePolicy, typename scalar_t>
static void store(char * C10_RESTRICT data, int64_t stride, int64_t index,
void store(char * C10_RESTRICT data, int64_t stride, int64_t index,
const Vectorized<scalar_t> &values) {
using vec_t = Vectorized<scalar_t>;
alignas(64) std::array<scalar_t, vec_t::size()> array_values{};


@ -29,7 +29,7 @@
namespace at::native { namespace {
template <typename scalar_t, typename scalar_t_2 = int64_t, typename loop1d_t>
static inline void compare_base_kernel_core(
inline void compare_base_kernel_core(
const Tensor& result1,
const Tensor& result2,
const Tensor& self,
@ -71,7 +71,7 @@ static inline void compare_base_kernel_core(
}
template <typename scalar_t, typename scalar_t_2=int64_t, typename func_t>
static inline void compare_base_kernel(const Tensor& result1, const Tensor& result2,
inline void compare_base_kernel(const Tensor& result1, const Tensor& result2,
const Tensor& self,
int64_t dim,
bool keepdim,
@ -98,7 +98,7 @@ static inline void compare_base_kernel(const Tensor& result1, const Tensor& resu
result1, result2, self, dim, keepdim, loop);
}
static void min_kernel_impl(
void min_kernel_impl(
const Tensor& result,
const Tensor& indice,
const Tensor& self,
@ -131,7 +131,7 @@ static void min_kernel_impl(
});
}
static void max_kernel_impl(
void max_kernel_impl(
const Tensor& result,
const Tensor& indice,
const Tensor& self,
@ -164,7 +164,7 @@ static void max_kernel_impl(
});
}
static void aminmax_kernel(
void aminmax_kernel(
const Tensor& self,
int64_t dim,
bool keepdim,
@ -212,7 +212,7 @@ static void aminmax_kernel(
});
}
static void where_kernel_impl(TensorIterator &iter) {
void where_kernel_impl(TensorIterator &iter) {
AT_DISPATCH_V2(
iter.dtype(), "where_cpu", [&] {
cpu_kernel(
@ -224,19 +224,19 @@ static void where_kernel_impl(TensorIterator &iter) {
kComplexHalf, kHalf, kBFloat16, kBool, AT_EXPAND(AT_ALL_TYPES_AND_COMPLEX), AT_EXPAND(AT_FLOAT8_TYPES));
}
static void isposinf_kernel_impl(TensorIteratorBase& iter) {
void isposinf_kernel_impl(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isposinf_cpu", [&]() {
cpu_kernel(iter, [](scalar_t a) -> bool { return a == std::numeric_limits<scalar_t>::infinity(); });
});
}
static void isneginf_kernel_impl(TensorIteratorBase& iter) {
void isneginf_kernel_impl(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isneginf_cpu", [&]() {
cpu_kernel(iter, [](scalar_t a) -> bool { return a == -std::numeric_limits<scalar_t>::infinity(); });
});
}
static void mode_kernel_impl(
void mode_kernel_impl(
Tensor& values,
Tensor& indices,
const Tensor& self,
@ -308,7 +308,7 @@ static void mode_kernel_impl(
// Default brute force implementation of isin(). Used when the number of test elements is small.
// Iterates through each element and checks it against each test element.
static void isin_default_kernel_cpu(
void isin_default_kernel_cpu(
const Tensor& elements,
const Tensor& test_elements,
bool invert,
@ -339,7 +339,7 @@ static void isin_default_kernel_cpu(
});
}
static void clamp_kernel_impl(TensorIteratorBase& iter) {
void clamp_kernel_impl(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), "clamp_cpu", [&]() {
cpu_kernel_vec(iter,
[](scalar_t a, scalar_t min, scalar_t max) -> scalar_t {
@ -355,7 +355,7 @@ static void clamp_kernel_impl(TensorIteratorBase& iter) {
});
}
static void clamp_scalar_kernel_impl(TensorIteratorBase& iter, const Scalar& min_, const Scalar& max_) {
void clamp_scalar_kernel_impl(TensorIteratorBase& iter, const Scalar& min_, const Scalar& max_) {
AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), "clamp_scalar_cpu", [&]() {
const auto min = min_.to<scalar_t>();
const auto max = max_.to<scalar_t>();
@ -371,7 +371,7 @@ static void clamp_scalar_kernel_impl(TensorIteratorBase& iter, const Scalar& min
});
}
static void clamp_max_scalar_kernel_impl(TensorIteratorBase& iter, Scalar max_) {
void clamp_max_scalar_kernel_impl(TensorIteratorBase& iter, Scalar max_) {
AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), "clamp_max_scalar_cpu", [&]() {
const auto max = max_.to<scalar_t>();
const Vectorized<scalar_t> max_vec(max);
@ -385,7 +385,7 @@ static void clamp_max_scalar_kernel_impl(TensorIteratorBase& iter, Scalar max_)
});
}
static void clamp_min_scalar_kernel_impl(TensorIteratorBase& iter, Scalar min_) {
void clamp_min_scalar_kernel_impl(TensorIteratorBase& iter, Scalar min_) {
AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), "clamp_min_scalar_cpu", [&]() {
const auto min = min_.to<scalar_t>();
const Vectorized<scalar_t> min_vec(min);


@ -13,7 +13,7 @@ namespace at::native {
namespace {
template <typename scalar_t>
static inline void cadd(
inline void cadd(
scalar_t* z,
const scalar_t* x,
const scalar_t* y,
@ -34,7 +34,7 @@ static inline void cadd(
}
template <typename scalar_t>
static void unfolded2d_acc(
void unfolded2d_acc(
scalar_t* finput_data,
scalar_t* input_data,
int64_t kH,
@ -113,7 +113,7 @@ static void unfolded2d_acc(
}
template <typename scalar_t>
static void unfolded2d_acc_channels_last(
void unfolded2d_acc_channels_last(
scalar_t* finput_data,
scalar_t* input_data,
int64_t kH,
@ -225,7 +225,7 @@ void unfolded2d_acc_kernel(
}
template <typename scalar_t>
static void unfolded2d_copy(
void unfolded2d_copy(
const scalar_t* input_data,
scalar_t* finput_data,
int64_t kH,
@ -326,7 +326,7 @@ static void unfolded2d_copy(
}
template <typename scalar_t>
static void unfolded2d_copy_channels_last(
void unfolded2d_copy_channels_last(
const scalar_t* input_data,
scalar_t* finput_data,
int64_t kH,


@ -157,13 +157,13 @@ struct Interpolate<1, scalar_t, opmath_t, index_t, 2> {
};
template <int n, typename scalar_t, typename index_t, int interp_size>
static inline scalar_t interpolate(char* src, char** data, const int64_t* strides, int64_t i) {
inline scalar_t interpolate(char* src, char** data, const int64_t* strides, int64_t i) {
using opmath_t = at::opmath_type<scalar_t>;
return Interpolate<n, scalar_t, opmath_t, index_t, interp_size>::eval(src, data, strides, i);
}
template <typename scalar_t, typename index_t>
static inline scalar_t interpolate_aa_single_dim_zero_strides(
inline scalar_t interpolate_aa_single_dim_zero_strides(
char* src,
char** data,
const index_t ids_stride) {
@ -187,7 +187,7 @@ static inline scalar_t interpolate_aa_single_dim_zero_strides(
}
template <typename scalar_t, typename index_t>
static inline scalar_t interpolate_aa_single_dim(
inline scalar_t interpolate_aa_single_dim(
char* src,
char** data,
const int64_t* strides,
@ -213,7 +213,7 @@ static inline scalar_t interpolate_aa_single_dim(
}
template<int m>
static inline bool is_zero_stride(const int64_t* strides) {
inline bool is_zero_stride(const int64_t* strides) {
bool output = strides[0] == 0;
for (const auto i : c10::irange(1, m)) {
output &= (strides[i] == 0);
@ -222,7 +222,7 @@ static inline bool is_zero_stride(const int64_t* strides) {
}
template <typename scalar_t, typename index_t, int interp_size>
static inline bool is_contiguous_stride(const int64_t* strides) {
inline bool is_contiguous_stride(const int64_t* strides) {
bool output = (strides[0] == sizeof(index_t)) && (strides[1] == sizeof(scalar_t));
for (int i=2; i<2 * interp_size; i+=2) {
output &= (strides[i] == sizeof(index_t)) && (strides[i + 1] == sizeof(scalar_t));
@ -282,13 +282,13 @@ struct CheckAlmostAllZeroStrides<0, non_zero_stride_dim, scalar_t, index_t, inte
};
template <int n, int s, typename scalar_t, typename index_t, int interp_size>
static inline bool check_almost_all_zero_stride(const int64_t* strides) {
inline bool check_almost_all_zero_stride(const int64_t* strides) {
return CheckAlmostAllZeroStrides<n, s, scalar_t, index_t, interp_size>::eval(strides);
}
// Helper method to compute interpolation for nearest, linear, cubic modes
template <typename scalar_t, typename index_t, int out_ndims, int interp_size>
static inline void basic_loop(char** data, const int64_t* strides, int64_t n) {
inline void basic_loop(char** data, const int64_t* strides, int64_t n) {
char* dst = data[0];
char* src = data[1];
for (const auto i : c10::irange(n)) {
@ -298,7 +298,7 @@ static inline void basic_loop(char** data, const int64_t* strides, int64_t n) {
}
template <typename scalar_t>
static inline void basic_loop_aa_vertical(
inline void basic_loop_aa_vertical(
char** data,
const int64_t* strides,
int64_t n,
@ -354,7 +354,7 @@ inline void basic_loop_aa_vertical<uint8_t>(
}
template <typename scalar_t>
static inline void basic_loop_aa_horizontal(
inline void basic_loop_aa_horizontal(
char** data,
const int64_t* strides,
int64_t n,


@ -35,7 +35,7 @@ Like PIL, Pillow is licensed under the open source HPND License
namespace {
static inline __m128i mm_cvtsi32_si128(const uint8_t* C10_RESTRICT ptr, bool i32_aligned) {
inline __m128i mm_cvtsi32_si128(const uint8_t* C10_RESTRICT ptr, bool i32_aligned) {
int32_t v;
if (i32_aligned) {
v = *(const int32_t*)ptr;
@ -45,11 +45,11 @@ static inline __m128i mm_cvtsi32_si128(const uint8_t* C10_RESTRICT ptr, bool i32
return _mm_cvtsi32_si128(v);
}
static inline __m128i mm_cvtepu8_epi32(const uint8_t* C10_RESTRICT ptr, bool i32_aligned) {
inline __m128i mm_cvtepu8_epi32(const uint8_t* C10_RESTRICT ptr, bool i32_aligned) {
return _mm_cvtepu8_epi32(mm_cvtsi32_si128(ptr, i32_aligned));
}
static inline void _write_endline_rgb_as_uint32(
inline void _write_endline_rgb_as_uint32(
uint8_t* C10_RESTRICT output,
uint32_t data
) {


@ -838,7 +838,7 @@ void dyn_quant_pack_4bit_weight_kernel(
}
}
static void ref_dyn_quant_matmul_4bit_channelwise_kernel(
void ref_dyn_quant_matmul_4bit_channelwise_kernel(
size_t m,
size_t n,
size_t k,
@ -997,7 +997,7 @@ static void ref_dyn_quant_matmul_4bit_channelwise_kernel(
}
}
static void ref_dyn_quant_matmul_4bit_groupwise_kernel(
void ref_dyn_quant_matmul_4bit_groupwise_kernel(
size_t m,
size_t n,
size_t k,


@ -100,7 +100,7 @@ inline void tinygemm_kernel(
#elif defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
static inline float _mm256_reduce_add_ps(__m256& v) {
inline float _mm256_reduce_add_ps(__m256& v) {
__m256 v1 = _mm256_permute2f128_ps(v, v, 0x1);
v = _mm256_add_ps(v, v1);
v1 = _mm256_shuffle_ps(v, v, 0x4E);


@ -296,7 +296,7 @@ static bool isSupportedHipLtROCmArch(int index) {
#endif
template <typename scalar_t>
static void launchTunableGemmAndBias(cublasCommonArgs &args, const Scalar& alpha, const scalar_t* bias, cuda::blas::GEMMAndBiasActivationEpilogue activation) {
void launchTunableGemmAndBias(cublasCommonArgs &args, const Scalar& alpha, const scalar_t* bias, cuda::blas::GEMMAndBiasActivationEpilogue activation) {
bool transa_ = ((args.transa != 'n') && (args.transa != 'N'));
bool transb_ = ((args.transb != 'n') && (args.transb != 'N'));
at::cuda::tunable::GemmAndBiasParams<scalar_t> params;


@ -163,7 +163,7 @@ bool has_large_prime_factor(int64_t n) {
}
// Execute a general fft operation (can be c2c, onesided r2c or onesided c2r)
static const Tensor& _exec_fft(Tensor& out, const Tensor& self, IntArrayRef out_sizes,
const Tensor& _exec_fft(Tensor& out, const Tensor& self, IntArrayRef out_sizes,
IntArrayRef dim, bool forward) {
const auto ndim = self.dim();
const int64_t signal_ndim = dim.size();


@ -252,7 +252,7 @@ struct CacheKeyFusedWrapper : ParamsWrapper<CacheKeyFused> {
}
};
static int getLRUCacheLimit() {
int getLRUCacheLimit() {
constexpr int DEFAULT_LIMIT =
10000; // roughly corresponds to 2GiB assuming 200KiB per ExecutionPlan
// 0 is used to indicate no limit


@ -14,7 +14,7 @@ DEFINE_DISPATCH(index_put_kernel_quantized_stub);
DEFINE_DISPATCH(index_put_with_sort_quantized_stub);
namespace {
static TensorIterator make_index_put_iterator(const AdvancedIndex& info, const Tensor& value) {
TensorIterator make_index_put_iterator(const AdvancedIndex& info, const Tensor& value) {
TORCH_CHECK(is_expandable_to(value.sizes(), info.src.sizes()), "shape mismatch: value tensor of shape ", value.sizes(),
" cannot be broadcast to indexing result of shape ", info.src.sizes());
TensorIteratorConfig config;
@ -30,7 +30,7 @@ static TensorIterator make_index_put_iterator(const AdvancedIndex& info, const T
return config.build();
}
static Tensor & masked_fill_impl_quantized_cpu(Tensor & self, const Tensor & mask, const Scalar& value) {
Tensor & masked_fill_impl_quantized_cpu(Tensor & self, const Tensor & mask, const Scalar& value) {
NoNamesGuard guard;
TORCH_CHECK(mask.dtype() == ScalarType::Bool, "masked_fill only supports boolean masks, "
"but got dtype ", mask.dtype());


@ -54,7 +54,7 @@ inline int end_index(int out_idx, int out_len, int in_len) {
// adaptive avg pool for 2D and 3D inputs
template <typename scalar_t>
static void adaptive_avg_pool_single_out_frame(
void adaptive_avg_pool_single_out_frame(
scalar_t* input_p,
scalar_t* output_p,
int64_t sizeC,


@ -31,7 +31,7 @@ DEFINE_DISPATCH(qavg_pool2d_nhwc_stub);
namespace {
template <typename scalar_t>
static void avg_pool2d_out_frame(
void avg_pool2d_out_frame(
const Tensor& input,
Tensor& output,
int64_t nInputPlane,


@ -35,7 +35,7 @@ struct UpsampleBilinearParamW {
// at::native functions for the native_functions.yaml
template <typename scalar_t>
static void upsample_bilinear2d_out_frame(
void upsample_bilinear2d_out_frame(
Tensor& output,
const Tensor& input,
int64_t input_height,


@ -543,9 +543,9 @@ int register_embedding_params() {
namespace {
[[maybe_unused]] static auto conv2d_params = register_conv_params<2>();
[[maybe_unused]] static auto conv3d_params = register_conv_params<3>();
[[maybe_unused]] static auto linear_params = register_linear_params();
[[maybe_unused]] static auto embedding_params = register_embedding_params();
[[maybe_unused]] auto conv2d_params = register_conv_params<2>();
[[maybe_unused]] auto conv3d_params = register_conv_params<3>();
[[maybe_unused]] auto linear_params = register_linear_params();
[[maybe_unused]] auto embedding_params = register_embedding_params();
} // namespace


@ -608,7 +608,7 @@ void qrelu_kernel(const Tensor& qx, Tensor& qy) {
});
}
static void leaky_qrelu_out_kernel(Tensor& out, const Tensor& qx,
void leaky_qrelu_out_kernel(Tensor& out, const Tensor& qx,
const Scalar& negval_) {
int64_t i_zp = qx.q_zero_point();
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
@ -660,7 +660,7 @@ static void leaky_qrelu_out_kernel(Tensor& out, const Tensor& qx,
});
}
static void qprelu_out_kernel(Tensor& out,
void qprelu_out_kernel(Tensor& out,
const Tensor& qx,
const Tensor& qw) {
int32_t i_zp = static_cast<int32_t>(qx.q_zero_point());


@ -31,7 +31,7 @@ using at::sparse::get_sparse_impl;
// ForwardIt: only legacy random access iterator is supported.
template<class ForwardIt, class T, bool is_lower = true>
static FUNCAPI INLINE
FUNCAPI INLINE
ForwardIt find_bound(ForwardIt first, ForwardIt last, const T& value) {
ForwardIt RESTRICT it;
typename std::iterator_traits<ForwardIt>::difference_type count, step;


@ -273,7 +273,7 @@ Tensor sparse_coo_tensor(IntArrayRef size,
// helper
namespace {
static inline Tensor expand_values_if_needed(const Tensor& values) {
inline Tensor expand_values_if_needed(const Tensor& values) {
// expand
if (values.dim() == 0) {
// Mimic Numpy behavior here and treat it as a 1D tensor


@ -145,7 +145,7 @@ INVARIANT_CHECK_FUNC_API _check_idx_sorted_distinct_vals_slices_with_cidx(
}
}
static inline int64_t indexCount(IntArrayRef sizes) {
inline int64_t indexCount(IntArrayRef sizes) {
int64_t res = 1;
for (const auto& s : sizes) {
res *= s;


@ -77,7 +77,7 @@ typedef struct _SerializedModel {
* Get the physically stored size of a value. All values are padded out
* to a multiple of 4 bytes to ensure the next value is 4-byte aligned.
*/
static uint32_t value_physical_size(uint32_t len) {
uint32_t value_physical_size(uint32_t len) {
uint32_t phys = len;
if (len % 4 == 0) {
return len;


@ -33,7 +33,7 @@ std::atomic<int64_t> defaultNodeId(-1);
std::atomic<uint64_t> next_thread_id_{0};
thread_local uint64_t current_thread_id_ = 0;
static constexpr size_t NumRecordScopes =
constexpr size_t NumRecordScopes =
static_cast<size_t>(RecordScope::NUM_SCOPES);
RecordFunctionCallbacks::iterator findCallback(


@ -56,7 +56,7 @@ void memset_junk(void* data, size_t num) {
}
#if defined(__linux__) && !defined(__ANDROID__)
static inline bool is_thp_alloc_enabled() {
inline bool is_thp_alloc_enabled() {
static bool value = [&] {
auto env = c10::utils::check_env("THP_MEM_ALLOC_ENABLE");
return env.has_value() ? env.value() : 0;


@ -850,7 +850,7 @@ struct RestoreResult {
std::vector<Block*> allocations_created;
};
static bool BlockComparatorSize(const Block* a, const Block* b) {
bool BlockComparatorSize(const Block* a, const Block* b) {
if (a->stream != b->stream) {
return (uintptr_t)a->stream < (uintptr_t)b->stream;
}
@ -859,7 +859,7 @@ static bool BlockComparatorSize(const Block* a, const Block* b) {
}
return (uintptr_t)a->ptr < (uintptr_t)b->ptr;
}
static bool BlockComparatorAddress(const Block* a, const Block* b) {
bool BlockComparatorAddress(const Block* a, const Block* b) {
if (a->stream != b->stream) {
return (uintptr_t)a->stream < (uintptr_t)b->stream;
}


@ -15,14 +15,14 @@ namespace c10::cuda {
namespace {
// Global stream state and constants
static c10::once_flag init_flag;
static DeviceIndex num_gpus = -1;
static constexpr int kStreamsPerPoolBits = 5;
static constexpr int kStreamsPerPool = 1 << kStreamsPerPoolBits;
static constexpr unsigned int kDefaultFlags = cudaStreamNonBlocking;
static constexpr int kStreamTypeBits = 4;
c10::once_flag init_flag;
DeviceIndex num_gpus = -1;
constexpr int kStreamsPerPoolBits = 5;
constexpr int kStreamsPerPool = 1 << kStreamsPerPoolBits;
constexpr unsigned int kDefaultFlags = cudaStreamNonBlocking;
constexpr int kStreamTypeBits = 4;
static int max_stream_priorities;
int max_stream_priorities;
// Non-default streams
// Note: the number of CUDA devices is determined at run time,
@ -39,14 +39,14 @@ static int max_stream_priorities;
// the destruction.
#if !defined(USE_ROCM)
// CUDA-only: used to initializes the stream pools (once)
static std::array<c10::once_flag, C10_COMPILE_TIME_MAX_GPUS> device_flags;
std::array<c10::once_flag, C10_COMPILE_TIME_MAX_GPUS> device_flags;
#endif
static std::array<
std::array<
std::array<std::atomic<uint32_t>, C10_COMPILE_TIME_MAX_GPUS>,
c10::cuda::max_compile_time_stream_priorities>
priority_counters;
static std::array<
std::array<
std::array<
std::array<cudaStream_t, kStreamsPerPool>,
C10_COMPILE_TIME_MAX_GPUS>,
@ -137,7 +137,7 @@ std::ostream& operator<<(std::ostream& stream, StreamIdType s) {
// We rely on streamIdIndex and streamIdType being non-negative;
// see Note [Hazard when concatenating signed integers]
static inline StreamIdType streamIdType(StreamId s) {
inline StreamIdType streamIdType(StreamId s) {
// Externally allocated streams have their id being the cudaStream_ptr
// so the last bit will be 0
if ((!(s & 1)) && s) {
@ -151,7 +151,7 @@ static inline StreamIdType streamIdType(StreamId s) {
return StreamIdType(val);
}
static inline size_t streamIdIndex(StreamId s) {
inline size_t streamIdIndex(StreamId s) {
return static_cast<size_t>(
(s >> (kStreamTypeBits + 1)) & ((1 << kStreamsPerPoolBits) - 1));
}
@ -166,11 +166,11 @@ StreamId makeStreamId(StreamIdType st, size_t si) {
// Thread-local current streams
// NOLINTNEXTLINE(*-arrays)
static thread_local std::unique_ptr<StreamId[]> current_streams = nullptr;
thread_local std::unique_ptr<StreamId[]> current_streams = nullptr;
// Populates global values.
// Warning: this function must only be called once!
static void initGlobalStreamState() {
void initGlobalStreamState() {
num_gpus = device_count();
// Check if the number of GPUs matches the expected compile-time max number
// of GPUs.
@ -199,7 +199,7 @@ static void initGlobalStreamState() {
// Init a single CUDA or HIP stream
// See Note [HIP Lazy Streams]
static void initSingleStream(int p, DeviceIndex device_index, int i) {
void initSingleStream(int p, DeviceIndex device_index, int i) {
CUDAGuard device_guard(device_index);
auto& stream = streams[p][device_index][i];
auto pri = -p; // lower number is higher priority
@ -215,7 +215,7 @@ static void initSingleStream(int p, DeviceIndex device_index, int i) {
// Creates the low and high priority stream pools for the specified device
// Warning: only call once per device!
static void initDeviceStreamState(DeviceIndex device_index) {
void initDeviceStreamState(DeviceIndex device_index) {
for (const auto i : c10::irange(kStreamsPerPool)) {
for (const auto p : c10::irange(max_stream_priorities)) {
initSingleStream(p, device_index, i);
@ -224,7 +224,7 @@ static void initDeviceStreamState(DeviceIndex device_index) {
}
// Init front-end to ensure initialization only occurs once
static void initCUDAStreamsOnce() {
void initCUDAStreamsOnce() {
// Inits default streams (once, globally)
c10::call_once(init_flag, initGlobalStreamState);
@ -241,7 +241,7 @@ static void initCUDAStreamsOnce() {
}
// Helper to verify the GPU index is valid
static inline void check_gpu(DeviceIndex device_index) {
inline void check_gpu(DeviceIndex device_index) {
TORCH_CHECK(
device_index >= 0 && device_index < num_gpus,
"Device index value ",
@ -253,7 +253,7 @@ static inline void check_gpu(DeviceIndex device_index) {
// Helper to determine the index of the stream to return
// Note: Streams are returned round-robin (see note in CUDAStream.h)
static uint32_t get_idx(std::atomic<uint32_t>& counter) {
uint32_t get_idx(std::atomic<uint32_t>& counter) {
auto raw_idx = counter++;
return raw_idx % kStreamsPerPool;
}


@ -15,7 +15,7 @@ using std::string;
C10_DEFINE_REGISTRY(C10FlagsRegistry, C10FlagParser, const string&)
namespace {
static bool gCommandLineFlagsParsed = false;
bool gCommandLineFlagsParsed = false;
// Since flags is going to be loaded before logging, we would
// need to have a stringstream to hold the messages instead of directly
// using caffe logging.
@ -23,7 +23,7 @@ std::stringstream& GlobalInitStream() {
static std::stringstream ss;
return ss;
}
static const char* gUsageMessage = "(Usage message not set.)";
const char* gUsageMessage = "(Usage message not set.)";
} // namespace
C10_EXPORT void SetUsageMessage(const string& str) {