[clang-tidy] Exclude cppcoreguidelines-avoid-magic-numbers (#57841)

Summary:
Add cppcoreguidelines-avoid-magic-numbers exclusion to clang-tidy
Remove the existing NOLINT suppression comments using the following script:
```
for file in `git ls-files | grep -v \.py`; do gsed '/^ *\/\/ NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)/d' -i  $file; done
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/57841

Reviewed By: samestep

Differential Revision: D28295045

Pulled By: malfet

fbshipit-source-id: 7c6e8d1213c9593f169ed3df6a916498f1a97163
This commit is contained in:
Nikita Shulga
2021-05-07 19:59:13 -07:00
committed by Facebook GitHub Bot
parent bc2540f0be
commit 3a66a1cb99
458 changed files with 1 additions and 9602 deletions

View File

@ -8,6 +8,7 @@ bugprone-*,
-bugprone-lambda-function-name,
-bugprone-reserved-identifier,
cppcoreguidelines-*,
-cppcoreguidelines-avoid-magic-numbers,
-cppcoreguidelines-interfaces-global-init,
-cppcoreguidelines-macro-usage,
-cppcoreguidelines-owning-memory,

View File

@ -71,7 +71,6 @@ Generator createCPUGenerator(uint64_t seed_val) {
* and return them as a 64 bit unsigned int
*/
inline uint64_t make64BitsFrom32Bits(uint32_t hi, uint32_t lo) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return (static_cast<uint64_t>(hi) << 32) | lo;
}
@ -157,7 +156,6 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
// intermediate values.
if (legacy_pod->normal_is_valid) {
auto r = legacy_pod->normal_rho;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto theta = 2.0 * c10::pi<double> * legacy_pod->normal_x;
// we return the sin version of the normal sample when in caching mode
double_normal_sample = c10::optional<double>(r * ::sin(theta));

View File

@ -101,7 +101,6 @@ bool Context::checkCuBLASConfigDeterministic() {
bool cublas_config_deterministic = true;
// If using CUDA 10.2 or greater, need to make sure CuBLAS workspace config
// is set to deterministic setting
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (hasCUDART() && (versionCUDART() >= 10020)) {
char* workspace_config = std::getenv(cublas_config_var_name);
cublas_config_deterministic = (workspace_config != nullptr) && (
@ -277,7 +276,6 @@ void Context::setDefaultMobileCPUAllocator() {
"Cannot set another allocator.");
// Setting the priority high to make sure no other allocator gets used instead of this.
prev_allocator_ptr_ = c10::GetCPUAllocator();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SetCPUAllocator(c10::GetDefaultMobileCPUAllocator(), /*priority*/ 100);
}
@ -286,7 +284,6 @@ void Context::unsetDefaultMobileCPUAllocator() {
"setDefaultMobileCPUAllocator must have been called "
"before unsetDefaultMobileCPUAllocator.");
// Setting the priority high to make sure no other allocator gets used instead of this.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SetCPUAllocator(prev_allocator_ptr_ , /*priority*/ 100);
prev_allocator_ptr_ = nullptr;
}

View File

@ -10,7 +10,6 @@ namespace at {
DLDataType getDLDataType(const Tensor& t) {
DLDataType dtype;
dtype.lanes = 1;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dtype.bits = t.element_size() * 8;
switch (t.scalar_type()) {
case ScalarType::Byte:
@ -126,7 +125,6 @@ ScalarType toScalarType(const DLDataType& dtype) {
switch (dtype.code) {
case DLDataTypeCode::kDLUInt:
switch (dtype.bits) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 8:
stype = ScalarType::Byte;
break;
@ -137,19 +135,15 @@ ScalarType toScalarType(const DLDataType& dtype) {
break;
case DLDataTypeCode::kDLInt:
switch (dtype.bits) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 8:
stype = ScalarType::Char;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 16:
stype = ScalarType::Short;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 32:
stype = ScalarType::Int;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 64:
stype = ScalarType::Long;
break;
@ -160,15 +154,12 @@ ScalarType toScalarType(const DLDataType& dtype) {
break;
case DLDataTypeCode::kDLFloat:
switch (dtype.bits) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 16:
stype = ScalarType::Half;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 32:
stype = ScalarType::Float;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 64:
stype = ScalarType::Double;
break;

View File

@ -95,7 +95,6 @@ Tensor coo_to_csr(const int64_t* indices, int64_t dim, int64_t nnz) {
if (nnz > 0) {
auto csr_accessor = csr.accessor<int64_t, 1>();
// Convert the sparse matrix to CSR format
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::parallel_for(0, nnz, 10000, [&](int64_t start, int64_t end) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t h, hp0, hp1;

View File

@ -12,7 +12,6 @@ static void quantize_per_channel_4d_contiguous(benchmark::State& state) {
at::Tensor a = at::rand({batches, channels, height, width});
at::Tensor scales = at::rand({channels});
at::Tensor zero_points = at::randint(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0, 10, {channels}, at::TensorOptions().dtype(at::ScalarType::Int));
at::Tensor qa;
@ -33,7 +32,6 @@ static void quantize_per_channel_4d_channels_last(benchmark::State& state) {
at::TensorOptions().memory_format(at::MemoryFormat::ChannelsLast));
at::Tensor scales = at::rand({channels});
at::Tensor zero_points = at::randint(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0, 10, {channels}, at::TensorOptions().dtype(at::ScalarType::Int));
at::Tensor qa;
@ -50,7 +48,6 @@ static void quantize_per_channel_2d(benchmark::State& state) {
at::Tensor a = at::rand({channels, nelem});
at::Tensor scales = at::rand({channels});
at::Tensor zero_points = at::randint(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0, 10, {channels}, at::TensorOptions().dtype(at::ScalarType::Int));
at::Tensor qa;
@ -63,11 +60,8 @@ static void quantize_per_channel_2d(benchmark::State& state) {
static void GenerateSizes4d(benchmark::internal::Benchmark* b) {
b->ArgNames({"N", "C", "H", "W"});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t n = 16; n < 256; n *= 2) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t c = 4; c < 256; c *= 2) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t hw = 4; hw < 256; hw *= 2) {
b->Args({n, c, hw, hw});
}
@ -78,9 +72,7 @@ static void GenerateSizes4d(benchmark::internal::Benchmark* b) {
static void GenerateSizes2d(benchmark::internal::Benchmark* b) {
b->ArgNames({"C", "N"});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t c = 4; c < 512; c *= 2) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t n = 4; n < 512; n *= 2) {
b->Args({c, n});
}

View File

@ -33,7 +33,6 @@ static void stateful_conv1d(benchmark::State& state) {
)");
std::vector<std::vector<torch::jit::IValue>> inputs;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (int i = 0; i < 10; ++i) {
std::vector<torch::jit::IValue> input;
// NOLINTNEXTLINE(modernize-use-emplace)
@ -69,15 +68,10 @@ static void GenerateSizes(benchmark::internal::Benchmark* b) {
"Width",
"Optimized"});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t input_channels = 32; input_channels < 256; input_channels *= 2) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t output_channels = 32; output_channels < 256; output_channels *= 2) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t kernel = 3; kernel < 8; ++kernel) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t batch_size = 1; batch_size < 5; ++batch_size) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t width = 32; width < 256; width *= 2) {
b->Args({input_channels, output_channels, kernel, batch_size, width, true});
b->Args({input_channels, output_channels, kernel, batch_size, width, false});

View File

@ -17,9 +17,7 @@ static void tensor_add(benchmark::State& state) {
static void GenerateSizes(benchmark::internal::Benchmark* b) {
b->ArgNames({"N", "C"});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t n = 8; n < 1024;) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t c = 8; c < 1024;) {
b->Args({n, c});
c *= 2;

View File

@ -96,9 +96,7 @@ static std::tuple<double, int64_t> __printFormat(std::ostream& stream, const Ten
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t sz;
if(intMode) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if(expMax > 9) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
sz = 11;
stream << std::scientific << std::setprecision(4);
} else {
@ -107,27 +105,20 @@ static std::tuple<double, int64_t> __printFormat(std::ostream& stream, const Ten
}
} else {
if(expMax-expMin > 4) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
sz = 11;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if(std::fabs(expMax) > 99 || std::fabs(expMin) > 99) {
sz = sz + 1;
}
stream << std::scientific << std::setprecision(4);
} else {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if(expMax > 5 || expMax < 0) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
sz = 7;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
scale = std::pow(10, expMax-1);
stream << std::fixed << std::setprecision(4);
} else {
if(expMax == 0) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
sz = 7;
} else {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
sz = expMax+6;
}
stream << std::fixed << std::setprecision(4);

View File

@ -324,7 +324,6 @@ TEST(ListTest_IValueBasedList, givenList_whenErasingFullRange_thenIsEmpty) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_IValueBasedList, whenCallingReserve_thenDoesntCrash) {
List<string> list;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
list.reserve(100);
}
@ -680,7 +679,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingExtractWithNonExistingPosition_then
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingCopyingSetWithExistingPosition_thenChangesElement) {
List<int64_t> list({3, 4});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t value = 5;
list.set(1, value);
EXPECT_EQ(3, list.get(0));
@ -690,7 +688,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingCopyingSetWithExistingPosition_then
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingMovingSetWithExistingPosition_thenChangesElement) {
List<int64_t> list({3, 4});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t value = 5;
// NOLINTNEXTLINE(performance-move-const-arg)
list.set(1, std::move(value));
@ -701,7 +698,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingMovingSetWithExistingPosition_thenC
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingCopyingSetWithNonExistingPosition_thenThrowsException) {
List<int64_t> list({3, 4});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t value = 5;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
EXPECT_THROW(list.set(2, value), std::out_of_range);
@ -710,7 +706,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingCopyingSetWithNonExistingPosition_t
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingMovingSetWithNonExistingPosition_thenThrowsException) {
List<int64_t> list({3, 4});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t value = 5;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,performance-move-const-arg,hicpp-avoid-goto)
EXPECT_THROW(list.set(2, std::move(value)), std::out_of_range);
@ -725,9 +720,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingAccessOperatorWithExistingPosition_
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenAssigningToAccessOperatorWithExistingPosition_thenSetsElement) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 5});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
list[1] = 6;
EXPECT_EQ(3, list.get(0));
EXPECT_EQ(6, list.get(1));
@ -736,7 +729,6 @@ TEST(ListTest_NonIValueBasedList, whenAssigningToAccessOperatorWithExistingPosit
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenAssigningToAccessOperatorFromAccessOperator_thenSetsElement) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 5});
list[1] = list[2];
EXPECT_EQ(3, list.get(0));
@ -746,7 +738,6 @@ TEST(ListTest_NonIValueBasedList, whenAssigningToAccessOperatorFromAccessOperato
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenSwappingFromAccessOperator_thenSwapsElements) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 5});
swap(list[1], list[2]);
EXPECT_EQ(3, list.get(0));
@ -763,9 +754,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingAccessOperatorWithNonExistingPositi
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingInsertOnIteratorWithLValue_thenInsertsElement) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 6});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t v = 5;
list.insert(list.begin() + 2, v);
EXPECT_EQ(4, list.size());
@ -774,9 +763,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingInsertOnIteratorWithLValue_thenInse
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingInsertOnIteratorWithRValue_thenInsertsElement) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 6});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t v = 5;
// NOLINTNEXTLINE(performance-move-const-arg)
list.insert(list.begin() + 2, std::move(v));
@ -786,9 +773,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingInsertOnIteratorWithRValue_thenInse
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingInsertWithLValue_thenReturnsIteratorToNewElement) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 6});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t v = 5;
List<int64_t>::iterator result = list.insert(list.begin() + 2, v);
EXPECT_EQ(list.begin() + 2, result);
@ -796,9 +781,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingInsertWithLValue_thenReturnsIterato
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingInsertWithRValue_thenReturnsIteratorToNewElement) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 6});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t v = 5;
// NOLINTNEXTLINE(performance-move-const-arg)
List<int64_t>::iterator result = list.insert(list.begin() + 2, std::move(v));
@ -807,9 +790,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingInsertWithRValue_thenReturnsIterato
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingEmplaceWithLValue_thenInsertsElement) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 6});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t v = 5;
list.emplace(list.begin() + 2, v);
EXPECT_EQ(4, list.size());
@ -818,9 +799,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingEmplaceWithLValue_thenInsertsElemen
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingEmplaceWithRValue_thenInsertsElement) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 6});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t v = 5;
// NOLINTNEXTLINE(performance-move-const-arg)
list.emplace(list.begin() + 2, std::move(v));
@ -830,9 +809,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingEmplaceWithRValue_thenInsertsElemen
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingEmplaceWithConstructorArg_thenInsertsElement) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 6});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
list.emplace(list.begin() + 2, 5); // const char* is a constructor arg to std::int64_t
EXPECT_EQ(4, list.size());
EXPECT_EQ(5, list.get(2));
@ -841,7 +818,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingEmplaceWithConstructorArg_thenInser
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingPushBackWithLValue_ThenInsertsElement) {
List<int64_t> list;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t v = 5;
list.push_back(v);
EXPECT_EQ(1, list.size());
@ -851,7 +827,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingPushBackWithLValue_ThenInsertsEleme
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingPushBackWithRValue_ThenInsertsElement) {
List<int64_t> list;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t v = 5;
// NOLINTNEXTLINE(performance-move-const-arg)
list.push_back(std::move(v));
@ -862,7 +837,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingPushBackWithRValue_ThenInsertsEleme
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingEmplaceBackWithLValue_ThenInsertsElement) {
List<int64_t> list;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t v = 5;
list.emplace_back(v);
EXPECT_EQ(1, list.size());
@ -872,7 +846,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingEmplaceBackWithLValue_ThenInsertsEl
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingEmplaceBackWithRValue_ThenInsertsElement) {
List<int64_t> list;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t v = 5;
// NOLINTNEXTLINE(performance-move-const-arg)
list.emplace_back(std::move(v));
@ -883,7 +856,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingEmplaceBackWithRValue_ThenInsertsEl
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingEmplaceBackWithConstructorArg_ThenInsertsElement) {
List<int64_t> list;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
list.emplace_back(5); // const char* is a constructor arg to std::int64_t
EXPECT_EQ(1, list.size());
EXPECT_EQ(5, list.get(0));
@ -901,7 +873,6 @@ TEST(ListTest_NonIValueBasedList, givenEmptyList_whenIterating_thenBeginIsEnd) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenIterating_thenFindsElements) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 5});
bool found_first = false;
bool found_second = false;
@ -910,7 +881,6 @@ TEST(ListTest_NonIValueBasedList, whenIterating_thenFindsElements) {
if (static_cast<int64_t>(*iter) == 3) {
EXPECT_FALSE(found_first);
found_first = true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (static_cast<int64_t>(*iter) == 5) {
EXPECT_FALSE(found_second);
found_second = true;
@ -924,7 +894,6 @@ TEST(ListTest_NonIValueBasedList, whenIterating_thenFindsElements) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenIteratingWithForeach_thenFindsElements) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 5});
bool found_first = false;
bool found_second = false;
@ -933,7 +902,6 @@ TEST(ListTest_NonIValueBasedList, whenIteratingWithForeach_thenFindsElements) {
if (elem == 3) {
EXPECT_FALSE(found_first);
found_first = true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (elem == 5) {
EXPECT_FALSE(found_second);
found_second = true;
@ -969,7 +937,6 @@ TEST(ListTest_NonIValueBasedList, givenList_whenErasingFullRange_thenIsEmpty) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, whenCallingReserve_thenDoesntCrash) {
List<int64_t> list;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
list.reserve(100);
}
@ -1092,7 +1059,6 @@ TEST(ListTest_NonIValueBasedList, givenIterator_whenPrefixDecrementing_thenMoves
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, givenIterator_whenIncreasing_thenMovesToNextAndReturnsNewPosition) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 5});
List<int64_t>::iterator iter1 = list.begin();
@ -1103,7 +1069,6 @@ TEST(ListTest_NonIValueBasedList, givenIterator_whenIncreasing_thenMovesToNextAn
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, givenIterator_whenDecreasing_thenMovesToNextAndReturnsNewPosition) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 5});
List<int64_t>::iterator iter1 = list.end();
@ -1114,7 +1079,6 @@ TEST(ListTest_NonIValueBasedList, givenIterator_whenDecreasing_thenMovesToNextAn
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, givenIterator_whenAdding_thenReturnsNewIterator) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 5});
List<int64_t>::iterator iter1 = list.begin();
@ -1125,7 +1089,6 @@ TEST(ListTest_NonIValueBasedList, givenIterator_whenAdding_thenReturnsNewIterato
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, givenIterator_whenSubtracting_thenReturnsNewIterator) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
List<int64_t> list({3, 4, 5});
List<int64_t>::iterator iter1 = list.end() - 1;
@ -1218,7 +1181,6 @@ TEST(ListTest_NonIValueBasedList, givenEmptyList_whenCallingResize_thenResizesAn
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ListTest_NonIValueBasedList, givenEmptyList_whenCallingResizeWithValue_thenResizesAndSetsValue) {
List<int64_t> list;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
list.resize(2, 5);
EXPECT_EQ(2, list.size());
EXPECT_EQ(5, list.get(0));

View File

@ -42,7 +42,6 @@ void boxed_func_with_return(const OperatorHandle& /*opHandle*/, Stack* stack) {
called_with_args = tuple<int64_t, int64_t>(stack->at(0).toInt(), stack->at(1).toInt());
stack->clear();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
stack->push_back(5);
}
@ -71,7 +70,6 @@ void boxed_func_with_multi_return(const OperatorHandle& /*opHandle*/, Stack* sta
struct unboxed_functor_with_return final : OperatorKernel {
int64_t operator()(int64_t a, int64_t b) {
called_with_args = tuple<int64_t, int64_t>(a, b);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return 5;
}
};
@ -96,7 +94,6 @@ struct unboxed_functor_without_return_factory final {
int64_t unboxed_function_with_return(int64_t a, int64_t b) {
called_with_args = tuple<int64_t, int64_t>(a, b);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return 5;
}
@ -107,7 +104,6 @@ void unboxed_function_without_return(int64_t a, int64_t b) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
auto unboxed_lambda_with_return = [] (int64_t a, int64_t b) -> int64_t {
called_with_args = tuple<int64_t, int64_t>(a, b);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return 5;
};
@ -267,7 +263,6 @@ void expectOutOfPlaceMultiBoxedCallingWorks(const KernelFunction& func) {
OperatorHandle dummy = makeDummyOperatorHandle();
auto s1 = 1.0f;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto s2 = 2.0f;
auto t1 = at::zeros({1});
auto t2 = at::zeros({1});
@ -368,7 +363,6 @@ void expectOutOfPlaceMultiUnboxedCallingWorks(const KernelFunction& func) {
OperatorHandle dummy = makeDummyOperatorHandle();
auto s1 = 1.0f;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto s2 = 2.0f;
auto t1 = at::zeros({1});
auto t2 = at::zeros({1});

View File

@ -50,7 +50,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(6, result[0].toInt());
@ -62,7 +61,6 @@ void expectCallsDecrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(4, result[0].toInt());
@ -155,7 +153,6 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntOutpu
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(9, result[0].toInt());
@ -214,7 +211,6 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntListO
auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(3, result[0].toIntVector().size());
@ -229,7 +225,6 @@ std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<st
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5,
{dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)},
c10::optional<int64_t>(c10::in_place, 0),
@ -399,7 +394,6 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntListI
ASSERT_TRUE(op.has_value());
captured_input_list_size = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(0, outputs.size());
EXPECT_EQ(3, captured_input_list_size);
@ -417,7 +411,6 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntListI
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(1, outputs.size());
EXPECT_EQ(3, outputs[0].toInt());
@ -696,9 +689,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithMapOfLis
ASSERT_TRUE(op.has_value());
c10::Dict<string, c10::List<int64_t>> dict;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict.insert("key1", c10::List<int64_t>({10, 20}));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict.insert("key2", c10::List<int64_t>({30, 40}));
auto outputs = callOp(*op, dict);
EXPECT_EQ(1, outputs.size());
@ -727,15 +718,11 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithMapOfLis
c10::Dict<string, c10::List<c10::Dict<int64_t, string>>> dict;
c10::Dict<int64_t, string> dict1;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict1.insert(10, "10");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict1.insert(20, "20");
dict.insert("key1", c10::List<c10::Dict<int64_t, string>>({dict1}));
c10::Dict<int64_t, string> dict2;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert(30, "30");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert(40, "40");
dict.insert("key2", c10::List<c10::Dict<int64_t, string>>({dict2}));
auto outputs = callOp(*op, dict);
@ -800,9 +787,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithListOfMa
dict1.insert("1", c10::List<int64_t>({1, 2}));
dict1.insert("3", c10::List<int64_t>({3, 4}));
c10::Dict<string, c10::List<int64_t>> dict2;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert("5", c10::List<int64_t>({5, 6}));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert("7", c10::List<int64_t>({7, 8}));
c10::List<c10::Dict<string, c10::List<int64_t>>> list({ dict1, dict2 });
auto outputs = callOp(*op, list);

View File

@ -39,7 +39,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(6, result[0].toInt());
@ -51,7 +50,6 @@ void expectCallsDecrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(4, result[0].toInt());
@ -171,7 +169,6 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntOutput_when
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(9, result[0].toInt());
@ -231,7 +228,6 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntListOutput_
auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(3, result[0].toIntVector().size());
@ -246,7 +242,6 @@ std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<stri
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5,
c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}),
c10::optional<int64_t>(c10::in_place, 0),
@ -421,7 +416,6 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntListInput_w
ASSERT_TRUE(op.has_value());
captured_input_list_size = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(0, outputs.size());
EXPECT_EQ(3, captured_input_list_size);
@ -439,7 +433,6 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntListInput_w
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(1, outputs.size());
EXPECT_EQ(3, outputs[0].toInt());

View File

@ -36,7 +36,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(6, result[0].toInt());
@ -139,7 +138,6 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntOutput_
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(9, result[0].toInt());
@ -192,7 +190,6 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListOut
auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(3, result[0].toIntVector().size());
@ -210,7 +207,6 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMultipleOu
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, std::vector<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5,
{dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)},
c10::optional<int64_t>(c10::in_place, 0),
@ -362,7 +358,6 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListInp
ASSERT_TRUE(op.has_value());
captured_input_list_size = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(0, outputs.size());
EXPECT_EQ(3, captured_input_list_size);
@ -378,7 +373,6 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListInp
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(1, outputs.size());
EXPECT_EQ(3, outputs[0].toInt());
@ -630,9 +624,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMapOfList_
ASSERT_TRUE(op.has_value());
c10::Dict<string, c10::List<int64_t>> dict;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict.insert("key1", c10::List<int64_t>({10, 20}));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict.insert("key2", c10::List<int64_t>({30, 40}));
auto outputs = callOp(*op, dict);
EXPECT_EQ(1, outputs.size());
@ -660,15 +652,11 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMapOfListO
c10::Dict<string, c10::List<c10::Dict<int64_t, string>>> dict;
c10::Dict<int64_t, string> dict1;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict1.insert(10, "10");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict1.insert(20, "20");
dict.insert("key1", c10::List<c10::Dict<int64_t, string>>({dict1}));
c10::Dict<int64_t, string> dict2;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert(30, "30");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert(40, "40");
dict.insert("key2", c10::List<c10::Dict<int64_t, string>>({dict2}));
auto outputs = callOp(*op, dict);
@ -729,9 +717,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithListOfMapO
dict1.insert("1", c10::List<int64_t>({1, 2}));
dict1.insert("3", c10::List<int64_t>({3, 4}));
c10::Dict<string, c10::List<int64_t>> dict2;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert("5", c10::List<int64_t>({5, 6}));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict2.insert("7", c10::List<int64_t>({7, 8}));
c10::List<c10::Dict<string, c10::List<int64_t>>> list({ dict1, dict2 });
auto outputs = callOp(*op, list);

View File

@ -26,7 +26,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(6, result[0].toInt());
@ -38,7 +37,6 @@ void expectCallsDecrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(4, result[0].toInt());
@ -140,7 +138,6 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntOutput_whenRe
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(9, result[0].toInt());
@ -191,7 +188,6 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListOutput_wh
auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(3, result[0].toIntVector().size());
@ -210,7 +206,6 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithMultipleOutputs_
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5,
c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}),
c10::optional<int64_t>(c10::in_place, 0),
@ -359,7 +354,6 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListInput_wit
ASSERT_TRUE(op.has_value());
captured_input_list_size = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(0, outputs.size());
EXPECT_EQ(3, captured_input_list_size);
@ -374,7 +368,6 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListInput_wit
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(1, outputs.size());
EXPECT_EQ(3, outputs[0].toInt());

View File

@ -49,7 +49,6 @@ void expectCallsIncrement(c10::DispatchKeySet ks) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(ks), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(6, result[0].toInt());
@ -65,7 +64,6 @@ void expectCallsIncrementUnboxed(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t result = callOpUnboxed<int64_t, at::Tensor, int64_t>(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(6, result);
}
@ -76,7 +74,6 @@ void expectCallsDecrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(4, result[0].toInt());

View File

@ -46,7 +46,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(6, result[0].toInt());
@ -58,7 +57,6 @@ void expectCallsDecrement(DispatchKey dispatch_key) {
// assert that schema and cpu kernel are present
auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(dispatch_key), 5);
EXPECT_EQ(1, result.size());
EXPECT_EQ(4, result[0].toInt());
@ -143,7 +141,6 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntOutput_whenR
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(9, result[0].toInt());
@ -209,7 +206,6 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntListOutput_w
auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6);
EXPECT_EQ(1, result.size());
EXPECT_EQ(3, result[0].toIntVector().size());
@ -225,7 +221,6 @@ struct KernelWithMultipleOutputs final : OperatorKernel {
dict.insert("second", dummyTensor(DispatchKey::CUDA));
return std::tuple<Tensor, int64_t, c10::List<Tensor>, c10::optional<int64_t>, Dict<string, Tensor>>(
dummyTensor(DispatchKey::CUDA),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5,
c10::List<Tensor>({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}),
c10::optional<int64_t>(c10::in_place, 0),
@ -415,7 +410,6 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntListInput_wi
ASSERT_TRUE(op.has_value());
captured_input_list_size = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(0, outputs.size());
EXPECT_EQ(3, captured_input_list_size);
@ -435,7 +429,6 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntListInput_wi
auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List<int64_t>({2, 4, 6}));
EXPECT_EQ(1, outputs.size());
EXPECT_EQ(3, outputs[0].toInt());
@ -579,7 +572,6 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTupleInput_with
auto op = c10::Dispatcher::singleton().findSchema({"_test::tuple_input", ""});
ASSERT_TRUE(op.has_value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::tuple<string, int64_t, float> tup{"foobar", 123, 420.1337};
auto outputs = callOp(*op, tup);
EXPECT_EQ(1, outputs.size());
@ -661,7 +653,6 @@ private:
TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithMultipleConstructorArgs_whenRegistered_thenCanBeCalled) {
auto registrar = RegisterOperators()
.op("_test::offset_op(Tensor tensor, int input) -> int", RegisterOperators::options().kernel<KernelWithMultipleConstructorArgs>(DispatchKey::CPU, 2, 3)
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
.kernel<KernelWithMultipleConstructorArgs>(DispatchKey::CUDA, 4, 5));
auto op = c10::Dispatcher::singleton().findSchema({"_test::offset_op", ""});

View File

@ -90,9 +90,7 @@ TEST(BackendFallbackTest, TestBackendFallbackWithMode) {
c10::impl::IncludeDispatchKeyGuard guard(DispatchKey::TESTING_ONLY_GenericMode);
override_call_count = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor a = ones({5, 5}, kDouble);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor b = batch_norm(a, {}, {}, {}, {}, true, 0.1, 1e-05, false);
ASSERT_EQ(override_call_count, 2);
}
@ -103,9 +101,7 @@ TEST(BackendFallbackTest, TestBackendFallbackWithWrapper) {
m.fallback(torch::CppFunction::makeFromBoxedFunction<&generic_wrapper_fallback>());
override_call_count = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor a = at::detail::make_tensor<GenericWrapperTensorImpl>(ones({5, 5}, kDouble));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor b = batch_norm(a, {}, {}, {}, {}, true, 0.1, 1e-05, false);
ASSERT_EQ(override_call_count, 1);
}
@ -122,7 +118,6 @@ TEST(BackendFallbackTest, TestFallthroughBackendFallback) {
override_call_count = 0;
// Doesn't trigger, as we fallthrough
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor a = zeros({5, 5}, kDouble);
ASSERT_EQ(override_call_count, 0);
// Does trigger, because we explicitly set it

View File

@ -508,7 +508,6 @@ std::ostream& IValue::repr(
case IValue::Tag::Double: {
double d = v.toDouble();
int c = std::fpclassify(d);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if ((c == FP_NORMAL || c == FP_ZERO ) && std::abs(d) < 1e10) {
int64_t i = int64_t(d);
if (double(i) == d) {

View File

@ -859,9 +859,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
// primitive types
testArgTypes<double>::test(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
1.5, [] (const double& v) {EXPECT_EQ(1.5, v);},
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
2.5, [] (const IValue& v) {EXPECT_EQ(2.5, v.toDouble());},
"(float a) -> float");
testArgTypes<int64_t>::test(
@ -888,9 +886,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
// optional types (with has_value() == true)
testArgTypes<c10::optional<double>>::test(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::optional<double>(1.5), [] (const c10::optional<double>& v) {EXPECT_EQ(1.5, v.value());},
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::optional<double>(2.5), [] (const IValue& v) {EXPECT_EQ(2.5, v.toDouble());},
"(float? a) -> float?");
testArgTypes<c10::optional<int64_t>>::test(
@ -963,9 +959,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
// list types (with non-empty list)
testArgTypes<c10::List<double>>::test(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::List<double>({1.5, 2.5}), [] (const c10::List<double>& v) {expectListEquals({1.5, 2.5}, v);},
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::List<double>({3.5, 4.5}), [] (const IValue& v) {expectListEquals({3.5, 4.5}, v.to<c10::List<double>>());},
"(float[] a) -> float[]");
testArgTypes<c10::List<int64_t>>::test(
@ -1014,9 +1008,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
// list types (with non-empty list)
testArgTypes<c10::ArrayRef<double>, c10::List<double>>::test(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::ArrayRef<double>({1.5, 2.5}), [] (c10::ArrayRef<double> v) {expectListEquals({1.5, 2.5}, v);},
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::List<double>({3.5, 4.5}), [] (const IValue& v) {expectListEquals({3.5, 4.5}, v.to<c10::List<double>>());},
"(float[] a) -> float[]");
testArgTypes<c10::ArrayRef<int64_t>, c10::List<int64_t>>::test(
@ -1066,9 +1058,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
// std::array list types (with non-empty list)
testArgTypes<std::array<double, 2>>::test(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::array<double, 2>({1.5, 2.5}), [] (std::array<double, 2> v) {expectListEquals({1.5, 2.5}, v);},
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::array<double, 2>({3.5, 4.5}), [] (const IValue& v) {expectListEquals({3.5, 4.5}, v.to<std::array<double, 2>>());},
"(float[2] a) -> float[2]");
testArgTypes<std::array<int64_t, 2>>::test(
@ -1119,9 +1109,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) {
// deprecated list types (with non-empty list)
testArgTypes<std::vector<double>>::test<TestLegacyAPI>(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::vector<double>({1.5, 2.5}), [] (const std::vector<double>& v) {expectListEquals({1.5, 2.5}, v);},
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::vector<double>({3.5, 4.5}), [] (const IValue& v) {expectListEquals({3.5, 4.5}, v.to<c10::List<double>>());},
"(float[] a) -> float[]");
testArgTypes<std::vector<int64_t>>::test<TestLegacyAPI>(

View File

@ -207,7 +207,6 @@ Tensor selu(const Tensor & self) {
}
Tensor relu6(const Tensor & self) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return at::hardtanh(self, /*min_val=*/0, /*max_val=*/6);
}
@ -216,7 +215,6 @@ Tensor & selu_(Tensor & self) {
}
Tensor & relu6_(Tensor & self) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return at::hardtanh_(self, /*min_val=*/0, /*max_val=*/6);
}
@ -355,7 +353,6 @@ Tensor rrelu_with_noise_backward(
bool is_result) {
auto lower_tensor = scalar_to_tensor(lower);
auto upper_tensor = scalar_to_tensor(upper);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (training && (upper_tensor - lower_tensor).item().to<float>() > 1E-6) {
return grad_output.mul(noise);
} else {
@ -464,7 +461,6 @@ void inline prelu_cpu_kernel_share_weights(
auto input_data = input.data_ptr<scalar_t>();
auto weight_val = weight.data_ptr<scalar_t>()[0];
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::parallel_for(0, input_numel, 1000, [&](int64_t start, int64_t end) {
for (auto i = start; i < end; i++) {
scalar_t input_data_val = input_data[i];
@ -505,7 +501,6 @@ void inline prelu_cpu_kernel_multi_weights(
}
}
};
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (input.numel() > 1000) {
at::parallel_for(0, input_dim0_size, 0, loop);
} else {
@ -579,7 +574,6 @@ void inline prelu_cpu_backward_kernel_share_weights(
auto input_grad_data = input_grad.data_ptr<scalar_t>();
auto weight_grad_data = weight_grad.data_ptr<scalar_t>();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
scalar_t sum = at::parallel_reduce(0, input_numel, 1000, scalar_t(0),
[&](int64_t start, int64_t end, scalar_t ident) -> scalar_t {
scalar_t partial_sum = ident;
@ -634,7 +628,6 @@ void inline prelu_cpu_backward_kernel_multi_weights(
}
}
};
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (input.numel() > 1000) {
at::parallel_for(0, input_dim0_size, 0, loop);
} else {
@ -785,9 +778,7 @@ Tensor infinitely_differentiable_gelu_backward(
const Tensor& grad,
const Tensor& self) {
constexpr double kAlpha = M_2_SQRTPI * M_SQRT1_2 * 0.5;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor cdf = (1.0 + (self * M_SQRT1_2).erf_()).mul_(0.5);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor pdf = (-0.5 * self * self).exp_();
return cdf.addcmul_(self, pdf, kAlpha).mul_(grad);
}

View File

@ -139,7 +139,6 @@ void adaptive_avg_pool3d_out_cpu_template(
istrideW);
});
} else {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
output.resize_({input.size(-5), sizeD, osizeT, osizeH, osizeW});
int64_t n = input.size(0);

View File

@ -2504,7 +2504,6 @@ std::tuple<Tensor&, Tensor&> linalg_eig_out_info(const Tensor& input, Tensor& va
// See: https://github.com/pytorch/pytorch/pull/52491#issuecomment-795685687
// Here we call CPU path for matrices smaller than 2048x2048
// that should be in general significantly faster than calling MAGMA
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (input.size(-1) <= 2048) {
linalg_eig_stub(at::kCPU, real_imag_values, maybe_complex_vectors, infos, input.to(kCPU), compute_eigenvectors);
} else {
@ -2780,7 +2779,6 @@ static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
auto lda = std::max<int64_t>(1, m);
auto ldvt = std::max<int64_t>(1, n);
auto mn = std::min(m, n);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor iwork = at::empty({8 * mn}, at::kInt);
auto iwork_data = iwork.data_ptr<int>();
Tensor rwork;

View File

@ -81,7 +81,6 @@ void apply_reflect_conj_tri_single(scalar_t* self, int64_t n, int64_t stride, bo
};
}
// For small matrices OpenMP overhead is too large
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (n < 256) {
loop(0, n);
} else {

View File

@ -1103,12 +1103,10 @@ Tensor& heaviside_(Tensor& self, const Tensor& values) {
}
Tensor& ldexp_out(const Tensor& self, const Tensor& other, Tensor& result) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return at::mul_out(result, self, at::pow(2.0, other));
}
Tensor ldexp(const Tensor& self, const Tensor& other) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return at::mul(self, at::pow(2.0, other));
}

View File

@ -254,13 +254,11 @@ auto ConvParams::use_mkldnn(const at::Tensor& input, const at::Tensor& weight) c
!transposed && // or transposed tensors
// For 1x1 filters, MKLDNN is faster than THNN when multi-threaded,
// but THNN is faster when single-threaded.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(is_strided() || is_dilated() || input.size(0) >= 16 ||
weight.size(-1) != 1 || weight.size(-2) != 1 || at::get_num_threads() > 1) &&
(groups > 1
|| (weight.size(-1) > 3 && weight.size(-2) > 3)
|| input.size(0) > 1
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|| input.size(0)*input.size(1)*input.size(2)*input.size(3) > 20480) // for some case, native is faster
);
@ -277,10 +275,8 @@ auto ConvParams::use_nnpack(const at::Tensor& input, const at::Tensor& weight) c
!transposed && // or transposed tensors
input.ndimension() == 4 && // must be in NCHW format
weight.ndimension() == 4 &&
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(weight.size(2) < 17) && (weight.size(3) < 17) // NNPACK only supports kernels up to 16x16
#if !defined(C10_MOBILE)
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
&& input.size(0) >= 16 // ensure large enough batch size to ensure perf, tuneable
#endif
;
@ -316,7 +312,6 @@ auto ConvParams::is_depthwise(
const at::Tensor& input, const at::Tensor& weight) const -> bool {
return input.is_cuda() &&
!transposed &&
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(input.ndimension() == 4 || input.ndimension() == 5) &&
input.size(1) == groups &&
groups > 1 && // no point if there is only a single group
@ -329,145 +324,113 @@ bool check_cudnn_depthwise_workload(const at::Tensor& input, int stride) {
int ch = input.size(1);
int bs = input.size(0);
if (stride==1) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (w >= 7) {
// All batch sizes and nb_channels
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (w >= 112) {
return true;
}
// large nb_channels
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (ch >= 1024) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if (w >= 56) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 32) {
return true;
}
}
// batch_size specific
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (bs >= 128) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if (ch >= 512) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (ch >= 64) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (w >= 14) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 32) && (w >=28)) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 64) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 256) && (w >= 14)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 32) && (w >= 28)) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 32) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 256) && (w >= 14)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 128) && (w >= 28)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 32) && (w >= 56)) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 16) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 1024) && (w >= 14)) {
return true;
}
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 256) && (w >= 28)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 32) && (w >= 56)) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 8) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 512) && (w >= 28)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 64) && (w >= 56)) {
return true;
}
}
}
} else if (stride==2) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (ch < 256) {
return false;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (w >= 7) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (bs >= 128) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if (ch >= 1024) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if ((ch >= 512) && (w >= 14)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (w >= 28) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 64) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 512) && (w >= 14)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (w >= 28) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 32) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 1024) && (w >= 14)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (w >= 28) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 16) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 512) && (w >= 28)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (w >= 56) {
return true;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (bs >= 8) {
// NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 1024) && (w >= 28)) {
return true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (w >= 56) {
return true;
}
} else if (bs >= 1) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if ((ch >= 512) && (w >=112)) {
return true;
}
@ -484,7 +447,6 @@ auto ConvParams::use_cudnn_depthwise(
}
if (detail::getCUDAHooks().supportsDepthwiseConvolutionWithCuDNN()) {
long cudnn_version = detail::getCUDAHooks().versionCuDNN();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
bool kernel_cond = (cudnn_version >= 7600 &&
use_cudnn(input, weight) &&
input.scalar_type() == kHalf && // only for FP16
@ -492,12 +454,10 @@ auto ConvParams::use_cudnn_depthwise(
is_depthwise(input, weight) &&
input.ndimension() == 4 && // TODO: 5-D contiguous depthwise is not supported yet, need benchmarks
weight.size(2) == weight.size(3) && // only square kernels
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
input.size(2) >= 7 && // min width/height 7
!is_dilated() && // no dilation supported
stride[0] == stride[1] && // equal strides
((weight.size(3) == 3) || (weight.size(3) == 1)) &&
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
input.size(1) >= 32); // min 32 channels supported)
if (kernel_cond) {
return check_cudnn_depthwise_workload(input, stride[0]);
@ -895,7 +855,6 @@ at::Tensor _convolution(
at::MemoryFormat cudnn_memory_format = at::MemoryFormat::Contiguous;
if (cudnn_conv_use_channels_last(input, weight)) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
cudnn_memory_format = (k == 5) ? at::MemoryFormat::ChannelsLast3d : at::MemoryFormat::ChannelsLast;
}
@ -1008,7 +967,6 @@ at::Tensor _convolution(
params.padding,
params.groups);
} else if (
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
!params.transposed && (input.ndimension() == 5) &&
(input.device().is_cpu()) &&
!params.is_dilated()) {
@ -1092,7 +1050,6 @@ at::Tensor _convolution_nogroup(
return at::slow_conv_transpose2d(
input, weight, kernel_size, bias,
stride, padding, output_padding, dilation);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (dim == 5) {
return at::slow_conv_transpose3d(
input, weight, kernel_size, bias,
@ -1118,12 +1075,10 @@ at::Tensor _convolution_nogroup(
stride, padding);
}
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (dim == 5 && (input.is_cuda() || dilated)) {
return at::slow_conv_dilated3d(
input, weight, kernel_size, bias,
stride, padding, dilation);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (dim == 5) { /* dim == 5, CPU, non-dilated */
/* CPU implementation has specialized MM kernels
for non-dilated case here */

View File

@ -67,7 +67,6 @@ static inline void slow_conv3d_shape_check(
const int64_t dim_width = 4;
// Allow for empty batch size but not other dimensions
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
bool valid_empty = ndim == 5 && input.size(dim_batch) == 0 &&
input.size(dim_planes) != 0 && input.size(dim_depth) != 0 &&
input.size(dim_height) != 0 && input.size(dim_width) != 0;
@ -156,7 +155,6 @@ static inline void slow_conv3d_shape_check(
static Tensor view_weight_2d(const Tensor& weight_) {
Tensor weight = weight_.contiguous();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (weight.dim() == 5) {
const int64_t s1 = weight.size(0);
const int64_t s2 =

View File

@ -68,7 +68,6 @@ static Tensor cdist_impl(const Tensor& x1, const Tensor& x2, const double p, c10
// See Note [cdist relies on cdist_impl redispatching]
// Keep this condition in sync with the condition at the Note
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (!(p == 2 && (mode == 1 || (mode == 0 && (r1 > 25 || r2 > 25))))) {
TORCH_CHECK(device1 == kCPU || device1 == kCUDA, "cdist only supports CPU and CUDA devices, X1 got: ", device1);
TORCH_CHECK(device2 == kCPU || device2 == kCUDA, "cdist only supports CPU and CUDA devices, X2 got: ", device2);
@ -102,7 +101,6 @@ static Tensor cdist_impl(const Tensor& x1, const Tensor& x2, const double p, c10
result = at::empty(output_shape, x1.options());
} else if (c1 == 0) {
result = at::zeros(output_shape, x1.options());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (p == 2 && (mode == 1 || (mode == 0 && (r1 > 25 || r2 > 25)))) {
// See Note [cdist relies on cdist_impl redispatching]
// Keep the condition above in sync with the condition at the Note
@ -134,7 +132,6 @@ Tensor cdist(const Tensor& x1, const Tensor& x2, const double p, c10::optional<i
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This is for pytorch to figure the backward pass itself
// when p=2. Keep this condition in sync with the See Note reference
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (p == 2 && (mode == 1 || (mode == 0 && (r1 > 25 || r2 > 25)))) {
return cdist_impl(x1, x2, p, compute_mode);
} else {

View File

@ -62,7 +62,6 @@ namespace {
int64_t sample_poisson(double lambda, at::CPUGeneratorImpl* generator) {
TORCH_CHECK(lambda >= 0, "invalid Poisson rate, expected rate to be non-negative");
at::uniform_real_distribution<double> standard_uniform(0.0, 1.0);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (lambda >= 10) {
// transformed rejection method, (Hoermann, 1993)
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
@ -72,29 +71,20 @@ int64_t sample_poisson(double lambda, at::CPUGeneratorImpl* generator) {
double slam = std::sqrt(lambda);
double loglam = std::log(lambda);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
b = 0.931 + 2.53 * slam;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
a = -0.059 + 0.02483 * b;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
invalpha = 1.1239 + 1.1328 / (b - 3.4);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
vr = 0.9277 - 3.6224 / (b - 2);
// NOLINTNEXTLINE(modernize-use-bool-literals)
while (1) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
U = standard_uniform(generator) - 0.5;
V = standard_uniform(generator);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
us = 0.5 - std::fabs(U);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
k = (int64_t)std::floor((2 * a / us + b) * U + lambda + 0.43);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if ((us >= 0.07) && (V <= vr)) {
return k;
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if ((k < 0) || ((us < 0.013) && (V > us))) {
continue;
}

View File

@ -158,7 +158,6 @@ Tensor & embedding_renorm_cpu_(
auto row = self[sorted_indices[i]];
auto norm = row.norm(norm_type).item<double>();
if (norm > max_norm) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto scale = max_norm / (norm + 1e-7);
row *= scale;
}

View File

@ -150,7 +150,6 @@ index_select_add(const Tensor &select_indices,
/* block_size */ddim,
/* has_weight */false,
/* normalize_by_lengths */false,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
/* prefetch */16,
/* is_weight_positional */false,
/* use_offsets */true
@ -312,7 +311,6 @@ index_select_scale_add(const Tensor &select_indices,
/* block_size */ddim,
/* has_weight */true,
/* normalize_by_lengths */false,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
/* prefetch */16,
/* is_weight_positional */false,
/* use_offsets */true

View File

@ -172,7 +172,6 @@ void fractional_max_pool3d_out_cpu_template(
"fractional_max_pool3d_out(): non-empty 4D or 5D (batch mode) tensor ",
" expected for input, but got: ", ndims);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (ndims == 5) {
numBatch = input_.size(0);
planeDim++;
@ -312,7 +311,6 @@ void fractional_max_pool3d_backward_out_cpu_template(
int64_t widthDim = 3;
int64_t ndims = input.ndimension();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (ndims == 5) {
numBatch = input.size(0);
planeDim = 1;

View File

@ -873,7 +873,6 @@ Tensor grid_sampler(const Tensor& input, const Tensor& grid,
static_cast<GridSamplerPadding>(padding_mode) == GridSamplerPadding::Zeros &&
align_corners &&
input.dim() == 4 &&
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
input.size(1) <= 1024) {
return cudnn_grid_sampler(input, grid);
}

View File

@ -20,14 +20,12 @@ Tensor do_trapz(const Tensor& y, const Tensor& dx, int64_t dim) {
Tensor left = y.slice(dim, 0, -1);
Tensor right = y.slice(dim, 1);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return ((left + right) * dx).sum(dim) / 2.;
}
// When dx is constant, the above formula simplifies
// to dx * [(\sum_{i=1}^n y_i) - (y_1 + y_n)/2]
Tensor do_trapz(const Tensor& y, double dx, int64_t dim) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return (y.sum(dim) - (y.select(dim, 0) + y.select(dim, -1)) * (0.5)) * dx;
}

View File

@ -1209,7 +1209,6 @@ static inline Tensor& bmm_out_or_baddbmm_(Tensor& self_or_result, const Tensor&
|| (strides[1] == 1 && strides[2] >= sizes[1]);
};
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (contraction_size * res_rows * res_cols < 400) {
if (is_bmm_out) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, batch1.scalar_type(), "bmm", [&] {
@ -1574,7 +1573,6 @@ Tensor compute_T2(const Tensor& A) {
auto As = _allocate_buffer(A, 3);
// 3 for {I, A, A^2}
_fill_matrix_powers(As, A, 3);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
As.select(0, 2).div_(2.0);
return As.sum(0);
}
@ -1594,7 +1592,6 @@ Tensor compute_T4(const Tensor& A) {
// computes (I / 2 + A / 6 + A^2 / 24)
at::native::_compute_linear_combination(
As.narrow(0, 0, 3),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
_blob_to_Tensor<scalar_t>({1 / 2.0, 1 / 6.0, 1 / 24.0}, A)
)
);
@ -1617,7 +1614,6 @@ Tensor compute_T8(const Tensor& A) {
constexpr scalar_t x7 = (89. - sqrt_177) / (5040. * x3);
constexpr scalar_t y2 = (857. - 58. * sqrt_177) / 630.;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto As = _allocate_buffer(A, 5);
// 3 for {I, A, A^2}
_fill_matrix_powers(As, A, 3);
@ -1662,43 +1658,27 @@ Tensor compute_T12(const Tensor& A) {
constexpr int num_prods = 4;
array2d<scalar_t, num_prods, num_prods> b = {{
{
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
9.0198e-16,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.46932117595418237389,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-0.20099424927047284052,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-0.04623946134063071740
},
{
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5.31597895759871264183,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
1.19926790417132231573,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.01179296240992997031,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.01108844528519167989
},
{
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.18188869982170434744,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.05502798439925399070,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.09351590770535414968,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0.00610700528898058230
},
{
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-2.0861320e-13,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-0.13181061013830184015,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-0.02027855540589259079,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-0.00675951846863086359
}
}};
@ -1740,57 +1720,37 @@ Tensor compute_T18(const Tensor& A) {
array2d<scalar_t, num_prods, num_prods> b = {{
{
0.,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-1.00365581030144618291e-01,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-8.02924648241156932449e-03,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-8.92138498045729985177e-04,
0.
},
{
0.,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
3.97849749499645077844e-01,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
1.36783778460411720168e+00,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
4.98289622525382669416e-01,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-6.37898194594723280150e-04
},
{
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-1.09676396052962061844e+01,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
1.68015813878906206114e+00,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
5.71779846478865511061e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-6.98210122488052056106e-03,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
3.34975017086070470649e-05
},
{
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-9.04316832390810593223e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-6.76404519071381882256e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
6.75961301770459654925e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
2.95552570429315521194e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-1.39180257516060693404e-05
},
{
0.,
0.,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-9.23364619367118555360e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-1.69364939002081722752e-02,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
-1.40086798182036094347e-05
}
}};
@ -2161,7 +2121,6 @@ static Tensor _norm_min_max(Tensor& self, double ord, int64_t dim, bool keepdim)
static Tensor& _linalg_norm_matrix_out(Tensor& result, const Tensor &self, const optional<Scalar>& opt_ord,
IntArrayRef dim, bool keepdim, optional<ScalarType> opt_dtype) {
Tensor result_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto ord = opt_ord.value_or(2.0).toDouble();
TORCH_CHECK(self.layout() == Layout::Strided,
"matrix norm only supports strided layout, got: ", self.layout());
@ -2434,7 +2393,6 @@ void _linalg_cond_check_ord(c10::variant<Scalar, std::string> ord_variant) {
if (ord_variant.index() == 0) {
Scalar* ord = c10::get_if<Scalar>(&ord_variant);
double abs_ord = std::abs(ord->toDouble());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
TORCH_CHECK(abs_ord == 2.0 || abs_ord == 1.0 || abs_ord == INFINITY,
"linalg_cond got an invalid norm type: ", ord->toDouble());
} else if (ord_variant.index() == 1) {
@ -2465,14 +2423,12 @@ Tensor linalg_cond(const Tensor& self, const optional<Scalar>& opt_ord) {
}
// If ord == None or ord == ±2
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (std::abs(ord.toDouble()) == 2.0) {
auto singular_values = std::get<1>(at::svd(self));
// singular values are sorted in descending order
auto s_max = at::narrow(singular_values, /*dim=*/-1, /*start=*/0, /*length=*/1);
auto s_min = at::narrow(singular_values, /*dim=*/-1, /*start=*/-1, /*length=*/1);
Tensor result;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (ord.toDouble() == -2.0) {
result = s_min / s_max;
} else {
@ -2642,11 +2598,8 @@ struct KronImpl final {
maxdim = std::max(self.dim(), other.dim());
int64_t pad_self = maxdim - self.dim();
int64_t pad_other = maxdim - other.dim();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
a_reshape = c10::SmallVector<int64_t, 10>(2 * maxdim);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
b_reshape = c10::SmallVector<int64_t, 10>(2 * maxdim);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
result_reshape = c10::SmallVector<int64_t, 10>(maxdim);
for (int64_t i = 0; i < maxdim; i++) {
a_reshape[2 * i] = (i >= pad_self ? self.sizes()[i - pad_self] : 1);
@ -2662,7 +2615,6 @@ struct KronImpl final {
Tensor& kron_out(Tensor& result) const {
TORCH_INTERNAL_ASSERT(result.defined(), "Cannot call kron_out with an undefined result tensor as the out argument. Please allocate a Tensor before calling kron_out with it.");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SmallVector<int64_t, 10> mul_shape(2 * maxdim);
for (int64_t i = 0; i < maxdim; i++) {
mul_shape[2 * i] = a_reshape[2 * i];
@ -2682,11 +2634,8 @@ struct KronImpl final {
int64_t maxdim;
Tensor self_view;
Tensor other_view;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SmallVector<int64_t, 10> result_reshape;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SmallVector<int64_t, 10> a_reshape;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SmallVector<int64_t, 10> b_reshape;
};
}

View File

@ -280,7 +280,6 @@ Tensor poisson_nll_loss(const Tensor& input, const Tensor& target, const bool lo
}
if (full) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto stirling_term = target * at::log(target) - target + 0.5 * at::log(2 * c10::pi<double> * target);
loss += stirling_term.masked_fill(target <= 1, 0);
}
@ -449,7 +448,6 @@ Tensor mse_loss_backward(const Tensor& grad_output, const Tensor& input, const T
Tensor& mse_loss_backward_out(const Tensor& grad_output,
const Tensor& input, const Tensor& target, int64_t reduction, Tensor& grad_input) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto norm = reduction == Reduction::Mean ? 2. / input.numel() : 2.;
auto iter = at::TensorIteratorConfig()
.add_output(grad_input)

View File

@ -158,7 +158,6 @@ static void multilabel_margin_loss_backward_out_frame(
int64_t dim) {
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
CheckedFrom c = "multilabel_margin_loss_backward_out_frame";
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto is_target_arg = TensorArg(is_target_contiguous, "is_target", 5);
TORCH_CHECK(
@ -228,7 +227,6 @@ static void multilabel_margin_loss_backward_out_cpu_template(
int64_t nframe, dim;
CheckedFrom c = "multilabel_margin_loss_backward_cpu_template";
auto target_arg = TensorArg(target, "target", 3);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto is_target_arg = TensorArg(is_target, "is_target", 5);
const int64_t ndims = input.dim();

View File

@ -142,7 +142,6 @@ Tensor max_unpooling3d_forward_out_cpu_frame(
int64_t dimh = 2;
int64_t dimt = 1;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (input.ndimension() == 5) {
nBatch = input.size(0);
dimw++;
@ -252,7 +251,6 @@ static void max_unpooling3d_shape_check(
int dimt = 1;
int dimn = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (input.ndimension() == 5) {
dimw++;
dimh++;
@ -303,7 +301,6 @@ Tensor& max_unpooling3d_forward_out_cpu(const Tensor& self_,
max_unpooling3d_shape_check(
self_, Tensor(), indices_, output_size, stride, padding);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (self_.ndimension() == 5) {
output.resize_({self.size(0), self.size(1), oT, oH, oW});
} else {
@ -567,7 +564,6 @@ Tensor& max_unpooling3d_backward_out_cpu(const Tensor& grad_output_,
/* resize */
grad_input.resize_as_(self);
grad_input.zero_();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (self.ndimension() == 5) {
nbatch = self.size(0);
dimt++;

View File

@ -104,7 +104,6 @@ static inline void slow_conv_transpose3d_shape_check(
int dimh = 2;
int dimw = 3;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (ndim == 5) {
dimf++;
dimd++;

View File

@ -560,7 +560,6 @@ Tensor slow_conv_dilated3d_cpu(
stride_size,
pad_size,
dilation_size);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto is_batch = input.dim() == 5;
auto options = input.options();
// calculate output tensor size
@ -610,7 +609,6 @@ std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated3d_backward_cpu(
stride_size,
pad_size,
dilation_size);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto is_batch = input.dim() == 5;
auto options = grad_output.options();
// template function assumes batched tensors. unsqueeze(0) will

View File

@ -431,16 +431,11 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t> _batch_norm_impl_index(
&& weight.defined() && bias.defined()
&& ((running_mean.defined() && running_var.defined())
|| (!running_mean.defined() && !running_var.defined() && training))
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
&& ((input.dim() == 2 && input.size(0) <= 131070 && training) // per-activation, training
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|| (input.dim() == 2 && input.size(0) <= 262136 && !training) // per-activation, eval
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|| (input.dim() >= 3 && input.size(0) <= 880801 && training) // spatial, training
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|| (input.dim() >= 3 && input.size(0) <= 65535 && !training)) //spatial, eval
&& detail::getCUDAHooks().compiledWithCuDNN()
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
&& cudnn_enabled && detail::getCUDAHooks().versionCuDNN() >= 5110L);
if (use_cudnn && eps >= detail::getCUDAHooks().batchnormMinEpsilonCuDNN()) {

View File

@ -46,7 +46,6 @@ Tensor pixel_shuffle(const Tensor& self, int64_t upscale_factor) {
std::vector<int64_t> permutation(self.sizes().begin(), self_sizes_batch_end);
// std::iota is used to maintain the batch dims within the permutation.
std::iota(permutation.begin(), permutation.end(), 0);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
permutation.insert(permutation.end(), {-5 /* oc */, -2 /* h */, -4 /* 1st upscale_factor */, -1 /* w */,
-3 /* 2nd upscale_factor */});
const auto input_permuted = input_reshaped.permute(permutation);
@ -98,7 +97,6 @@ Tensor pixel_unshuffle(const Tensor& self, int64_t downscale_factor) {
std::vector<int64_t> permutation(self.sizes().begin(), self_sizes_batch_end);
// std::iota is used to maintain the batch dims within the permutation.
std::iota(permutation.begin(), permutation.end(), 0);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
permutation.insert(permutation.end(), {-5 /* c */, -3 /* 1st downscale_factor */, -1 /*2nd downscale_factor */,
-4 /* oh */, -2 /* ow */});
const auto input_permuted = input_reshaped.permute(permutation);

View File

@ -238,7 +238,6 @@ struct QuantizedCellParams : public CellParamsBase {
at::Tensor qw_ih = std::move(tensors[0]), qw_hh = std::move(tensors[1]),
b_ih = std::move(tensors[2]), b_hh = std::move(tensors[3]),
col_offsets_ih = std::move(tensors[4]),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
col_offsets_hh = std::move(tensors[5]);
double scale_ih = doubles[0], scale_hh = doubles[1];
int64_t zero_point_ih = longs[0], zero_point_hh = longs[1];
@ -555,7 +554,6 @@ static std::vector<CellParams> gather_params(TensorList params, bool has_biases,
if (has_biases) {
if (has_projections) {
TORCH_CHECK(params.size() % 5 == 0, "got an incorrect number of RNN parameters");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t i = 0; i < params.size(); i += 5) {
result.emplace_back(params[i], params[i + 1], params[i + 2], params[i + 3], params[i + 4]);
}
@ -589,7 +587,6 @@ static c10::List<c10::intrusive_ptr<CellParamsBase>> gather_quantized_params(
static at::Tensor undefined;
std::vector<c10::intrusive_ptr<CellParamsBase>> result;
TORCH_CHECK(params.size() % 12 == 0, "got an incorrect number of quantized RNN parameters");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t i = 0; i < params.size(); i += 12) {
result.emplace_back(c10::make_intrusive<QuantizedCellParams>(
static_cast<at::Tensor>(params[i]),
@ -597,19 +594,12 @@ static c10::List<c10::intrusive_ptr<CellParamsBase>> gather_quantized_params(
static_cast<at::Tensor>(params[i + 2]),
static_cast<at::Tensor>(params[i + 3]),
static_cast<at::Tensor>(params[i + 4]),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
static_cast<at::Tensor>(params[i + 5]),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
static_cast<at::Tensor>(params[i + 6]),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
static_cast<at::Tensor>(params[i + 7]),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
static_cast<at::Tensor>(params[i + 8]).item(),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
static_cast<at::Tensor>(params[i + 9]).item(),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
static_cast<at::Tensor>(params[i + 10]).item(),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
static_cast<at::Tensor>(params[i + 11]).item()));
}
return c10::List<c10::intrusive_ptr<CellParamsBase>>(result);

View File

@ -876,7 +876,6 @@ Tensor& logsumexp_out(const Tensor& self, DimnameList dims, bool keepdim, Tensor
static Tensor& norm_out(Tensor &result, const Tensor &self, const optional<Scalar>& opt_p,
IntArrayRef dim, bool keepdim, optional<ScalarType> opt_dtype) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto p = opt_p.value_or(2.0).to<double>();
TORCH_CHECK(self.device().is_cpu() || self.is_cuda(),
"norm only supports CPU and CUDA device types, but got: ", self.device().type());

View File

@ -149,7 +149,6 @@ static inline void shapeCheck3d(
"Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ",
input.sizes());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (input.dim() == 5)
{
dimw++;
@ -185,7 +184,6 @@ TORCH_META_FUNC(replication_pad3d) (
int64_t ptop = paddingSize[2];
int64_t pbottom = paddingSize[3];
int64_t pfront = paddingSize[4];
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t pback = paddingSize[5];
int64_t dimw = 3;
int64_t dimh = 2;
@ -195,7 +193,6 @@ TORCH_META_FUNC(replication_pad3d) (
shapeCheck3d(input, pleft, pright, ptop, pbottom, pfront, pback);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (input.dim() == 5)
{
nbatch = input.size(0);
@ -740,7 +737,6 @@ Tensor& replication_pad3d_backward_out_cpu_template(
int ptop = paddingSize[2];
int pbottom = paddingSize[3];
int pfront = paddingSize[4];
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int pback = paddingSize[5];
int dimw = 3;
int dimh = 2;
@ -748,7 +744,6 @@ Tensor& replication_pad3d_backward_out_cpu_template(
int dimslices = 0;
int64_t nbatch = 1;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (input.dim() == 5)
{
nbatch = input.size(0);
@ -1034,7 +1029,6 @@ TORCH_IMPL_FUNC(replication_pad3d_out_cpu) (
int64_t ptop = paddingSize[2];
int64_t pbottom = paddingSize[3];
int64_t pfront = paddingSize[4];
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t pback = paddingSize[5];
int64_t dimw = 3;
int64_t dimh = 2;
@ -1045,7 +1039,6 @@ TORCH_IMPL_FUNC(replication_pad3d_out_cpu) (
/* get contiguous input */
auto input = input_.contiguous();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (input.dim() == 5) {
nbatch = input.size(0);
dimw++;

View File

@ -230,7 +230,6 @@ void quantile_impl(
interpolation == QUANTILE_INTERPOLATION_MODE::MIDPOINT) {
// calculate weights for linear and midpoint
Tensor weights = interpolation == QUANTILE_INTERPOLATION_MODE::MIDPOINT
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
? at::full_like(ranks, 0.5)
: ranks - ranks_below;

View File

@ -931,7 +931,6 @@ Tensor istft(const Tensor& self, const int64_t n_fft, const optional<int64_t> ho
y = y.slice(2, start, end, 1);
window_envelop = window_envelop.slice(2, start, end, 1);
const auto window_envelop_lowest = window_envelop.abs().min().item().toDouble();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (window_envelop_lowest < 1e-11) {
std::ostringstream ss;
REPR(ss) << "window overlap add min: " << window_envelop_lowest;

View File

@ -1125,7 +1125,6 @@ Tensor bartlett_window(
window_length += 1;
}
auto window = native::arange(window_length, dtype, layout, device, pin_memory)
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
.mul_(2. / static_cast<double>(window_length - 1));
const int64_t first_half_size = ((window_length - 1) >> 1) + 1;
window.narrow(0, first_half_size, window_length - first_half_size).mul_(-1).add_(2);
@ -1167,7 +1166,6 @@ Tensor blackman_window(
auto window =
native::arange(window_length, dtype, layout, device, pin_memory)
.mul_(c10::pi<double> / static_cast<double>(window_length - 1));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
window = window.mul(4).cos_().mul_(0.08) - window.mul(2).cos_().mul_(0.5) + 0.42;
return periodic ? window.narrow(0, 0, window_length - 1) : window;
}
@ -1193,7 +1191,6 @@ Tensor hamming_window(
return native::hamming_window(
window_length,
periodic,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
/*alpha=*/0.54,
dtype,
layout,
@ -1210,7 +1207,6 @@ Tensor hamming_window(
c10::optional<Device> device,
c10::optional<bool> pin_memory) {
return native::hamming_window(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
window_length, periodic, alpha, /*beta=*/0.46, dtype, layout, device, pin_memory);
}
@ -1237,7 +1233,6 @@ Tensor hamming_window(
window_length += 1;
}
auto window = native::arange(window_length, dtype, layout, device, pin_memory);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
window.mul_(c10::pi<double> * 2. / static_cast<double>(window_length - 1)).cos_().mul_(-beta).add_(alpha);
return periodic ? window.narrow(0, 0, window_length - 1) : window;
}
@ -1264,7 +1259,6 @@ Tensor hann_window(
window_function_checks("hann_window", options, window_length);
return native::hamming_window(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
window_length, periodic, /*alpha=*/0.5, /*beta=*/0.5, dtype, layout, device, pin_memory);
}
@ -1278,7 +1272,6 @@ Tensor kaiser_window(int64_t window_length,
return native::kaiser_window(
window_length,
/*periodic=*/true,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
/*beta=*/12.0,
dtype,
layout,
@ -1291,7 +1284,6 @@ Tensor kaiser_window(int64_t window_length, bool periodic,
c10::optional<Layout> layout,
c10::optional<Device> device,
c10::optional<bool> pin_memory) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return native::kaiser_window(window_length, periodic, /*beta=*/12.0, dtype, layout, device, pin_memory);
}

View File

@ -110,7 +110,6 @@ static void parallel_dim_reduction(TensorIteratorBase& iter, loop2d_t loop) {
if (should_round_columns) {
// round columns to multiples of 128 bytes if adjacent columns are
// contiguous in memory.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t cols_per_128_bytes = 128 / element_size;
std::tie(begin, end) = round_columns(iter, dim, cols_per_128_bytes, begin, end);
}

View File

@ -27,7 +27,6 @@ void inline flip_cpu_kernel(
auto sizes_v = in_tensor.sizes().vec();
auto strides_v = in_tensor.strides().vec();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::parallel_for(0, numel, 1000, [&](int64_t start, int64_t end) {
for (auto i = start; i < end; i++) {
int64_t cur_indices = i;

View File

@ -37,7 +37,6 @@ TORCH_META_FUNC(upsample_nearest3d_backward) (
grad_output.dim() == 5,
"Expected grad_output to be a tensor of dimension 5 but got: dimension ", grad_output.dim());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (int i = 0; i < 5; ++i) {
TORCH_CHECK(
grad_output.size(i) == full_output_size[i],

View File

@ -42,7 +42,6 @@ TORCH_META_FUNC(upsample_trilinear3d_backward) (
grad_output.dim() == 5,
"Expected grad_output to be a tensor of dimension 5 but got: dimension ", grad_output.dim());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (int i = 0; i < 5; ++i) {
TORCH_CHECK(
grad_output.size(i) == full_output_size[i],

View File

@ -62,7 +62,6 @@ at::Tensor PackedLinearWeightQnnp::apply_dynamic_impl<false>(
/*min=*/x_min,
/*max=*/x_max,
/*qmin=*/0,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
/*qmax=*/255);
// Quantize input

View File

@ -186,7 +186,6 @@ PackedLinearWeightQnnp::PackedLinearWeightQnnp(
int8_t* w_data =
reinterpret_cast<int8_t*>(weight_contig.data_ptr<c10::qint8>());
for (int i = 0; i < wt_numel; ++i) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
qnnp_w_data[i] = static_cast<c10::quint8>(w_data[i] + 128);
}
bcsr_matrix_ = qnnpack::generateBlockCSRMatrix(

View File

@ -279,7 +279,6 @@ void GeluKernelImpl(TensorIterator& it) {
it,
[](scalar_t x) {
constexpr scalar_t kAlpha = M_SQRT1_2;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return x * scalar_t(0.5) * (scalar_t(1) + std::erf(x * kAlpha));
},
[&](Vec x_vec) {
@ -355,7 +354,6 @@ void hardsigmoid_backward_kernel(TensorIterator& iter) {
const scalar_t one_sixth(1.0f / 6.0f);
using Vec = Vec256<scalar_t>;
Vec kZeroVec(0.0f);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Vec kOneSixthVec(1.0f / 6.0f);
cpu_kernel_vec(
iter,

View File

@ -183,7 +183,6 @@ void div_floor_kernel(TensorIteratorBase& iter) {
scalar_t floordiv;
if (div != 0) {
floordiv = std::floor(div);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (div - floordiv > scalar_t(0.5)) {
floordiv += scalar_t(1.0);
}
@ -639,9 +638,7 @@ void smooth_l1_kernel(TensorIterator& iter, double beta) {
[&beta_val](scalar_t a, scalar_t b) -> scalar_t {
auto z = std::abs(a - b);
return z < beta_val
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
? static_cast<scalar_t>(0.5) * z * z / beta_val
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
: z - static_cast<scalar_t>(0.5) * beta_val;
},
[&beta_val_vec, &point_five_vec](Vec a, Vec b) {
@ -662,9 +659,7 @@ void huber_kernel(TensorIterator& iter, double delta) {
iter,
[&delta_val](scalar_t a, scalar_t b) -> scalar_t {
auto z = std::abs(a - b);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return z < delta_val ? static_cast<scalar_t>(0.5) * z * z :
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
delta_val * (z - static_cast<scalar_t>(0.5) * delta_val);
},
[&delta_val_vec, &point_five_vec](Vec a, Vec b) {
@ -847,7 +842,6 @@ void logaddexp2_kernel(TensorIteratorBase& iter) {
[=](Vec256<scalar_t> a, Vec256<scalar_t> b) {
Vec256<scalar_t> inf(std::numeric_limits<scalar_t>::infinity());
Vec256<scalar_t> one(1.0);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Vec256<scalar_t> two(2.0);
Vec256<scalar_t> m = maximum(a, b);
return Vec256<scalar_t>::blendv(

View File

@ -155,10 +155,8 @@ struct Dist {
// vector from the input, j is the second, and k is the result index. This
// parallelizes over the range of k and infers what i and j are from the
// value of k.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
parallel_for(0, combs, internal::GRAIN_SIZE / (16 * m), [p, self_start, self_end, n, m, res_start](int64_t k, int64_t end) {
const Vec pvec(p);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
double n2 = n - .5;
// The -1 accounts for floating point truncation issues
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
@ -191,7 +189,6 @@ struct Dist {
run_parallel_pdist<zdist_calc<Vec>>(result, self, p);
} else if (p == 1.0) {
run_parallel_pdist<odist_calc<Vec>>(result, self, p);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (p == 2.0) {
run_parallel_pdist<tdist_calc<Vec>>(result, self, p);
} else if (std::isinf(p)) {
@ -215,7 +212,6 @@ struct Dist {
int64_t size1 = r1 * m;
int64_t size2 = r2 * m;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
parallel_for(0, combs * d, internal::GRAIN_SIZE / (16 * m), [=](int64_t start, int64_t end) {
scalar_t * res = res_start + start;
const scalar_t * const res_end = res_start + end;
@ -257,7 +253,6 @@ struct Dist {
run_parallel_cdist<zdist_calc<scalar_t>>(result, x1, x2, p);
} else if (p == 1.0) {
run_parallel_cdist<odist_calc<scalar_t>>(result, x1, x2, p);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (p == 2.0) {
run_parallel_cdist<tdist_calc<scalar_t>>(result, x1, x2, p);
} else if (std::isinf(p)) {
@ -306,7 +301,6 @@ struct Dist {
// The only way to parallelize and avoid locking requires parallelizing
// over the columns of the input, i.e. we compute the gradient for the
// first section of each vector independentaly of the second section, etc.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::parallel_for(0, m / Vec::size(), internal::GRAIN_SIZE / (8 * n * n), [p, n, m, gs, grad_start, dist_start, self_start, res_start](int64_t l, int64_t end) {
const Vec pvec(p);
@ -329,10 +323,8 @@ struct Dist {
if (p == 0.0) {
} else if (p == 1.0) {
run_backward_parallel_pdist<odist_calc<Vec>>(result, grad, self, p, dist);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (p < 2.0) {
run_backward_parallel_pdist<lttdist_calc>(result, grad, self, p, dist);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (p == 2.0) {
run_backward_parallel_pdist<tdist_calc<Vec>>(result, grad, self, p, dist);
} else if (std::isinf(p)) {
@ -347,10 +339,8 @@ struct Dist {
if (p == 0.0) {
} else if (p == 1.0) {
run_backward_parallel_cdist<odist_calc<Vec>>(result, grad, x1, x2, p, dist);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (p < 2.0) {
run_backward_parallel_cdist<lttdist_calc>(result, grad, x1, x2, p, dist);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (p == 2.0) {
run_backward_parallel_cdist<tdist_calc<Vec>>(result, grad, x1, x2, p, dist);
} else if (std::isinf(p)) {
@ -380,7 +370,6 @@ struct Dist {
const scalar_t * const t2_start = t2.data_ptr<scalar_t>();
scalar_t * const res_start = result.data_ptr<scalar_t>();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::parallel_for(0, m / Vec::size(), internal::GRAIN_SIZE / (16 * r1), [=](int64_t l, int64_t end) {
const Vec pvec(p);

View File

@ -276,13 +276,11 @@ struct ComputeLocationBase<scalar_t, /*align_corners=*/false> {
ComputeLocationBase(int64_t size)
: max_val(static_cast<scalar_t>(size - 1))
, scaling_factor(static_cast<scalar_t>(size) / 2)
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
, low(static_cast<scalar_t>(-0.5))
, twice_span(static_cast<scalar_t>(size) * 2)
, empty(size <= 0) {}
inline Vec unnormalize(const Vec &in) const {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return (in + Vec(1)) * Vec(scaling_factor) - Vec(0.5);
}
@ -544,25 +542,16 @@ struct ApplyGridSample<scalar_t, 2, GridSamplerInterpolation::Bilinear,
auto interp_params = compute_interp_params(x, y);
auto nw = std::get<4>(interp_params);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto ne = std::get<5>(interp_params);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto sw = std::get<6>(interp_params);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto se = std::get<7>(interp_params);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto nw_mask = std::get<8>(interp_params);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto ne_mask = std::get<9>(interp_params);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto sw_mask = std::get<10>(interp_params);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto se_mask = std::get<11>(interp_params);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto i_y_n = std::get<12>(interp_params);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto i_x_w = std::get<13>(interp_params);
auto i_nw_offset = i_y_n * iVec(inp_sH) + i_x_w * iVec(inp_sW);
@ -838,14 +827,12 @@ struct ApplyGridSample<scalar_t, 2, GridSamplerInterpolation::Bicubic,
inline void get_cubic_coefficients(Vec (&coeffs)[4], const Vec& tx) const {
Vec x;
x = tx + Vec(1); // 1 < x = |-1 - tx| < 2
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
coeffs[0] = ((A * x - Vec(5) * A) * x + Vec(8) * A) * x - Vec(4) * A;
x = tx; // x = |0 - tx| <= 1
coeffs[1] = ((A + Vec(2)) * x - (A + Vec(3))) * x * x + Vec(1);
x = Vec(1) - tx; // x = |1 - tx| <= 1
coeffs[2] = ((A + Vec(2)) * x - (A + Vec(3))) * x * x + Vec(1);
x = Vec(2) - tx; // 1 < x = |2 - tx| < 2
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
coeffs[3] = ((A * x - Vec(5) * A) * x + Vec(8) * A) * x - Vec(4) * A;
}
@ -854,14 +841,12 @@ struct ApplyGridSample<scalar_t, 2, GridSamplerInterpolation::Bicubic,
inline void get_cubic_coefficients_grad(Vec (&coeffs)[4], const Vec& tx) const {
Vec x;
x = Vec(-1) - tx; // 1 < x = |-1 - tx| < 2
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
coeffs[0] = (Vec(-3) * A * x - Vec(10) * A ) * x - Vec(8) * A;
x = Vec(0) - tx; // x = |0 - tx| <= 1
coeffs[1] = (Vec(-3) * (A + Vec(2)) * x - Vec(2) * (A + Vec(3))) * x;
x = Vec(1) - tx; // x = |1 - tx| <= 1
coeffs[2] = (Vec(3) * (A + Vec(2)) * x - Vec(2) * (A + Vec(3))) * x;
x = Vec(2) - tx; // 1 < x = |2 - tx| < 2
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
coeffs[3] = (Vec(3) * A * x - Vec(10) * A) * x + Vec(8) * A;
}
@ -1220,7 +1205,6 @@ grid_sampler_2d_backward_cpu_kernel_impl(const Tensor& grad_output_,
auto N = input.size(0);
auto spatial_size = grid.size(1) * grid.size(2);
auto grain_size = spatial_size == 0 ? (N + 1)
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
: at::divup(at::internal::GRAIN_SIZE, spatial_size * 10 /* 2d * 5 tensors*/);
#define HANDLE_CASE(interp, padding, align_corners) \

View File

@ -22,7 +22,6 @@ static void lerp_kernel_scalar(
at::native::cpu_kernel(
iter,
[weight_val](scalar_t self_val, scalar_t end_val) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return (zabs<scalar_t, value_t>(weight_val) < 0.5)
? self_val + weight_val * (end_val - self_val)
: end_val - (end_val - self_val) * (scalar_t(1) - weight_val);
@ -48,7 +47,6 @@ static void lerp_kernel_tensor(
at::native::cpu_kernel(
iter,
[](scalar_t self_val, scalar_t end_val, scalar_t weight_val) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return (zabs<scalar_t, value_t>(weight_val) < 0.5)
? self_val + weight_val * (end_val - self_val)
: end_val - (end_val - self_val) * (scalar_t(1) - weight_val);

View File

@ -69,7 +69,6 @@ void multinomial_with_replacement_apply(
/* normalize cumulative probability distribution so that last val is 1
i.e. doesn't assume original self row sums to one */
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if ((sum > 0) || ((sum < 1.00001) && (sum > 0.99999))) {
for (int64_t j = 0; j < n_categories; j++) {
cum_dist_ptr[j * cum_dist_stride_0] /= sum;

View File

@ -48,7 +48,6 @@ void pow_tensor_tensor_kernel(TensorIteratorBase& iter) {
template <typename scalar_t, typename cast_scalar_t, typename exp_scalar_t>
void pow_tensor_scalar_optimized_kernel(TensorIteratorBase& iter, const exp_scalar_t exp) {
using Vec = Vec256<scalar_t>;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (exp == 0.5) {
cpu_kernel_vec(iter,
[](scalar_t base) -> scalar_t {
@ -56,7 +55,6 @@ void pow_tensor_scalar_optimized_kernel(TensorIteratorBase& iter, const exp_scal
},
[](Vec base) -> Vec { return base.sqrt(); }
);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (exp == 2.0) {
cpu_kernel_vec(iter,
[](scalar_t base) -> scalar_t {
@ -64,7 +62,6 @@ void pow_tensor_scalar_optimized_kernel(TensorIteratorBase& iter, const exp_scal
},
[](Vec base) -> Vec { return base * base; }
);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (exp == 3.0) {
cpu_kernel_vec(iter,
[](scalar_t base) -> scalar_t {
@ -72,7 +69,6 @@ void pow_tensor_scalar_optimized_kernel(TensorIteratorBase& iter, const exp_scal
},
[](Vec base) -> Vec { return base * base * base; }
);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (exp == -0.5) {
cpu_kernel_vec(iter,
[](scalar_t base) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
@ -87,7 +83,6 @@ void pow_tensor_scalar_optimized_kernel(TensorIteratorBase& iter, const exp_scal
},
[](Vec base) -> Vec { return base.reciprocal(); }
);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (exp == -2.0) {
cpu_kernel_vec(iter,
[](scalar_t base) -> scalar_t {

View File

@ -30,7 +30,6 @@ inline void _vec_log_softmax_lastdim(
int64_t dim_size) {
using Vec = vec256::Vec256<scalar_t>;
static constexpr int64_t CHUNK_SIZE = (128 / sizeof(scalar_t)) * Vec::size();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t grain_size = internal::GRAIN_SIZE / (16 * dim_size * CHUNK_SIZE);
if (grain_size < CHUNK_SIZE)
grain_size = CHUNK_SIZE;
@ -102,7 +101,6 @@ inline void _vec_softmax_lastdim(
int64_t outer_size,
int64_t dim_size) {
using Vec = vec256::Vec256<scalar_t>;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t grain_size = internal::GRAIN_SIZE / (16 * dim_size);
if (grain_size < 1)
grain_size = 1;
@ -144,7 +142,6 @@ inline void _vec_host_softmax_backward_lastdim(
int64_t outer_size,
int64_t dim_size) {
using Vec = vec256::Vec256<scalar_t>;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t grain_size = internal::GRAIN_SIZE / (16 * dim_size);
if (grain_size < 1)
grain_size = 1;

View File

@ -430,7 +430,6 @@ static void kaiser_window_kernel(TensorIteratorBase& iter, int64_t window_length
AT_DISPATCH_FLOATING_TYPES_AND(kBFloat16, iter.dtype(), "kaiser_window_cpu", [&](){
const scalar_t alpha = static_cast<scalar_t>((window_length - 1) / 2.0);
cpu_kernel(iter, [=](scalar_t a){
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return calc_i0(static_cast<scalar_t>(beta) * std::sqrt(1 - std::pow((a - alpha) / alpha, static_cast<scalar_t>(2.0)))) / calc_i0(static_cast<scalar_t>(beta));
});
});

View File

@ -269,9 +269,7 @@ void cpu_upsample_nearest_channels_last(
int64_t num_batches = input_sizes[0];
int64_t channels = input_sizes[1];
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t input_depth = (ndim == 5) ? input_sizes[2] : 1;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t output_depth = (ndim == 5) ? output_sizes[2] : 1;
int64_t input_height = (ndim >= 4) ? input_sizes[ndim - 2] : 1;
int64_t output_height = (ndim >= 4) ? output_sizes[ndim - 2] : 1;
@ -367,9 +365,7 @@ void cpu_upsample_linear_channels_last(
int64_t num_batches = input_sizes[0];
int64_t channels = input_sizes[1];
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t input_depth = (ndim == 5) ? input_sizes[2] : 1;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t output_depth = (ndim == 5) ? output_sizes[2] : 1;
int64_t input_height = (ndim >= 4) ? input_sizes[ndim - 2] : 1;
int64_t output_height = (ndim >= 4) ? output_sizes[ndim - 2] : 1;
@ -508,7 +504,6 @@ void cpu_upsample_linear_channels_last(
} else {
// upsample nearest 3d
TORCH_INTERNAL_ASSERT(ndim == 5);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::parallel_for(0, num_batches, at::internal::GRAIN_SIZE / output_slice_size / 8, loop3d);
}
@ -903,9 +898,7 @@ void cpu_upsample_nearest_backward(
// treat nbatch and channels as one dimension
int64_t channels = input_sizes[0] * input_sizes[1];
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t input_depth = (ndim == 5) ? input_sizes[2] : 1;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t output_depth = (ndim == 5) ? output_sizes[2] : 1;
int64_t input_height = (ndim >= 4) ? input_sizes[ndim - 2] : 1;
int64_t output_height = (ndim >= 4) ? output_sizes[ndim - 2] : 1;

View File

@ -35,9 +35,7 @@ void cpu_upsample_linear_backward(
// treat nbatch and channels as one dimension
int64_t channels = input_sizes[0] * input_sizes[1];
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t input_depth = (ndim == 5) ? input_sizes[2] : 1;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t output_depth = (ndim == 5) ? output_sizes[2] : 1;
int64_t input_height = (ndim >= 4) ? input_sizes[ndim - 2] : 1;
int64_t output_height = (ndim >= 4) ? output_sizes[ndim - 2] : 1;
@ -149,7 +147,6 @@ void cpu_upsample_linear_backward(
} else {
// upsample trilinear 3d
TORCH_INTERNAL_ASSERT(ndim == 5);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::parallel_for(0, channels, at::internal::GRAIN_SIZE / output_slice_size / 8, loop3d);
}

View File

@ -88,7 +88,6 @@ Tensor mkldnn_reorder_conv2d_weight(
// [o, i, h, w]. Ideally we should reorder the weight back in serialization.
// For backward compatibility, we squash the first two dims (g * o/g) back to
// its original form.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (w.ndims() == 5) {
auto wdims = w.get_dims();
w.reshape({wdims[0] * wdims[1], wdims[2], wdims[3], wdims[4]});

View File

@ -31,7 +31,6 @@ Tensor& mkldnn_zero_(Tensor& self) {
auto n = x.get_nelems();
auto* x_ = static_cast<float*>(x.get_data_handle());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
parallel_for(0, n, 2048, [x_](int64_t begin, int64_t end) {
vec256::map(
[](Vec /* unused */) { return 0.0; },

View File

@ -225,7 +225,6 @@ std::tuple<double, int64_t> _choose_qparams_per_tensor(
/*min=*/x_min,
/*max=*/x_max,
/*qmin=*/0,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
/*qmax=*/255,
/*preserve_sparsity=*/false,
/*force_scale_power_of_two=*/false,

View File

@ -25,7 +25,6 @@ namespace fbgemm_utils {
namespace {
bool IsChannelsLast3d(const Tensor& tensor) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (tensor.dim() != 5) {
return false;
}

View File

@ -15,7 +15,6 @@ Tensor int_repr_quantized_cpu(const Tensor& self) {
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(self.scalar_type(), "int_repr", [&]() {
if (bit_width == 4) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int64_t out_size = std::ceil(self.numel() * 0.5);
dst = at::empty(
{out_size},

View File

@ -437,7 +437,6 @@ void qrelu6_kernel(const Tensor& qx, Tensor& qy) {
using Vec = Vec256<scalar_t>;
auto iter = TensorIterator::unary_op(qy, qx);
scalar_t six = at::native::quantize_val<scalar_t>(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
qx.q_scale(), qx.q_zero_point(), 6.0);
auto zero_point_vec = Vec(scalar_t(zero_point));
auto six_vec = Vec(six);
@ -564,11 +563,9 @@ void qhardsigmoid_kernel(const Tensor& qx, Tensor& qy) {
AT_DISPATCH_QINT_TYPES(qx.scalar_type(), "qhardsigmoid", [&]() {
// - Output scale is set to 1.0 / 2^(BIT_NUM)
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
float output_scale = 0.00390625; // 1.0 / 2^8
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
if (SCALAR_TYPE == at::kQInt32) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
output_scale = 2.3283064365386963e-10; // 1.0 / 2^32
}
float inv_output_scale = 1.0 / output_scale;
@ -592,9 +589,7 @@ void qhardsigmoid_kernel(const Tensor& qx, Tensor& qy) {
using qVec = Vec256<scalar_t>;
using fVec = Vec256<float>;
fVec kZeroVec(0.0f);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
fVec kThreeVec(3.0f);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
fVec kSixVec(6.0f);
// Naive implemenentation: uses dequantize/execute/quantize routine
@ -800,9 +795,7 @@ void qhardswish_kernel(const Tensor& qx, Tensor& qy) {
fVec i_zero_point_vec(i_zero_point);
fVec i_scale_neg_zp_premul_vec = i_scale_vec * i_zero_point_vec.neg();
fVec zero_vec(0.0f);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
fVec three_vec(3.0f);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
fVec six_vec(6.0f);
AT_DISPATCH_QINT_TYPES(qx.scalar_type(), "qhardswish", [&]() {
@ -845,12 +838,10 @@ void qtanh_kernel(const Tensor& qx, Tensor& qy) {
// - Output scale is set to 2.0 / 2^(BIT_NUM)
// - For signed types output zero point is set to 0
// - For unsigned types output zero point is set to (qmax + qmin) / 2.0
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
float output_scale = 0.0078125; // 2.0 / 512
int64_t output_zero_point = 0;
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
if (SCALAR_TYPE == at::kQInt32) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
output_scale = 4.656612873077393e-10; // 2.0 / 2^32
} else if (SCALAR_TYPE == at::kQUInt8) {
output_zero_point = 128;
@ -2249,9 +2240,7 @@ void fake_quantize_learnable_channel_grad_kernel_cpu(
float* dzero_point_output = (float*)(data[2] + i * strides[2]);
float* x_input = (float*)(data[3] + i * strides[3]);
float* dy_input = (float*)(data[4] + i * strides[4]);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
float* scale_input = (float*)(data[5] + i * strides[5]);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
float* zero_point_input = (float*)(data[6] + i * strides[6]);
float inv_scale = 1.0f / (*scale_input);
@ -2918,7 +2907,6 @@ void dequantize_per_channel_affine_kernel(
Tensor scales,
Tensor zero_points,
int64_t axis,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int bit_width=8) {
// For contiguous tensors, e.g. NCHW, arbitrary axis can be used.
@ -2949,7 +2937,6 @@ void dequantize_per_channel_affine_kernel(
// We need to convert the qint8 value to float to ensure the
// subtraction subexpression returns a float
auto qvalue = qd[i / elem_per_byte].val_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (bit_width < 8) {
qvalue >>= (i % elem_per_byte) * bit_width;
qvalue &= (1 << bit_width) - 1;
@ -2968,7 +2955,6 @@ void dequantize_per_channel_affine_kernel(
// subtraction subexpression returns a float
// NOLINTNEXTLINE(clang-analyzer-core.DivideZero)
auto qvalue = qd[i / elem_per_byte].val_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (bit_width < 8) {
qvalue >>= (i % elem_per_byte) * bit_width;
qvalue &= (1 << bit_width) - 1;

View File

@ -357,7 +357,6 @@ Tensor q_batch_norm_impl(
} else if (dim == 4) {
qy = q_batch_norm2d_impl<ReluFused>(
qx, mb_weight, mb_bias, mean, var, eps, output_scale, output_zero_point);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (dim == 5) {
qy = q_batch_norm3d_impl<ReluFused>(
qx, mb_weight, mb_bias, mean, var, eps, output_scale, output_zero_point);

View File

@ -138,7 +138,6 @@ at::SmallVector<int64_t, 4> MakeConvOutputShape<2>(
}
template <>
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::SmallVector<int64_t, 5> MakeConvOutputShape<3>(
int N,
int M,
@ -183,7 +182,6 @@ at::SmallVector<int64_t, 4> MakeConvOutputShape<2>(
}
template <>
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::SmallVector<int64_t, 5> MakeConvOutputShape<3>(
int N, // mini-batch
int M, // output channels
@ -642,7 +640,6 @@ at::Tensor PackedConvWeightsQnnp<kSpatialDim>::apply_impl(
auto* qnnp_w_data = qnnp_weight.template data_ptr<c10::quint8>();
auto wt_numel = weight_contig.numel();
for (int i = 0; i < wt_numel; ++i) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
qnnp_w_data[i] = static_cast<c10::quint8>(w_data[i] + 128);
}
at::Tensor qbias;

View File

@ -90,28 +90,21 @@ at::Tensor& embedding_lookup_fallback_impl(
}
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float scale, bias;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (BIT_RATE == 8) {
const uint8_t* scale_bias =
weight_data + (idx + 1) * weight_size - 2 * sizeof(float);
uint32_t scale_val_int32 = 0;
scale_val_int32 = scale_val_int32 |
(scale_bias[0]) |
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(scale_bias[1] << 8) |
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(scale_bias[2] << 16) |
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(scale_bias[3] << 24);
float scale_val = (reinterpret_cast<float*>(&scale_val_int32))[0];
uint32_t bias_val_int32 = 0;
bias_val_int32 = bias_val_int32 |
(scale_bias[4]) |
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(scale_bias[5] << 8) |
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(scale_bias[6] << 16) |
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(scale_bias[7] << 24);
float bias_val = (reinterpret_cast<float*>(&bias_val_int32))[0];
scale = weight_val * scale_val;
@ -122,13 +115,11 @@ at::Tensor& embedding_lookup_fallback_impl(
uint16_t scale_val_int16 = 0;
scale_val_int16 = scale_val_int16 |
(scale_bias[0]) |
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(scale_bias[1] << 8);
at::Half scale_val = (reinterpret_cast<at::Half*>(&scale_val_int16))[0];
uint16_t bias_val_int16 = 0;
bias_val_int16 = bias_val_int16 |
(scale_bias[2]) |
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
(scale_bias[3] << 8);
at::Half bias_val = (reinterpret_cast<at::Half*>(&bias_val_int16))[0];
scale = weight_val * scale_val;
@ -805,7 +796,6 @@ class QEmbeddingBag final {
const c10::optional<Tensor>& per_sample_weights_,
const c10::optional<Tensor>& compressed_indices_mapping,
bool include_last_offset) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (bit_rate == 8) {
return packed_weight->embeddingbag_byte(
indices,
@ -841,7 +831,6 @@ class QEmbedding final {
const auto offsets_size = indices.numel();
at::Tensor offsets = at::arange(0, offsets_size, indices.scalar_type());
at::Tensor output;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (bit_rate == 8) {
return packed_weight->embeddingbag_byte(
indices,

View File

@ -34,9 +34,7 @@ c10::intrusive_ptr<EmbeddingPackedParamsBase> PackedEmbeddingBagWeight::prepack(
int bit_width, scale_bias_bytes;
uint8_t* weight_data = static_cast<uint8_t*>(weight_contig.data_ptr());
if (qweight.scalar_type() == c10::kQUInt8) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
bit_width = 8;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
scale_bias_bytes = 8; // extra 8 bytes to store FP scale and bias per row.
} else {
bit_width = 4;
@ -77,7 +75,6 @@ c10::intrusive_ptr<EmbeddingPackedParamsBase> PackedEmbeddingBagWeight::prepack(
weight_contig.suggest_memory_format());
auto* output_data = output.data_ptr<uint8_t>();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (bit_width == 8) {
at::parallel_for(
0, embedding_rows, 1, [&](int32_t start_idx, int32_t end_idx) {
@ -276,7 +273,6 @@ Tensor _qembeddingbag_nbit_prepack_helper(
"bit_width must be either 2 or 4 to use 'qembeddingbag_nbit_prepack'."
"For 8bit, consider using 'embedding_bag_byte_prepack'.");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int NUM_ELEM_PER_BYTE = 8 / bit_width;
TORCH_CHECK(
weight_contig.size(weight.dim() - 1) % NUM_ELEM_PER_BYTE == 0,

View File

@ -10,18 +10,15 @@ at::Tensor PackedEmbeddingBagWeight::unpack() {
auto packed_weight = packed_w;
at::Tensor weight_origin;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (bit_rate_ == 8 || bit_rate_ == 4) {
const auto input_rows = packed_weight.size(0);
const auto input_columns = packed_weight.size(1);
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int scale_bias_bytes;
const auto num_elem_per_byte = 8 / bit_rate_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (bit_rate_ == 8) {
// The last 2 values are used to store the FP32 scale and zero_point
// values per row.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
scale_bias_bytes = 8;
} else {
scale_bias_bytes = 4;
@ -45,7 +42,6 @@ at::Tensor PackedEmbeddingBagWeight::unpack() {
uint8_t* output_data;
// Allocate output weight tensor based on the bit_width
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (bit_rate_ == 8) {
weight_origin = at::_empty_per_channel_affine_quantized(
output_shape,
@ -161,7 +157,6 @@ Tensor _qembeddingbag_nbit_unpack_helper(
const auto input_rows = packed_weight.size(0);
const auto input_columns = packed_weight.size(1);
const auto* input_data = packed_weight.data_ptr<uint8_t>();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int NUM_ELEM_PER_BYTE = 8 / BIT_RATE;
// The last 4 bytes per row are two fp16 scale and zero_point.

View File

@ -267,7 +267,6 @@ at::Tensor PackedLinearWeightsQnnp::apply_impl(
auto* qnnp_w_data = qnnp_weight.data_ptr<c10::quint8>();
auto wt_numel = weight_contig.numel();
for (int i = 0; i < wt_numel; ++i) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
qnnp_w_data[i] = static_cast<c10::quint8>(w_data[i] + 128);
}
// Original bias was float, so we requantize it here.

View File

@ -264,7 +264,6 @@ at::Tensor PackedLinearWeightsQnnp::apply_dynamic_impl(at::Tensor input) {
/*min=*/x_min,
/*max=*/x_max,
/*qmin=*/0,
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
/*qmax=*/255);
float* weight_scales_data = w_scales.data_ptr<float>();
if (!input_scale.has_value() || input_scale.value() != q_params.scale) {
@ -289,7 +288,6 @@ at::Tensor PackedLinearWeightsQnnp::apply_dynamic_impl(at::Tensor input) {
int8_t* w_data = (int8_t*)weight_contig.data_ptr<c10::qint8>();
auto wt_numel = weight_contig.numel();
for (int i = 0; i < wt_numel; ++i) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
qnnp_w_data[i] = static_cast<c10::quint8>(w_data[i] + 128);
}

View File

@ -148,7 +148,6 @@ Tensor quantized_relu6_(Tensor& qx) {
scalar_t six = at::native::quantize_val<scalar_t>(
qx.q_scale(),
qx.q_zero_point(),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
/*value=*/6.0);
auto six_vec = Vec(six);
cpu_kernel_vec(

View File

@ -101,12 +101,10 @@ Tensor sigmoid_quantized_cpu(const Tensor& qx) {
// - For unsigned types output zero point is set to (qmax + qmin) / 2.0
// See https://stackoverflow.com/a/34448562/3606192 for potential
// optimizations
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
double output_scale = 0.00390625; // 1.0 / 2^8
int64_t output_zero_point = 0;
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
if (SCALAR_TYPE == at::kQInt32) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
output_scale = 2.3283064365386963e-10; // 1.0 / 2^32
} else if (SCALAR_TYPE == at::kQInt8) {
output_zero_point = -128;

View File

@ -109,7 +109,6 @@ int64_t _get_zero_point_from_tensor(
int64_t quant_max,
bool is_forward) {
float zero_point_fp = zero_point[0].item<float>();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
zero_point_fp = is_forward ? std::nearbyint(zero_point_fp) : zero_point_fp + 0.5f;
float zero_point_clamped = std::min(std::max(zero_point_fp, static_cast<float>(quant_min)),
static_cast<float>(quant_max));

View File

@ -637,7 +637,6 @@ void inline sparse_mask_out_cpu_kernel(
auto mask_indices_accessor = mask_indices.accessor<int64_t, 2>();
scalar_t* t_ptr = t.data_ptr<scalar_t>();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::parallel_for(0, r_nnz, 1000, [&](int64_t start, int64_t end) {
for (auto i = start; i < end; i++) {
int64_t idx = 0;

View File

@ -69,7 +69,6 @@ TORCH_LIBRARY(xnnpack, m) {
std::move(std::get<2>(state)),
std::move(std::get<3>(state)),
std::move(std::get<4>(state)),
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::move(std::get<5>(state)),
// NOLINTNEXTLINE(performance-move-const-arg,cppcoreguidelines-avoid-magic-numbers)
std::move(std::get<6>(state)),

View File

@ -191,7 +191,6 @@ int load_nnapi_model(
CAFFE_ENFORCE(len == 12);
uint32_t buffer_number = *(uint32_t*)stored_pointer;
uint32_t buffer_offset = *(uint32_t*)(stored_pointer + 4);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
uint32_t operand_length = *(uint32_t*)(stored_pointer + 8);
CAFFE_ENFORCE(buffer_number < num_buffers);
CAFFE_ENFORCE(buffer_offset + operand_length >= buffer_offset); // No integer overflow

View File

@ -84,7 +84,6 @@ int64_t get_sub_byte_tensor_size(int64_t size_bytes, at::ScalarType t) {
int64_t new_size_bytes;
switch(t) {
case at::ScalarType::QUInt4x2:
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
new_size_bytes = std::ceil(size_bytes * 0.5);
break;
default:

View File

@ -113,7 +113,6 @@ TEST(DictTest, givenEmptyDict_whenIterating_thenBeginIsEnd) {
TEST(DictTest, givenMutableDict_whenIterating_thenFindsElements) {
Dict<int64_t, string> dict;
dict.insert(3, "3");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict.insert(5, "5");
bool found_first = false;
bool found_second = false;
@ -122,7 +121,6 @@ TEST(DictTest, givenMutableDict_whenIterating_thenFindsElements) {
EXPECT_EQ("3", iter->value());
EXPECT_FALSE(found_first);
found_first = true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (iter->key() == 5) {
EXPECT_EQ("5", iter->value());
EXPECT_FALSE(found_second);
@ -139,7 +137,6 @@ TEST(DictTest, givenMutableDict_whenIterating_thenFindsElements) {
TEST(DictTest, givenMutableDict_whenIteratingWithForeach_thenFindsElements) {
Dict<int64_t, string> dict;
dict.insert(3, "3");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict.insert(5, "5");
bool found_first = false;
bool found_second = false;
@ -148,7 +145,6 @@ TEST(DictTest, givenMutableDict_whenIteratingWithForeach_thenFindsElements) {
EXPECT_EQ("3", elem.value());
EXPECT_FALSE(found_first);
found_first = true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (elem.key() == 5) {
EXPECT_EQ("5", elem.value());
EXPECT_FALSE(found_second);
@ -165,7 +161,6 @@ TEST(DictTest, givenMutableDict_whenIteratingWithForeach_thenFindsElements) {
TEST(DictTest, givenConstDict_whenIterating_thenFindsElements) {
Dict<int64_t, string> dict_;
dict_.insert(3, "3");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict_.insert(5, "5");
const Dict<int64_t, string>& dict = dict_;
bool found_first = false;
@ -175,7 +170,6 @@ TEST(DictTest, givenConstDict_whenIterating_thenFindsElements) {
EXPECT_EQ("3", iter->value());
EXPECT_FALSE(found_first);
found_first = true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (iter->key() == 5) {
EXPECT_EQ("5", iter->value());
EXPECT_FALSE(found_second);
@ -192,7 +186,6 @@ TEST(DictTest, givenConstDict_whenIterating_thenFindsElements) {
TEST(DictTest, givenConstDict_whenIteratingWithForeach_thenFindsElements) {
Dict<int64_t, string> dict_;
dict_.insert(3, "3");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict_.insert(5, "5");
const Dict<int64_t, string>& dict = dict_;
bool found_first = false;
@ -202,7 +195,6 @@ TEST(DictTest, givenConstDict_whenIteratingWithForeach_thenFindsElements) {
EXPECT_EQ("3", elem.value());
EXPECT_FALSE(found_first);
found_first = true;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
} else if (elem.key() == 5) {
EXPECT_EQ("5", elem.value());
EXPECT_FALSE(found_second);
@ -281,7 +273,6 @@ TEST(DictTest, givenMutableDict_whenCallingFindOnNonExistingKey_thenReturnsEnd)
Dict<int64_t, string> dict;
dict.insert(3, "3");
dict.insert(4, "4");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Dict<int64_t, string>::iterator found = dict.find(5);
EXPECT_EQ(dict.end(), found);
}
@ -303,7 +294,6 @@ TEST(DictTest, givenConstDict_whenCallingFindOnNonExistingKey_thenReturnsEnd) {
dict_.insert(3, "3");
dict_.insert(4, "4");
const Dict<int64_t, string>& dict = dict_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Dict<int64_t, string>::iterator found = dict.find(5);
EXPECT_EQ(dict.end(), found);
}
@ -327,7 +317,6 @@ TEST(DictTest, whenCallingContainsWithNonExistingKey_thenReturnsFalse) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(DictTest, whenCallingReserve_thenDoesntCrash) {
Dict<int64_t, string> dict;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict.reserve(100);
}
@ -503,13 +492,11 @@ TEST(ListTest_IValueBasedList, givenIterator_whenWritingToValueFromIterator_then
Dict<int64_t, string> dict;
dict.insert(3, "3");
dict.insert(4, "4");
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict.insert(5, "5");
(*dict.find(3)).setValue(dict.find(4)->value());
EXPECT_EQ("4", dict.find(3)->value());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dict.find(3)->setValue(dict.find(5)->value());
EXPECT_EQ("5", dict.find(3)->value());
}
@ -556,7 +543,6 @@ TEST(DictTest, dictTensorAsKey) {
EXPECT_EQ("three", found_key1->value());
Dict<at::Tensor, string>::iterator found_nokey1 = dict.find(at::tensor(3));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Dict<at::Tensor, string>::iterator found_nokey2 = dict.find(at::tensor(5));
EXPECT_EQ(dict.end(), found_nokey1);
EXPECT_EQ(dict.end(), found_nokey2);

View File

@ -20,15 +20,12 @@ static Dimname dimnameFromString(const std::string& str) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NamedTensorTest, isNamed) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto tensor = at::zeros({3, 2, 5, 7});
ASSERT_FALSE(tensor.has_names());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
tensor = at::zeros({3, 2, 5, 7});
ASSERT_FALSE(tensor.has_names());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
tensor = at::zeros({3, 2, 5, 7});
auto N = dimnameFromString("N");
auto C = dimnameFromString("C");
@ -55,7 +52,6 @@ static bool dimnames_equal(at::DimnameList names, at::DimnameList other) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NamedTensorTest, attachMetadata) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto tensor = at::zeros({3, 2, 5, 7});
auto N = dimnameFromString("N");
auto C = dimnameFromString("C");
@ -75,7 +71,6 @@ TEST(NamedTensorTest, attachMetadata) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(NamedTensorTest, internalSetNamesInplace) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto tensor = at::zeros({3, 2, 5, 7});
auto N = dimnameFromString("N");
auto C = dimnameFromString("C");

View File

@ -109,7 +109,6 @@ void test(DeprecatedTypeProperties& type, IntArrayRef shape, int64_t a = 0, int6
// apply utils test 2-dim small contiguous
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ApplyUtilsTest, Contiguous2D) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
manual_seed(123);
test(CPU(kDouble), {2, 1}, -1, -1);
}
@ -117,7 +116,6 @@ TEST(ApplyUtilsTest, Contiguous2D) {
// apply utils test 2-dim small
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ApplyUtilsTest, Small2D) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
manual_seed(123);
test(CPU(kDouble), {2, 1});
}
@ -125,16 +123,13 @@ TEST(ApplyUtilsTest, Small2D) {
// apply utils test 2-dim
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ApplyUtilsTest, _2D) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
manual_seed(123);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
test(CPU(kDouble), {20, 10});
}
// apply utils test 3-dim
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ApplyUtilsTest, _3D) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
manual_seed(123);
test(CPU(kDouble), {3, 4, 2});
}
@ -142,17 +137,13 @@ TEST(ApplyUtilsTest, _3D) {
// apply utils test 3-dim medium
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ApplyUtilsTest, Medium3D) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
manual_seed(123);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
test(CPU(kDouble), {3, 40, 2});
}
// apply utils test 10-dim
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ApplyUtilsTest, _10D) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
manual_seed(123);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
test(CPU(kDouble), {3, 4, 2, 5, 2, 1, 3, 4, 2, 3});
}

View File

@ -9,15 +9,11 @@ using namespace at;
class atest : public ::testing::Test {
protected:
void SetUp() override {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
x_tensor = tensor({10, -1, 0, 1, -10});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
y_tensor = tensor({-10, 1, 0, -1, 10});
x_logical = tensor({1, 1, 0, 1, 0});
y_logical = tensor({0, 1, 0, 1, 1});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
x_float = tensor({2.0, 2.4, 5.6, 7.0, 36.0});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
y_float = tensor({1.0, 1.1, 8.7, 10.0, 24.0});
}
@ -53,7 +49,6 @@ void unit_binary_ops_test(
const Tensor& exp,
ScalarType dtype,
Args... args) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto out_tensor = empty({5}, dtype);
func(out_tensor, x_tensor.to(dtype), y_tensor.to(dtype), args...);
ASSERT_EQ(out_tensor.dtype(), dtype);
@ -101,7 +96,6 @@ void run_binary_ops_test(
}
void trace() {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor foo = rand({12, 12});
// ASSERT foo is 2-dimensional and holds floats.
@ -117,9 +111,7 @@ void trace() {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST_F(atest, operators) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int a = 0b10101011;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int b = 0b01111011;
auto a_tensor = tensor({a});
@ -202,14 +194,12 @@ TEST_F(atest, ne_operators) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST_F(atest, add_operators) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto exp_tensor = tensor({-10, 1, 0, -1, 10});
run_binary_ops_test(add_out, x_tensor, y_tensor, exp_tensor, INTBOOL, 2);
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST_F(atest, max_operators) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto exp_tensor = tensor({10, 1, 0, 1, 10});
run_binary_ops_test<
at::Tensor& (*)(at::Tensor&, const at::Tensor&, const at::Tensor&)>(
@ -218,7 +208,6 @@ TEST_F(atest, max_operators) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST_F(atest, min_operators) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto exp_tensor = tensor({-10, -1, 0, -1, -10});
run_binary_ops_test<
at::Tensor& (*)(at::Tensor&, const at::Tensor&, const at::Tensor&)>(
@ -227,7 +216,6 @@ TEST_F(atest, min_operators) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST_F(atest, sigmoid_backward_operator) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto exp_tensor = tensor({-1100, 0, 0, -2, 900});
// only test with type Float
run_binary_ops_test<
@ -237,7 +225,6 @@ TEST_F(atest, sigmoid_backward_operator) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST_F(atest, fmod_tensor_operators) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto exp_tensor = tensor({0.0, 0.2, 5.6, 7.0, 12.0});
run_binary_ops_test<
at::Tensor& (*)(at::Tensor&, const at::Tensor&, const at::Tensor&)>(
@ -247,10 +234,8 @@ TEST_F(atest, fmod_tensor_operators) {
// TEST_CASE( "atest", "[]" ) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST_F(atest, atest) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
manual_seed(123);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto foo = rand({12, 6});
ASSERT_EQ(foo.size(0), 12);

View File

@ -26,7 +26,6 @@ void TestResize(DeprecatedTypeProperties& type) {
auto a = at::empty({0}, type.options());
a.resize_({3, 4});
ASSERT_EQ_RESOLVED(a.numel(), 12);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
a.resize_({5, 7});
ASSERT_EQ_RESOLVED(a.numel(), 35);
}
@ -56,7 +55,6 @@ void TestSort(DeprecatedTypeProperties& type) {
void TestRandperm(DeprecatedTypeProperties& type) {
if (type.backend() != Backend::CUDA) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor b = randperm(15, type);
Tensor rv, ri;
std::tie(rv, ri) = sort(b, 0);
@ -75,7 +73,6 @@ void TestAdd(DeprecatedTypeProperties& type) {
Tensor b = rand({3, 4}, type);
Tensor c = add(a, add(a, b));
// TODO:0-dim Tensor d(3.f);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Scalar d = 3.f;
if (type.backend() == Backend::CPU && type.scalarType() == kHalf) {
ASSERT_TRUE(add(c, d).allclose(a + a + b + d, 1e-2));
@ -86,11 +83,8 @@ void TestAdd(DeprecatedTypeProperties& type) {
void TestZeros(DeprecatedTypeProperties& type) {
auto begin = std::chrono::high_resolution_clock::now();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor a = zeros({1024, 1024}, type);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (int i = 1; i < 1000; ++i) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
a = zeros({128, 128}, type);
}
auto end = std::chrono::high_resolution_clock::now();
@ -108,7 +102,6 @@ void TestLoadsOfAdds(DeprecatedTypeProperties& type) {
auto begin = std::chrono::high_resolution_clock::now();
Tensor d = ones({3, 4}, type);
Tensor r = zeros({3, 4}, type);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (auto i = 0; i < 100000; i++) {
add_out(r, r, d);
}
@ -126,7 +119,6 @@ void TestLoadOfAddsWithCopy(DeprecatedTypeProperties& type) {
auto begin = std::chrono::high_resolution_clock::now();
Tensor d = ones({3, 4}, type);
Tensor r = zeros({3, 4}, type);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (auto i = 0; i < 100000; i++) {
r = add(r, d);
}
@ -148,7 +140,6 @@ void TestIsContiguous(DeprecatedTypeProperties& type) {
}
void TestPermute(DeprecatedTypeProperties& type) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor a = rand({3, 4, 5}, type);
Tensor b = a.permute({1, 2, 0});
ASSERT_TRUE(b.sizes().equals({4, 5, 3}));
@ -212,7 +203,6 @@ void TestAddingAValueWithScalar(DeprecatedTypeProperties& type) {
}
void TestSelect(DeprecatedTypeProperties& type) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor a = rand({3, 7}, type);
auto a_13 = select(a, 1, 3);
auto a_13_02 = select(select(a, 1, 3), 0, 2);
@ -239,7 +229,6 @@ void TestZeroDim(DeprecatedTypeProperties& type) {
void TestToCFloat() {
Tensor a = zeros({3, 4});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor b = ones({3, 7});
Tensor c = cat({a, b}, 1);
ASSERT_EQ_RESOLVED(c.size(1), 11);
@ -248,7 +237,6 @@ void TestToCFloat() {
ASSERT_EQ_RESOLVED(*e.data_ptr<float>(), e.sum().item<float>());
}
void TestToString() {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor b = ones({3, 7}) * .0000001f;
std::stringstream s;
s << b << "\n";
@ -257,7 +245,6 @@ void TestToString() {
}
void TestIndexingByScalar() {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor tensor = arange(0, 10, kInt);
Tensor one = ones({}, kInt);
for (int64_t i = 0; i < tensor.numel(); ++i) {
@ -283,7 +270,6 @@ void TestIndexingByScalar() {
}
void TestIndexingByZerodimTensor() {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor tensor = arange(0, 10, kInt);
Tensor one = ones({}, kInt);
for (int i = 0; i < tensor.numel(); ++i) {
@ -301,17 +287,13 @@ void TestIndexingByZerodimTensor() {
ASSERT_ANY_THROW(tensor[ones({2, 3, 4}, kInt)].equal(one));
}
void TestIndexingMixedDevice(DeprecatedTypeProperties& type) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor tensor = randn({20, 20}, type);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor index = arange(10, kLong).cpu();
Tensor result = tensor.index({index});
ASSERT_TRUE(result[0].equal(tensor[0]));
}
void TestDispatch() {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor tensor = randn({20, 20});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor other = randn({20, 20});
auto result = tensor.m(relu).m(mse_loss, other, at::Reduction::Mean);
ASSERT_TRUE(result.allclose(mse_loss(relu(tensor), other)));
@ -322,7 +304,6 @@ void TestNegativeDim(DeprecatedTypeProperties& type) {
ASSERT_ANY_THROW(empty({5, -5, 5}, type.options()));
// NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
ASSERT_ANY_THROW(empty({5, -5, -5}, type.options()));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor tensor = empty({5, 5}, type.options());
// NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
ASSERT_ANY_THROW(tensor.reshape({-5, -5}));
@ -334,7 +315,6 @@ void TestView(DeprecatedTypeProperties& type) {
// for details
Tensor tensor = randn({3, 4}, type);;
Tensor viewed = tensor.view({3, 4});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
tensor.resize_({6, 2});
ASSERT_TRUE(tensor.sizes().equals({6, 2}));
ASSERT_TRUE(viewed.sizes().equals({3, 4}));
@ -382,7 +362,6 @@ void test(DeprecatedTypeProperties& type) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(BasicTest, BasicTestCPU) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
manual_seed(123);
test(CPU(kFloat));
@ -390,7 +369,6 @@ TEST(BasicTest, BasicTestCPU) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(BasicTest, BasicTestHalfCPU) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
manual_seed(234);
test(CPU(kHalf));
@ -398,7 +376,6 @@ TEST(BasicTest, BasicTestHalfCPU) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(BasicTest, BasicTestCUDA) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
manual_seed(123);
if (at::hasCUDA()) {

View File

@ -15,9 +15,7 @@ void TestEmptyTensor(DeprecatedTypeProperties& T) {
// out-place function with 2 args
void TestOut2Basic(DeprecatedTypeProperties& T) {
auto a = randn({3, 1}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto b = randn({5}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::vector<int64_t> expanded_sizes = {3, 5};
ASSERT_TRUE(
(a + b).equal(a.expand(expanded_sizes) + b.expand(expanded_sizes)));
@ -26,7 +24,6 @@ void TestOut2Basic(DeprecatedTypeProperties& T) {
// with scalar
void TestOut2WithScalar(DeprecatedTypeProperties& T) {
auto aScalar = ones({}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto b = randn({3, 5}, T);
ASSERT_TRUE(
(aScalar + b).equal(aScalar.expand(b.sizes()) + b.expand(b.sizes())));
@ -34,9 +31,7 @@ void TestOut2WithScalar(DeprecatedTypeProperties& T) {
// old fallback behavior yields error
void TestOut2OldFallback(DeprecatedTypeProperties& T) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto a = randn({3, 5}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto b = randn({5, 3}, T);
// NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
ASSERT_ANY_THROW(a + b);
@ -44,9 +39,7 @@ void TestOut2OldFallback(DeprecatedTypeProperties& T) {
// with mismatched sizes
void TestOut2MismatchedSizes(DeprecatedTypeProperties& T) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto a = randn({3, 5}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto b = randn({7, 5}, T);
// NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
ASSERT_ANY_THROW(a + b);
@ -56,9 +49,7 @@ void TestOut2MismatchedSizes(DeprecatedTypeProperties& T) {
void TestOut3Basic(DeprecatedTypeProperties& T) {
auto a = randn({3, 1, 1}, T);
auto b = randn({1, 2, 1}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto c = randn({1, 1, 5}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::vector<int64_t> expanded_sizes = {3, 2, 5};
ASSERT_TRUE((a + b + c).equal(
a.expand(expanded_sizes) + b.expand(expanded_sizes) +
@ -69,9 +60,7 @@ void TestOut3Basic(DeprecatedTypeProperties& T) {
void TestOut3WithScalar(DeprecatedTypeProperties& T) {
auto aTensorScalar = ones({}, T);
auto b = randn({3, 2, 1}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto c = randn({1, 2, 5}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::vector<int64_t> expanded_sizes = {3, 2, 5};
ASSERT_TRUE(aTensorScalar.addcmul(b, c).equal(
aTensorScalar.expand(expanded_sizes)
@ -80,11 +69,8 @@ void TestOut3WithScalar(DeprecatedTypeProperties& T) {
// old fallback behavior yields error
void TestOut3OldFallback(DeprecatedTypeProperties& T) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto a = randn({3, 2, 5}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto b = randn({2, 3, 5}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto c = randn({5, 3, 2}, T);
// NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
ASSERT_ANY_THROW(a.addcmul(b, c));
@ -92,11 +78,8 @@ void TestOut3OldFallback(DeprecatedTypeProperties& T) {
// with mismatched sizes
void TestOut3MismatchedSizes(DeprecatedTypeProperties& T) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto a = randn({3, 2, 5}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto b = randn({2, 3, 5}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto c = randn({5, 5, 5}, T);
// NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
ASSERT_ANY_THROW(a.addcmul(b, c));
@ -104,7 +87,6 @@ void TestOut3MismatchedSizes(DeprecatedTypeProperties& T) {
// in-place function with 2 args
void TestIn2Basic(DeprecatedTypeProperties& T) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto a = randn({3, 5}, T);
auto b = randn({3, 1}, T);
ASSERT_TRUE((a + b).equal(a + b.expand({3, 5})));
@ -112,7 +94,6 @@ void TestIn2Basic(DeprecatedTypeProperties& T) {
// with scalar
void TestIn2WithScalar(DeprecatedTypeProperties& T) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto a = randn({3, 5}, T);
auto bScalar = ones({}, T);
ASSERT_TRUE((a + bScalar).equal(a + bScalar.expand(a.sizes())));
@ -120,7 +101,6 @@ void TestIn2WithScalar(DeprecatedTypeProperties& T) {
// error: would have to expand inplace arg
void TestIn2ExpandError(DeprecatedTypeProperties& T) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto a = randn({1, 5}, T);
auto b = randn({3, 1}, T);
// NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
@ -129,10 +109,8 @@ void TestIn2ExpandError(DeprecatedTypeProperties& T) {
// in-place function with 3 args
void TestIn3Basic(DeprecatedTypeProperties& T) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto a = randn({3, 5, 2}, T);
auto b = randn({3, 1, 2}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto c = randn({1, 5, 1}, T);
auto aClone = a.clone();
ASSERT_TRUE(a.addcmul_(b, c).equal(
@ -141,10 +119,8 @@ void TestIn3Basic(DeprecatedTypeProperties& T) {
// with scalar
void TestIn3WithScalar(DeprecatedTypeProperties& T) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto a = randn({3, 5, 2}, T);
auto b = randn({3, 1, 2}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto c = randn({1, 5, 1}, T);
auto aClone = a.clone();
auto bScalar = ones({}, T);
@ -155,7 +131,6 @@ void TestIn3WithScalar(DeprecatedTypeProperties& T) {
// error: would have to expand inplace arg
void TestIn3ExpandError(DeprecatedTypeProperties& T) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto a = randn({1, 3, 5}, T);
auto b = randn({4, 1, 1}, T);
auto c = randn({1, 3, 1}, T);
@ -166,9 +141,7 @@ void TestIn3ExpandError(DeprecatedTypeProperties& T) {
// explicit dim specification
void TestExplicitDimBasic(DeprecatedTypeProperties& T) {
auto a = randn({1}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto b = randn({5, 3}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto c = randn({3, 7}, T);
ASSERT_TRUE(a.addmm(b, c).equal(a.expand({5, 7}).addmm(b, c)));
}
@ -176,9 +149,7 @@ void TestExplicitDimBasic(DeprecatedTypeProperties& T) {
// with scalar
void TestExplicitDimWithScalar(DeprecatedTypeProperties& T) {
auto a = randn({1}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto b = randn({5, 3}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto c = randn({3, 7}, T);
Tensor aScalar = ones({}, T);
ASSERT_TRUE(aScalar.addmm(b, c).equal(aScalar.expand({5, 7}).addmm(b, c)));
@ -186,9 +157,7 @@ void TestExplicitDimWithScalar(DeprecatedTypeProperties& T) {
// with mismatched sizes
void TestExplicitDimWithMismatchedSizes(DeprecatedTypeProperties& T) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto b = randn({5, 3}, T);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto c = randn({3, 7}, T);
auto a = randn({3, 3}, T);
// NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto)
@ -197,7 +166,6 @@ void TestExplicitDimWithMismatchedSizes(DeprecatedTypeProperties& T) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(BroadcastTest, Broadcast) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
manual_seed(123);
DeprecatedTypeProperties& T = CPU(kFloat);

View File

@ -10,11 +10,9 @@ TEST(CPUCachingAllocatorTest, check_alloc_free) {
c10::CPUCachingAllocator caching_allocator;
c10::WithCPUCachingAllocatorGuard cachine_allocator_guard(
&caching_allocator);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Tensor a = at::rand({23, 23});
float* data_ptr = a.data_ptr<float>();
a.reset();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
a = at::rand({23, 23});
ASSERT_TRUE(data_ptr == a.data_ptr<float>());
}
@ -23,7 +21,6 @@ TEST(CPUCachingAllocatorTest, check_alloc_free) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(CPUCachingAllocatorTest, check_alloc_outside_free_inside) {
c10::CPUCachingAllocator caching_allocator;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Tensor a = at::rand({23, 23});
{
c10::WithCPUCachingAllocatorGuard cachine_allocator_guard(
@ -31,7 +28,6 @@ TEST(CPUCachingAllocatorTest, check_alloc_outside_free_inside) {
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
float* data_ptr = a.data_ptr<float>();
a.reset();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
a = at::rand({23, 23});
}
}
@ -43,7 +39,6 @@ TEST(CPUCachingAllocatorTest, check_alloc_inside_free_outside) {
{
c10::WithCPUCachingAllocatorGuard cachine_allocator_guard(
&caching_allocator);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
a = at::rand({23, 23});
}
a.reset();

View File

@ -93,7 +93,6 @@ TEST(CPUGeneratorImpl, TestGetSetCurrentSeed) {
// See Note [Acquire lock when using random generators]
auto foo = at::detail::getDefaultCPUGenerator();
std::lock_guard<std::mutex> lock(foo.mutex());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
foo.set_current_seed(123);
auto current_seed = foo.current_seed();
ASSERT_EQ(current_seed, 123);
@ -134,12 +133,9 @@ TEST(CPUGeneratorImpl, TestRNGForking) {
std::lock_guard<std::mutex> lock(default_gen.mutex());
current_gen = default_gen.clone(); // capture the current state of default generator
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto target_value = at::randn({1000});
// Dramatically alter the internal state of the main generator
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto x = at::randn({100000});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto forked_value = at::randn({1000}, current_gen);
ASSERT_EQ(target_value.sum().item<double>(), forked_value.sum().item<double>());
}
@ -168,14 +164,11 @@ TEST(CPUGeneratorImpl, TestPhiloxEngineOffset1) {
// make another engine increment to until the
// first 8 values. Assert that the first call
// of engine2 and the 9th call of engine1 are equal.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Philox4_32_10 engine1(123, 1, 0);
// Note: offset is a multiple of 4.
// So if you want to skip 8 values, offset would
// be 2, since 2*4=8.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Philox4_32_10 engine2(123, 1, 2);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for(int i = 0; i < 8; i++){
// Note: instead of using the engine() call 8 times
// we could have achieved the same functionality by
@ -194,9 +187,7 @@ TEST(CPUGeneratorImpl, TestPhiloxEngineOffset2) {
// make engine2 skip to the 2^64th 128 bit while being at 2^64th thread
// Assert that engine2 should be increment_val+1 steps behind engine1.
unsigned long long increment_val = std::numeric_limits<uint64_t>::max();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Philox4_32_10 engine1(123, 0, increment_val);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Philox4_32_10 engine2(123, increment_val, increment_val);
engine2.incr_n(increment_val);
@ -213,9 +204,7 @@ TEST(CPUGeneratorImpl, TestPhiloxEngineOffset3) {
// start engine2 at thread 1, with offset 0
// Assert that engine1 is 1 step behind engine2.
unsigned long long increment_val = std::numeric_limits<uint64_t>::max();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Philox4_32_10 engine1(123, 0, increment_val);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Philox4_32_10 engine2(123, 1, 0);
engine1.incr();
ASSERT_EQ(engine1(), engine2());
@ -227,9 +216,7 @@ TEST(CPUGeneratorImpl, TestPhiloxEngineIndex) {
// Tests if thread indexing is working properly.
// create two engines with different thread index but same offset.
// Assert that the engines have different sequences.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Philox4_32_10 engine1(123456, 0, 4);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Philox4_32_10 engine2(123456, 1, 4);
ASSERT_NE(engine1(), engine2());
}
@ -247,17 +234,13 @@ TEST(CPUGeneratorImpl, TestMT19937EngineReproducibility) {
// test with zero seed
at::mt19937 engine1(0);
std::mt19937 engine2(0);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for(int i = 0; i < 10000; i++) {
ASSERT_EQ(engine1(), engine2());
}
// test with large seed
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
engine1 = at::mt19937(2147483647);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
engine2 = std::mt19937(2147483647);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for(int i = 0; i < 10000; i++) {
ASSERT_EQ(engine1(), engine2());
}
@ -267,7 +250,6 @@ TEST(CPUGeneratorImpl, TestMT19937EngineReproducibility) {
auto seed = rd();
engine1 = at::mt19937(seed);
engine2 = std::mt19937(seed);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for(int i = 0; i < 10000; i++) {
ASSERT_EQ(engine1(), engine2());
}

View File

@ -34,14 +34,11 @@ at::Tensor run_with_control_flow(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(CPUAllocationPlanTest, with_control_flow) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Tensor a = at::rand({23, 16, 16, 16});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Tensor conv_weight = at::rand({16, 16, 3, 3});
// output shape
// 23, 16, 14, 14
// Flattened shape = 23, 3136
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Tensor linear_weight = at::rand({32, 3136});
at::Tensor output, ref_output;
std::vector<void*> pointers;
@ -66,7 +63,6 @@ TEST(CPUAllocationPlanTest, with_control_flow) {
run_with_control_flow(a, conv_weight, linear_weight, record_mode, pointers);
}
bool success{true};
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (uint64_t i = 0; i < 10; ++i) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool validation_success;
@ -88,14 +84,11 @@ TEST(CPUAllocationPlanTest, with_control_flow) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(CPUAllocationPlanTest, with_profiling_alloc) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Tensor a = at::rand({23, 16, 16, 16});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Tensor conv_weight = at::rand({16, 16, 3, 3});
// output shape
// 23, 16, 14, 14
// Flattened shape = 23, 3136
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::Tensor linear_weight = at::rand({32, 3136});
at::Tensor output, ref_output;
std::vector<void*> pointers;
@ -141,7 +134,6 @@ TEST(CPUAllocationPlanTest, with_profiling_alloc) {
validate_pointers,
false);
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (uint64_t i = 0; i < 10; ++i) {
{
c10::WithProfilingAllocatorGuard
@ -184,14 +176,12 @@ TEST(CPUAllocationPlanTest, with_profiling_alloc) {
int main(int argc, char* argv[]) {
// Setting the priority high to make sure no other allocator gets used instead of this.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SetCPUAllocator(c10::GetDefaultMobileCPUAllocator(), /*priority*/ 100);
// Need to disable mkldnn for this test since it allocatred memory
// via raw_allocate inteface which requires context pointer and raw
// pointer to be the same. Tis is not true for mobile allocator.
at::globalContext().setUserEnabledMkldnn(false);
::testing::InitGoogleTest(&argc, argv);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::manual_seed(42);
return RUN_ALL_TESTS();
}

View File

@ -208,7 +208,6 @@ TEST_F(RNGTest, Normal) {
const auto std = 67.89;
auto gen = at::make_generator<TestCPUGenerator>(MAGIC_NUMBER);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto actual = torch::empty({10});
actual.normal_(mean, std, gen);
@ -224,9 +223,7 @@ TEST_F(RNGTest, Normal_float_Tensor_out) {
const auto std = 67.89;
auto gen = at::make_generator<TestCPUGenerator>(MAGIC_NUMBER);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto actual = torch::empty({10});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::normal_out(actual, mean, torch::full({10}, std), gen);
auto expected = torch::empty_like(actual);
@ -241,9 +238,7 @@ TEST_F(RNGTest, Normal_Tensor_float_out) {
const auto std = 67.89;
auto gen = at::make_generator<TestCPUGenerator>(MAGIC_NUMBER);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto actual = torch::empty({10});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::normal_out(actual, torch::full({10}, mean), std, gen);
auto expected = torch::empty_like(actual);
@ -258,9 +253,7 @@ TEST_F(RNGTest, Normal_Tensor_Tensor_out) {
const auto std = 67.89;
auto gen = at::make_generator<TestCPUGenerator>(MAGIC_NUMBER);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto actual = torch::empty({10});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::normal_out(actual, torch::full({10}, mean), torch::full({10}, std), gen);
auto expected = torch::empty_like(actual);
@ -275,7 +268,6 @@ TEST_F(RNGTest, Normal_float_Tensor) {
const auto std = 67.89;
auto gen = at::make_generator<TestCPUGenerator>(MAGIC_NUMBER);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto actual = at::normal(mean, torch::full({10}, std), gen);
auto expected = torch::empty_like(actual);
@ -290,7 +282,6 @@ TEST_F(RNGTest, Normal_Tensor_float) {
const auto std = 67.89;
auto gen = at::make_generator<TestCPUGenerator>(MAGIC_NUMBER);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto actual = at::normal(torch::full({10}, mean), std, gen);
auto expected = torch::empty_like(actual);
@ -305,7 +296,6 @@ TEST_F(RNGTest, Normal_Tensor_Tensor) {
const auto std = 67.89;
auto gen = at::make_generator<TestCPUGenerator>(MAGIC_NUMBER);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto actual = at::normal(torch::full({10}, mean), torch::full({10}, std), gen);
auto expected = torch::empty_like(actual);
@ -358,7 +348,6 @@ TEST_F(RNGTest, LogNormal) {
const auto std = 6.789;
auto gen = at::make_generator<TestCPUGenerator>(MAGIC_NUMBER);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto actual = torch::empty({10});
actual.log_normal_(mean, std, gen);

View File

@ -11,7 +11,6 @@
using namespace at;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(TestDlconvertor, TestDlconvertor) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
manual_seed(123);
Tensor a = rand({3, 4});
@ -24,7 +23,6 @@ TEST(TestDlconvertor, TestDlconvertor) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(TestDlconvertor, TestDlconvertorNoStrides) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
manual_seed(123);
Tensor a = rand({3, 4});

View File

@ -50,7 +50,6 @@ TORCH_LIBRARY_IMPL(aten, MSNPU, m) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(BackendExtensionTest, TestRegisterOp) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor a = empty({5, 5}, at::kMSNPU);
ASSERT_EQ(a.device().type(), at::kMSNPU);
ASSERT_EQ(a.device().index(), 1);
@ -66,7 +65,6 @@ TEST(BackendExtensionTest, TestRegisterOp) {
ASSERT_EQ(test_int, 2);
// Ensure that non-MSNPU operator still works
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Tensor d = empty({5, 5}, at::kCPU);
ASSERT_EQ(d.device().type(), at::kCPU);
}

View File

@ -42,7 +42,6 @@ TEST(TestHalf, Comparisions) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(TestHalf, Cast) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
Half value = 1.5f;
ASSERT_EQ((int)value, 1);
ASSERT_EQ((short)value, 1);
@ -126,7 +125,6 @@ ASSERT_SAME_TYPE(tinyness_before);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(TestHalf, CommonMath) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
float threshold = 0.00001;
assert(std::abs(std::lgamma(Half(10.0)) - std::lgamma(10.0f)) <= threshold);
assert(std::abs(std::exp(Half(1.0)) - std::exp(1.0f)) <= threshold);

View File

@ -16,7 +16,6 @@ namespace c10 {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(IValueTest, Basic) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::List<int64_t> foo({3, 4, 5});
ASSERT_EQ(foo.use_count(), 1);
IValue bar{foo};
@ -28,7 +27,6 @@ TEST(IValueTest, Basic) {
ASSERT_TRUE(foo2.isIntList());
// NOLINTNEXTLINE(bugprone-use-after-move,clang-analyzer-cplusplus.Move)
ASSERT_TRUE(bar.isNone());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
foo2 = IValue(4.0);
ASSERT_TRUE(foo2.isDouble());
ASSERT_EQ(foo2.toDouble(), 4.0);
@ -42,18 +40,15 @@ TEST(IValueTest, Basic) {
IValue i(4);
ASSERT_TRUE(i.isInt());
ASSERT_EQ(i.toInt(), 4);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
IValue dlist(c10::List<double>({3.5}));
ASSERT_TRUE(dlist.isDoubleList());
ASSERT_TRUE(dlist.toDoubleVector() == std::vector<double>({3.5}));
std::move(dlist).toDoubleList();
// NOLINTNEXTLINE(bugprone-use-after-move)
ASSERT_TRUE(dlist.isNone());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dlist = IValue(c10::List<double>({3.4}));
ASSERT_TRUE(dlist.toDoubleVector() == std::vector<double>({3.4}));
IValue the_list(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::ivalue::Tuple::create({IValue(3.4), IValue(4), IValue(foo)}));
ASSERT_EQ(foo.use_count(), 3);
ASSERT_TRUE(the_list.isTuple());
@ -70,7 +65,6 @@ TEST(IValueTest, Basic) {
auto elem1 = c10::complex<double>(3, 4);
auto elem2 = c10::complex<double>(3, -4);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto elem3 = c10::complex<double>(5, 0);
c10::List<c10::complex<double>> foo1({elem1, elem2, elem3});
ASSERT_EQ(foo1.use_count(), 1);
@ -91,7 +85,6 @@ TEST(IValueTest, Basic) {
ASSERT_TRUE(baz1.toComplexDoubleVector() == std::vector<c10::complex<double>>({elem1, elem2, elem3}));
IValue complex_tuple(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::ivalue::Tuple::create({IValue(c10::complex<double>(3.4, 4.7)), IValue(foo1)}));
ASSERT_TRUE(complex_tuple.isTuple());
ASSERT_EQ(complex_tuple.toTuple()->elements()[0].toComplexDouble(), c10::complex<double>(3.4, 4.7));
@ -102,9 +95,7 @@ TEST(IValueTest, Basic) {
TEST(IValueTest, ComplexDict) {
typedef c10::complex<double> c_type;
c10::Dict<c_type, c_type> m;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto num1 = c_type(2.3, -3.5);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto num2 = c_type(0, 5);
m.insert(num1, 2 * num1);
m.insert(num2, 2 * num2);
@ -113,15 +104,11 @@ TEST(IValueTest, ComplexDict) {
ASSERT_EQ(m_.at(num1), 2 * num1);
ASSERT_EQ(m_.at(num2), 2 * num2);
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
static std::array<IValue, 5> makeSampleIValues() {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return { at::rand({3, 4}), "hello", 42, true, 1.5 };
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
static std::array<IValue, 5> makeMoreSampleIValues() {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
return { at::rand({3, 4}), "goodbye", 23, false, 0.5 };
}
@ -211,7 +198,6 @@ TEST(IValueTest, MoveAssign) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(IValueTest, Tuple) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::tuple<int64_t, at::Tensor> t = std::make_tuple(123, at::randn({1}));
auto iv = IValue(t);
auto t_ = iv.to<std::tuple<int64_t, at::Tensor>>();
@ -299,7 +285,6 @@ TEST(IValueTest, BasicFuture) {
auto f1 = c10::make_intrusive<ivalue::Future>(IntType::get());
ASSERT_FALSE(f1->completed());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
f1->markCompleted(IValue(42));
ASSERT_TRUE(f1->completed());
ASSERT_EQ(42, f1->value().toInt());
@ -317,7 +302,6 @@ TEST(IValueTest, FutureCallbacks) {
ASSERT_EQ(f2.value().toInt(), 43);
++calledTimesA;
});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
f2->markCompleted(IValue(43));
ASSERT_EQ(calledTimesA, 1);
ASSERT_EQ(calledTimesB, 0);
@ -555,7 +539,6 @@ TEST(IValueTest, EnumEquality) {
TEST(IValueTest, isPtrType) {
IValue tensor(at::rand({3, 4}));
IValue undefinedTensor((at::Tensor()));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
IValue integer(42);
IValue str("hello");
@ -638,7 +621,6 @@ TEST(IValueTest, IdentityComparisonAndHashing) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(IValueTest, getSubValues) {
// Scalars have no subvalues.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
IValue integer(42), float_(1.5), complex(c10::complex<double>(2, 3));
IValue::HashAliasedIValues subvalues;

View File

@ -4,7 +4,6 @@
using namespace at;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
bool allClose(const at::Tensor& t1, const at::Tensor& t2, double rtol=1e-5, double atol=1e-8) {
if (!t1.is_same_size(t2)) {
std::cerr << "Difference in tensor shapes: "
@ -27,7 +26,6 @@ bool allClose(const at::Tensor& t1, const at::Tensor& t2, double rtol=1e-5, doub
// and rely on backward tests of each at:: function used in math kernels.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(MathKernelTest, NativeGroupNorm) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
int num_channels = 6;
int N = 2;
int H = 2, W = 2;
@ -36,7 +34,6 @@ TEST(MathKernelTest, NativeGroupNorm) {
const auto input = randn({N, num_channels, H, W});
const auto weight = randn({num_channels});
const auto bias = randn({num_channels});
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
double eps = 1e-05;
for (bool undef_weight: {true, false}) {
for (int num_groups: {3, 6, 1}) {
@ -61,12 +58,10 @@ TEST(MathKernelTest, NativeLayerNorm) {
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
const auto input_ndim = input.dim();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
double eps = 1e-05;
for (bool undef_weight: {true, false}) {
for (int normalized_size: {2, 3}) {
Tensor undef;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
std::vector<int64_t> normalized_shape(normalized_size, 10);
const auto weight = rand(normalized_shape);
const auto bias = rand(normalized_shape);
@ -117,7 +112,6 @@ TEST(MathKernelTest, SiluBackward) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(MathKernelTest, NarrowCopy) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto x = rand({5, 8, 7});
for (int64_t dim = 0; dim < 3; ++dim) {
const int64_t start = 1, length = 4;
@ -136,8 +130,6 @@ TEST(MathKernelTest, Bmm) {
EXPECT_THROW(auto z = at::bmm(x, y), std::exception);
};
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
test_bmm(5);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
test_bmm(1000);
}

Some files were not shown because too many files have changed in this diff Show More