[2/N] Fix cppcoreguidelines-init-variables suppression (#146237)

This PR removes all `cppcoreguidelines-init-variables` suppressions by initializing the affected variables at their point of declaration.
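
For context, the pattern applied throughout the diff is sketched below. This is adapted from the `BitsToDouble` hunk in this PR (not copied verbatim): clang-tidy's `cppcoreguidelines-init-variables` check flags local variables declared without an initializer, which previously required a `NOLINT` comment; giving the variable a value at its declaration removes both the warning and the suppression, and `memcpy` simply overwrites the placeholder value.

```cpp
// Before: an uninitialized local needed a suppression comment.
//   // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
//   double D;
// After: initialize at the declaration, so no suppression is needed.
#include <cstdint>
#include <cstring>

inline double BitsToDouble(uint64_t Bits) {
  double D = 0;  // placeholder value, overwritten by memcpy below
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  std::memcpy(&D, &Bits, sizeof(Bits));
  return D;
}
```

Where no meaningful default exists for a scalar, a sentinel such as `-1` or `nullptr` is used; opaque handle types (e.g. `cudaIpcEventHandle_t`, `CUfileHandle_t`) are value-initialized with `{}` instead.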

Pull Request resolved: https://github.com/pytorch/pytorch/pull/146237
Approved by: https://github.com/ezyang
Author: cyy
Date: 2025-06-19 23:26:42 +00:00
Committed by: PyTorch MergeBot
Parent: 52f873adc2
Commit: 3c2324c64a
18 changed files with 28 additions and 134 deletions

View File

@ -1388,7 +1388,7 @@ bool TensorIteratorBase::fast_set_up(const TensorIteratorConfig& config) {
case FastSetupType::NON_OVERLAPPING_DENSE:
{
// find the index of a defined tensor in operands_ start from input tensor
int i_defined; // NOLINT(cppcoreguidelines-init-variables)
int i_defined = -1;
for (i_defined = ntensors() - 1; i_defined >= 0; --i_defined) {
if (tensor(i_defined).defined()) break;
}

View File

@ -1787,8 +1787,7 @@ TEST(NewOperatorRegistrationTest, dispatchAutogradPrecedence) {
}
TEST(NewOperatorRegistrationTest, throwsWhenRegisterToBackendMapsToAutogradOther) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool fpga_called, math_called = false;
bool fpga_called = false, math_called = false;
auto m = MAKE_TORCH_LIBRARY(test);
m.def("fn", torch::dispatch(c10::DispatchKey::FPGA, [&](const Tensor& x) { fpga_called = true; return x; }));
m.impl("fn", c10::DispatchKey::CompositeImplicitAutograd, [&](const Tensor& x) { math_called = true; return x; });

View File

@ -63,8 +63,7 @@ TEST(CPUAllocationPlanTest, with_control_flow) {
}
bool success{true};
for (uint64_t i = 0; i < 10; ++i) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool validation_success;
bool validation_success = false;
{
c10::WithValidateAllocationPlanGuard
validation_guard(&plan, &validation_success);

View File

@ -80,8 +80,7 @@ TEST(Vitals, OnAndOff) {
TEST(Vitals, APIVitals) {
std::stringstream buffer;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
bool rvalue;
bool rvalue = false;
std::streambuf* sbuf = std::cout.rdbuf();
std::cout.rdbuf(buffer.rdbuf());
{

View File

@ -19,8 +19,7 @@ float halfbits2float(unsigned short h) {
exponent = 0xff;
} else if (!exponent) { /* Denorm or Zero */
if (mantissa) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
unsigned int msb;
unsigned int msb = 0;
exponent = 0x71;
do {
msb = (mantissa & 0x400000);

View File

@ -610,8 +610,7 @@ inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
/// This function takes a 64-bit integer and returns the bit equivalent double.
inline double BitsToDouble(uint64_t Bits) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double D;
double D = 0;
static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
memcpy(&D, &Bits, sizeof(Bits));
return D;

View File

@ -3359,12 +3359,10 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma(
return false;
}
float wgt = 1.f;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float bio;
if (weights) {
wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];
}
bio = wgt * scale_bias[2 * idx + 1];
float bio = wgt * scale_bias[2 * idx + 1];
wgt = wgt * scale_bias[2 * idx];
__m256 vbio = _mm256_set1_ps(bio);
__m256 vwgt = _mm256_set1_ps(wgt);
@ -3539,12 +3537,10 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma(
return false;
}
float wgt = 1.f;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float bio;
if (weights) {
wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];
}
bio = wgt * scale_bias[2 * idx + 1];
float bio = wgt * scale_bias[2 * idx + 1];
wgt = wgt * scale_bias[2 * idx];
__m256 vbio = _mm256_set1_ps(bio);
__m256 vwgt = _mm256_set1_ps(wgt);
@ -3650,12 +3646,10 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma(
return false;
}
float wgt = 1.f;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float bio;
if (weights) {
wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];
}
bio = wgt * scale_bias[2 * idx + 1];
float bio = wgt * scale_bias[2 * idx + 1];
wgt = wgt * scale_bias[2 * idx];
__m256 vbio = _mm256_set1_ps(bio);
__m256 vwgt = _mm256_set1_ps(wgt);
@ -3727,12 +3721,10 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma(
return false;
}
float wgt = 1.f;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float bio;
if (weights) {
wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];
}
bio = wgt * scale_bias[2 * idx + 1];
float bio = wgt * scale_bias[2 * idx + 1];
wgt = wgt * scale_bias[2 * idx];
__m256 vbio = _mm256_set1_ps(bio);
__m256 vwgt = _mm256_set1_ps(wgt);
@ -3794,12 +3786,10 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma(
return false;
}
float wgt = 1.f;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float bio;
if (weights) {
wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];
}
bio = wgt * scale_bias[2 * idx + 1];
float bio = wgt * scale_bias[2 * idx + 1];
wgt = wgt * scale_bias[2 * idx];
__m256 vbio = _mm256_set1_ps(bio);
__m256 vwgt = _mm256_set1_ps(wgt);
@ -3946,12 +3936,10 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma(
return false;
}
float wgt = 1.f;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float bio;
if (weights) {
wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];
}
bio = wgt * scale_bias[2 * idx + 1];
float bio = wgt * scale_bias[2 * idx + 1];
wgt = wgt * scale_bias[2 * idx];
__m256 vbio = _mm256_set1_ps(bio);
__m256 vwgt = _mm256_set1_ps(wgt);
@ -4126,12 +4114,10 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma(
return false;
}
float wgt = 1.f;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float bio;
if (weights) {
wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];
}
bio = wgt * scale_bias[2 * idx + 1];
float bio = wgt * scale_bias[2 * idx + 1];
wgt = wgt * scale_bias[2 * idx];
__m256 vbio = _mm256_set1_ps(bio);
__m256 vwgt = _mm256_set1_ps(wgt);
@ -4237,12 +4223,10 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma(
return false;
}
float wgt = 1.f;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float bio;
if (weights) {
wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];
}
bio = wgt * scale_bias[2 * idx + 1];
float bio = wgt * scale_bias[2 * idx + 1];
wgt = wgt * scale_bias[2 * idx];
__m256 vbio = _mm256_set1_ps(bio);
__m256 vwgt = _mm256_set1_ps(wgt);
@ -4314,12 +4298,10 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma(
return false;
}
float wgt = 1.f;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float bio;
if (weights) {
wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];
}
bio = wgt * scale_bias[2 * idx + 1];
float bio = wgt * scale_bias[2 * idx + 1];
wgt = wgt * scale_bias[2 * idx];
__m256 vbio = _mm256_set1_ps(bio);
__m256 vwgt = _mm256_set1_ps(wgt);
@ -4381,12 +4363,10 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma(
return false;
}
float wgt = 1.f;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float bio;
if (weights) {
wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];
}
bio = wgt * scale_bias[2 * idx + 1];
float bio = wgt * scale_bias[2 * idx + 1];
wgt = wgt * scale_bias[2 * idx];
__m256 vbio = _mm256_set1_ps(bio);
__m256 vwgt = _mm256_set1_ps(wgt);

View File

@ -113,8 +113,6 @@ def unroll(uf, IndexType, InType, OutType, use_weights, isa, fused, use_offsets)
if InType == "uint8_t":
code.append(" " + OutType + " wgt = 1.f;")
code.append(" // NOLINTNEXTLINE(cppcoreguidelines-init-variables)")
code.append(" " + OutType + " bio;")
code.append(" if (weights) {")
code.append(
" wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];"
@ -125,7 +123,7 @@ def unroll(uf, IndexType, InType, OutType, use_weights, isa, fused, use_offsets)
" const float* scale_bias = reinterpret_cast<const float*>(\n"
" &input[idx * fused_block_size + block_size]);"
)
code.append(" bio = wgt * scale_bias[1];")
code.append(" " + OutType + " bio = wgt * scale_bias[1];")
code.append(" wgt = wgt * scale_bias[0];")
else:
code.append(" bio = wgt * scale_bias[2 * idx + 1];")
@ -316,8 +314,6 @@ def generic(IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
if InType == "uint8_t":
code.append(" " + OutType + " wgt = 1.f;")
code.append(" // NOLINTNEXTLINE(cppcoreguidelines-init-variables)")
code.append(" " + OutType + " bio;")
code.append(" if (weights) {")
code.append(
" wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];"
@ -328,10 +324,10 @@ def generic(IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
" const float* scale_bias = reinterpret_cast<const float*>(\n"
" &input[idx * fused_block_size + block_size]);"
)
code.append(" bio = wgt * scale_bias[1];")
code.append(" " + OutType + " bio = wgt * scale_bias[1];")
code.append(" wgt = wgt * scale_bias[0];")
else:
code.append(" bio = wgt * scale_bias[2 * idx + 1];")
code.append(" " + OutType + " bio = wgt * scale_bias[2 * idx + 1];")
code.append(" wgt = wgt * scale_bias[2 * idx];")
code.append(" __m256 vbio = _mm256_set1_ps(bio);")
else:

View File

@ -196,8 +196,7 @@ void PyTorchStreamReader::init() {
// version check
at::DataPtr version_ptr;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
size_t version_size;
size_t version_size = 0;
if (hasRecord(".data/version")) {
std::tie(version_ptr, version_size) = getRecord(".data/version");
} else {

View File

@ -2890,7 +2890,6 @@ TEST_F(ModulesTest, TanhGELU) {
ASSERT_TRUE(torch::allclose(y, y_exp, 1.4e-06, 1e-05));
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST_F(ModulesTest, Mish) {
Mish model;
auto x = torch::randn(100) * 10;

View File

@ -19,8 +19,7 @@ TEST(SubgraphUtilsTest, Basic) {
for (bool reverse_iterate : {true, false}) {
// Merge everything into a single subgraph
bool first = true;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
Node* subgraph;
Node* subgraph = nullptr;
auto it =
reverse_iterate ? graph->nodes().rbegin() : graph->nodes().begin();
auto end = reverse_iterate ? graph->nodes().rend() : graph->nodes().end();
@ -84,8 +83,7 @@ graph(%a : Tensor, %b : Tensor, %c : Tensor):
while (graph2->next() != *graph->nodes().end()) {
SubgraphUtils::mergeNodeIntoSubgraph(graph2->next(), graph2);
}
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
Node* subgraph;
Node* subgraph = nullptr;
if (reverse_merge) {
SubgraphUtils::mergeNodeIntoSubgraph(graph2, graph1);
subgraph = graph1;

View File

@ -202,9 +202,7 @@ TEST(BoundsInference, _5) {
Tensor b = Compute("b", {n}, [&](const VarHandle& i) { return a.load(i); });
LoopNest l({b});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr inner;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail;
std::vector<ForPtr> loops = l.getLoopStmtsFor(b);
LoopNest::splitWithTail(loops[0], 16, &inner, &tail);
@ -680,7 +678,6 @@ TEST(BoundsInference, GetPotentialHazardsLoopSplit) {
});
LoopNest l({A});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr inner, tail;
// Splitting with tail by something offset creates a tail which also writes to

View File

@ -164,9 +164,7 @@ TEST(LoopNest, ExprSliceHeadWithLoopOptions) {
};
Tensor tensor = Compute("f", {10}, func);
LoopNest l({tensor});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr head;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail;
std::vector<ForPtr> loops = l.getAllLoopNestsWritingToBuf(tensor.buf()).at(0);
loops[0]->set_gpu_block_index(LoopOptions::IDX_Y);
@ -187,16 +185,12 @@ TEST(LoopNest, ExprSliceTailWithLoopOptions) {
};
Tensor tensor = Compute("f", {10}, func);
LoopNest l({tensor});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr head;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail;
std::vector<ForPtr> loops = l.getAllLoopNestsWritingToBuf(tensor.buf()).at(0);
LoopNest::sliceTail(loops[0], 4, &head, &tail);
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail_head;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail_tail;
tail->set_gpu_block_index(LoopOptions::IDX_Y);
LoopNest::sliceTail(tail, 2, &tail_head, &tail_tail);
@ -219,9 +213,7 @@ TEST(LoopNest, ExprSliceHeadWhenFactorEqualsSize) {
};
Tensor tensor = Compute("f", {10}, func);
LoopNest l({tensor});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr head;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail;
std::vector<ForPtr> loops = l.getAllLoopNestsWritingToBuf(tensor.buf()).at(0);
LoopNest::sliceHead(loops[0], 10, &head, &tail);
@ -239,9 +231,7 @@ TEST(LoopNest, ExprSliceHeadWhenFactorLargerThanSize) {
};
Tensor tensor = Compute("f", {10}, func);
LoopNest l({tensor});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr head;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail;
std::vector<ForPtr> loops = l.getAllLoopNestsWritingToBuf(tensor.buf()).at(0);
LoopNest::sliceHead(loops[0], 100, &head, &tail);
@ -259,9 +249,7 @@ TEST(LoopNest, ExprSliceHead) {
};
Tensor tensor = Compute("f", {10}, func);
LoopNest l({tensor});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr head;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail;
std::vector<ForPtr> loops = l.getAllLoopNestsWritingToBuf(tensor.buf()).at(0);
LoopNest::sliceHead(loops[0], 4, &head, &tail);
@ -283,9 +271,7 @@ TEST(LoopNest, ExprSliceHeadWithNonZeroStart) {
LoopNest l({tensor});
std::vector<ForPtr> loops = l.getAllLoopNestsWritingToBuf(tensor.buf()).at(0);
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr head;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail;
LoopNest::sliceTail(loops[0], 4, &head, &tail);
// head: [0, 6)
@ -307,9 +293,7 @@ TEST(LoopNest, ExprSliceTailWhenFactorEqualsSize) {
};
Tensor tensor = Compute("f", {10}, func);
LoopNest l({tensor});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr head;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail;
std::vector<ForPtr> loops = l.getAllLoopNestsWritingToBuf(tensor.buf()).at(0);
LoopNest::sliceTail(loops[0], 10, &head, &tail);
@ -329,9 +313,7 @@ TEST(LoopNest, ExprSliceTailWhenFactorLargerThanSize) {
};
Tensor tensor = Compute("f", {10}, func);
LoopNest l({tensor});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr head;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail;
std::vector<ForPtr> loops = l.getAllLoopNestsWritingToBuf(tensor.buf()).at(0);
LoopNest::sliceTail(loops[0], 100, &head, &tail);
@ -349,9 +331,7 @@ TEST(LoopNest, ExprSliceTail) {
};
Tensor tensor = Compute("f", {10}, func);
LoopNest l({tensor});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr head;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail;
std::vector<ForPtr> loops = l.getAllLoopNestsWritingToBuf(tensor.buf()).at(0);
LoopNest::sliceTail(loops[0], 4, &head, &tail);
@ -375,9 +355,7 @@ TEST(LoopNest, ExprSplitAndSlice) {
Tensor tensor = Compute("f", {100}, func);
LoopNest l({tensor});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr inner;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail;
std::vector<ForPtr> loops = l.getAllLoopNestsWritingToBuf(tensor.buf()).at(0);
// outer: [0, 4)
@ -428,9 +406,7 @@ TEST(LoopNest, ExprSliceAndNormalize) {
LoopNest l({tensor});
std::vector<ForPtr> loops = l.getAllLoopNestsWritingToBuf(tensor.buf()).at(0);
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr head;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail;
LoopNest::sliceHead(loops[0], 2, &head, &tail);
// head: [0, 2)
@ -460,9 +436,7 @@ TEST(LoopNest, ExprSliceWithVariableDimension) {
std::vector<ForPtr> loops =
l.getAllLoopNestsWritingToBuf(tensor.buf()).at(0);
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr head;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr tail;
LoopNest::sliceHead(loops[0], 2, &head, &tail);
@ -850,7 +824,6 @@ TEST(LoopNest, SplitWithTailWithLoopOptions) {
Tensor tensor = Compute("f", {M}, [&](const ExprHandle& m) {
return a_buf.load(m) + b_buf.load(m) + 1.0f;
});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr inner, tail;
LoopNest l({tensor});
@ -880,7 +853,6 @@ TEST(LoopNest, SplitWithMaskWithLoopOptions) {
Tensor tensor = Compute("f", {M}, [&](const ExprHandle& m) {
return a_buf.load(m) + b_buf.load(m) + 1.0f;
});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr inner;
LoopNest l({tensor});
@ -1433,7 +1405,6 @@ TEST(LoopNest, ScheduleSplitTwiceThenInline) {
Tensor a = Compute("a", {18}, [&](const VarHandle& i) { return i * i; });
Tensor b = Compute(
"b", {2}, [&](const VarHandle& j) { return a.load(j + ExprHandle(8)); });
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr i_inner;
LoopNest l({b}, {a, b});
@ -3410,9 +3381,7 @@ TEST(LoopNest, NormalizeAndSplitWithTail) {
LoopNest::normalize(for_stmt);
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr x_inner;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr x_tail;
LoopNest::splitWithTail(for_stmt, 10, &x_inner, &x_tail);
@ -3454,9 +3423,7 @@ TEST(LoopNest, NotNormalizeAndSplitWithTail) {
auto for_stmt = For::make(x, 5, 15, Store::make(a_buf, {x}, x * 2));
auto parent_block = Block::make({for_stmt});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr x_inner;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr x_tail;
LoopNest::splitWithTail(for_stmt, 8, &x_inner, &x_tail);
@ -5349,7 +5316,6 @@ TEST(LoopNest, fuseLoopsSimple) {
auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j)));
auto forK = For::make(k, 0, 100, Store::make(b_buf, {k}, Mul::make(20, k)));
auto par = Block::make({forJ, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_TRUE(LoopNest::fuseLoops({forJ, forK}, &fused_loop));
@ -5389,7 +5355,6 @@ TEST(LoopNest, fuseLoopsMultiple) {
auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j)));
auto forK = For::make(k, 0, 100, Store::make(b_buf, {k}, Mul::make(20, k)));
auto par = Block::make({forI, forJ, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_TRUE(LoopNest::fuseLoops({forI, forJ, forK}, &fused_loop));
@ -5446,7 +5411,6 @@ TEST(LoopNest, fuseLoopsNested) {
auto forM = For::make(m, 0, 20, Block::make({initA, forJ}));
auto forN = For::make(n, 0, 20, Block::make({initB, forK}));
auto par = Block::make({forM, forN});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_TRUE(LoopNest::fuseLoops({forM, forN}, &fused_loop));
@ -5506,7 +5470,6 @@ TEST(LoopNest, fuseLoopsNested2D) {
50,
Store::make(b_buf, {m, n}, Add::make(m, Mul::make(n, 100)))));
auto par = Block::make({forI, forM});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_TRUE(LoopNest::fuseLoops({forI, forM}, &fused_loop));
@ -5547,7 +5510,6 @@ TEST(LoopNest, fuseLoopsNested2DInner) {
auto forN = For::make(
n, 0, 100, Store::make(b_buf, {i, n}, Add::make(i, Mul::make(n, 100))));
auto forI = For::make(i, 0, 20, Block::make({forJ, forN}));
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_TRUE(LoopNest::fuseLoops({forJ, forN}, &fused_loop));
@ -5583,7 +5545,6 @@ TEST(LoopNest, fuseLoopsDifferentStopBounds) {
auto forK = For::make(k, 0, 50, Store::make(b_buf, {j}, Mul::make(20, k)));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
auto par = Block::make({forJ, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_FALSE(LoopNest::fuseLoops({forJ, forK}, &fused_loop));
}
@ -5604,7 +5565,6 @@ TEST(LoopNest, fuseLoopsDifferentStartBounds) {
auto forK = For::make(k, 50, 100, Store::make(b_buf, {j}, Mul::make(20, k)));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
auto par = Block::make({forJ, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_FALSE(LoopNest::fuseLoops({forJ, forK}, &fused_loop));
}
@ -5627,7 +5587,6 @@ TEST(LoopNest, fuseLoopsNotContiguous) {
auto forK = For::make(k, 0, 100, Store::make(b_buf, {j}, Mul::make(20, k)));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
auto par = Block::make({forJ, initB, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_FALSE(LoopNest::fuseLoops({forJ, forK}, &fused_loop));
}
@ -5654,7 +5613,6 @@ TEST(LoopNest, fuseLoopsWithDifferentParents) {
auto forK = For::make(k, 50, 100, Store::make(b_buf, {j}, Mul::make(20, k)));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
auto par = Block::make({forI, initB, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_FALSE(LoopNest::fuseLoops({forJ, forK}, &fused_loop));
}
@ -5676,7 +5634,6 @@ TEST(LoopNest, fuseLoopsWithVariableBounds) {
// NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks,cppcoreguidelines-avoid-magic-numbers)
auto forK = For::make(k, 0, N, Store::make(b_buf, {j}, Mul::make(20, k)));
auto par = Block::make({forJ, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_TRUE(LoopNest::fuseLoops({forJ, forK}, &fused_loop));
@ -5712,7 +5669,6 @@ TEST(LoopNest, fuseLoopsWithExprBounds) {
auto forJ = For::make(j, 0, M + N, Store::make(a_buf, {j}, Mul::make(10, j)));
auto forK = For::make(k, 0, M + N, Store::make(b_buf, {j}, Mul::make(20, k)));
auto par = Block::make({forJ, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_TRUE(LoopNest::fuseLoops({forJ, forK}, &fused_loop));
@ -5749,7 +5705,6 @@ TEST(LoopNest, fuseLoopsWithDifferentExprBounds) {
// NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks,cppcoreguidelines-avoid-magic-numbers)
auto forK = For::make(k, M, N + N, Store::make(b_buf, {j}, Mul::make(20, k)));
auto par = Block::make({forJ, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_TRUE(LoopNest::fuseLoops({forJ, forK}, &fused_loop));
@ -5784,7 +5739,6 @@ TEST(LoopNest, fuseLoopsWithNonOverlappingBufferAccesses) {
For::make(k, 10, 100, Store::make(a_buf, {k + 100}, Mul::make(30, k)));
auto par = Block::make({forJ, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_TRUE(LoopNest::fuseLoops({forJ, forK}, &fused_loop));
@ -5830,7 +5784,6 @@ TEST(LoopNest, fuseLoopsWithNonOverlapping2DBufferAccesses) {
auto forM = For::make(m, 0, 20, forN);
auto par = Block::make({forI, forM});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_TRUE(LoopNest::fuseLoops({forI, forM}, &fused_loop));
@ -5876,7 +5829,6 @@ TEST(LoopNest, fuseLoopsWithReductions) {
auto forM =
For::make(m, 0, 20, Store::make(c_buf, {m}, Load::make(a_buf, {m})));
auto par = Block::make({forI, forM});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_TRUE(LoopNest::fuseLoops({forI, forM}, &fused_loop));
@ -5932,7 +5884,6 @@ TEST(LoopNest, fuseLoopsWith2DReductions) {
auto forM = For::make(m, 0, 20, For::make(n, 0, 40, storeC));
auto par = Block::make({forI, forM});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_TRUE(LoopNest::fuseLoops({forI, forM}, &fused_loop));
@ -5980,7 +5931,6 @@ TEST(LoopNest, fuseLoopsWithComplexIndices) {
auto forM = For::make(m, 0, 20, For::make(n, 0, 20, storeB));
auto par = Block::make({forI, forM});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_TRUE(LoopNest::fuseLoops({forI, forM}, &fused_loop));
@ -6025,7 +5975,6 @@ TEST(LoopNest, fuseLoopsWithMixedLoopVarsAsIndices) {
auto forM = For::make(m, 0, 20, For::make(n, 0, 20, storeB));
auto par = Block::make({forI, forM});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_FALSE(LoopNest::fuseLoops({forI, forM}, &fused_loop));
}
@ -6054,7 +6003,6 @@ TEST(LoopNest, fuseLoopsWithTranspose) {
auto forM = For::make(m, 0, 20, For::make(n, 0, 20, storeB));
auto par = Block::make({forI, forM});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_FALSE(LoopNest::fuseLoops({forI, forM}, &fused_loop));
}
@ -6075,7 +6023,6 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies1) {
For::make(k, 10, 100, Store::make(a_buf, {k - 1}, Mul::make(20, k)));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
auto par = Block::make({forJ, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_FALSE(LoopNest::fuseLoops({forJ, forK}, &fused_loop));
}
@ -6096,7 +6043,6 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies2) {
For::make(k, 10, 100, Store::make(a_buf, {k + 50}, Mul::make(20, k)));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
auto par = Block::make({forJ, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_FALSE(LoopNest::fuseLoops({forJ, forK}, &fused_loop));
}
@ -6139,7 +6085,6 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies3) {
auto forN = For::make(n, 0, 20, Block::make({initB, forK}));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
auto par = Block::make({forM, forN});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_FALSE(LoopNest::fuseLoops({forM, forN}, &fused_loop));
}
@ -6181,7 +6126,6 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies4) {
Store::make(a_buf, {m + 1, n}, Add::make(m, Mul::make(n, 100)))));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
auto par = Block::make({forI, forM});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_FALSE(LoopNest::fuseLoops({forI, forM}, &fused_loop));
}
@ -6209,7 +6153,6 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies5) {
Store::make(a_buf, {i, n + 1}, Add::make(i, Mul::make(n, 100))));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores,cppcoreguidelines-avoid-magic-numbers)
auto forI = For::make(i, 0, 20, Block::make({forJ, forN}));
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_FALSE(LoopNest::fuseLoops({forJ, forN}, &fused_loop));
}
@ -6235,7 +6178,6 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies6) {
b_buf, {k}, Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k}))));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
auto par = Block::make({forJ, forK});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_FALSE(LoopNest::fuseLoops({forJ, forK}, &fused_loop));
}
@ -6261,7 +6203,6 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies7) {
auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j)));
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
auto par = Block::make({forK, forJ});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr fused_loop;
ASSERT_FALSE(LoopNest::fuseLoops({forK, forJ}, &fused_loop));
}

View File

@ -1066,7 +1066,6 @@ TEST(Reductions, ReduceOverSplitRfactor) {
Tensor c = Reduce("sum", {}, Sum(), b, {N, K});
LoopNest loop({c});
std::vector<ForPtr> loops = loop.getLoopStmtsFor(c);
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr i, t;
LoopNest::splitWithTail(loops[1], SPLIT_FACTOR, &i, &t);
LoopNest::reorderAxis(loops[0], i);
@ -1573,7 +1572,6 @@ TEST(Reductions, ReductionSplitCacheConsumerAccess) {
LoopNest l({e}, {c, d, e});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr inner;
// Split outer reduction axis.
@ -1623,7 +1621,6 @@ TEST(Reductions, ReductionReorderCacheConsumerAccess) {
LoopNest l({e}, {c, d, e});
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ForPtr inner;
// reorder outer reduction axes.
@ -1678,7 +1675,6 @@ TEST(Reductions, ReductionRfactorCacheTempOuter) {
LoopNest::reorderAxis(loops.at(0), loops.at(1));
loops = loop.getLoopStmtsFor(c);
auto c_body = loop.getAllWritesToBuf(c.buf())[1];
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
BufPtr rfac_buf;
ASSERT_TRUE(loop.rfactor(c_body, loops.at(0), &rfac_buf));
loop.distributeLoop(loops.at(0));
@ -1744,7 +1740,6 @@ TEST(Reductions, ReductionRfactorCacheTempInner) {
LoopNest::reorderAxis(loops.at(0), loops.at(1));
loops = loop.getLoopStmtsFor(c);
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
BufPtr rfac_buf;
ASSERT_TRUE(loop.rfactor(c_body, loops.at(0), &rfac_buf));
loop.distributeLoop(loops.at(0));

View File

@ -331,8 +331,7 @@ static PyObject* THPStorage_shareCuda(PyObject* self, PyObject* noargs) {
_ref_counter = PyBytes_FromString((sent_data->handle()).c_str());
_ref_counter_offset = THPUtils_packUInt64(sent_data->offset());
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
cudaIpcEventHandle_t ipc_event_handle;
cudaIpcEventHandle_t ipc_event_handle{};
if (sent_data->event_sync_required_) {
C10_CUDA_CHECK(

View File

@ -92,8 +92,7 @@ static PyObject* THCPEvent_from_ipc_handle(
}
THCPEvent* self = (THCPEvent*)ptr.get();
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
cudaIpcEventHandle_t handle;
cudaIpcEventHandle_t handle{};
std::memcpy(&handle, handle_string.c_str(), handle_string.size());
new (&self->cuda_event) at::cuda::CUDAEvent(device.index(), &handle);
@ -175,8 +174,7 @@ static PyObject* THCPEvent_synchronize(PyObject* _self, PyObject* noargs) {
static PyObject* THCPEvent_ipc_handle(PyObject* _self, PyObject* noargs) {
HANDLE_TH_ERRORS
auto self = (THCPEvent*)_self;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
cudaIpcEventHandle_t handle;
cudaIpcEventHandle_t handle{};
self->cuda_event.ipc_handle(&handle);
return PyBytes_FromStringAndSize((const char*)&handle, sizeof(handle));
END_HANDLE_TH_ERRORS

View File

@ -91,8 +91,7 @@ void gds_deregister_buffer(const at::Storage& storage) {
int64_t gds_register_handle(int fd) {
CUfileDescr_t cf_descr;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
CUfileHandle_t cf_handle;
CUfileHandle_t cf_handle{};
memset((void*)&cf_descr, 0, sizeof(CUfileDescr_t));
cf_descr.handle.fd = fd;
cf_descr.type = CU_FILE_HANDLE_TYPE_OPAQUE_FD;

View File

@ -27,8 +27,7 @@ static void start_manager() {
std::array<int, 2> pipe_ends;
SYSCHECK_ERR_RETURN_NEG1(pipe(pipe_ends.data()));
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
pid_t pid;
pid_t pid = -1;
SYSCHECK_ERR_RETURN_NEG1(pid = fork());
if (!pid) {
SYSCHECK_ERR_RETURN_NEG1(close(pipe_ends[0]));
@ -99,8 +98,7 @@ THManagedMapAllocatorInit::THManagedMapAllocatorInit(
: manager_handle_(manager_handle ? manager_handle : "") {
// TODO: unlock GIL when contacting the manager
try {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
ClientSocket* socket;
ClientSocket* socket = nullptr;
if (!manager_handle_.empty()) {
socket = &get_manager_socket(manager_handle_);
} else {