Migrate C10_UNUSED to [[maybe_unused]] (#6357) (#138364)

Summary: Pull Request resolved: https://github.com/pytorch/executorch/pull/6357

Pull Request resolved: https://github.com/pytorch/pytorch/pull/138364
Approved by: https://github.com/Skylion007, https://github.com/eqy
This commit is contained in:
Richard Barnes
2024-10-19 13:17:43 +00:00
committed by PyTorch MergeBot
parent 2f6a70bfea
commit fddabc6e0b
139 changed files with 834 additions and 690 deletions

View File

@ -2220,7 +2220,7 @@ TEST(DataLoaderTest, ChunkDatasetCrossChunkShuffle) {
for (const auto i : c10::irange(
(chunk_count + cross_chunk_shuffle_count - 1) /
cross_chunk_shuffle_count)) {
for (C10_UNUSED const auto j : c10::irange(chunk_size)) {
for ([[maybe_unused]] const auto j : c10::irange(chunk_size)) {
for (const auto k : c10::irange(cross_chunk_shuffle_count)) {
if (i * cross_chunk_shuffle_count + k < chunk_count) {
expected_result.push_back(i * cross_chunk_shuffle_count + k);

View File

@ -1343,7 +1343,7 @@ TEST_F(FunctionalTest, GumbelSoftmax) {
auto counts = torch::zeros_like(logits);
torch::Tensor y_draw;
for (C10_UNUSED const auto i : c10::irange(num_draws)) {
for ([[maybe_unused]] const auto i : c10::irange(num_draws)) {
y_draw =
F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true));
counts += y_draw;

View File

@ -123,7 +123,7 @@ bool test_mnist(
torch::Device device(with_cuda ? torch::kCUDA : torch::kCPU);
model->to(device);
for (C10_UNUSED const auto epoch : c10::irange(number_of_epochs)) {
for ([[maybe_unused]] const auto epoch : c10::irange(number_of_epochs)) {
// NOLINTNEXTLINE(performance-for-range-copy)
for (torch::data::Example<> batch : *data_loader) {
auto data = batch.data.to(device);

View File

@ -3511,7 +3511,7 @@ void _multihead_attn_test_helper(
std::uniform_int_distribution<int> d_2_10(2, 10);
std::uniform_int_distribution<int> d_3_10(3, 10);
bool registration_checked = false;
for (C10_UNUSED const auto i : c10::irange(100)) {
for ([[maybe_unused]] const auto i : c10::irange(100)) {
const auto batch_sz = d_2_10(generator);
const auto seq_len = d_2_10(generator);
const auto d_head = d_3_10(generator);

View File

@ -398,7 +398,8 @@ std::vector<torch::Tensor> PackedSequenceTest_ordered_sequence(
torch::ScalarType tensor_type) {
std::vector<torch::Tensor> seqs;
seqs.reserve(PackedSequenceTest_batch_size);
for (C10_UNUSED const auto i : c10::irange(PackedSequenceTest_batch_size)) {
for ([[maybe_unused]] const auto i :
c10::irange(PackedSequenceTest_batch_size)) {
seqs.emplace_back(torch::empty(
{torch::randint(1, PackedSequenceTest_max_length, {1}).item<int64_t>()},
tensor_type));

View File

@ -12,7 +12,7 @@ struct OperationTest : torch::test::SeedingFixture {
};
TEST_F(OperationTest, Lerp) {
for (C10_UNUSED const auto i : c10::irange(TEST_AMOUNT)) {
for ([[maybe_unused]] const auto i : c10::irange(TEST_AMOUNT)) {
// test lerp_kernel_scalar
auto start = torch::rand({3, 5});
auto end = torch::rand({3, 5});
@ -36,7 +36,7 @@ TEST_F(OperationTest, Lerp) {
}
TEST_F(OperationTest, Cross) {
for (C10_UNUSED const auto i : c10::irange(TEST_AMOUNT)) {
for ([[maybe_unused]] const auto i : c10::irange(TEST_AMOUNT)) {
// input
auto a = torch::rand({10, 3});
auto b = torch::rand({10, 3});

View File

@ -157,7 +157,7 @@ void check_exact_values(
TEST(OptimTest, OptimizerAccessors) {
auto options = AdagradOptions(1.0);
std::vector<torch::Tensor> params;
for (C10_UNUSED const auto i : c10::irange(3)) {
for ([[maybe_unused]] const auto i : c10::irange(3)) {
params.push_back(torch::randn(10));
}
auto optimizer = Adagrad(params, options);