Fix sign-compare violations in cpp tests

Prerequisite change for enabling `-Werror=sign-compare` across the PyTorch repo.
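As a quick illustration (not taken from the changed test files; function names are made up), the pattern that trips `-Wsign-compare` is a signed counter or value compared against an unsigned quantity such as `std::vector::size()`. The fixes below mirror the two patterns used throughout this commit: switch the counter to an unsigned type, or `static_cast` the signed side when its type must stay.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Before: with -Werror=sign-compare this loop fails to compile, because a
// signed int is compared against the unsigned result of std::vector::size().
void before(const std::vector<int>& v) {
  for (int i = 0; i < v.size(); ++i) { // error: comparison of integer
    (void)v[i];                        // expressions of different signedness
  }
}

// After: either make the counter unsigned, or cast the signed side when the
// signed type has to stay (e.g. a tensor dimension returned as int64_t).
void after(const std::vector<int>& v, std::int64_t dim_size) {
  for (std::size_t i = 0; i < v.size(); ++i) {
    (void)v[i];
  }
  if (static_cast<std::size_t>(dim_size) != v.size()) {
    // sizes disagree; handle the mismatch
  }
}
```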

Pull Request resolved: https://github.com/pytorch/pytorch/pull/75080

Approved by: https://github.com/atalman
Nikita Shulga
2022-04-01 19:29:33 -07:00
committed by PyTorch MergeBot
parent ef56497ea0
commit 81d765ef1f
14 changed files with 24 additions and 29 deletions

@@ -65,7 +65,7 @@ void showRtol(const at::Tensor& a, const at::Tensor& b) {
 }
-static void gen_allpermutations(std::vector<std::vector<int64_t>>& out, std::vector<int64_t> in, int i) {
+static void gen_allpermutations(std::vector<std::vector<int64_t>>& out, std::vector<int64_t> in, unsigned i) {
 // generate all permutations of a given dims
 if (i == in.size()) {
 out.push_back(in);

@@ -1982,7 +1982,7 @@ TEST(DataLoaderTest, ChunkDatasetSave) {
 for (const auto epoch_index : c10::irange(epoch_count)) {
 (void)epoch_index; // Suppress unused variable warning
-int iteration_count = 0;
+unsigned iteration_count = 0;
 for (auto iterator = data_loader->begin(); iterator != data_loader->end();
 ++iterator, ++iteration_count) {
 if ((iteration_count + 1) % save_interval == 0) {
@@ -2316,7 +2316,7 @@ TEST(DataLoaderTest, CustomPreprocessPolicy) {
 ++iterator) {
 auto batch_result = *iterator;
 if (batch_result.size() > chunk_size * cross_chunk_shuffle_count) {
-for (int i = 0; i < batch_result.size(); i += chunk_size) {
+for (unsigned i = 0; i < batch_result.size(); i += chunk_size) {
 ASSERT_TRUE(std::is_sorted(
 batch_result.begin() + i,
 batch_result.begin() + i + chunk_size));

@@ -19,7 +19,7 @@ void check_exact_values(
 auto layerParameters = parameters[i];
 auto expectedLayerParameters = expected_parameters[i];
-if (layerParameters.size(0) != expectedLayerParameters.size()) {
+if (static_cast<size_t>(layerParameters.size(0)) != expectedLayerParameters.size()) {
 std::cout << "layer #" << i
 << " layerParameters size: " << layerParameters.size(0)
 << " != "

@@ -615,7 +615,7 @@ TEST_F(NNUtilsTest, PackPaddedSequence) {
 }
 int64_t offset = 0;
 std::vector<torch::Tensor> tensors_to_be_cat;
-for (int64_t i = 1; i < sorted_lengths.size() + 1; i++) {
+for (int64_t i = 1; i < static_cast<int64_t>(sorted_lengths.size() + 1); i++) {
 int64_t l = sorted_lengths.at(i-1);
 tensors_to_be_cat.emplace_back(pad(i * 100 + torch::arange(1., 5 * l + 1).view({l, 1, 5}), max_length));
 }

@@ -105,7 +105,7 @@ TEST_F(ParameterDictTest, Values) {
 auto dict = torch::nn::ParameterDict(params);
 std::vector<torch::Tensor> values = dict->values();
 std::vector<torch::Tensor> true_values{ta, tb, tc};
-for (auto i = 0; i < values.size(); i += 1) {
+for (auto i = 0U; i < values.size(); i += 1) {
 ASSERT_TRUE(torch::all(torch::eq(values[i], true_values[i])).item<bool>());
 }
 }

@@ -129,7 +129,7 @@ void test_serialize_optimizer(DerivedOptimizerOptions options, bool only_has_glo
 // optim3_2 and optim1 should have param_groups and state of size 1 and state_size respectively
 ASSERT_TRUE(optim3_2_param_groups.size() == 1);
 // state_size = 2 for all optimizers except LBFGS as LBFGS only maintains one global state
-int state_size = only_has_global_state ? 1 : 2;
+unsigned state_size = only_has_global_state ? 1 : 2;
 ASSERT_TRUE(optim3_2_state.size() == state_size);
 // optim3_2 and optim1 should have param_groups and state of same size

@@ -62,7 +62,7 @@ void assert_ordering(
 ASSERT_EQ(expected.size(), actual.size())
 << "Got " << actual.size() << " elements (" << actual << ")"
 << " expected " << expected.size() << " elements (" << expected << ")";
-for (int i = 0; i < expected.size(); i++) {
+for (unsigned i = 0; i < expected.size(); i++) {
 ASSERT_EQ(expected[i], actual[i])
 << "Difference at index " << i << " in " << actual << " (expected "
 << actual << ")";

@@ -599,7 +599,7 @@ void runAndCheckTorchScriptModel(
 std::stringstream& input_model_stream,
 const std::vector<IValue>& input_data,
 const std::vector<IValue>& expect_result_list,
-const int64_t expect_version) {
+const uint64_t expect_version) {
 auto actual_version = _get_model_bytecode_version(input_model_stream);
 AT_ASSERT(actual_version == expect_version);
@@ -616,7 +616,7 @@ void runAndCheckBytecodeModel(
 std::stringstream& input_model_stream,
 const std::vector<IValue>& input_data,
 const std::vector<IValue>& expect_result_list,
-const int64_t expect_version) {
+const uint64_t expect_version) {
 auto actual_version = _get_model_bytecode_version(input_model_stream);
 AT_ASSERT(actual_version == expect_version);
@@ -634,13 +634,14 @@ void backportAllVersionCheck(
 std::stringstream& test_model_file_stream,
 std::vector<IValue>& input_data,
 std::vector<IValue>& expect_result_list,
-const int64_t expect_from_version) {
+const uint64_t expect_from_version) {
 auto from_version = _get_model_bytecode_version(test_model_file_stream);
 AT_ASSERT(from_version == expect_from_version);
+AT_ASSERT(from_version > 0);
 // Backport script_module_v5.ptl to an older version
 constexpr int64_t minimum_to_version = 4;
-int64_t current_to_version = from_version - 1;
+auto current_to_version = from_version - 1;
 // Verify all candidate to_version work as expected. All backport to version
 // larger than minimum_to_version should success.

@@ -17,21 +17,15 @@ static inline void trim(std::string& s) {
 [](unsigned char ch) { return !std::isspace(ch); })
 .base(),
 s.end());
-for (int64_t i = 0; i < s.size(); ++i) {
-if (s[i] == '\n') {
+for (size_t i = 0; i < s.size(); ++i) {
+while (i < s.size() && s[i] == '\n') {
 s.erase(i, 1);
-i--;
 }
 }
-for (int64_t i = 0; i < s.size(); ++i) {
+for (size_t i = 0; i < s.size(); ++i) {
 if (s[i] == ' ') {
-for (int64_t j = i + 1; j < s.size(); j++) {
-if (s[j] == ' ') {
-s.erase(j, 1);
-j--;
-} else {
-break;
-}
+while (i + 1 < s.size() && s[i + 1] == ' ') {
+s.erase(i + 1, 1);
 }
 }
 }

@@ -4132,7 +4132,7 @@ TEST_F(LazyOpsTest, TestDropoutInPlace) {
 }
 TEST_F(LazyOpsTest, TestRandperm) {
-int n = 5;
+unsigned n = 5;
 torch::Tensor shuffle = torch::randperm(
 n, torch::TensorOptions(torch::kLong).device(torch::kLazy));
 torch::Tensor shuffle_cpu = CopyToDevice(shuffle, torch::kCPU);

@@ -165,7 +165,7 @@ void TestBackward(
 // Check grad of sum(outs) w.r.t inputs_w_grad.
 torch::Tensor sum = torch::zeros_like(outs[0]).sum();
 torch::Tensor xsum = torch::zeros_like(xouts[0]).sum();
-for (int i = 0; i < outs.size(); ++i) {
+for (size_t i = 0; i < outs.size(); ++i) {
 if (outs[i].requires_grad()) {
 sum += outs[i].sum();
 xsum += xouts[i].sum();

@@ -78,7 +78,7 @@ static void assertAllEqual(const std::vector<T>& vec, const T& val) {
 template <typename T>
 static void assertAllEqual(const std::vector<T>& v1, const std::vector<T>& v2) {
 ASSERT_EQ(v1.size(), v2.size());
-for (int i = 0; i < v1.size(); i++) {
+for (size_t i = 0; i < v1.size(); ++i) {
 ASSERT_EQ(v1[i], v2[i]);
 }
 }

@@ -274,7 +274,7 @@ TEST(MemDependency, BoundSubtractMultiDim) {
 if (x.size() != y.size()) {
 return false;
 }
-for (auto i = 0; i < x.size(); ++i) {
+for (auto i = 0U; i < x.size(); ++i) {
 if (!indexBoundsEquals(x[i], y[i])) {
 return false;
 }
@@ -338,7 +338,7 @@ TEST(MemDependency, BoundSubtractMultiDimSymbolic) {
 if (x.size() != y.size()) {
 return false;
 }
-for (auto i = 0; i < x.size(); ++i) {
+for (auto i = 0U; i < x.size(); ++i) {
 if (!indexBoundsEquals(x[i], y[i])) {
 return false;
 }

@@ -24,7 +24,7 @@ TEST(Ops, Sum) {
 constexpr int N = 16;
 std::vector<IntList> testDims = {{0}, {1}, {0, 1}};
 std::vector<std::vector<ExprHandle>> outputShapes = {{N}, {M}, {}};
-for (int idx = 0; idx < testDims.size(); idx++) {
+for (unsigned idx = 0; idx < testDims.size(); idx++) {
 const auto& dims = testDims[idx];
 const auto& outShape = outputShapes[idx];