turn on -Werror=type-limits in our Bazel CPU build
Summary: We also fix any existing issues.

Test Plan: Built locally, rely on CI to confirm.

Reviewers: malfet

Subscribers:

Tasks:

Tags:

Pull Request resolved: https://github.com/pytorch/pytorch/pull/79139
Approved by: https://github.com/seemethere, https://github.com/osalpekar, https://github.com/albanD
commit f96d96a7fc
parent 28c541776c
committed by PyTorch MergeBot

 .bazelrc | 21
@@ -30,3 +30,24 @@ build:cpu-only --platform_suffix=-cpu-only
 build --copt=-isystem --copt=bazel-out/k8-fastbuild-cpu-only/bin
 # rules_cuda configuration
 build:cpu-only --@rules_cuda//cuda:enable_cuda=False
+
+# Set additional warnings to error level.
+#
+# Implementation notes:
+#  * we use file extensions to determine if we are using the C++
+#    compiler or the cuda compiler
+#  * we use ^// at the start of the regex to only permit matching
+#    PyTorch files. This excludes external repos.
+#
+# Note that because this is logically a command-line flag, it is
+# considered the word on what warnings are enabled. This has the
+# unfortunate consequence of preventing us from disabling an error at
+# the target level because those flags will come before these flags in
+# the action invocation. Instead we provide per-file exceptions after
+# this.
+#
+# On the bright side, this means we don't have to more broadly apply
+# the exceptions to an entire target.
+build \
+  --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=type-limits \
+  --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=type-limits
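For readers unfamiliar with the warning: -Wtype-limits fires when a comparison is always true or always false because of the value range of the types involved, which is exactly the pattern the rest of this patch removes. A minimal, hypothetical illustration (not taken from this diff):

#include <cstddef>

// size_t is unsigned, so `op_id >= 0` can never be false; with
// -Werror=type-limits the commented-out form fails to compile.
bool in_range(std::size_t op_id, std::size_t size) {
  // return op_id >= 0 && op_id < size;  // error: comparison is always true
  return op_id < size;                   // the form used throughout this patch
}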
@@ -12,6 +12,7 @@
 #include <ATen/native/cpu/Loops.h>
 #include <ATen/native/Math.h>
 #include <c10/macros/Macros.h>
+#include <c10/util/TypeSafeSignMath.h>
 #include <c10/util/copysign.h>
 
 namespace at {
@@ -133,7 +134,7 @@ void div_floor_kernel(TensorIteratorBase& iter) {
     AT_DISPATCH_INTEGRAL_TYPES(dtype, "div_floor_cpu", [&]() {
       cpu_kernel(iter, [](scalar_t a, scalar_t b) -> scalar_t {
         TORCH_CHECK(b != 0, "ZeroDivisionError");
-        if ((a < 0) != (b < 0)) {
+        if (c10::is_negative(a) != c10::is_negative(b)) {
           // Subtracts one from the results of truncation division if the
           // divisor and dividend have different sign(bit)s and the remainder of
           // the division is nonzero
@@ -198,7 +199,7 @@ void remainder_kernel(TensorIteratorBase& iter) {
       cpu_kernel(iter, [](scalar_t a, scalar_t b) -> scalar_t {
         TORCH_CHECK(b != 0, "ZeroDivisionError");
         scalar_t r = a % b;
-        if ((r != 0) && ((r < 0) != (b < 0))) {
+        if ((r != 0) && (c10::is_negative(r) != c10::is_negative(b))) {
           r += b;
         }
         return r;
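The kernels above switch from `a < 0` to c10::is_negative(a), provided by the newly included c10/util/TypeSafeSignMath.h, which avoids the tautological comparison when scalar_t is an unsigned type. A rough sketch of the idea, with a simplified signature rather than the exact PyTorch implementation:

#include <type_traits>

// Sketch only: compare against zero for signed types, and return false for
// unsigned types without emitting an `x < 0` comparison that
// -Werror=type-limits would reject.
template <typename T>
constexpr bool is_negative_sketch(T x) {
  if constexpr (std::is_signed_v<T>) {
    return x < T(0);
  } else {
    return false;  // unsigned values are never negative
  }
}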
@@ -20,6 +20,7 @@
 #include <c10/util/math_compat.h>
 #include <c10/util/MathConstants.h>
 #include <c10/core/Scalar.h>
+#include <c10/util/TypeSafeSignMath.h>
 #include <c10/util/irange.h>
 
 #if AT_MKL_ENABLED()
@@ -284,7 +285,7 @@ void sign_kernel(TensorIteratorBase& iter){
 
         cpu_kernel_vec(
           iter,
-          [=](scalar_t a) -> scalar_t { return (0 < a) - (a < 0); },
+          [=](scalar_t a) -> scalar_t { return (0 < a) - c10::is_negative(a); },
           [=](Vectorized<scalar_t> self_vec){
 
             // Comparison operators returns bitmask.
@@ -301,7 +302,7 @@ static void signbit_kernel(TensorIteratorBase& iter){
   // NOTE: signbit does not always support integral arguments.
   if (at::isIntegralType(iter.input_dtype(), /*includeBool=*/false)) {
     AT_DISPATCH_INTEGRAL_TYPES(iter.input_dtype(), "signbit_cpu", [&]() {
-      cpu_kernel(iter, [](scalar_t a) -> bool { return a < 0; }); });
+      cpu_kernel(iter, [](scalar_t a) -> bool { return c10::is_negative(a); }); });
   } else {
     AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, ScalarType::Half, iter.input_dtype(), "signbit_cpu", [&]() {
       using opmath_t = at::opmath_type<scalar_t>;
@@ -51,7 +51,7 @@ void ProfDAGCounters::AddPerOpStartTime(size_t op_id) {
     return;
   }
 
-  CAFFE_ENFORCE(op_id >= 0 && op_id < op_start_times_run_.size());
+  CAFFE_ENFORCE(op_id < op_start_times_run_.size());
   op_start_times_run_[op_id] = timer_.MilliSeconds();
 }
 
@@ -60,7 +60,7 @@ void ProfDAGCounters::AddPerOpEndTime(size_t op_id) {
     return;
   }
 
-  CAFFE_ENFORCE(op_id >= 0 && op_id < op_end_times_run_.size());
+  CAFFE_ENFORCE(op_id < op_end_times_run_.size());
   op_end_times_run_[op_id] = timer_.MilliSeconds();
 }
 
@@ -69,7 +69,7 @@ void ProfDAGCounters::AddPerOpAsyncEndTime(size_t op_id) {
     return;
   }
 
-  CAFFE_ENFORCE(op_id >= 0 && op_id < op_async_end_times_run_.size());
+  CAFFE_ENFORCE(op_id < op_async_end_times_run_.size());
   op_async_end_times_run_[op_id] = timer_.MilliSeconds();
 }
 
@@ -563,8 +563,7 @@ TEST(LiteInterpreterTest, GetContainTypes) {
   std::stringstream ss;
   m._save_for_mobile(ss, {}, true);
 
-  auto contained_types = _get_mobile_model_contained_types(ss);
-  AT_ASSERT(contained_types.size() >= 0);
+  _get_mobile_model_contained_types(ss);
 }
 
 namespace {
@@ -384,19 +384,16 @@ TEST(LiteTrainerTest, RandomSamplerReturnsIndicesInCorrectRange) {
 
   std::vector<size_t> indices = sampler.next(3).value();
   for (auto i : indices) {
-    AT_ASSERT(i >= 0);
     AT_ASSERT(i < 10);
   }
 
   indices = sampler.next(5).value();
   for (auto i : indices) {
-    AT_ASSERT(i >= 0);
     AT_ASSERT(i < 10);
   }
 
   indices = sampler.next(2).value();
   for (auto i : indices) {
-    AT_ASSERT(i >= 0);
     AT_ASSERT(i < 10);
   }
 
@@ -492,8 +492,8 @@ std::vector<at::IValue> readWrappedPayload(
   // Read the additional payload remove it from the payload.
   // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   int64_t additionalPayloadSize;
+  TORCH_INTERNAL_ASSERT(payload.size() >= sizeof(int64_t));
   size_t indexToRead = payload.size() - sizeof(int64_t);
-  TORCH_INTERNAL_ASSERT(indexToRead >= 0);
   torch::utils::THP_decodeInt64Buffer(
       &additionalPayloadSize,
       reinterpret_cast<uint8_t*>(payload.data()) + indexToRead,
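The hunk above shows a second pattern used in this patch: validate a size before an unsigned subtraction instead of checking the unsigned result afterwards, since `indexToRead >= 0` can never fail and the subtraction would already have wrapped around. A hypothetical helper showing the same idea (names are illustrative, not from the diff):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Check the size first; with size_t, `payload.size() - sizeof(int64_t)` wraps
// around when the payload is too small, so a post-subtraction `>= 0` check is
// both useless and rejected by -Werror=type-limits.
std::size_t tail_offset(const std::vector<char>& payload) {
  assert(payload.size() >= sizeof(std::int64_t));  // validate before subtracting
  return payload.size() - sizeof(std::int64_t);
}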
@@ -905,7 +905,7 @@ ComputeAt::ComputeAt(
       " producer: ",
       producer_);
   TORCH_INTERNAL_ASSERT(
-      reference_position_ >= 0 && reference_position_ <= reference_->nDims(),
+      reference_position_ <= reference_->nDims(),
       "Invalid computeAt axis, received ",
       reference_position_,
       " but should be > -",
@@ -186,7 +186,7 @@ void checkDimSize(
   ExpressionEvaluator const_eval(tv->fusion());
   for (auto axis_index : c10::irange(axis.size())) {
     TORCH_INTERNAL_ASSERT(
-        ((axis[axis_index] + tv->nDims()) >= 0) &&
+        ((axis[axis_index] + static_cast<int>(tv->nDims())) >= 0) &&
             (axis[axis_index] < (int)tv->nDims()),
         "CheckDimSize: axis position out of bound ",
         axis[axis_index],
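Here the fix differs from the earlier ones: rather than dropping the lower-bound check, the unsigned operand is cast to int so the sum stays signed and `>= 0` remains meaningful. A hypothetical stand-alone version of the same check, assuming nDims() returns an unsigned type:

#include <cstddef>

// If `ndims` stays unsigned, `axis + ndims` is promoted to an unsigned type,
// making `>= 0` always true; casting to int keeps the arithmetic signed.
bool axis_in_range(int axis, std::size_t ndims) {
  return (axis + static_cast<int>(ndims)) >= 0 &&
         axis < static_cast<int>(ndims);
}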
@@ -115,7 +115,7 @@ class MergeTransform final : public ViewTransform {
       const std::vector<IterDomain*>& new_root_domain,
       std::vector<IterDomain*>& rfactor_domain) override {
     TORCH_INTERNAL_ASSERT(
-        index_ >= 0 && (index_ + 1) < new_root_domain.size(),
+        (index_ + 1) < new_root_domain.size(),
         "Index: \t",
         index_,
         "\t Domain Size:\t",
@@ -174,7 +174,7 @@ class SplitTransform final : public ViewTransform {
       const std::vector<IterDomain*>& new_root_domain,
       std::vector<IterDomain*>& rfactor_domain) override {
     TORCH_INTERNAL_ASSERT(
-        index_ >= 0 && index_ < new_root_domain.size(),
+        index_ < new_root_domain.size(),
         "Index: \t",
         index_,
         "\t Domain Size:\t",
@@ -237,7 +237,7 @@ class KeepTransform final : public ViewTransform {
       const std::vector<IterDomain*>& new_root_domain,
       std::vector<IterDomain*>& rfactor_domain) override {
     TORCH_INTERNAL_ASSERT(
-        index_ >= 0 && index_ < new_root_domain.size(),
+        index_ < new_root_domain.size(),
         "Index: \t",
         index_,
         "\t Domain Size:\t",
@@ -211,13 +211,13 @@ c10::optional<std::function<void(Stack&)>> makeOperatorFunction(
       out_args.push_back(stack.back());
       stack.pop_back();
     }
-    size_t start_index = num_specified_args.value() - out_args.size();
     TORCH_CHECK(
-        start_index >= 0,
+        num_specified_args.value() >= out_args.size(),
         "The number of output arguments is: ",
         out_args.size(),
         ", which is more then the number of specified arguments: ",
         num_specified_args.value());
+    size_t start_index = num_specified_args.value() - out_args.size();
     for (size_t i = start_index; i < (args.size() - out_args.size()); ++i) {
       TORCH_CHECK(
           args[i].default_value().has_value(),
@@ -8,6 +8,7 @@
 #include <torch/csrc/jit/api/function_impl.h>
 #include <torch/csrc/jit/serialization/pickler.h>
 #include <string>
+#include <type_traits>
 
 namespace torch {
 namespace jit {
@@ -595,18 +596,19 @@ void Pickler::pushDict(const IValue& ivalue) {
 
   push<PickleOpCode>(PickleOpCode::EMPTY_DICT);
 
-  if (dict.size() >= 0) {
-    push<PickleOpCode>(PickleOpCode::MARK);
+  static_assert(
+      std::is_unsigned<decltype(dict.size())>::value,
+      "Expected size to be non-negative.");
+  push<PickleOpCode>(PickleOpCode::MARK);
 
-    // Sort the dict for deterministic keys
-    for (const auto& entry : dict) {
-      pushIValue(entry.key());
-      pushIValue(entry.value());
-    }
+  // Sort the dict for deterministic keys
+  for (const auto& entry : dict) {
+    pushIValue(entry.key());
+    pushIValue(entry.value());
+  }
 
-    push<PickleOpCode>(PickleOpCode::SETITEMS);
-  }
+  push<PickleOpCode>(PickleOpCode::SETITEMS);
 
   endTypeTag(ivalue);
 }
 
@@ -574,9 +574,6 @@ void loopnestRandomization(int64_t seed, LoopNest& l) {
 
       case COMPRESS_BUFFER: {
         auto buffers = NodeFinder<Buf>::find(l.root_stmt());
-        if (buffers.size() < 0) {
-          break;
-        }
         int buffer_n = std::rand() % (int)buffers.size();
         auto buffer = buffers[buffer_n];
 
@@ -589,9 +586,6 @@ void loopnestRandomization(int64_t seed, LoopNest& l) {
 
      case COMPRESS_ALL_BUFFERS: {
         auto buffers = BufFinder::find(l.root_stmt());
-        if (buffers.size() < 0) {
-          break;
-        }
 
        message = "compressAllBuffers(l.root_stmt());\n";
        randomization_helper::printHistory(n_transform, message);