Enable bugprone-unchecked-optional-access (#144226)
We can now enable bugprone-unchecked-optional-access without the risk of clang-tidy hanging.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/144226
Approved by: https://github.com/albanD
@@ -1,8 +1,9 @@
 ---
 # NOTE there must be no spaces before the '-', so put the comma last.
-# The check bugprone-unchecked-optional-access is also turned off atm
-# because it causes clang-tidy to hang randomly. The tracking issue
+# The check bugprone-unchecked-optional-access is also turned on.
+# Note that it can cause clang-tidy to hang randomly. The tracking issue
 # can be found at https://github.com/llvm/llvm-project/issues/69369.
+# When that happens, we can disable it on the problematic code by NOLINT.
 InheritParentConfig: true
 Checks: '
 bugprone-*,
@@ -12,7 +13,6 @@ bugprone-*,
 -bugprone-lambda-function-name,
 -bugprone-reserved-identifier,
 -bugprone-swapped-arguments,
--bugprone-unchecked-optional-access,
 clang-analyzer-core.*,
 clang-analyzer-cplusplus.*,
 clang-analyzer-nullability.*,
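For context, here is a minimal, self-contained sketch (illustrative only, not taken from this diff; all names are placeholders) of the pattern that bugprone-unchecked-optional-access flags, together with the two remedies used throughout this change: guard the access, or suppress the diagnostic where an invariant outside the function already guarantees the optional is engaged.

#include <cstddef>
#include <optional>
#include <string>

// Hypothetical lookup; the name and behavior are placeholders.
std::optional<std::string> lookupName(int id) {
  if (id == 0) {
    return std::nullopt;
  }
  return "name-" + std::to_string(id);
}

std::size_t nameLengthGuarded(int id) {
  auto name = lookupName(id);
  // Calling name.value() here without a check would be flagged, since
  // the analyzer cannot rule out an empty optional. Guarding fixes it:
  if (name.has_value()) {
    return name.value().size();
  }
  return 0;
}

std::size_t nameLengthSuppressed(int id) {
  auto name = lookupName(id);
  // When an invariant outside this function guarantees presence,
  // suppress the diagnostic explicitly instead.
  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
  return name.value().size();
}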
@@ -54,6 +54,7 @@ bool isAccelerator(c10::DeviceType device_type) {
 }
 }
 
+// NOLINTBEGIN(bugprone-unchecked-optional-access)
 c10::DeviceIndex deviceCount() {
   const auto device_type = getAccelerator(false);
   if (!device_type.has_value()) {
@@ -99,5 +100,6 @@ void synchronizeDevice(c10::DeviceIndex device_index) {
   // impl.synchronizeDevice should can be safely called from any device
   impl.synchronizeDevice(device_index);
 }
+// NOLINTEND(bugprone-unchecked-optional-access)
 
 } // namespace at::accelerator
@@ -656,10 +656,11 @@ struct TORCH_API TensorType : public SharedType {
     const auto& shape = sizes();
 
     for (size_t i = 0; i < shape.size(); i++) {
-      if (!shape[i].has_value()) {
+      auto const &s = shape[i];
+      if (!s.has_value()) {
         return std::optional<size_t>{};
       }
-      prod *= shape[i].value();
+      prod *= s.value();
     }
     return prod;
   }
@@ -727,10 +728,11 @@ struct TORCH_API TensorType : public SharedType {
 
   TensorTypePtr contiguous() const {
     auto cloned = clone();
-    TORCH_INTERNAL_ASSERT(sizes().concrete_sizes().has_value());
+    auto concrete_sizes = sizes().concrete_sizes();
+    TORCH_INTERNAL_ASSERT(concrete_sizes.has_value());
     auto strides = computeStrideProps(
-        *sizes().concrete_sizes(),
-        contiguousStridesOf(*sizes().concrete_sizes()));
+        *concrete_sizes,
+        contiguousStridesOf(*concrete_sizes));
     cloned->strides_ = strides;
     return cloned;
   }
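The two TensorType hunks above follow the same recipe: bind the optional to a named local, test it, then dereference that same local. Re-evaluating an accessor such as shape[i] or sizes().concrete_sizes() between the check and the use yields expressions the analyzer does not connect, which is why the original code was flagged. A standalone sketch of the pattern, assuming a simple vector of optionals:

#include <cstddef>
#include <optional>
#include <vector>

// Standalone analogue of the numel-style loop above; types simplified.
std::optional<std::size_t> product(
    const std::vector<std::optional<std::size_t>>& shape) {
  std::size_t prod = 1;
  for (std::size_t i = 0; i < shape.size(); i++) {
    // One binding serves both the has_value() test and the value() call,
    // so the analyzer can prove the access is checked.
    const auto& s = shape[i];
    if (!s.has_value()) {
      return std::nullopt;
    }
    prod *= s.value();
  }
  return prod;
}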
@@ -1516,8 +1518,8 @@ struct TORCH_API FunctionType : public NamedType {
   FunctionType(torch::jit::Function* function);
   std::string annotation_str_impl(
       [[maybe_unused]] const TypePrinter& printer = nullptr) const override {
-    const auto& n = name().value();
-    return n.qualifiedName();
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
+    return name()->qualifiedName();
   }
   torch::jit::Function* function_;
 };
@@ -2133,6 +2135,7 @@ struct MatchTypeReturn {
     return !reason_.has_value();
   }
   const std::string& reason() const {
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     return reason_.value();
   }
 
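The MatchTypeReturn hunk above is the cross-function case: reason_ is only engaged when the match failed, and callers are expected to consult the success predicate first, but clang-tidy analyzes one function at a time and cannot see that contract. A sketch of the same shape, with illustrative names:

#include <optional>
#include <string>

// Illustrative result type; callers must check succeeded() before reason().
struct MatchResult {
  bool succeeded() const {
    return !reason_.has_value();
  }
  const std::string& reason() const {
    // The "check succeeded() first" contract lives in the callers, out of
    // reach of per-function analysis, so the access is suppressed.
    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
    return reason_.value();
  }
  std::optional<std::string> reason_;
};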
@@ -2181,6 +2184,7 @@ struct TORCH_API InterfaceType : public NamedType {
   }
 
   std::string str() const override {
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     return std::string("InterfaceType<") + name()->name() + ">";
   }
 
@@ -2208,6 +2212,7 @@ struct TORCH_API InterfaceType : public NamedType {
 
   std::string annotation_str_impl(
       [[maybe_unused]] const TypePrinter& printer = nullptr) const override {
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     return name()->qualifiedName();
   }
 
@@ -904,6 +904,7 @@ bool ListType::isSubtypeOfExt(const Type& rhs_, std::ostream* why_not) const {
 std::string TupleType::str() const {
   std::stringstream ss;
   if (schema_ && name().has_value()) {
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     ss << name()->qualifiedName();
   } else {
     ss << "(";
@@ -16,6 +16,7 @@
 // registered to FuncTorchVmapMode. This is because we need to interpose on
 // random operations even if they're not on a BatchedTensor.
 
+// NOLINTBEGIN(bugprone-unchecked-optional-access)
 namespace at::functorch {
 
 template <typename F, F Func, typename... ExtraArgs>
@@ -501,3 +502,4 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchVmapMode, m) {
 }
 
 } // namespace at::functorch
+// NOLINTEND(bugprone-unchecked-optional-access)
@@ -11,6 +11,7 @@
 
 #include <utility>
 
+// NOLINTBEGIN(bugprone-unchecked-optional-access)
 namespace at::functorch {
 
 static bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
@@ -510,3 +511,4 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {
 }
 
 } // namespace at::functorch
+// NOLINTEND(bugprone-unchecked-optional-access)
@@ -14,6 +14,7 @@
 #include <torch/library.h>
 
 
+// NOLINTBEGIN(bugprone-unchecked-optional-access)
 namespace at::functorch {
 
 namespace {
@@ -1283,3 +1284,4 @@ TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {
 }
 
 } // namespace at::functorch
+// NOLINTEND(bugprone-unchecked-optional-access)
@@ -156,6 +156,7 @@ const Tensor& resize__plumbing(
       "resize_: batching rule only supports None or Contiguous MemoryFormat");
   auto maybe_layer = maybeCurrentDynamicLayer();
   vmap_check_escaped(maybe_layer, "resize__plumbing");
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   int64_t cur_level = maybe_layer->layerId();
   if (!isBatchedAtLevel(self, cur_level)) {
     c10::impl::ExcludeDispatchKeyGuard guard2(DispatchKey::FuncTorchBatched);
@@ -41,6 +41,7 @@ DynamicLayer::DynamicLayer(
   }
   switch (transform_type) {
     case TransformType::Vmap:
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       interpreter_ = Interpreter::Vmap(layerId, std::move(batchSize.value()), randomness.value());
       break;
     case TransformType::Grad:
@@ -50,6 +51,7 @@ DynamicLayer::DynamicLayer(
       interpreter_ = Interpreter::Jvp(layerId, prev_fwd_grad_mode.value());
       break;
     case TransformType::Functionalize:
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       interpreter_ = Interpreter::Functionalize(layerId, functionalize_add_back_views.value());
       break;
     default:
@@ -345,9 +347,7 @@ void foreachTensorInplaceWithFlag(std::vector<IValue>& args, int64_t begin, int6
     if (!ivalue.isTensor()) {
      continue;
     }
-    Tensor value = ivalue.toTensor();
-    Tensor replacement = func(value, flag);
-    args[idx] = std::move(replacement);
+    args[idx] = func(ivalue.toTensor(), flag);
     // sanity checks
     if (ivalue.toTensor().defined()) {
       TORCH_INTERNAL_ASSERT(args[idx].toTensor().defined());
@@ -118,6 +118,7 @@ static Tensor moveDimToFrontAndExpand(Tensor tensor, std::optional<int64_t> dim,
 // to `batch_sizes`
 VmapPhysicalViewVec
 MultiBatchVmapTransform::logicalToPhysical(ITensorListRef logical_tensors) {
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   auto cur_level = maybeCurrentDynamicLayer().value().layerId();
   c10::SymInt bdim_size = -1;
 
@@ -406,6 +406,7 @@ struct ExpandableSegment {
         DriverAPI::get()->cuMemCreate_(&handle, segment_size_, &prop, 0);
     if (status == CUDA_ERROR_OUT_OF_MEMORY) {
       for (auto j : c10::irange(begin, i)) {
+        // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
         auto h = handles_.at(j).value();
         handles_.at(j) = std::nullopt;
         C10_CUDA_DRIVER_CHECK(DriverAPI::get()->cuMemRelease_(h.handle));
@@ -444,6 +445,7 @@ struct ExpandableSegment {
     ShareHeader header{getpid(), segment_size_, end - begin};
     buf.write((const char*)&header, sizeof(ShareHeader));
     for (auto i : c10::irange(begin, end)) {
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       auto& handle = handles_.at(i).value();
       if (!handle.fd) {
         int fd = 0;
@@ -493,6 +495,7 @@ struct ExpandableSegment {
     close((int)pidfd);
     for (auto& h : segment->handles_) {
       C10_CUDA_DRIVER_CHECK(
+          // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
           DriverAPI::get()->cuMemRelease_(h.value().handle));
       h = std::nullopt;
     }
@@ -555,6 +558,7 @@ struct ExpandableSegment {
         ptr_ + i * segment_size_,
         segment_size_,
         0,
+        // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
         handles_.at(i).value().handle,
         0ULL));
   }
@@ -579,6 +583,7 @@ struct ExpandableSegment {
       C10_CUDA_CHECK(cudaDeviceSynchronize());
     }
     for (auto i : c10::irange(begin, end)) {
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       Handle h = handles_.at(i).value();
       handles_.at(i) = std::nullopt;
       C10_CUDA_DRIVER_CHECK(DriverAPI::get()->cuMemUnmap_(
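The ExpandableSegment hunks are the data-structure variant of the same problem: the mapping and unmapping code maintains the invariant that handles_ slots in the mapped range are engaged, but each value() call sits in a different member function from the code establishing that invariant. A simplified sketch (int stands in for the real handle type; names are illustrative):

#include <cstddef>
#include <optional>
#include <vector>

// Simplified stand-in for ExpandableSegment's handle bookkeeping.
struct SegmentSketch {
  std::vector<std::optional<int>> handles_;

  void releaseRange(std::size_t begin, std::size_t end) {
    for (std::size_t i = begin; i < end; i++) {
      // Invariant maintained elsewhere: slots in [begin, end) are engaged.
      // Per-function analysis cannot verify this, hence the NOLINT.
      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
      int h = handles_.at(i).value();
      handles_.at(i) = std::nullopt;
      (void)h; // the real code hands h to the CUDA driver here
    }
  }
};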
@@ -162,6 +162,7 @@ class OptionalArrayRef final {
   }
 
   constexpr const ArrayRef<T>& value() const& {
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     return wrapped_opt_array_ref.value();
   }
 
@@ -404,7 +404,7 @@ inline Tensor smooth_l1_loss(
   TORCH_CHECK(
       !options.beta().has_value(),
       "expected beta not to be provided in 'options', but got ",
-      options.beta().value());
+      options.beta());
   return detail::smooth_l1_loss(input, target, options.reduction(), beta);
 }
 
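The smooth_l1_loss hunk uses a different trick: on the failure path of that TORCH_CHECK, beta was in fact provided, so options.beta().value() would succeed at runtime, but the analyzer cannot prove it. Streaming the optional itself into the message sidesteps the unchecked access (this assumes, as the change implies, that PyTorch's message formatting can render an optional). A plain-C++ sketch of the idea with a hypothetical formatter:

#include <iostream>
#include <optional>
#include <sstream>
#include <string>

// Hypothetical formatter standing in for PyTorch's message machinery.
template <typename T>
std::string formatOptional(const std::optional<T>& opt) {
  std::ostringstream oss;
  if (opt.has_value()) {
    oss << *opt;
  } else {
    oss << "nullopt";
  }
  return oss.str();
}

int main() {
  std::optional<double> beta = 0.5;
  // Passing the optional wholesale keeps the error-message path free of
  // unchecked value() calls.
  std::cout << "expected beta not to be provided in 'options', but got "
            << formatOptional(beta) << '\n';
  return 0;
}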
@@ -25,8 +25,9 @@ void EmbeddingImpl::reset() {
     TORCH_CHECK(
         options.padding_idx() >= -options.num_embeddings(),
         "Padding_idx must be within num_embedding");
-    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
-    options.padding_idx(options.num_embeddings() + *options.padding_idx());
+    options.padding_idx(
+        // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
+        options.num_embeddings() + *options.padding_idx());
   }
 }
 
@@ -49,6 +50,7 @@ void EmbeddingImpl::reset_parameters() {
   torch::nn::init::normal_(weight);
   if (options.padding_idx().has_value()) {
     torch::NoGradGuard no_grad;
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     weight[*options.padding_idx()].fill_(0);
   }
 }
@@ -56,11 +58,13 @@
 void EmbeddingImpl::pretty_print(std::ostream& stream) const {
   stream << "torch::nn::Embedding(num_embeddings=" << options.num_embeddings()
          << ", embedding_dim=" << options.embedding_dim();
-  if (options.padding_idx().has_value()) {
-    stream << ", padding_idx=" << *options.padding_idx();
+  auto const& padding_idx_opt = options.padding_idx();
+  if (padding_idx_opt.has_value()) {
+    stream << ", padding_idx=" << padding_idx_opt.value();
   }
-  if (options.max_norm().has_value()) {
-    stream << ", max_norm=" << *options.max_norm();
+  auto const& max_norm_opt = options.max_norm();
+  if (max_norm_opt.has_value()) {
+    stream << ", max_norm=" << max_norm_opt.value();
   }
   if (options.norm_type() != 2) {
     stream << ", norm_type=" << options.norm_type();
@@ -93,8 +97,9 @@ EmbeddingBagImpl::EmbeddingBagImpl(EmbeddingBagOptions options_)
 }
 
 void EmbeddingBagImpl::reset() {
-  if (options.padding_idx().has_value()) {
-    auto padding_idx = options.padding_idx().value();
+  auto const& padding_idx_opt = options.padding_idx();
+  if (padding_idx_opt.has_value()) {
+    auto padding_idx = padding_idx_opt.value();
     if (padding_idx > 0) {
       TORCH_CHECK(
           padding_idx < options.num_embeddings(),
@@ -122,9 +127,10 @@ void EmbeddingBagImpl::reset() {
 }
 
 void EmbeddingBagImpl::reset_parameters() {
-  if (options.padding_idx().has_value()) {
+  auto const& padding_idx_opt = options.padding_idx();
+  if (padding_idx_opt.has_value()) {
     torch::NoGradGuard no_grad;
-    weight[options.padding_idx().value()].fill_(0);
+    weight[*padding_idx_opt].fill_(0);
   }
   torch::nn::init::normal_(weight);
 }
@@ -151,8 +157,9 @@ void EmbeddingBagImpl::pretty_print(std::ostream& stream) const {
   stream << "torch::nn::EmbeddingBag(num_embeddings="
          << options.num_embeddings()
          << ", embedding_dim=" << options.embedding_dim();
-  if (options.max_norm().has_value()) {
-    stream << ", max_norm=" << *options.max_norm();
+  auto const& max_norm_opt = options.max_norm();
+  if (max_norm_opt.has_value()) {
+    stream << ", max_norm=" << *max_norm_opt;
   }
   if (options.norm_type() != 2) {
     stream << ", norm_type=" << options.norm_type();
@@ -171,8 +178,9 @@ void EmbeddingBagImpl::pretty_print(std::ostream& stream) const {
     stream << ", include_last_offset=" << std::boolalpha
            << options.include_last_offset();
   }
-  if (options.padding_idx().has_value()) {
-    stream << ", padding_idx=" << options.padding_idx().value();
+  auto const& padding_idx_opt = options.padding_idx();
+  if (padding_idx_opt.has_value()) {
+    stream << ", padding_idx=" << padding_idx_opt.value();
   }
   stream << ")";
 }
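All of the embedding hunks apply one refactor: call the options accessor once, keep the returned optional in a named binding, then test and dereference that binding. Distinct calls to options.padding_idx() are separate expressions as far as the analyzer is concerned, even though they refer to the same underlying optional. A sketch with a placeholder options struct:

#include <iostream>
#include <optional>

// Placeholder options type; names are illustrative.
struct OptionsSketch {
  const std::optional<int>& padding_idx() const {
    return padding_idx_;
  }
  std::optional<int> padding_idx_;
};

void prettyPrint(const OptionsSketch& options) {
  // One binding pairs the has_value() check with the later access;
  // repeating options.padding_idx() at each use would be flagged.
  auto const& padding_idx_opt = options.padding_idx();
  if (padding_idx_opt.has_value()) {
    std::cout << ", padding_idx=" << padding_idx_opt.value() << '\n';
  }
}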
@@ -15,6 +15,7 @@
 namespace {
 // If the type is subtype of named type, return its qualifiedname, otherwise
 // return its type str.
+// NOLINTBEGIN(bugprone-unchecked-optional-access)
 std::string getTypeStr(const c10::TypePtr& type) {
   switch (type->kind()) {
     case c10::TypeKind::FunctionType:
@@ -29,6 +30,7 @@ std::string getTypeStr(const c10::TypePtr& type) {
     return type->annotation_str();
   }
 }
+// NOLINTEND(bugprone-unchecked-optional-access)
 
 } // namespace
 