[4/N] Apply bugprone-unchecked-optional-access (#142832)
Fixes #ISSUE_NUMBER
Pull Request resolved: https://github.com/pytorch/pytorch/pull/142832
Approved by: https://github.com/albanD
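The change applies a small set of remedies over and over: call `has_value()` before touching a `std::optional`, hoist the contained value into a local that the analyzer can track, return optional members by const reference so callers test and read the same object, or add `// NOLINTNEXTLINE(bugprone-unchecked-optional-access)` where an earlier assert already guarantees a value. The sketch below illustrates the checked-access and assert-then-suppress patterns; it is illustrative only, and `Layer`, `currentLayer`, and the function names are hypothetical rather than code from this PR.

```cpp
#include <cassert>
#include <iostream>
#include <optional>

// Hypothetical stand-ins; the real code works with types such as
// DynamicLayer via maybeCurrentDynamicLayer().
struct Layer {
  int id;
};

std::optional<Layer> currentLayer(bool active) {
  if (active) {
    return Layer{42};
  }
  return std::nullopt;
}

int layerIdOrDefault(bool active) {
  auto maybe_layer = currentLayer(active);
  // An unguarded `maybe_layer->id` here would be flagged by
  // bugprone-unchecked-optional-access.
  if (!maybe_layer.has_value()) {
    return -1;  // handle the empty case explicitly
  }
  return maybe_layer->id;  // safe: guarded by has_value()
}

int layerIdAsserted() {
  auto maybe_layer = currentLayer(/*active=*/true);
  assert(maybe_layer.has_value());
  // The invariant is established above, so the access is suppressed
  // rather than re-checked, mirroring the NOLINT usage in the diff.
  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
  return maybe_layer->id;
}

int main() {
  std::cout << layerIdOrDefault(false) << " " << layerIdAsserted() << "\n";
  return 0;
}
```

The suppression is only appropriate when the surrounding assert or control flow guarantees the optional is engaged, which is how the NOLINT lines are used in the hunks below.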
@@ -1129,6 +1129,7 @@ TEST(ListTest, canAccessOptionalStringByReference) {
   EXPECT_EQ("two", str1);
   EXPECT_FALSE(str2.has_value());
   EXPECT_TRUE(strRef1.has_value());
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   EXPECT_EQ("two", strRef1.value().get());
   EXPECT_FALSE(strRef2.has_value());
 }
@@ -66,7 +66,7 @@ struct TORCH_API EnumType : public NamedType {
   }
 
   const QualifiedName& qualifiedClassName() const {
-    // NOLINTLEXTLINE(bugprone-unchecked-optional-access)
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     return name().value();
   }
 
@@ -95,7 +95,7 @@ struct TORCH_API Argument {
   const TypePtr& real_type() const {
     return real_type_;
   }
-  std::optional<int32_t> N() const {
+  const std::optional<int32_t>& N() const {
     return N_;
   }
   const std::optional<IValue>& default_value() const {
@@ -651,11 +651,11 @@ template<>
     hash = c10::hash_combine(hash, type_hash);
     hash = c10::hash_combine(hash, kwarg_only_hash);
     // hashing optional fields if they exist
-    if (arg.default_value()) {
-      auto default_value_hash = c10::hash<c10::IValue>{}(arg.default_value().value());
+    if (arg.default_value().has_value()) {
+      auto default_value_hash = c10::hash<c10::IValue>{}(*arg.default_value());
       hash = c10::hash_combine(hash, default_value_hash);
     }
-    if (arg.N()) {
+    if (arg.N().has_value()) {
       auto N_hash = std::hash<int64_t>{}(*arg.N());
       hash = c10::hash_combine(hash, N_hash);
     }
@@ -1546,11 +1546,11 @@ struct WeakOrStrongCompilationUnit {
   }
 
   bool holdingStrongRef() const {
-    return strong_ptr_ != std::nullopt;
+    return strong_ptr_.has_value();
   }
 
   bool holdingEmptyStrongRef() const {
-    return holdingStrongRef() && *strong_ptr_ == nullptr;
+    return strong_ptr_ == nullptr;
  }
 
   std::optional<std::shared_ptr<torch::jit::CompilationUnit>> strong_ptr_;
@@ -625,13 +625,13 @@ struct TORCH_API TensorType : public SharedType {
     return strides_;
   }
 
-  std::optional<at::Device> device() const {
+  const std::optional<at::Device>& device() const {
     return device_;
   }
-  std::optional<at::ScalarType> scalarType() const {
+  const std::optional<at::ScalarType>& scalarType() const {
     return scalar_type_;
   }
-  std::optional<bool> requiresGrad() const {
+  const std::optional<bool>& requiresGrad() const {
     return requires_grad_;
   }
   bool requires_grad() const override {
@@ -656,7 +656,7 @@ struct TORCH_API TensorType : public SharedType {
     const auto& shape = sizes();
 
     for (size_t i = 0; i < shape.size(); i++) {
-      if (!shape[i]) {
+      if (!shape[i].has_value()) {
        return std::optional<size_t>{};
      }
      prod *= shape[i].value();
@@ -292,7 +292,7 @@ TensorTypePtr TensorType::create(
         scalar_type, device, symbol_sizes, sprops, requires_grad, undefined);
   } else {
     // strides are all null, but still have number of strides equal to number of ranks
-    TORCH_INTERNAL_ASSERT(sizes.sizes() && sizes.size());
+    TORCH_INTERNAL_ASSERT(sizes.sizes().has_value() && sizes.size());
     auto symbol_sizes = SymbolicShape(*sizes.sizes());
     return TensorType::create(
         scalar_type, device, symbol_sizes, VaryingShape<Stride>(*sizes.size()), requires_grad, undefined);
@@ -61,8 +61,8 @@ std::ostream& operator<<(std::ostream & out, const Type & t) {
     } else {
       out << "Tensor";
     }
-    if (auto ndim = value->sizes().size()) {
-      bool has_valid_strides_info = *ndim > 0 &&
+    if (auto ndim = value->sizes().size(); ndim.has_value()) {
+      bool has_valid_strides_info = ndim > 0 &&
           value->strides().isComplete() && value->strides().size() == ndim;
 
       out << "(";
@@ -87,7 +87,7 @@ std::ostream& operator<<(std::ostream & out, const Type & t) {
         if (i > 0) {
           out << ", ";
         }
-        out << *value->strides()[i];
+        out << value->strides()[i].value();
       }
       out << "]";
     }
@@ -903,7 +903,7 @@ bool ListType::isSubtypeOfExt(const Type& rhs_, std::ostream* why_not) const {
 
 std::string TupleType::str() const {
   std::stringstream ss;
-  if (schema_ && name()) {
+  if (schema_ && name().has_value()) {
     ss << name()->qualifiedName();
   } else {
     ss << "(";
@@ -42,8 +42,9 @@ static Tensor materializeGradWrappers(const Tensor& tensor, int64_t current_leve
   if (!wrapper) {
     return makeTensorWrapper(tensor, current_level, /*is_immutable=*/true);
   }
-  TORCH_INTERNAL_ASSERT(wrapper->level().value() <= current_level, "escaped?");
-  if (wrapper->level() == current_level) {
+  auto level = wrapper->level();
+  TORCH_INTERNAL_ASSERT(level.has_value() && level <= current_level, "escaped?");
+  if (level == current_level) {
     TORCH_INTERNAL_ASSERT(tensor.defined());
     return tensor;
   }
@@ -54,6 +54,8 @@ struct BinaryRandomPointwiseBatchRuleHelper<F, Func, typelist<T1, T2, T...>> {
   static Tensor apply(const Tensor& tensor, const Tensor& other, T... extra_args) {
     c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode);
     auto maybe_layer = maybeCurrentDynamicLayer();
+    TORCH_INTERNAL_ASSERT(maybe_layer.has_value())
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     auto cur_level = maybe_layer->layerId();
     RandomnessType randomness = maybe_layer->randomness();
 
@@ -19,6 +19,7 @@ struct NewBlahBatchRuleHelperSymInt<F, Func, typelist<A, B, T...>> {
       std::optional<int64_t> batch_dim,
       SymIntArrayRef shape,
       T... extra_args) {
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     const auto bdim_size = tensor.sym_size(batch_dim.value());
     c10::SmallVector<c10::SymInt> new_shape;
     new_shape.reserve(shape.size() + 1);
@@ -9,7 +9,7 @@
 
 namespace at::functorch {
 
-Tensor moveBatchDimToFront(const Tensor& tensor, std::optional<int64_t> maybe_batch_dim) {
+Tensor moveBatchDimToFront(Tensor tensor, std::optional<int64_t> maybe_batch_dim) {
   if (!maybe_batch_dim.has_value()) {
     return tensor;
   }
@@ -30,7 +30,7 @@ TORCH_API Tensor reshape_dim_outof(int64_t src, int64_t size1, const Tensor& x);
 
 TORCH_API Tensor reshape_dim_outof_symint(int64_t src, const c10::SymInt& size1, const Tensor& x);
 
-Tensor moveBatchDimToFront(const Tensor& tensor, std::optional<int64_t> maybe_batch_dim);
+Tensor moveBatchDimToFront(Tensor tensor, std::optional<int64_t> maybe_batch_dim);
 int64_t rankWithoutBatchDim(const Tensor& tensor, std::optional<int64_t> maybe_batch_dim);
 int64_t numelWithoutBatchDim(const Tensor& tensor, std::optional<int64_t> maybe_batch_dim);
 std::optional<int64_t> valIfNonempty(std::optional<int64_t> maybe_empty, int64_t new_val);
@@ -243,9 +243,8 @@ inline void boxed_existing_bdim_all_batch_rule(
   const auto num_arguments = static_cast<int64_t>(schema.arguments().size());
 
   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
-  auto maybe_layer = maybeCurrentDynamicLayer();
+  const auto maybe_layer = maybeCurrentDynamicLayer();
   vmap_check_escaped(maybe_layer, "boxed_existing_bdim_all_batch_rule");
-  int64_t cur_level = maybe_layer->layerId();
 
   const auto arguments = torch::jit::last(stack, num_arguments);
   if (std::none_of(arguments.begin(), arguments.end(), ivalueParticipatesInCurrentLevel)) {
@@ -257,6 +256,8 @@ inline void boxed_existing_bdim_all_batch_rule(
   SmallVector<UnpackedBatchedTensor, 5> tensor_inputs;
   SmallVector<int64_t, 5> tensor_pos;
   int64_t batch_size = 0;
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
+  int64_t cur_level = maybe_layer->layerId();
 
   find_and_unpack_tensors(
       stack, num_arguments, cur_level,
@@ -492,6 +492,7 @@ _scaled_dot_product_flash_attention_batch_rule(
 ) {
   if (dropout_p > 0) {
     auto maybe_layer = maybeCurrentDynamicLayer();
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     RandomnessType randomness = maybe_layer->randomness();
     check_randomness(randomness, query_bdim.has_value() || key_bdim.has_value() || value_bdim.has_value());
   }
@@ -543,6 +544,7 @@ fourOutputs _scaled_dot_product_efficient_attention_batch_rule(
 ) {
   if (dropout_p > 0) {
     auto maybe_layer = maybeCurrentDynamicLayer();
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     RandomnessType randomness = maybe_layer->randomness();
     check_randomness(randomness, query_bdim.has_value() || key_bdim.has_value() || value_bdim.has_value());
   }
@@ -585,6 +587,7 @@ _scaled_dot_product_cudnn_attention_batch_rule(
 ) {
   if (dropout_p > 0) {
     auto maybe_layer = maybeCurrentDynamicLayer();
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     RandomnessType randomness = maybe_layer->randomness();
     check_randomness(randomness, query_bdim.has_value() || key_bdim.has_value() || value_bdim.has_value());
   }
@@ -90,6 +90,7 @@ static Tensor binary_cross_entropy_plumbing(
     const std::optional<Tensor>& weight, int64_t reduction) {
   auto maybe_layer = maybeCurrentDynamicLayer();
   vmap_check_escaped(maybe_layer, "binary_cross_entropy_plumbing");
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   int64_t cur_level = maybe_layer->layerId();
 
   if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)
@@ -126,6 +127,7 @@ static Tensor binary_cross_entropy_backward_plumbing(
     const std::optional<Tensor>& weight_opt, int64_t reduction) {
   auto maybe_layer = maybeCurrentDynamicLayer();
   vmap_check_escaped(maybe_layer, "binary_cross_entropy_backward_plumbing");
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   int64_t cur_level = maybe_layer->layerId();
 
   if (!areAnyBatchedAtLevel({grad, input, target, weight_opt}, cur_level)) {
@@ -57,7 +57,7 @@ embedding_dense_backward_batch_rule(
     c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
   Tensor grad = grad_;
   Tensor indices = indices_;
-  if (!indices_bdim && grad_bdim) {
+  if (!indices_bdim.has_value() && grad_bdim) {
     const auto bdim_size = grad.sym_size(*grad_bdim);
     grad = reshape_dim_into(*grad_bdim, -1, grad);
     auto result = at::embedding_dense_backward_symint(
@@ -162,12 +162,12 @@ grid_sample_backward_helper_in(
 static std::tuple<Tensor, std::optional<int64_t>, Tensor, std::optional<int64_t>>
 grid_sample_backward_helper_out(
     std::tuple<Tensor, Tensor> bw_out,
-    std::optional<int64_t> grad_input_out_bdim,
-    std::optional<int64_t> grad_grid_out_bdim,
+    int64_t grad_input_out_bdim,
+    int64_t grad_grid_out_bdim,
     int64_t bdim_size) {
   auto& [grad_input, grad_grid] = bw_out;
-  grad_input = reshape_dim_outof(*grad_input_out_bdim, bdim_size, grad_input);
-  grad_grid = reshape_dim_outof(*grad_grid_out_bdim, bdim_size, grad_grid);
+  grad_input = reshape_dim_outof(grad_input_out_bdim, bdim_size, grad_input);
+  grad_grid = reshape_dim_outof(grad_grid_out_bdim, bdim_size, grad_grid);
   return std::make_tuple(std::move(grad_input), grad_input_out_bdim, std::move(grad_grid), grad_grid_out_bdim);
 }
 
@@ -218,6 +218,8 @@ std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_plumbing(
   c10::MaybeOwned<Tensor> running_var_maybe_owned = at::borrow_from_optional_tensor(running_var_opt);
   const Tensor& running_var = *running_var_maybe_owned;
   // NB: not sure why these are optional...these are required from the forward
+  TORCH_INTERNAL_ASSERT(save_mean_opt.has_value());
+  TORCH_INTERNAL_ASSERT(save_rstd_opt.has_value());
   const Tensor& save_mean = *save_mean_opt;
   const Tensor& save_rstd = *save_rstd_opt;
   TORCH_INTERNAL_ASSERT(save_mean.defined());
@@ -226,6 +228,7 @@ std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_plumbing(
   // plumbing
   auto maybe_layer = maybeCurrentDynamicLayer();
   vmap_check_escaped(maybe_layer, "batch_norm_backward_plumbing");
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   int64_t cur_level = maybe_layer->layerId();
 
   auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
@@ -298,6 +301,7 @@ static std::tuple<Tensor,Tensor,Tensor> native_group_norm_plumbing(
 
   auto maybe_layer = maybeCurrentDynamicLayer();
   vmap_check_escaped(maybe_layer, "native_group_norm_plumbing");
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   int64_t cur_level = maybe_layer->layerId();
 
   if (!areAnyBatchedAtLevel({input, weight_opt, bias_opt}, cur_level)) {
@@ -380,6 +384,7 @@ static std::tuple<Tensor,Tensor,Tensor> native_group_norm_backward_plumbing(
   // plumbing
   auto maybe_layer = maybeCurrentDynamicLayer();
   vmap_check_escaped(maybe_layer, "native_group_norm_backward_plumbing");
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   int64_t cur_level = maybe_layer->layerId();
 
   if (!areAnyBatchedAtLevel({grad_out, input, mean, rstd, weight_opt}, cur_level)) {
@@ -579,6 +584,7 @@ static std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_p
   // plumbing
   auto maybe_layer = maybeCurrentDynamicLayer();
   vmap_check_escaped(maybe_layer, "native_layer_norm_backward_plumbing");
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   int64_t cur_level = maybe_layer->layerId();
   if (!areAnyBatchedAtLevel({grad_out, input, mean, rstd, weight_opt, bias_opt}, cur_level)) {
     c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
@@ -721,6 +727,7 @@ struct NativeBatchNormBackwardBatchRuleHelper {
 
     auto maybe_layer = maybeCurrentDynamicLayer();
     vmap_check_escaped(maybe_layer, "NativeBatchNormBackwardBatchRuleHelper.apply");
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     int64_t cur_level = maybe_layer->layerId();
 
     if (!areAnyBatchedAtLevel({grad_out, input, weight_opt, running_mean_opt,
@@ -751,6 +758,7 @@ struct CudnnBatchNormBackwardBatchRuleHelper {
 
     auto maybe_layer = maybeCurrentDynamicLayer();
     vmap_check_escaped(maybe_layer, "CudnnBatchNormBackwardBatchRuleHelper.apply");
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     int64_t cur_level = maybe_layer->layerId();
 
     if (!areAnyBatchedAtLevel({input, grad_out, weight, running_mean_opt,
@@ -779,6 +787,7 @@ struct MiopenBatchNormBackwardBatchRuleHelper {
 
     auto maybe_layer = maybeCurrentDynamicLayer();
     vmap_check_escaped(maybe_layer, "MiopenBatchNormBackwardBatchRuleHelper.apply");
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     int64_t cur_level = maybe_layer->layerId();
 
     if (!areAnyBatchedAtLevel({input, grad_out, weight, running_mean_opt,
@@ -28,8 +28,10 @@ max_pool_with_indices_batch_rule_helper(
     return std::make_tuple(std::move(std::get<0>(result)), 0, std::move(std::get<1>(result)), 0);
   }
   // Tensor[B, N, logical_rank...] -> Tensor[B * N, logical_rank...]
-  auto bdim_size = self.size(*self_bdim);
-  auto self_ = reshape_dim_into(*self_bdim, 0, self);
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
+  auto bdim_size = self.size(self_bdim.value());
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
+  auto self_ = reshape_dim_into(self_bdim.value(), 0, self);
   auto result = pooling_fn(
       self_, kernel_size, stride, padding, dilation, ceil_mode);
   return std::make_tuple(
@@ -25,6 +25,7 @@ Tensor random_batching_rule(SymIntArrayRef shape, ExtraArgs... extra_args) {
   c10::SmallVector<SymInt> shapeVec(1, maybe_layer->batchSize());
   shapeVec.reserve(shape.size() + 1);
   shapeVec.insert(shapeVec.end(), shape.begin(), shape.end());
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   RandomnessType randomness = maybe_layer->randomness();
   check_randomness(randomness);
   if (randomness == RandomnessType::Different) {
@@ -38,9 +39,11 @@ template <typename F, F Func, typename... ExtraArgs>
 Tensor& random_inplace_batching_rule(Tensor& self, ExtraArgs... extra_args) {
   c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchVmapMode);
   auto maybe_layer = maybeCurrentDynamicLayer();
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   const auto cur_level = maybe_layer->layerId();
   auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
-  self_value = moveBatchDimToFront(self_value, self_bdim);
+  self_value = moveBatchDimToFront(std::move(self_value), self_bdim);
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   RandomnessType randomness = maybe_layer->randomness();
   check_randomness(randomness);
   TORCH_CHECK(
@@ -76,6 +76,7 @@ PyObject* THPSize_NewFromSymSizes(const at::Tensor& self_) {
         throw python_error();
       PyTuple_SET_ITEM(ret.get(), i, py_size_tensor);
     } else {
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
      PyTuple_SET_ITEM(ret.get(), i, THPUtils_packInt64(m.value()));
     }
   }
@@ -15,7 +15,7 @@ inline Tensor batch_norm(
     Tensor weight,
     Tensor bias,
     bool training,
-    std::optional<double> momentum,
+    double momentum,
     double eps) {
   TORCH_CHECK(
       input.dim() >= 2,
@@ -40,7 +40,7 @@ inline Tensor batch_norm(
       running_mean,
       running_var,
       training,
-      momentum.value(),
+      momentum,
       eps,
       at::globalContext().userEnabledCuDNN());
 }
@@ -402,7 +402,7 @@ inline Tensor smooth_l1_loss(
     const SmoothL1LossFuncOptions& options,
     double beta) {
   TORCH_CHECK(
-      options.beta() == std::nullopt,
+      !options.beta().has_value(),
       "expected beta not to be provided in 'options', but got ",
       options.beta().value());
   return detail::smooth_l1_loss(input, target, options.reduction(), beta);
@@ -81,7 +81,7 @@ struct TORCH_API BatchNormFuncOptions {
 
   /// A momentum multiplier for the mean and variance.
   /// Changing this parameter after construction __is effective__.
-  TORCH_ARG(std::optional<double>, momentum) = 0.1;
+  TORCH_ARG(double, momentum) = 0.1;
 
   /// The epsilon value added for numerical stability.
   /// Changing this parameter after construction __is effective__.
@@ -25,6 +25,7 @@ void EmbeddingImpl::reset() {
       TORCH_CHECK(
           options.padding_idx() >= -options.num_embeddings(),
           "Padding_idx must be within num_embedding");
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
      options.padding_idx(options.num_embeddings() + *options.padding_idx());
     }
   }
@@ -46,7 +47,7 @@ void EmbeddingImpl::reset() {
 
 void EmbeddingImpl::reset_parameters() {
   torch::nn::init::normal_(weight);
-  if (options.padding_idx() != std::nullopt) {
+  if (options.padding_idx().has_value()) {
     torch::NoGradGuard no_grad;
     weight[*options.padding_idx()].fill_(0);
   }
@@ -55,10 +56,10 @@ void EmbeddingImpl::reset_parameters() {
 void EmbeddingImpl::pretty_print(std::ostream& stream) const {
   stream << "torch::nn::Embedding(num_embeddings=" << options.num_embeddings()
          << ", embedding_dim=" << options.embedding_dim();
-  if (options.padding_idx() != std::nullopt) {
+  if (options.padding_idx().has_value()) {
     stream << ", padding_idx=" << *options.padding_idx();
   }
-  if (options.max_norm() != std::nullopt) {
+  if (options.max_norm().has_value()) {
     stream << ", max_norm=" << *options.max_norm();
   }
   if (options.norm_type() != 2) {
@@ -150,7 +151,7 @@ void EmbeddingBagImpl::pretty_print(std::ostream& stream) const {
   stream << "torch::nn::EmbeddingBag(num_embeddings="
          << options.num_embeddings()
          << ", embedding_dim=" << options.embedding_dim();
-  if (options.max_norm() != std::nullopt) {
+  if (options.max_norm().has_value()) {
     stream << ", max_norm=" << *options.max_norm();
   }
   if (options.norm_type() != 2) {
@@ -287,13 +287,13 @@ void FractionalMaxPool2dImpl::reset() {
         "FractionalMaxPool2d requires specifying either ",
         "an output size, or a pooling ratio");
   }
-  if (options.output_size() != std::nullopt &&
-      options.output_ratio() != std::nullopt) {
+  if (options.output_size().has_value() && options.output_ratio().has_value()) {
     TORCH_CHECK(
         false, "only one of output_size and output_ratio may be specified");
   }
-  if (options.output_ratio() != std::nullopt) {
+  if (options.output_ratio().has_value()) {
     at::ArrayRef<double> output_ratio =
+        // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
         at::ArrayRef<double>(options.output_ratio().value());
     if (!(0 < output_ratio[0] && output_ratio[0] < 1 && 0 < output_ratio[1] &&
           output_ratio[1] < 1)) {
@@ -346,13 +346,13 @@ void FractionalMaxPool3dImpl::reset() {
         "FractionalMaxPool3d requires specifying either ",
         "an output size, or a pooling ratio");
   }
-  if (options.output_size() != std::nullopt &&
-      options.output_ratio() != std::nullopt) {
+  if (options.output_size().has_value() && options.output_ratio().has_value()) {
     TORCH_CHECK(
         false, "only one of output_size and output_ratio may be specified");
   }
-  if (options.output_ratio() != std::nullopt) {
+  if (options.output_ratio().has_value()) {
     at::ArrayRef<double> output_ratio =
+        // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
         at::ArrayRef<double>(options.output_ratio().value());
     if (!(0 < output_ratio[0] && output_ratio[0] < 1 && 0 < output_ratio[1] &&
           output_ratio[1] < 1 && 0 < output_ratio[2] && output_ratio[2] < 1)) {
@@ -1,23 +1,22 @@
 #include <torch/nn/modules/upsampling.h>
 
 #include <string>
 
 namespace F = torch::nn::functional;
 
 namespace torch::nn {
 
-UpsampleImpl::UpsampleImpl(
-    const UpsampleOptions& options_) // NOLINT(modernize-pass-by-value)
+UpsampleImpl::UpsampleImpl(const UpsampleOptions& options_)
     : options(options_) {}
 
 void UpsampleImpl::reset() {}
 
 void UpsampleImpl::pretty_print(std::ostream& stream) const {
   stream << "torch::nn::Upsample(";
-  if (options.scale_factor() != std::nullopt) {
+  if (options.scale_factor().has_value()) {
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     stream << "scale_factor=" << at::ArrayRef<double>(*options.scale_factor());
   } else {
-    stream << "size=" << at::ArrayRef<int64_t>(*options.size());
+    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
+    stream << "size=" << at::ArrayRef<int64_t>(options.size().value());
   }
   stream << ", mode=" << enumtype::get_enum_name(options.mode()) << ")";
 }
@@ -874,6 +874,7 @@ const InputMetadata& get_input_metadata(const T& thing);
 template <>
 const InputMetadata& get_input_metadata<c10::optional<InputMetadata>>(
     const c10::optional<InputMetadata>& thing) {
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   return thing.value();
 }
 
@@ -185,7 +185,7 @@ auto PyNode::apply(variable_list&& inputs) -> variable_list {
 
 auto PyNode::defer_to_dynamo(
     variable_list&& inputs,
-    std::optional<PyObject*> compiler) -> variable_list {
+    const std::optional<PyObject*>& compiler) -> variable_list {
   pybind11::gil_scoped_acquire gil;
   at::OptionalDeviceGuard _device_guard;
   THPFunction* py_fn = (THPFunction*)obj;
@@ -238,7 +238,8 @@ auto PyNode::defer_to_dynamo(
       "indices should already be set by compiled_args, called before apply_with_saved");
   TORCH_INTERNAL_ASSERT(!_backward_state_idx.has_value());
   THPObjectPtr r(PyObject_CallMethod(
-      *compiler,
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
+      compiler.value(),
       "proxy_call_backward",
       "OOOi",
       pyInputs.get(),
@@ -724,8 +725,9 @@ static void _wrap_outputs(
 
   for (const auto i : c10::irange(num_outputs)) {
     PyObject* obj = PyTuple_GetItem(raw_output, i);
+    const auto& wrapped_output = wrapped_outputs[i];
     // Keep the non-tensor outputs as is.
-    if (!THPVariable_Check(obj)) {
+    if (!THPVariable_Check(obj) || !wrapped_output.has_value()) {
       if (is_executable) {
         self->output_info.emplace_back();
       }
@@ -736,18 +738,15 @@ static void _wrap_outputs(
         // If one of the grad outputs is undefined, a correctly-shaped zeros
         // should be used instead. To construct these for NJT, zeros_like() must
         // be used until we have factory function support.
-        // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
         bool is_differentiable =
-            (non_differentiable.count(
-                 wrapped_outputs[i]->unsafeGetTensorImpl()) == 0 &&
-             isDifferentiableType(wrapped_outputs[i]->scalar_type()));
-        bool use_zeros_like = is_differentiable && num_outputs > 1 &&
-            wrapped_outputs[i]->is_nested();
-        // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
-        self->output_info.emplace_back(*wrapped_outputs[i], use_zeros_like);
+            (non_differentiable.count(wrapped_output->unsafeGetTensorImpl()) ==
+                 0 &&
+             isDifferentiableType(wrapped_output->scalar_type()));
+        bool use_zeros_like =
+            is_differentiable && num_outputs > 1 && wrapped_output->is_nested();
+        self->output_info.emplace_back(wrapped_output.value(), use_zeros_like);
       }
-      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
-      PyTuple_SetItem(outputs, i, THPVariable_Wrap(*wrapped_outputs[i]));
+      PyTuple_SetItem(outputs, i, THPVariable_Wrap(wrapped_output.value()));
     }
   }
 }
@@ -36,7 +36,7 @@ struct PyNode : public Node {
   variable_list apply(variable_list&& inputs) override;
   variable_list defer_to_dynamo(
       variable_list&& inputs,
-      std::optional<PyObject*> compiler);
+      const std::optional<PyObject*>& compiler);
 
   void release_variables() override;
   std::string name() const override;
@@ -18,7 +18,8 @@ SequenceNum::SequenceNum(const SequenceNum& other) {
 
 uint64_t SequenceNum::get() const {
   std::lock_guard<std::mutex> lock(lock_);
-  return *num_;
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
+  return num_.value();
 }
 
 void SequenceNum::increment() {
@@ -23,18 +23,20 @@ ScriptCall::ScriptCall(
       isAsyncExecution_(isAsyncExecution) {}
 
 bool ScriptCall::hasOp() const {
-  return op_ ? true : false;
+  return op_.has_value();
 }
 
 std::shared_ptr<Operator> ScriptCall::op() const {
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   return op_.value();
 }
 
 bool ScriptCall::hasQualifiedName() const {
-  return qualifiedName_ ? true : false;
+  return qualifiedName_.has_value();
 }
 
 const c10::QualifiedName& ScriptCall::qualifiedName() const {
+  // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
   return qualifiedName_.value();
 }
 
@@ -51,7 +53,7 @@ void ScriptCall::toIValues(std::vector<at::IValue>& ivalues) const {
     ivalues.push_back(value);
   }
 
-  if (hasOp()) {
+  if (op_.has_value()) {
     TORCH_CHECK(
         !hasQualifiedName(),
         "It is builtin operator call, qualifiedName_ should not be set.");
@@ -73,7 +75,7 @@ void ScriptCall::toIValues(std::vector<at::IValue>& ivalues) const {
     TORCH_CHECK(
         !hasOp(),
         "It is TorchScript function call, operator should not be set.");
-    ivalues.emplace_back((*qualifiedName_).qualifiedName());
+    ivalues.emplace_back(qualifiedName().qualifiedName());
   } else {
     TORCH_INTERNAL_ASSERT(
         false,
@@ -324,6 +324,7 @@ class CompiledNodeArgs {
   template <typename T>
   void collect(const std::optional<T>& t) {
     if (cond(t.has_value())) {
+      // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
       collect(*t);
     }
   }