mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Bugfix for doing negative padding (#161639)
Fixes #161014. This fixes an edge case, outlined in issue #161014, where negative padding does not make the tensor size negative but still triggers the exception claiming the size is negative, keeping the behavior consistent with the existing exception handling. The fix simply changes the check to `new_dim >= 0` so the zero-size dimension is allowed and the operator returns an empty tensor. In this PR I have also added a test sample for the edge case where negative padding reduces a dimension to exactly zero. The sample covers only the `constant` type of padding; I would like feedback on whether the same sample is needed for the `reduce` type as well. This is my first PR contributing to PyTorch, and any help/feedback is welcome! Thank you! @malfet @manuelcandales @janeyx99 @ezyang Pull Request resolved: https://github.com/pytorch/pytorch/pull/161639 Approved by: https://github.com/manuelcandales
This commit is contained in:
committed by
PyTorch MergeBot
parent
248156ed06
commit
df9a4824e6
@ -73,7 +73,7 @@ Tensor constant_pad_nd(const Tensor& self, IntArrayRef pad, const Scalar& value)
|
||||
for (const auto i : c10::irange((size_t)l_pad)) {
|
||||
auto pad_idx = pad.size() - ((i + 1) * 2);
|
||||
auto new_dim = input_sizes[l_diff + i] + pad[pad_idx] + pad[pad_idx + 1];
|
||||
TORCH_CHECK(new_dim > 0, "The input size ", input_sizes[l_diff + i], ", plus negative padding ",
|
||||
TORCH_CHECK(new_dim >= 0, "The input size ", input_sizes[l_diff + i], ", plus negative padding ",
|
||||
pad[pad_idx], " and ", pad[pad_idx + 1], " resulted in a negative output size, "
|
||||
"which is invalid. Check dimension ", l_diff + i, " of your input.");
|
||||
new_shape.emplace_back(new_dim);
|
||||
|
@ -193,7 +193,6 @@ dtensor_fails = {
|
||||
xfail("linalg.lu_factor_ex"),
|
||||
xfail("linalg.lu_solve"),
|
||||
xfail("linalg.matrix_power"),
|
||||
xfail("linalg.multi_dot"),
|
||||
xfail("linalg.pinv"),
|
||||
xfail("linalg.pinv", "hermitian"),
|
||||
xfail("linalg.slogdet"),
|
||||
|
@ -2995,7 +2995,7 @@ def constant_pad_nd(
|
||||
pad_idx = len(pad) - ((i + 1) * 2)
|
||||
new_dim = input_sizes[l_diff + i] + pad[pad_idx] + pad[pad_idx + 1]
|
||||
torch._check(
|
||||
new_dim > 0,
|
||||
new_dim >= 0,
|
||||
lambda: f"The input size {input_sizes[l_diff + i]}, plus negative padding "
|
||||
f"{pad[pad_idx]} and {pad[pad_idx + 1]} resulted in a negative output size, "
|
||||
f"which is invalid. Check dimension {l_diff + i} of your input.",
|
||||
|
@ -225,7 +225,7 @@ std::vector<Shape> compute_shape_constant_pad_nd(
|
||||
auto pad_idx = pad.size() - ((i + 1) * 2);
|
||||
auto new_dim = input_sizes[l_diff + i] + pad[pad_idx] + pad[pad_idx + 1];
|
||||
TORCH_CHECK(
|
||||
new_dim > 0,
|
||||
new_dim >= 0,
|
||||
"The input size ",
|
||||
input_sizes[l_diff + i],
|
||||
", plus negative padding ",
|
||||
|
@ -5905,6 +5905,7 @@ def sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs):
|
||||
((1, 3), (1, 2)),
|
||||
((1, 3), (0, 1)),
|
||||
((1, 3), (0, 2, 0, 1)),
|
||||
((5, 3), (-1, -2, 1, 1)),
|
||||
((0, 3, 3), (1, 2)),
|
||||
((0, 3, 3), (0, 1)),
|
||||
((0, 3, 3), (0, 2, 0, 1)),
|
||||
|
Reference in New Issue
Block a user