Compare commits

...

4 Commits

Author SHA1 Message Date
d47d0439bd Ensuring the unfold_backward op test runs on all backends 2025-11-14 12:36:47 +05:30
1cf0a295a1 Move unfold_backward step validation to common code
Move TORCH_CHECK_VALUE for step > 0 from CPU kernel to shared
unfold_backward function to ensure all backends validate the
parameter.
2025-11-12 13:27:55 +05:30
e7559d82f3 Adding check for step size=0 in unfold backward to avoid divide by 0 or FPE for cpu kernel. 2025-11-12 13:18:16 +05:30
c0b0542391 Adding check for step size=0 in unfold backward to avoid divide by 0 or FPE. 2025-11-12 13:18:16 +05:30
2 changed files with 10 additions and 0 deletions

View File

@@ -21,6 +21,7 @@ Tensor unfold_backward(
int64_t size,
int64_t step
) {
TORCH_CHECK_VALUE(step > 0, "step is ", step, " but must be > 0");
auto grad_input = at::zeros(input_sizes, grad.options());
if (step >= size) {
auto gI_unfolded = grad_input.unfold(dim, size, step);

View File

@@ -843,6 +843,15 @@ class TestShapeOps(TestCase):
with self.assertRaisesRegex(RuntimeError, "step is -1 but must be > 0"):
x.unfold(0, 1, -1)
def test_unfold_backward_errors(self, device):
    # Verify that aten::unfold_backward rejects invalid `step` and `size`
    # arguments on every backend (per the commits above, the step > 0 check
    # was moved from the CPU kernel into the shared unfold_backward code).
    grad_in = torch.randn(2, 3, device=device)
    input_sizes = [6]
    # step == 0 trips TORCH_CHECK_VALUE (see the C++ hunk above), which
    # surfaces in Python as ValueError — guards against divide-by-zero/FPE.
    with self.assertRaisesRegex(ValueError, "step is 0 but must be > 0"):
        torch.ops.aten.unfold_backward(grad_in, input_sizes, 0, 3, 0)
    # Negative size trips a size >= 0 check raised as RuntimeError.
    # NOTE(review): that check is not visible in the diff above — presumably
    # pre-existing in unfold_backward; confirm against the full source.
    with self.assertRaisesRegex(RuntimeError, "size is -1 but must be >= 0"):
        torch.ops.aten.unfold_backward(grad_in, input_sizes, 0, -1, 1)
instantiate_device_type_tests(TestShapeOps, globals())