Compare commits

..

1 Commits

Author SHA1 Message Date
402b289f3b Properly register parameter for binary folding test (#128356)
This PR properly registers the tensor used in the module compute as a parameter. This bug was hidden previously because all tensors on the nn modules would be considered constant by dynamo; with NN-module inlining, this is no longer the case.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/128356
Approved by: https://github.com/anijain2305
ghstack dependencies: #128355
2024-06-11 06:48:26 +00:00

View File

@@ -56,7 +56,7 @@ class BinaryFoldingTemplate(TestCase):
self.use_scalar = scalar
tensor_size = [1 for _ in range(self.conv.weight.ndim)]
tensor_size[1] = self.conv.weight.size(0)
self.tensor = (
self.tensor = torch.nn.Parameter(
add_tensor
if add_tensor is not None
else torch.rand(tensor_size).to(device)
@@ -136,7 +136,11 @@ class BinaryFoldingTemplate(TestCase):
nn.Conv2d,
pytorch_op,
False,
add_tensor=torch.rand(32, 1, 32).to(self.device),
add_tensor=torch.rand(
32,
1,
32,
).to(self.device),
expect_success=False,
)
@@ -156,7 +160,7 @@ class BinaryFoldingTemplate(TestCase):
nn.Conv2d,
pytorch_op,
False,
add_tensor=torch.tensor([2]).to(torch.int).to(self.device),
add_tensor=torch.tensor([2]).to(torch.float64).to(self.device),
expect_success=False,
)