pytorch/test/jit/test_parametrization.py
Anthony Barbier bf7e290854 Add __main__ guards to jit tests (#154725)
This PR is part of a series attempting to re-submit https://github.com/pytorch/pytorch/pull/134592 as smaller PRs.

In jit tests:

- Add and use a common raise_on_run_directly method for the case where a user directly runs a test file that should not be invoked that way; it prints the file the user should have run instead (a sketch follows below).
- Raise a RuntimeError for tests that have been disabled (not run).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/154725
Approved by: https://github.com/clee2000
2025-06-16 10:28:45 +00:00
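
For context, here is a minimal sketch of what such a helper might look like. This is a hypothetical reconstruction; the actual implementation lives in torch/testing/_internal/common_utils.py and may differ in message wording and details.

# Hypothetical sketch of a raise_on_run_directly-style helper; the real
# version ships in torch.testing._internal.common_utils and may differ.
def raise_on_run_directly(file_to_call: str) -> None:
    # Point the user at the wrapper file they should have run instead.
    raise RuntimeError(
        "This test file is not meant to be run directly. "
        f"Run it via: python {file_to_call}"
    )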

# Owner(s): ["oncall: jit"]

import torch
import torch.nn.utils.parametrize as parametrize
from torch import nn
from torch.testing._internal.common_utils import raise_on_run_directly
from torch.testing._internal.jit_utils import JitTestCase


class TestParametrization(JitTestCase):
    # Define some parametrization
    class Symmetric(nn.Module):
        def forward(self, X):
            return X.triu() + X.triu(1).mT
    def test_traceable(self):
        r"""Test the jit scripting and tracing of a parametrized model."""
        model = nn.Linear(5, 5)
        parametrize.register_parametrization(model, "weight", self.Symmetric())

        x = torch.randn(3, 5)
        y = model(x)

        # Check the tracing works. Because traced functions cannot be called
        # directly, we run the comparison on the activations.
        traced_model = torch.jit.trace_module(model, {"forward": x})
        y_hat = traced_model(x)
        self.assertEqual(y, y_hat)

        # Check traced model works with caching
        with parametrize.cached():
            y_hat = traced_model(x)
            self.assertEqual(y, y_hat)

        # Check the tracing throws an error when caching
        with self.assertRaisesRegex(RuntimeError, "Cannot trace a model while caching"):
            with parametrize.cached():
                traced_model = torch.jit.trace_module(model, {"forward": x})
    def test_scriptable(self):
        # TODO: Need to fix the scripting in parametrizations
        # Currently, all the tests below will throw torch.jit.Error
        model = nn.Linear(5, 5)
        parametrize.register_parametrization(model, "weight", self.Symmetric())

        x = torch.randn(3, 5)
        y = model(x)

        with self.assertRaises(torch.jit.Error):
            # Check scripting works
            scripted_model = torch.jit.script(model)
            y_hat = scripted_model(x)
            self.assertEqual(y, y_hat)

            with parametrize.cached():
                # Check scripted model works when caching
                y_hat = scripted_model(x)
                self.assertEqual(y, y_hat)

            # Check the scripting process throws an error when caching
            with self.assertRaisesRegex(RuntimeError, "Caching is not implemented"):
                scripted_model = torch.jit.script(model)


if __name__ == "__main__":
    raise_on_run_directly("test/test_jit.py")
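
For readers unfamiliar with parametrizations, here is a minimal standalone sketch, independent of the test above and using only the public torch.nn.utils.parametrize API, of what registering the Symmetric parametrization does to a Linear layer's weight and how parametrize.cached() is used:

import torch
import torch.nn.utils.parametrize as parametrize
from torch import nn


class Symmetric(nn.Module):
    def forward(self, X):
        # Upper triangle (with diagonal) plus the strict upper triangle
        # transposed: the result is a symmetric matrix.
        return X.triu() + X.triu(1).mT


linear = nn.Linear(5, 5)
parametrize.register_parametrization(linear, "weight", Symmetric())

# linear.weight is now recomputed from the underlying parameter on
# every access, and is guaranteed to be symmetric.
assert torch.allclose(linear.weight, linear.weight.mT)

x = torch.randn(3, 5)
with parametrize.cached():
    # Inside this context the parametrized weight is computed once and
    # reused, which matters for expensive parametrizations.
    y1 = linear(x)
    y2 = linear(x)
assert torch.allclose(y1, y2)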