Revert "[Dynamo] Allow inlining into AO quantization modules (#152934)" (#161567)

This reverts commit 20e2ca3e29ce9eb33eef17db077696222c175764.

Fixes https://github.com/pytorch/pytorch/issues/157434

Pull Request resolved: https://github.com/pytorch/pytorch/pull/161567
Approved by: https://github.com/Lucaskabela
This commit is contained in:
Michael Lazos
2025-08-27 03:33:01 +00:00
committed by PyTorch MergeBot
parent 8b78ba07b1
commit be55d7ac9e
2 changed files with 0 additions and 16 deletions

View File

@@ -6486,21 +6486,6 @@ def forward(self, s77 : torch.SymInt, s27 : torch.SymInt, L_x_ : torch.Tensor):
with torch.no_grad():
model(x)
def test_ao_fake_quantize_tracing(self):
import torch.ao.quantization.fake_quantize
q = torch.ao.quantization.FusedMovingAvgObsFakeQuantize()
def fn(x):
return q(x)
x = torch.ones(2, 2)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(x)
eager_res = fn(x)
self.assertEqual(res, eager_res)
def test_typed_dict(self):
class LlavaImagePixelInputs(TypedDict):
type: Literal["pixel_values"]

View File

@@ -3420,7 +3420,6 @@ MOD_INLINELIST = [
"torch._tensor",
"torch.amp.autocast_mode",
"torch.ao.nn",
"torch.ao.quantization.fake_quantize",
"torch.autograd.function",
"torch.backends.cuda",
"torch.cuda.amp.autocast_mode",