Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
This reverts commit 20e2ca3e29ce9eb33eef17db077696222c175764.

Fixes https://github.com/pytorch/pytorch/issues/157434

Pull Request resolved: https://github.com/pytorch/pytorch/pull/161567
Approved by: https://github.com/Lucaskabela
Committed by: PyTorch MergeBot
Parent: 8b78ba07b1
Commit: be55d7ac9e
@@ -6486,21 +6486,6 @@ def forward(self, s77 : torch.SymInt, s27 : torch.SymInt, L_x_ : torch.Tensor):
         with torch.no_grad():
             model(x)

-    def test_ao_fake_quantize_tracing(self):
-        import torch.ao.quantization.fake_quantize
-
-        q = torch.ao.quantization.FusedMovingAvgObsFakeQuantize()
-
-        def fn(x):
-            return q(x)
-
-        x = torch.ones(2, 2)
-        opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
-        res = opt_fn(x)
-        eager_res = fn(x)
-
-        self.assertEqual(res, eager_res)
-
     def test_typed_dict(self):
         class LlavaImagePixelInputs(TypedDict):
             type: Literal["pixel_values"]
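For reference, the deleted test can be reproduced as a standalone script outside the test class. This is a minimal sketch based on the removed lines above; the fullgraph=True expectation reflects the behavior before this revert and may no longer hold once torch.ao.quantization.fake_quantize is dropped from MOD_INLINELIST.

# Standalone sketch of the removed test_ao_fake_quantize_tracing test.
# Assumes a PyTorch build from before this revert, where Dynamo inlined
# torch.ao.quantization.fake_quantize and could capture the module fullgraph.
import torch
import torch.ao.quantization.fake_quantize  # imported as in the original test

q = torch.ao.quantization.FusedMovingAvgObsFakeQuantize()

def fn(x):
    return q(x)

x = torch.ones(2, 2)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(x)
eager_res = fn(x)
torch.testing.assert_close(res, eager_res)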
@@ -3420,7 +3420,6 @@ MOD_INLINELIST = [
     "torch._tensor",
     "torch.amp.autocast_mode",
     "torch.ao.nn",
-    "torch.ao.quantization.fake_quantize",
     "torch.autograd.function",
     "torch.backends.cuda",
     "torch.cuda.amp.autocast_mode",
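MOD_INLINELIST (in torch/_dynamo/trace_rules.py) lists modules whose Python code Dynamo inlines during tracing rather than skipping; removing torch.ao.quantization.fake_quantize means calls into that module are no longer traced through. One way to observe the resulting tracing behavior is torch._dynamo.explain, which reports captured graphs and graph-break reasons. The sketch below is illustrative only; the exact counts and reasons depend on the PyTorch build.

# Sketch: inspect how Dynamo handles FusedMovingAvgObsFakeQuantize after this
# revert. torch._dynamo.explain returns an ExplainOutput describing how many
# graphs were captured and why any graph breaks occurred.
import torch
import torch._dynamo as dynamo

q = torch.ao.quantization.FusedMovingAvgObsFakeQuantize()

def fn(x):
    return q(x)

explanation = dynamo.explain(fn)(torch.ones(2, 2))
print(explanation.graph_count)        # number of captured graphs
print(explanation.graph_break_count)  # number of graph breaks
print(explanation.break_reasons)      # reasons, if any breaks occurred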