Revert "unskipped mobilenet_v3 quantization and mobilenet_v2 quantization plus tests from https://github.com/pytorch/pytorch/issues/125438 (#157786)"

This reverts commit 3a2c3c8ed365eb4e4cf4620c25d70b2f70483762.

Reverted https://github.com/pytorch/pytorch/pull/157786 on behalf of https://github.com/albanD due to Breaks lint ([comment](https://github.com/pytorch/pytorch/pull/157786#issuecomment-3164126250))
PyTorch MergeBot
2025-08-07 13:09:33 +00:00
parent 8cb91e20bc
commit a53d14d5f8
2 changed files with 6 additions and 1 deletion

test/quantization/eager/test_numeric_suite_eager.py

@@ -1,6 +1,7 @@
 # Owner(s): ["oncall: quantization"]
 # ruff: noqa: F841
+import unittest
 import torch
 import torch.ao.nn.quantized as nnq
@@ -37,7 +38,7 @@ from torch.testing._internal.common_quantization import (
     test_only_eval_fn,
 )
 from torch.testing._internal.common_quantized import override_qengines
-from torch.testing._internal.common_utils import raise_on_run_directly
+from torch.testing._internal.common_utils import IS_ARM64, raise_on_run_directly
 
 class SubModule(torch.nn.Module):
@@ -599,12 +600,14 @@ class TestNumericSuiteEager(QuantizationTestCase):
         act_compare_dict = get_matching_activations(float_model, qmodel)
 
     @skip_if_no_torchvision
+    @unittest.skipIf(IS_ARM64, "Not working on arm right now")
     def test_mobilenet_v2(self):
         from torchvision.models.quantization import mobilenet_v2
 
         self._test_vision_model(mobilenet_v2(pretrained=True, quantize=False))
 
     @skip_if_no_torchvision
+    @unittest.skipIf(IS_ARM64, "Not working on arm right now")
     def test_mobilenet_v3(self):
         from torchvision.models.quantization import mobilenet_v3_large
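
For reference, the lines re-added above use unittest's decorator-based skip. Below is a minimal, self-contained sketch of that pattern; IS_ARM64 is recomputed locally for illustration, while the test file imports it from torch.testing._internal.common_utils, which defines it with the same platform check.

    import platform
    import unittest

    # Stand-in for torch.testing._internal.common_utils.IS_ARM64.
    IS_ARM64 = platform.machine() in ("arm64", "aarch64")

    class ExampleTest(unittest.TestCase):
        # The decorator skips the whole test on ARM64 hosts before the
        # body ever runs -- the same mechanism as the skips above.
        @unittest.skipIf(IS_ARM64, "Not working on arm right now")
        def test_mobilenet_like(self):
            self.assertTrue(True)

    if __name__ == "__main__":
        unittest.main()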

test/test_linalg.py

@@ -1401,6 +1401,8 @@ class TestLinalg(TestCase):
     @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
     def test_vector_norm(self, device, dtype):
+        if IS_ARM64 and device == 'cpu' and dtype in [torch.float16, torch.bfloat16, torch.float32]:
+            raise unittest.SkipTest("Fails on ARM, see https://github.com/pytorch/pytorch/issues/125438")
         # have to use torch.randn(...).to(bfloat16) instead of
         # This test compares torch.linalg.vector_norm's output with
         # torch.linalg.norm given a flattened tensor
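
The test_linalg hunk uses the runtime variant instead: because test_vector_norm is parametrized over devices and dtypes, raising unittest.SkipTest inside the body skips only the failing (device, dtype) combinations rather than the whole test. Below is a minimal sketch of that pattern; PyTorch's @dtypes/device instantiation machinery is replaced here by a plain subTest loop, and the test name is hypothetical.

    import platform
    import unittest

    import torch

    # Stand-in for torch.testing._internal.common_utils.IS_ARM64.
    IS_ARM64 = platform.machine() in ("arm64", "aarch64")

    class VectorNormLikeTest(unittest.TestCase):
        def test_vector_norm_like(self):
            device = "cpu"
            for dtype in (torch.float16, torch.float64):
                with self.subTest(dtype=dtype):
                    # Skip only the combinations tracked in issue 125438,
                    # instead of skipping the entire test; subTest records
                    # the raised SkipTest and continues with the next dtype.
                    if IS_ARM64 and device == "cpu" and dtype in (
                        torch.float16, torch.bfloat16, torch.float32
                    ):
                        raise unittest.SkipTest(
                            "Fails on ARM, see https://github.com/pytorch/pytorch/issues/125438"
                        )
                    x = torch.randn(10).to(dtype)
                    # vector_norm of a 1-D tensor is a 0-dim tensor.
                    self.assertEqual(torch.linalg.vector_norm(x).shape, torch.Size([]))

    if __name__ == "__main__":
        unittest.main()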