mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Revert "unskipped mobilenet_v3 quantization and mobilenet_v2 quantization plus tests from https://github.com/pytorch/pytorch/issues/125438 (#157786)"
This reverts commit 3a2c3c8ed365eb4e4cf4620c25d70b2f70483762. Reverted https://github.com/pytorch/pytorch/pull/157786 on behalf of https://github.com/albanD due to Breaks lint ([comment](https://github.com/pytorch/pytorch/pull/157786#issuecomment-3164126250))
@@ -1,6 +1,7 @@
 # Owner(s): ["oncall: quantization"]
 # ruff: noqa: F841
 
+import unittest
 
 import torch
 import torch.ao.nn.quantized as nnq
@@ -37,7 +38,7 @@ from torch.testing._internal.common_quantization import (
     test_only_eval_fn,
 )
 from torch.testing._internal.common_quantized import override_qengines
-from torch.testing._internal.common_utils import raise_on_run_directly
+from torch.testing._internal.common_utils import IS_ARM64, raise_on_run_directly
 
 
 class SubModule(torch.nn.Module):
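The rewritten import pulls IS_ARM64 from torch.testing._internal.common_utils. For context, a roughly equivalent flag can be derived from the standard library; this is a sketch under the assumption that the flag keys off the reported machine architecture, not torch's exact definition:

import platform

# Approximation of torch.testing._internal.common_utils.IS_ARM64;
# the real helper may consult additional platform signals.
IS_ARM64 = platform.machine() in ("arm64", "aarch64")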
@@ -599,12 +600,14 @@ class TestNumericSuiteEager(QuantizationTestCase):
         act_compare_dict = get_matching_activations(float_model, qmodel)
 
     @skip_if_no_torchvision
+    @unittest.skipIf(IS_ARM64, "Not working on arm right now")
     def test_mobilenet_v2(self):
         from torchvision.models.quantization import mobilenet_v2
 
         self._test_vision_model(mobilenet_v2(pretrained=True, quantize=False))
 
     @skip_if_no_torchvision
+    @unittest.skipIf(IS_ARM64, "Not working on arm right now")
     def test_mobilenet_v3(self):
         from torchvision.models.quantization import mobilenet_v3_large
 
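For readers unfamiliar with the pattern being restored: unittest.skipIf evaluates its condition once, when the decorator is applied at import time, and a true condition causes the test to be reported as skipped rather than run. A minimal self-contained sketch, with a hypothetical class name and a placeholder body standing in for the real torchvision checks:

import platform
import unittest

IS_ARM64 = platform.machine() in ("arm64", "aarch64")


class TestVisionModels(unittest.TestCase):
    # Decorator-style skip: the condition is fixed at collection time,
    # so the body never executes on ARM machines.
    @unittest.skipIf(IS_ARM64, "Not working on arm right now")
    def test_mobilenet_v2(self):
        # Placeholder body; the real suite builds and checks a
        # quantized torchvision model here.
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()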
@@ -1401,6 +1401,8 @@ class TestLinalg(TestCase):
 
     @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
     def test_vector_norm(self, device, dtype):
+        if IS_ARM64 and device == 'cpu' and dtype in [torch.float16, torch.bfloat16, torch.float32]:
+            raise unittest.SkipTest("Fails on ARM, see https://github.com/pytorch/pytorch/issues/125438")
         # have to use torch.randn(...).to(bfloat16) instead of
         # This test compares torch.linalg.vector_norm's output with
         # torch.linalg.norm given a flattened tensor
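The test_vector_norm change raises unittest.SkipTest inside the test body instead of using a decorator, because the condition depends on the device and dtype arguments that the @dtypes harness injects per test instance. A rough standalone sketch of the same pattern; TestNorms, the hard-coded device/dtype stand-ins, and the placeholder assertion are illustrative only:

import platform
import unittest

IS_ARM64 = platform.machine() in ("arm64", "aarch64")


class TestNorms(unittest.TestCase):
    def test_vector_norm_cpu_float32(self):
        device, dtype = "cpu", "float32"  # stand-ins for harness-injected parameters
        # Runtime skip: the condition depends on per-instance arguments,
        # so it cannot be expressed as a method-level decorator.
        if IS_ARM64 and device == "cpu" and dtype in ("float16", "bfloat16", "float32"):
            raise unittest.SkipTest("Fails on ARM, see https://github.com/pytorch/pytorch/issues/125438")
        self.assertEqual(abs(-3.0), 3.0)  # placeholder assertion


if __name__ == "__main__":
    unittest.main()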