[BE][Easy][19/19] enforce style for empty lines in import segments in torch/[o-z]*/ (#129771)

See https://github.com/pytorch/pytorch/pull/129751#issue-2380881501. Most changes are auto-generated by the linter.
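
As a rough illustration of the style being enforced, here is a minimal sketch of a module layout; the module and names below are made up for this description and are not code from the PR. Imports form a contiguous segment, and an empty line separates that segment from the first non-import statement, which matches the hunks further down that add a blank line after each import block (and, in the first file, also reorder the imports).

```python
# Hypothetical module layout illustrating the enforced style; the names below
# are made up for this example and do not come from the PR.
import os
import sys

from collections import OrderedDict

# An empty line closes the import segment before module-level code begins.
SUPPORTED_BACKENDS = OrderedDict(cpu=True, cuda=False)


def describe_environment() -> str:
    # Two blank lines precede a top-level definition, per the usual PEP 8 layout.
    return f"{sys.platform}: cwd={os.getcwd()}, backends={dict(SUPPORTED_BACKENDS)}"
```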

Because most of the changes are whitespace-only, you can review the substantive changes in these PRs via:

```bash
git diff --ignore-all-space --ignore-blank-lines HEAD~1
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129771
Approved by: https://github.com/justinchuby, https://github.com/janeyx99

Author: Xuehai Pan
Date: 2024-07-31 19:56:45 +08:00
Committed by: PyTorch MergeBot
Parent: c59f3fff52
Commit: 30293319a8
120 changed files with 163 additions and 101 deletions


```diff
@@ -1,16 +1,14 @@
 # mypy: allow-untyped-defs
-from .quantize import *  # noqa: F403
-from .observer import *  # noqa: F403
-from .qconfig import *  # noqa: F403
 from .fake_quantize import *  # noqa: F403
 from .fuse_modules import fuse_modules
-from .stubs import *  # noqa: F403
-from .quant_type import *  # noqa: F403
-from .quantize_jit import *  # noqa: F403
 # from .quantize_fx import *
-from .quantization_mappings import *  # noqa: F403
 from .fuser_method_mappings import *  # noqa: F403
+from .observer import *  # noqa: F403
+from .qconfig import *  # noqa: F403
+from .quant_type import *  # noqa: F403
+from .quantization_mappings import *  # noqa: F403
+from .quantize import *  # noqa: F403
+from .quantize_jit import *  # noqa: F403
+from .stubs import *  # noqa: F403
 def default_eval_fn(model, calib_data):
```


```diff
@@ -15,6 +15,7 @@ from torch.ao.quantization.fx.pattern_utils import (
     QuantizeHandler,
 )
+
 # QuantizeHandler.__module__ = _NAMESPACE
 _register_fusion_pattern.__module__ = "torch.ao.quantization.fx.pattern_utils"
 get_default_fusion_patterns.__module__ = "torch.ao.quantization.fx.pattern_utils"
```


```diff
@@ -23,6 +23,7 @@ from torch.ao.quantization.fx.quantize_handler import (
     StandaloneModuleQuantizeHandler,
 )
+
 QuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
 BinaryOpQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
 CatQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
```