fix: flake8-bugbear code B024 (#107265)
See #106571, item B024. This fix concerns the addition of `abstractmethod` to methods declared inside abstract classes. Should I also include PEP8-compliant reformatting of the files I had to modify?
Pull Request resolved: https://github.com/pytorch/pytorch/pull/107265
Approved by: https://github.com/kit1980
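For context, flake8-bugbear's B024 flags a class that inherits from `ABC` but declares no abstract methods, since nothing then prevents the "abstract" class from being instantiated. Below is a minimal sketch of the rule and of the two fixes this commit applies; the class names are illustrative, not taken from the PR:

from abc import ABC, abstractmethod

# B024: inherits from ABC but has no abstract methods, so
# Base() still instantiates without error.
class Base(ABC):
    def run(self) -> None:
        raise NotImplementedError

# Fix 1: mark overridable methods with @abstractmethod (the approach
# used for OverlappedOptimizer and CType in this diff); instantiating
# a subclass that does not implement them now raises TypeError.
class AbstractBase(ABC):
    @abstractmethod
    def run(self) -> None:
        raise NotImplementedError

# Fix 2: where ABC is used only as a marker base class, keep it and
# silence the check (the approach used for QuantizeHandler,
# QuantizationSpecBase, PlacementSpec, and AnalysisResult).
class MarkerBase(ABC):  # noqa: B024
    pass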
committed by: PyTorch MergeBot
parent: 449271f3f1
commit: 053367b1ed
.flake8
@@ -14,7 +14,7 @@ ignore =
     # to line this up with executable bit
     EXE001,
     # these ignores are from flake8-bugbear; please fix!
-    B007,B008,B017,B019,B020,B023,B024,B026,B028,B903,B904,B905,B906,B907
+    B007,B008,B017,B019,B020,B023,B026,B028,B903,B904,B905,B906,B907
     # these ignores are from flake8-comprehensions; please fix!
     C407,
     # these ignores are from flake8-logging-format; please fix!
pyproject.toml
@@ -29,7 +29,7 @@ ignore = [
     "B007", "B008", "B017",
     "B018", # Useless expression
     "B019", "B020",
-    "B023", "B024", "B026",
+    "B023", "B026",
     "B028", # No explicit `stacklevel` keyword argument found
     "B904",
     "E402",
torch/ao/quantization/fx/quantize_handler.py
@@ -1,24 +1,18 @@
-import torch
-from torch.fx.graph import (
-    Node,
-)
-
-from .utils import (
-    all_node_args_have_no_tensors,
-)
-
+from abc import ABC
+from typing import Callable, Dict, List, Optional, Type
+
+import torch
+
 from torch.ao.quantization.backend_config import (
     BackendConfig,
     DTypeConfig,
     ObservationType,
 )
-from torch.ao.quantization.utils import (
-    NodePattern,
-    Pattern,
-    QuantizerCls,
-)
-
-from abc import ABC
-from typing import Callable, Dict, List, Type, Optional
+from torch.ao.quantization.utils import NodePattern, Pattern, QuantizerCls
+from torch.fx.graph import Node
+
+from .utils import all_node_args_have_no_tensors
 
 __all__ = [
     "QuantizeHandler",
@@ -45,7 +39,7 @@ def _default_root_node_getter(node_pattern):
     return node_pattern
 
 # Base Pattern Handler
-class QuantizeHandler(ABC):
+class QuantizeHandler(ABC):  # noqa: B024
     """ Base handler class for the quantizer patterns
     """
     def __init__(
@@ -37,7 +37,7 @@ SUPPORTED_QSCHEMES = [
 ]
 
 
-class QuantizationSpecBase(ABC):
+class QuantizationSpecBase(ABC):  # noqa: B024
     """Base class for different types of quantization specs that allows users to
     specify how to quantize a Tensor (input/output of a Node) in the model
     """
@@ -21,7 +21,7 @@ if TYPE_CHECKING:
     # from run-time to resolve circular dependency.
     from torch.distributed._shard.sharded_tensor import ShardedTensor
 
-class PlacementSpec(ABC):
+class PlacementSpec(ABC):  # noqa: B024
     """
     Base class representing the placement of an entity. Subclasses of this
     class can be used to specify customized placements which might not be
@@ -1,4 +1,4 @@
-from abc import ABC
+from abc import ABC, abstractmethod
 import inspect
 from typing import Dict, Type
 
@@ -39,12 +39,14 @@ class OverlappedOptimizer(ABC):
         """
         self.optim_cls = optim_cls
 
+    @abstractmethod
     def register_ddp(self, ddp: DistributedDataParallel) -> None:
         """Registers the overlapped optimizer with DDP."""
         raise NotImplementedError(
             f"{self.__class__.__name__} does not support overlapped DDP."
         )
 
+    @abstractmethod
     def register_fsdp(self, fsdp: FullyShardedDataParallel) -> None:
         """Registers the overlapped optimizer with FSDP."""
         raise NotImplementedError(
@@ -70,7 +72,11 @@ class _OverlappedStandardOptimizer(OverlappedOptimizer):
         )
 
     # TODO: register_fsdp once FSDP supports communication hook.
-
+    def register_fsdp(self, fsdp: FullyShardedDataParallel) -> None:
+        """Registers the overlapped optimizer with FSDP."""
+        raise NotImplementedError(
+            f"{self.__class__.__name__} does not support overlapped FSDP."
+        )
 
 def _as_overlapped_optim(optim_cls: Type, params, *args, **kwargs):
     """
@@ -296,7 +296,7 @@ class Transform(abc.ABC):
         return module
 
 
-class AnalysisResult(abc.ABC):
+class AnalysisResult(abc.ABC):  # noqa: B024
     ...
 
 
torchgen/api/types/types_base.py
@@ -12,7 +12,7 @@ if we want to generate code for another C++ library.
 Add new types to `types.py` if these types are ATen/c10 related.
 Add new types to `types_base.py` if they are basic and not attached to ATen/c10.
 """
-from abc import ABC
+from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from enum import auto, Enum
 from typing import List, Optional, Union
@@ -61,12 +61,15 @@ voidT = BaseCppType("", "void")
 
 
 class CType(ABC):
+    @abstractmethod
     def cpp_type(self, *, strip_ref: bool = False) -> str:
         raise NotImplementedError
 
+    @abstractmethod
     def cpp_type_registration_declarations(self) -> str:
         raise NotImplementedError
 
+    @abstractmethod
     def remove_const_ref(self) -> "CType":
         return self
 