mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Remove unnecessary noqa suppressions (#164106)
This PR removes unused `noqa` suppressions in Python code. Pull Request resolved: https://github.com/pytorch/pytorch/pull/164106 Approved by: https://github.com/albanD
This commit is contained in:
committed by
PyTorch MergeBot
parent
f02e3947f6
commit
b8194268a6
@@ -310,7 +310,7 @@ class SamplingMethod(Enum):
             )
             try:
                 new_default = new_type()
-            except Exception:  # noqa: E722
+            except Exception:
                 # if default constructor doesn't work, try None
                 new_default = None
||||
@@ -779,7 +779,7 @@ class ConfigFuzzer:
         test_model_fn = self.test_model_fn_factory()
         try:
             test_model_fn()
-        except Exception as exc:  # noqa: E722
+        except Exception as exc:
             return handle_return(
                 "Eager exception", Status.FAILED_RUN_EAGER_EXCEPTION, True, exc
             )
@@ -788,7 +788,7 @@ class ConfigFuzzer:
         try:
             test_model_fn2 = self.test_model_fn_factory()
             comp = torch.compile(test_model_fn2, backend="inductor")
-        except Exception as exc:  # noqa: E722
+        except Exception as exc:
             return handle_return(
                 "Exception compiling", Status.FAILED_COMPILE, True, exc
             )
@@ -796,7 +796,7 @@ class ConfigFuzzer:
         # try running compiled
         try:
             compile_result = comp()
-        except Exception as exc:  # noqa: E722
+        except Exception as exc:
             return handle_return(
                 "Exception running compiled",
                 Status.FAILED_RUN_COMPILE_EXCEPTION,
@@ -699,7 +699,7 @@ Examples:

 TORCH_LOGS_OUT=/tmp/output.txt will output the logs to /tmp/output.txt as
 well. This is useful when the output is long.
-"""  # flake8: noqa: B950
+"""
 msg = f"""
 TORCH_LOGS Info
 {examples}
@@ -1,4 +1,3 @@
-# flake8: noqa: F401
 r"""Intrinsic QAT Modules.

 This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
@@ -1,4 +1,3 @@
-# flake8: noqa: F401
 r"""Intrinsic QAT Modules.

 This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
@@ -1,4 +1,3 @@
-# flake8: noqa: F401
 r"""Intrinsic QAT Modules.

 This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
@@ -298,7 +298,7 @@ class MaxPool3d(_MaxPoolNd):

     .. _link:
         https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
-    """  # noqa: E501
+    """

     kernel_size: _size_3_t
     stride: _size_3_t
@@ -775,7 +775,7 @@ class DistributedDataParallel(Module, Joinable):
                 "DistributedDataParallel device_ids and output_device arguments "
                 "only work with single-device/multiple-device GPU modules or CPU modules, "
                 f"but got device_ids {device_ids}, output_device {output_device}, "
-                f"and module parameters { ({p.device for p in self._module_parameters}) }.",  # noqa: E201,E202
+                f"and module parameters { ({p.device for p in self._module_parameters}) }.",
             )

         self.device_ids = None
@@ -146,7 +146,7 @@ class NamedMemberAccessor:
                     f"{module._get_name()} has no attribute `{attr}`"
                 ) from ex
             if not isinstance(submodule, torch.nn.Module):
-                raise TypeError(  # noqa: B904
+                raise TypeError(
                     f"submodule `{name}`: {submodule} is not an instance of torch.nn.Module"
                 )
             self.memo[name] = submodule
@@ -16,7 +16,7 @@ def has_triton_package() -> bool:
 @functools.cache
 def get_triton_version(fallback: tuple[int, int] = (0, 0)) -> tuple[int, int]:
     try:
-        import triton  # noqa: F401
+        import triton

         major, minor = tuple(int(v) for v in triton.__version__.split(".")[:2])
         return (major, minor)
Reference in New Issue
Block a user