Remove unnecessary noqa suppressions (#164106)

This PR removes unused `noqa` suppressions in Python code.
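
A suppression is "unused" when it names a rule that can never fire on that line. A minimal sketch of the pattern being cleaned up, using a hypothetical snippet rather than code from this PR:

def compute() -> int:
    raise RuntimeError("example failure")

try:
    value = compute()
except Exception:  # noqa: E722  <- inert: E722 only matches a bare `except:`
    value = None

Linters that detect this include Ruff's RUF100 ("unused noqa directive"), e.g. `ruff check --select RUF100`; the PR does not say which tool surfaced these findings, so that choice is an assumption here.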

Pull Request resolved: https://github.com/pytorch/pytorch/pull/164106
Approved by: https://github.com/albanD
Author: Yuanyuan Chen
Date: 2025-10-18 04:52:41 +00:00
Committed by: PyTorch MergeBot
Parent: f02e3947f6
Commit: b8194268a6

9 changed files with 9 additions and 12 deletions

@@ -310,7 +310,7 @@ class SamplingMethod(Enum):
         )
         try:
             new_default = new_type()
-        except Exception:  # noqa: E722
+        except Exception:
             # if default constructor doesn't work, try None
             new_default = None
@@ -779,7 +779,7 @@ class ConfigFuzzer:
         test_model_fn = self.test_model_fn_factory()
         try:
             test_model_fn()
-        except Exception as exc:  # noqa: E722
+        except Exception as exc:
             return handle_return(
                 "Eager exception", Status.FAILED_RUN_EAGER_EXCEPTION, True, exc
             )
@@ -788,7 +788,7 @@ class ConfigFuzzer:
         try:
             test_model_fn2 = self.test_model_fn_factory()
             comp = torch.compile(test_model_fn2, backend="inductor")
-        except Exception as exc:  # noqa: E722
+        except Exception as exc:
             return handle_return(
                 "Exception compiling", Status.FAILED_COMPILE, True, exc
             )
@@ -796,7 +796,7 @@ class ConfigFuzzer:
         # try running compiled
         try:
             compile_result = comp()
-        except Exception as exc:  # noqa: E722
+        except Exception as exc:
             return handle_return(
                 "Exception running compiled",
                 Status.FAILED_RUN_COMPILE_EXCEPTION,
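
All four hunks above share the same pattern: E722 (pycodestyle's "do not use bare except") only matches a literal `except:` with no exception listed, so a `# noqa: E722` on an `except Exception ...` handler can never suppress anything. A small, self-contained illustration (my example, not code from the PR):

def risky() -> None:
    raise ValueError("boom")

try:
    risky()
except:  # a bare handler like this is what E722 actually flags
    pass

try:
    risky()
except Exception:  # not bare, so a trailing "# noqa: E722" here would be reported as unused
    pass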

@@ -699,7 +699,7 @@ Examples:
 TORCH_LOGS_OUT=/tmp/output.txt will output the logs to /tmp/output.txt as
 well. This is useful when the output is long.
-"""  # flake8: noqa: B950
+"""
     msg = f"""
 TORCH_LOGS Info
 {examples}

@@ -1,4 +1,3 @@
-# flake8: noqa: F401
 r"""Intrinsic QAT Modules.
 This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and

@@ -1,4 +1,3 @@
-# flake8: noqa: F401
 r"""Intrinsic QAT Modules.
 This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and

@@ -1,4 +1,3 @@
-# flake8: noqa: F401
 r"""Intrinsic QAT Modules.
 This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
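
The three hunks above drop a file-level `# flake8: noqa: F401` from migration shim modules. Worth noting: flake8's file-wide form is the bare `# flake8: noqa`; the variant with codes appended is documented as invalid and is ignored, so these comments were most likely no-ops (my reading of why they count as unnecessary; the PR itself does not elaborate). A per-line suppression is the usual way to keep re-exported names without tripping the unused-import check, e.g. (hypothetical illustration, not part of this change):

from torch.ao.nn.intrinsic.qat import ConvBn2d  # noqa: F401  -- re-exported for backward compatibility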

@@ -298,7 +298,7 @@ class MaxPool3d(_MaxPoolNd):
     .. _link:
         https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
-    """  # noqa: E501
+    """
     kernel_size: _size_3_t
     stride: _size_3_t
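
E501 is pycodestyle's line-length check and applies to individual physical lines, so a `# noqa: E501` accomplishes something only on a line that actually exceeds the configured limit; the closing """ above is far under it. A quick hypothetical illustration (assuming the default 79-character limit):

short = "fits easily"  # a trailing "# noqa: E501" here would be reported as unused
long_url = (
    "https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md"
)  # wrapping keeps each line short, so no suppression is needed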

@@ -775,7 +775,7 @@ class DistributedDataParallel(Module, Joinable):
                 "DistributedDataParallel device_ids and output_device arguments "
                 "only work with single-device/multiple-device GPU modules or CPU modules, "
                 f"but got device_ids {device_ids}, output_device {output_device}, "
-                f"and module parameters { ({p.device for p in self._module_parameters}) }.",  # noqa: E201,E202
+                f"and module parameters { ({p.device for p in self._module_parameters}) }.",
             )
             self.device_ids = None

@@ -146,7 +146,7 @@ class NamedMemberAccessor:
                     f"{module._get_name()} has no attribute `{attr}`"
                 ) from ex
             if not isinstance(submodule, torch.nn.Module):
-                raise TypeError(  # noqa: B904
+                raise TypeError(
                     f"submodule `{name}`: {submodule} is not an instance of torch.nn.Module"
                 )
             self.memo[name] = submodule
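
B904 comes from flake8-bugbear and asks that an exception raised inside an `except` clause use `raise ... from err` (or `from None`) so the original cause stays chained; since this PR removes the suppression as unnecessary, the rule evidently does not fire on that `raise TypeError(...)` in context. A minimal sketch of what B904 itself wants (my example, assuming flake8-bugbear is enabled):

def parse_count(text: str) -> int:
    try:
        return int(text)
    except ValueError as err:
        # chaining with "from err" preserves the original traceback and satisfies B904
        raise TypeError(f"expected a numeric string, got {text!r}") from err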

@@ -16,7 +16,7 @@ def has_triton_package() -> bool:
 @functools.cache
 def get_triton_version(fallback: tuple[int, int] = (0, 0)) -> tuple[int, int]:
     try:
-        import triton  # noqa: F401
+        import triton
         major, minor = tuple(int(v) for v in triton.__version__.split(".")[:2])
         return (major, minor)
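
F401 (pyflakes' "imported but unused", also enforced via flake8 and Ruff) only fires on imports that are never referenced. In the hunk above, `triton` is used on the very next line through `triton.__version__`, so the suppression could never trigger. The same distinction in a self-contained snippet, using the standard library purely for illustration:

import json  # referenced below, so F401 does not fire and no suppression is needed

print(json.dumps({"major": 2, "minor": 1}))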