Add pyrefly suppressions (3/n) (#164588)

Adds suppressions so that pyrefly will typecheck clean: https://github.com/pytorch/pytorch/issues/163283

Test plan:
dmypy restart && python3 scripts/lintrunner.py -a
pyrefly check

step 1: uncomment lines in the pyrefly.toml file (see the sketch below)
step 2: run pyrefly check
step 3: add suppressions, clean up unused suppressions
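
To illustrate step 1, a minimal hypothetical sketch of the kind of entry being uncommented; the key names and paths here are assumptions, and the repository's actual pyrefly.toml may be laid out differently:

    # pyrefly.toml (hypothetical sketch; key names and paths are assumptions)
    project-includes = [
        "torch",
        # "torch/cuda",  # uncommenting an entry like this adds those files to the checked set
    ]
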
before: https://gist.github.com/maggiemoss/bb31574ac8a59893c9cf52189e67bb2d

after:

 0 errors (1,970 ignored)
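
The suppressions themselves follow a fixed format: a comment on the line directly above the flagged statement, naming the pyrefly diagnostic being silenced. For example, from the diff below:

    # pyrefly: ignore # missing-attribute
    msg = _cudart.cudaGetErrorString(_cudart.cudaError(code))
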
Pull Request resolved: https://github.com/pytorch/pytorch/pull/164588
Approved by: https://github.com/oulgen
Author: Maggie Moss
Date: 2025-10-03 22:02:59 +00:00
Committed by: PyTorch MergeBot
Commit: f414aa8e0d (parent e438db2546)
49 changed files with 244 additions and 29 deletions


@ -496,12 +496,14 @@ class cudaStatus:
class CudaError(RuntimeError):
    def __init__(self, code: int) -> None:
        # pyrefly: ignore # missing-attribute
        msg = _cudart.cudaGetErrorString(_cudart.cudaError(code))
        super().__init__(f"{msg} ({code})")


def check_error(res: int) -> None:
    r"""Raise an error if the result of a CUDA runtime API call is not success."""
    # pyrefly: ignore # missing-attribute
    if res != _cudart.cudaError.success:
        raise CudaError(res)
@ -601,6 +603,7 @@ def get_device_capability(device: "Device" = None) -> tuple[int, int]:
    return prop.major, prop.minor


# pyrefly: ignore # not-a-type
def get_device_properties(device: "Device" = None) -> _CudaDeviceProperties:
    r"""Get the properties of a device.
@ -651,6 +654,7 @@ class StreamContext:
        self.idx = _get_device_index(None, True)
        if not torch.jit.is_scripting():
            if self.idx is None:
                # pyrefly: ignore # bad-assignment
                self.idx = -1

        self.src_prev_stream = (
@ -953,7 +957,9 @@ def _device_count_amdsmi() -> int:
        if raw_cnt <= 0:
            return raw_cnt
        # Trim the list up to a maximum available device
        # pyrefly: ignore # bad-argument-type
        for idx, val in enumerate(visible_devices):
            # pyrefly: ignore # redundant-cast
            if cast(int, val) >= raw_cnt:
                return idx
    except OSError:
@ -987,7 +993,9 @@ def _device_count_nvml() -> int:
        if raw_cnt <= 0:
            return raw_cnt
        # Trim the list up to a maximum available device
        # pyrefly: ignore # bad-argument-type
        for idx, val in enumerate(visible_devices):
            # pyrefly: ignore # redundant-cast
            if cast(int, val) >= raw_cnt:
                return idx
    except OSError:
@ -1203,7 +1211,9 @@ def _get_pynvml_handler(device: "Device" = None):
    if not _HAS_PYNVML:
        raise ModuleNotFoundError(
            "pynvml does not seem to be installed or it can't be imported."
            # pyrefly: ignore # invalid-inheritance
        ) from _PYNVML_ERR
    # pyrefly: ignore # import-error
    from pynvml import NVMLError_DriverNotLoaded

    try:
@ -1220,6 +1230,7 @@ def _get_amdsmi_handler(device: "Device" = None):
    if not _HAS_PYNVML:
        raise ModuleNotFoundError(
            "amdsmi does not seem to be installed or it can't be imported."
            # pyrefly: ignore # invalid-inheritance
        ) from _PYNVML_ERR
    try:
        amdsmi.amdsmi_init()
@ -1483,6 +1494,7 @@ def _get_rng_state_offset(device: Union[int, str, torch.device] = "cuda") -> int
    return default_generator.get_offset()


# pyrefly: ignore # deprecated
from .memory import *  # noqa: F403
from .random import *  # noqa: F403
@ -1699,6 +1711,7 @@ def _register_triton_kernels():
    def kernel_impl(*args, **kwargs):
        from torch.sparse._triton_ops import bsr_dense_mm

        # pyrefly: ignore # not-callable
        return bsr_dense_mm(*args, skip_checks=True, **kwargs)

    @_WrappedTritonKernel


@ -279,6 +279,7 @@ class _CudaModule:
            return self._kernels[name]

        # Import the CUDA library inside the method
        # pyrefly: ignore # missing-module-attribute
        from torch.cuda._utils import _get_gpu_runtime_library

        libcuda = _get_gpu_runtime_library()


@ -1,3 +1,4 @@
# pyrefly: ignore # deprecated
from .autocast_mode import autocast, custom_bwd, custom_fwd
from .common import amp_definitely_not_available
from .grad_scaler import GradScaler


@ -259,6 +259,7 @@ class graph:
        self.cuda_graph.capture_begin(
            # type: ignore[misc]
            *self.pool,
            # pyrefly: ignore # bad-keyword-argument
            capture_error_mode=self.capture_error_mode,
        )
@ -524,6 +525,7 @@ def make_graphed_callables(
    ) -> Callable[..., object]:
        class Graphed(torch.autograd.Function):
            @staticmethod
            # pyrefly: ignore # bad-override
            def forward(ctx: object, *inputs: Tensor) -> tuple[Tensor, ...]:
                # At this stage, only the user args may (potentially) be new tensors.
                for i in range(len_user_args):
@ -535,6 +537,7 @@ def make_graphed_callables(
            @staticmethod
            @torch.autograd.function.once_differentiable
            # pyrefly: ignore # bad-override
            def backward(ctx: object, *grads: Tensor) -> tuple[Tensor, ...]:
                assert len(grads) == len(static_grad_outputs)
                for g, grad in zip(static_grad_outputs, grads):
@ -548,7 +551,9 @@ def make_graphed_callables(
                # Input args that didn't require grad expect a None gradient.
                assert isinstance(static_grad_inputs, tuple)
                return tuple(
                    b.detach() if b is not None else b for b in static_grad_inputs
                    # pyrefly: ignore # bad-argument-type
                    b.detach() if b is not None else b
                    for b in static_grad_inputs
                )

        def functionalized(*user_args: object) -> object:


@ -770,6 +770,7 @@ def list_gpu_processes(device: "Device" = None) -> str:
        import pynvml  # type: ignore[import]
    except ModuleNotFoundError:
        return "pynvml module not found, please install pynvml"
    # pyrefly: ignore # import-error
    from pynvml import NVMLError_DriverNotLoaded

    try:
@ -852,6 +853,7 @@ def _record_memory_history_legacy(
    _C._cuda_record_memory_history_legacy(  # type: ignore[call-arg]
        enabled,
        record_context,
        # pyrefly: ignore # bad-argument-type
        trace_alloc_max_entries,
        trace_alloc_record_context,
        record_context_cpp,


@ -53,6 +53,7 @@ def range_start(msg) -> int:
    Args:
        msg (str): ASCII message to associate with the range.
    """
    # pyrefly: ignore # missing-attribute
    return _nvtx.rangeStartA(msg)
@ -63,6 +64,7 @@ def range_end(range_id) -> None:
    Args:
        range_id (int): an unique handle for the start range.
    """
    # pyrefly: ignore # missing-attribute
    _nvtx.rangeEnd(range_id)
@ -83,6 +85,7 @@ def _device_range_start(msg: str, stream: int = 0) -> object:
        msg (str): ASCII message to associate with the range.
        stream (int): CUDA stream id.
    """
    # pyrefly: ignore # missing-attribute
    return _nvtx.deviceRangeStart(msg, stream)
@ -95,6 +98,7 @@ def _device_range_end(range_handle: object, stream: int = 0) -> None:
        range_handle: an unique handle for the start range.
        stream (int): CUDA stream id.
    """
    # pyrefly: ignore # missing-attribute
    _nvtx.deviceRangeEnd(range_handle, stream)