[BE]: Update TypeGuard to TypeIs for better type inference (#133814)

Uses TypeIs instead of TypeGuard for better type inference. See PEP 742: https://peps.python.org/pep-0742/
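In short, PEP 742's TypeIs narrows in both branches of a check, whereas TypeGuard only narrows the positive branch. A minimal sketch of the difference (is_str and handle are illustrative names, not from this PR):

from typing import Union

from typing_extensions import TypeIs, reveal_type


def is_str(x: Union[str, int]) -> TypeIs[str]:
    # Runtime behavior is an ordinary isinstance check; the annotation
    # only changes what the type checker infers at call sites.
    return isinstance(x, str)


def handle(x: Union[str, int]) -> None:
    if is_str(x):
        reveal_type(x)  # str
    else:
        # TypeIs narrows here too: int. With TypeGuard the checker
        # would still see Union[str, int] in this branch.
        reveal_type(x)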

Pull Request resolved: https://github.com/pytorch/pytorch/pull/133814
Approved by: https://github.com/ezyang
Author: Aaron Gokaslan
Date: 2024-10-21 17:20:05 +00:00
Committed by: PyTorch MergeBot
Parent: 9bb327bfc6
Commit: 16caa8c1b3
12 changed files with 26 additions and 26 deletions

@@ -4,7 +4,7 @@ import contextlib
 import warnings
 from dataclasses import dataclass
 from typing import Any, Dict, List, Optional, Set, Union, Protocol, Tuple, Sequence, overload, Deque, Type
-from typing_extensions import TypeGuard
+from typing_extensions import TypeIs
 from collections import deque
 import torch
@@ -365,7 +365,7 @@ class TensorWithFlatten(Protocol):
-def is_traceable_wrapper_subclass(t: object) -> TypeGuard[TensorWithFlatten]:
+def is_traceable_wrapper_subclass(t: object) -> TypeIs[TensorWithFlatten]:
     """
     Returns whether or not a tensor subclass that implements __torch_dispatch__
     is 'traceable' with torch.compile.
@@ -402,7 +402,7 @@ def is_traceable_wrapper_subclass(t: object) -> TypeGuard[TensorWithFlatten]:
         and hasattr(t, "__tensor_unflatten__")
     )
-def is_traceable_wrapper_subclass_type(t: Type) -> TypeGuard[Type[TensorWithFlatten]]:
+def is_traceable_wrapper_subclass_type(t: Type) -> TypeIs[Type[TensorWithFlatten]]:
     """Same as above, but takes a type argument instead of an instance."""
     return (issubclass(t, torch.Tensor) and t != torch.Tensor
             and hasattr(t, "__tensor_flatten__") and hasattr(t, "__tensor_unflatten__"))
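
The payoff on the PR's own helper: after this change a checker narrows both branches of an is_traceable_wrapper_subclass check. A hedged sketch of a call site (the flatten_if_traceable wrapper below is illustrative, not part of the PR):

import torch
from torch.utils._python_dispatch import is_traceable_wrapper_subclass


def flatten_if_traceable(t: torch.Tensor) -> None:
    if is_traceable_wrapper_subclass(t):
        # Narrowed to TensorWithFlatten, so the protocol methods are
        # visible without a cast.
        attrs, ctx = t.__tensor_flatten__()
        print(attrs, ctx)
    else:
        # With TypeIs (unlike TypeGuard), the checker also narrows this
        # branch: t is known not to satisfy TensorWithFlatten.
        print("plain tensor", t.shape)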