Upgrade mypy to version 0.812 (#55712)

Summary:
Fixes https://github.com/pytorch/pytorch/issues/54211

This was a little more annoying than expected, because the `exclude = ` key in `mypy.ini` behaves unlike `files`: it takes a single regex rather than a comma-separated list of paths. I'll file an upstream issue about that.

I ignored one file, `torch/distributed/elastic/agent/server/api.py`, which had ~8 errors that were hard to figure out. That can be done in a follow-up.
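For context: `exclude` is matched as one regular expression against file paths, so the excluded directories and the `api.py` file above are joined with `|` (see the `mypy.ini` hunk below). A rough Python sketch of the matching semantics, not mypy's actual internals:

    import re

    # The regex added to mypy.ini in this PR:
    exclude = r"torch/include/|torch/csrc/|torch/distributed/elastic/agent/server/api.py"

    for path in ["torch/csrc/jit/foo.py", "tools/test_history.py"]:
        skipped = re.search(exclude, path) is not None
        print(path, "-> skipped" if skipped else "-> checked")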

Pull Request resolved: https://github.com/pytorch/pytorch/pull/55712

Reviewed By: walterddr

Differential Revision: D27694976

Pulled By: malfet

fbshipit-source-id: 228d8be6af040343ce46595dabaca212e69ccc68
Author: Ralf Gommers
Date: 2021-04-12 18:06:44 -07:00
Committed by: Facebook GitHub Bot
Parent: 68e0796466
Commit: 48ddc9762b
25 changed files with 48 additions and 38 deletions


@@ -113,7 +113,7 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
     boto3==1.16.34 \
     coverage \
     hypothesis==4.53.2 \
-    mypy==0.770 \
+    mypy==0.812 \
     tb-nightly
   # Update scikit-learn to a python-3.8 compatible version


@@ -293,7 +293,7 @@ jobs:
       run: |
         set -eux
         pip install -r requirements.txt
-        pip install mypy==0.770
+        pip install mypy==0.812
     - name: Run autogen
       run: |
         set -eux


@@ -25,6 +25,6 @@ jobs:
       run: |
         set -eux
         pip install -r requirements.txt
-        pip install boto3==1.16.34 mypy==0.770
+        pip install boto3==1.16.34 mypy==0.812
     - name: Run tests
       run: python -m unittest discover -vs tools/test -p 'test_*.py'


@@ -29,7 +29,7 @@ class Fusions(serial.SerializedTestCase):
         tensor_min = min(0, np.min(tensor))
         scale = np.float32(np.float16((tensor_max - tensor_min) / 255.0))
         if scale < 1e-6:
-            scale = 1e-6
+            scale = np.float32(1e-6)
         zero_point = 0 - tensor_min / scale
         zero_point = int(round(np.clip(zero_point, 0, 255.0)))
         return (scale, zero_point)
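The `np.float32(1e-6)` change (here and in two more test files below) exists because mypy pins a variable to the type of its first assignment: once `scale` is an `np.float32`, reassigning a plain Python `float` literal is flagged. A minimal sketch of the pattern, not the checker's exact message:

    import numpy as np

    scale = np.float32(0.5)   # first assignment: mypy infers scale as np.float32
    # scale = 1e-6            # rejected: builtin float is not np.float32
    scale = np.float32(1e-6)  # accepted: the type stays np.float32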


@@ -186,8 +186,7 @@ class FCTest(serial.SerializedTestCase):
                         0.0680542, 0.4255371, -0.42895508, -0.4128418,
                         -0.47973633, 0.33251953,
                         0.27807617, 0.3701172]], dtype=np.float32)
-        b0 = [0.47851562]
-        b0 = np.array(b0, dtype=np.float32)
+        b0 = np.array([0.47851562], dtype=np.float32)
         workspace.FeedBlob("W0", W0)
         workspace.FeedBlob("b0", b0)
@@ -252,7 +251,6 @@ class FCTest(serial.SerializedTestCase):
                 "m": m,
                 "k": k,
                 "n": n,
-                "X": X0,
                 "W0": W0,
                 "b0": b0,
                 "Y_glow": Y_glow,


@@ -21,7 +21,7 @@ class Int8OpsTest(serial.SerializedTestCase):
         tensor_min = min(0, np.min(tensor))
         scale = np.float32(np.float16((tensor_max - tensor_min) / 255.0))
         if scale < 1e-6:
-            scale = 1e-6
+            scale = np.float32(1e-6)
         zero_point = 0 - tensor_min / scale
         zero_point = int(round(np.clip(zero_point, 0, 255.0)))
         return (scale, zero_point)


@@ -122,7 +122,7 @@ class LayerNorm(serial.SerializedTestCase):
         tensor_min = min(0, np.min(tensor))
         scale = np.float32(np.float16((tensor_max - tensor_min) / 255.0))
         if scale < 1e-6:
-            scale = 1e-6
+            scale = np.float32(1e-6)
         zero_point = 0 - tensor_min / scale
         zero_point = int(round(np.clip(zero_point, 0, 255.0)))
         return (scale, zero_point)


@@ -122,10 +122,10 @@ class SparseLengthsSum4BitFakeNNPIFp16Test(serial.SerializedTestCase):
         data = data * 1e-3
         lengths = np.random.choice(np.arange(1, num_rows), batch_size).astype(np.int32)
-        indices = []
+        _indices = []
         for length in lengths:
-            indices.extend(np.random.choice(np.arange(1, num_rows), length))
-        indices = np.asarray(indices).astype(np.int64)
+            _indices.extend(np.random.choice(np.arange(1, num_rows), length))
+        indices = np.asarray(_indices).astype(np.int64)
         weights = np.random.uniform(
             low=0,
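The `_indices` rename works around mypy's rule that a name keeps a single type within a scope: `indices` started as a `list` and was rebound to an `np.ndarray`. Accumulating into a separately named list keeps both names single-typed. A minimal sketch, with hypothetical names:

    import numpy as np
    from typing import List

    _values: List[int] = []        # the accumulator keeps its own name and type
    for n in (2, 3):
        _values.extend(range(n))
    values = np.asarray(_values)   # the final name is an ndarray from the start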


@@ -1,4 +1,5 @@
 import unittest
+from typing import Dict, Any
 
 # Must happen before importing caffe2.python.*
 import caffe2.python.fakelowp.init_shared_libs  # noqa
@@ -41,7 +42,7 @@ class SparseLengthsSum8BitFakeNNPIFp16Test(serial.SerializedTestCase):
         fp16_c2_net = core.Net("test_fp16_c2")
         fp16_c2_net.SparseLengthsSumFakeFP16AccFP16(["D", "I", "L"], "fp16_out")
-        input_dict = {}
+        input_dict : Dict[Any, Any] = {}
         pred_net = caffe2_pb2.NetDef()
         pred_net.name = "pred"
@@ -214,10 +215,10 @@ class SparseLengthsSum8BitFakeNNPIFp16Test(serial.SerializedTestCase):
         data = np.random.rand(num_rows, embedding_dim).astype(np.float32)
         lengths = np.random.choice(np.arange(1, num_rows), batch_size).astype(np.int32)
-        indices = []
+        _indices = []
         for length in lengths:
-            indices.extend(np.random.choice(np.arange(1, num_rows), length))
-        indices = np.asarray(indices).astype(np.int64)
+            _indices.extend(np.random.choice(np.arange(1, num_rows), length))
+        indices = np.asarray(_indices).astype(np.int64)
         weights = np.random.uniform(
             low=0,


@@ -49,10 +49,10 @@ class SparseLengthsSum8BitFakeNNPIFp32Test(serial.SerializedTestCase):
         data = np.random.rand(num_rows, embedding_dim).astype(np.float32)
         lengths = np.random.choice(np.arange(1, num_rows), batch_size).astype(np.int32)
-        indices = []
+        _indices = []
         for length in lengths:
-            indices.extend(np.random.choice(np.arange(1, num_rows), length))
-        indices = np.asarray(indices).astype(np.int64)
+            _indices.extend(np.random.choice(np.arange(1, num_rows), length))
+        indices = np.asarray(_indices).astype(np.int64)
         weights = np.random.uniform(
             low=0,
@@ -246,9 +246,6 @@ class SparseLengthsSum8BitFakeNNPIFp32Test(serial.SerializedTestCase):
             "test_small_sls_acc32",
             {
                 "seed": seed,
-                "num_rows": num_rows,
-                "embedding_dim": embedding_dim,
-                "batch_size": batch_size,
                 "indices": indices,
                 "data": data,
                 "quantized_data": quantized_data,


@@ -38,6 +38,10 @@ files =
     tools/generate_torch_version.py,
     tools/stats_utils/*.py
 
+#
+# `exclude` is a regex, not a list of paths like `files` (sigh)
+#
+exclude = torch/include/|torch/csrc/|torch/distributed/elastic/agent/server/api.py
 
 # Minimum version supported - variable annotations were introduced
 # in Python 3.6


@@ -15,7 +15,7 @@ from glob import glob
 from pathlib import Path
 from typing import (Any, DefaultDict, Dict, Iterable, Iterator, List, Optional,
                     Set, Tuple, cast)
-from xml.dom import minidom  # type: ignore[import]
+from xml.dom import minidom
 
 import requests
 from typing_extensions import TypedDict


@@ -101,7 +101,7 @@ def _print_cont(msg):
 
 def _run_printable(cmd):
-    proc = subprocess.run(shlex.split(cmd), capture_output=True)
+    proc = subprocess.run(shlex.split(cmd), capture_output=True)  # type: ignore
     assert proc.returncode == 0
 
     buffer = io.BytesIO()
@@ -212,9 +212,12 @@ def run_worker(rank, world_size):
     if rank == (NUM_TRAINERS + NUM_PS):
         rpc.init_rpc(
-            "master", rank=rank, backend=BackendType.TENSORPIPE, world_size=world_size
+            "master", rank=rank,
+            backend=BackendType.TENSORPIPE,  # type: ignore[attr-defined]
+            world_size=world_size
         )
 
         # Build the Embedding tables on the Parameter Servers.
         emb_rref_list = []
         index = 0
@@ -278,7 +281,7 @@ def run_worker(rank, world_size):
             ps_name,
             rank=rank,
             world_size=world_size,
-            backend=BackendType.TENSORPIPE,
+            backend=BackendType.TENSORPIPE,  # type: ignore[attr-defined]
             rpc_backend_options=rpc_backend_options,
         )
     # parameter server do nothing
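`BackendType` members are generated at runtime, so mypy cannot see `TENSORPIPE` and reports an `attr-defined` error; scoping the ignore to that error code keeps the rest of the line checked. A minimal sketch of the pattern, using a hypothetical class in place of `BackendType`:

    class Backends:  # stands in for a registry whose members appear at runtime
        pass

    setattr(Backends, "TENSORPIPE", "tensorpipe")

    # mypy cannot prove the attribute exists, so silence only that error code:
    backend = Backends.TENSORPIPE  # type: ignore[attr-defined]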


@@ -95,6 +95,7 @@ class WorkerSpec:
         if isinstance(self.entrypoint, str):
             return os.path.basename(self.entrypoint)
         else:
+            assert self.entrypoint is not None
             return self.entrypoint.__qualname__
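`entrypoint` is an optional union, and the `isinstance` check only rules out `str`; the added `assert ... is not None` narrows away `None` so that accessing `__qualname__` type-checks. A minimal sketch of the narrowing, assuming an `Optional[Union[str, Callable]]` value like the real field:

    from typing import Callable, Optional, Union

    def trainer() -> None: ...

    entrypoint: Optional[Union[str, Callable]] = trainer

    if isinstance(entrypoint, str):
        name = entrypoint
    else:
        assert entrypoint is not None  # narrows Optional[...] down to Callable
        name = entrypoint.__qualname__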


@@ -20,7 +20,7 @@ from torch.distributed.elastic.agent.server.api import (
     WorkerState,
 )
 from torch.distributed.elastic.metrics.api import prof
-from torch.distributed.elastic.multiprocessing import start_processes
+from torch.distributed.elastic.multiprocessing import start_processes, PContext
 
 log = logging.getLogger(__name__)
@@ -104,7 +104,7 @@ class LocalElasticAgent(SimpleElasticAgent):
     ):
         super().__init__(spec, exit_barrier_timeout)
         self._start_method = start_method
-        self._pcontext = None
+        self._pcontext : Optional[PContext] = None
         rdzv_run_id = spec.rdzv_handler.get_run_id()
         self._log_dir = self._make_log_dir(log_dir, rdzv_run_id)
@@ -161,6 +161,7 @@ class LocalElasticAgent(SimpleElasticAgent):
             shutil.rmtree(attempt_log_dir, ignore_errors=True)
         os.makedirs(attempt_log_dir)
+        assert spec.entrypoint is not None
         self._pcontext = start_processes(
             name=spec.role,
             entrypoint=spec.entrypoint,
@@ -184,6 +185,7 @@ class LocalElasticAgent(SimpleElasticAgent):
     def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult:
         role = worker_group.spec.role
         worker_pids = {w.id for w in worker_group.workers}
+        assert self._pcontext is not None
         pc_pids = set(self._pcontext.pids().values())
         if worker_pids != pc_pids:
             log.error(


@@ -15,7 +15,7 @@ from torch.nn import Module
 
 ScriptMethodStub = collections.namedtuple('ScriptMethodStub', ('resolution_callback', 'def_', 'original_method'))
-PropertyStub = collections.namedtuple('Property', ('resolution_callback', 'def_'))
+PropertyStub = collections.namedtuple('PropertyStub', ('resolution_callback', 'def_'))
 
 # TODO: there should be a more principled way of doing this.
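mypy 0.812 checks that the string passed to `namedtuple` matches the name it is assigned to, which is why `'Property'` becomes `'PropertyStub'` here and why the same one-word fix recurs for `_ASMoutput`, `PackedSequence_`, and `_Trace` below. A minimal sketch:

    from collections import namedtuple

    # mypy: First argument to namedtuple() should be "BadPoint", not "Pt"
    BadPoint = namedtuple('Pt', ['x', 'y'])

    Point = namedtuple('Point', ['x', 'y'])  # names agree, so this passes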


@@ -191,8 +191,10 @@ class _ConvBnNd(nn.modules.conv._ConvNd, nni._FusedModule):
         Args: `mod` a float module, either produced by torch.quantization utilities
               or directly from user
         """
+        # The ignore is because _FLOAT_MODULE is a TypeVar here where the bound
+        # has no __name__ (code is fine though)
         assert type(mod) == cls._FLOAT_MODULE, 'qat.' + cls.__name__ + '.from_float only works for ' + \
-            cls._FLOAT_MODULE.__name__
+            cls._FLOAT_MODULE.__name__  # type: ignore
         assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
         assert mod.qconfig, 'Input float module must have a valid qconfig'
         qconfig = mod.qconfig


@@ -12,7 +12,7 @@ from .module import Module
 from ..functional import log_softmax
 
-_ASMoutput = namedtuple('ASMoutput', ['output', 'loss'])
+_ASMoutput = namedtuple('_ASMoutput', ['output', 'loss'])
 
 class AdaptiveLogSoftmaxWithLoss(Module):


@@ -549,7 +549,7 @@ class _ConvTransposeNd(_ConvNd):
         """
         # derived classes override cls._FLOAT_MODULE attribute
         msg = ' nnq.' + cls.__name__ + '.from_float only works for ' + \
-            cls._FLOAT_MODULE.__name__
+            cls._FLOAT_MODULE.__name__  # type: ignore
         assert type(mod) == cls._FLOAT_MODULE, msg
         assert hasattr(mod, 'qconfig'), \
             'Input float module must have qconfig defined.'


@@ -3,7 +3,7 @@ from typing import Any, Optional, overload, Union, TypeVar, Tuple, Sequence
 from torch import Tensor
 from torch.types import _dtype, _device
 
-PackedSequence_ = namedtuple('PackedSequence', ['data', 'batch_sizes', 'sorted_indices', 'unsorted_indices'])
+PackedSequence_ = namedtuple('PackedSequence_', ['data', 'batch_sizes', 'sorted_indices', 'unsorted_indices'])
 
 def bind(optional: Any, fn: Any): ...


@@ -154,7 +154,7 @@ def _equalize_attributes(a: torch.Tensor, b: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
 
     return a, b
 
-_Trace = namedtuple("Trace", ("total", "abs", "rel", "idx", "diff", "a", "b"))
+_Trace = namedtuple("_Trace", ("total", "abs", "rel", "idx", "diff", "a", "b"))
 
 def _trace_mismatches(a: torch.Tensor, b: torch.Tensor, mismatches: torch.Tensor) -> _Trace:


@@ -461,7 +461,8 @@ def filter_desired_device_types(device_type_test_bases, except_for=None, only_for=None):
 _TORCH_TEST_DEVICES = os.environ.get('TORCH_TEST_DEVICES', None)
 if _TORCH_TEST_DEVICES:
     for path in _TORCH_TEST_DEVICES.split(':'):
-        mod = runpy.run_path(path, init_globals=globals())
+        # runpy (a stdlib module) lacks annotations
+        mod = runpy.run_path(path, init_globals=globals())  # type: ignore[func-returns-value]
         device_type_test_bases.append(mod['TEST_CLASS'])


@@ -39,7 +39,7 @@ def main():
         [
             benchmark_utils.FuzzedSparseTensor(
                 name=name,
-                size=[f"k{i}" for i in range(3)],
+                size=tuple([f"k{i}" for i in range(3)]),
                 min_elements=64 * 1024,
                 max_elements=128 * 1024,
                 sparse_dim="sparse_dim",
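`FuzzedSparseTensor` evidently annotates `size` as a tuple, so the list comprehension alone no longer type-checks; wrapping it in `tuple(...)` fixes the mismatch. A minimal sketch with a hypothetical function:

    from typing import Tuple

    def fuzz(size: Tuple[str, ...]) -> None:
        print(size)

    # fuzz([f"k{i}" for i in range(3)])     # rejected: List[str] is not Tuple[str, ...]
    fuzz(tuple(f"k{i}" for i in range(3)))  # accepted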


@@ -86,14 +86,15 @@ def _decompose_type(t, to_list=True):
             ts = [t.__bound__]
         else:
             # For T_co, __constraints__ is ()
-            ts = t.__constraints__
+            ts = list(t.__constraints__)
     elif hasattr(t, '__origin__') and t.__origin__ == Union:
         ts = t.__args__
     else:
         if not to_list:
             return None
         ts = [t]
-    ts = list(TYPE2ABC.get(_t, _t) for _t in ts)
+    # Ignored: Generator has incompatible item type "object"; expected "Type[Any]"
+    ts = list(TYPE2ABC.get(_t, _t) for _t in ts)  # type: ignore[misc]
     return ts
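The `list(t.__constraints__)` conversion follows the same single-type rule as `indices` above: `ts` is assigned a list in the other branches, so the tuple-valued `__constraints__` has to be converted before assignment. A minimal sketch:

    from typing import List

    def pick(flag: bool) -> List[int]:
        if flag:
            ts = [1, 2]        # the first branch makes ts a List[int]
        else:
            ts = list((3, 4))  # a bare tuple here would be an incompatible assignment
        return ts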