diff --git a/torch/_deploy.py b/torch/_deploy.py
index 30c022eac879..35e8d4976940 100644
--- a/torch/_deploy.py
+++ b/torch/_deploy.py
@@ -88,7 +88,7 @@ def _load_storages(id, zip_reader, obj_bytes, serialized_storages, serialized_dt
         importer = sys_importer

     unpickler = PackageUnpickler(importer, io.BytesIO(obj_bytes))
-    unpickler.persistent_load = persistent_load  # type: ignore[assignment]
+    unpickler.persistent_load = persistent_load  # type: ignore[method-assign]
     result = _deploy_objects[id] = unpickler.load()
     return result

diff --git a/torch/_lobpcg.py b/torch/_lobpcg.py
index a5ed5cf8fcfd..6ca1e7294217 100644
--- a/torch/_lobpcg.py
+++ b/torch/_lobpcg.py
@@ -648,7 +648,7 @@ def _lobpcg(
     bparams["ortho_use_drop"] = bparams.get("ortho_use_drop", False)

     if not torch.jit.is_scripting():
-        LOBPCG.call_tracker = LOBPCG_call_tracker  # type: ignore[assignment]
+        LOBPCG.call_tracker = LOBPCG_call_tracker  # type: ignore[method-assign]

     if len(A.shape) > 2:
         N = int(torch.prod(torch.tensor(A.shape[:-2])))
@@ -672,7 +672,7 @@ def _lobpcg(
             bXret[i] = worker.X[:, :k]

         if not torch.jit.is_scripting():
-            LOBPCG.call_tracker = LOBPCG_call_tracker_orig  # type: ignore[assignment]
+            LOBPCG.call_tracker = LOBPCG_call_tracker_orig  # type: ignore[method-assign]

         return bE.reshape(A.shape[:-2] + (k,)), bXret.reshape(A.shape[:-2] + (m, k))

@@ -684,7 +684,7 @@ def _lobpcg(
     worker.run()

     if not torch.jit.is_scripting():
-        LOBPCG.call_tracker = LOBPCG_call_tracker_orig  # type: ignore[assignment]
+        LOBPCG.call_tracker = LOBPCG_call_tracker_orig  # type: ignore[method-assign]

     return worker.E[:k], worker.X[:, :k]

diff --git a/torch/_refs/__init__.py b/torch/_refs/__init__.py
index d18e24bb4019..22df61b1f171 100644
--- a/torch/_refs/__init__.py
+++ b/torch/_refs/__init__.py
@@ -2161,7 +2161,7 @@ def _reduction(
     computation_dtype, result_dtype = utils.reduction_dtypes(
         a, output_dtype_kind, dtype
     )
-    a = _maybe_convert_to_dtype(a, computation_dtype)  # type: ignore[assignment]
+    a = _maybe_convert_to_dtype(a, computation_dtype)  # type: ignore[method-assign]
     result = prim(a, dims)
     if keepdims:
         output_shape = [a.shape[i] if i not in dims else 1 for i in range(a.ndim)]
@@ -2480,7 +2480,7 @@ def mean(
     nelem = 1 if a.ndim == 0 else reduce(operator.mul, (a.shape[i] for i in dims), 1)
     result = true_divide(result, nelem)
     result_dtype = a.dtype if dtype is None else dtype
-    result = _maybe_convert_to_dtype(result, result_dtype)  # type: ignore[assignment]
+    result = _maybe_convert_to_dtype(result, result_dtype)  # type: ignore[method-assign]
     if out is not None:
         assert isinstance(out, TensorLike)
         out = _maybe_resize_out(out, result.shape)
diff --git a/torch/onnx/symbolic_helper.py b/torch/onnx/symbolic_helper.py
index c8b55c7dec99..fbba99c4ec69 100644
--- a/torch/onnx/symbolic_helper.py
+++ b/torch/onnx/symbolic_helper.py
@@ -287,7 +287,7 @@ def parse_args(*arg_descriptors: _ValueDescriptor):
                 arg_names = [None] * len(args)  # type: ignore[list-item]
                 fn_name = None
             args = [
-                _parse_arg(arg, arg_desc, arg_name, fn_name)  # type: ignore[assignment]
+                _parse_arg(arg, arg_desc, arg_name, fn_name)  # type: ignore[method-assign]
                 for arg, arg_desc, arg_name in zip(args, arg_descriptors, arg_names)
             ]
             # only support _outputs in kwargs
diff --git a/torch/testing/_internal/common_utils.py b/torch/testing/_internal/common_utils.py
index e289f89c55b9..68da3dfcf9b8 100644
--- a/torch/testing/_internal/common_utils.py
+++ b/torch/testing/_internal/common_utils.py
@@ -743,9 +743,8 @@ def prof_func_call(*args, **kwargs):
 def prof_meth_call(*args, **kwargs):
     return prof_callable(meth_call, *args, **kwargs)

-# TODO fix when https://github.com/python/mypy/issues/2427 is address
-torch._C.ScriptFunction.__call__ = prof_func_call  # type: ignore[assignment]
-torch._C.ScriptMethod.__call__ = prof_meth_call  # type: ignore[assignment]
+torch._C.ScriptFunction.__call__ = prof_func_call  # type: ignore[method-assign]
+torch._C.ScriptMethod.__call__ = prof_meth_call  # type: ignore[method-assign]

 def _get_test_report_path():
     # allow users to override the test file location. We need this
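
As context for reviewers, a minimal sketch of the distinction the new error code draws; the Greeter class below is hypothetical and not part of this patch. Recent mypy releases report assignments to a method under the method-assign error code, while ordinary variable assignments with a mismatched type are still reported under assignment.

# Hypothetical example (not from the patch): where mypy emits
# [method-assign] versus plain [assignment].

class Greeter:
    def greet(self) -> str:
        return "hello"


def loud_greet(self: Greeter) -> str:
    return "HELLO"


# Monkey-patching a method on a class: recent mypy reports this as
# "Cannot assign to a method" under the [method-assign] error code
# (older releases grouped it under [assignment]).
Greeter.greet = loud_greet  # type: ignore[method-assign]

# A plain variable assignment with a mismatched type is still [assignment].
count: int = 0
count = "zero"  # type: ignore[assignment]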