Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
[pytorch] Update caffe2/python to eliminate Pyre errors (#52083)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/52083

This makes minor fixes in `caffe2/python` to address all errors currently reported by Pyre. I updated the code to fix errors where doing so looked simple and safe, and added `pyre-fixme` comments in the other places.

ghstack-source-id: 121109695

Test Plan: Confirmed that Pyre no longer reports errors under `caffe2/python`

Differential Revision: D26272279

fbshipit-source-id: b1eb19d323b613f23280ce9c71e800e874ca1162
committed by Facebook GitHub Bot
parent c4eb22009e
commit 81b9aa743b
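Editorial note: for readers unfamiliar with the suppression mechanism used throughout this commit, here is a minimal hypothetical sketch (not code from the PR). A `# pyre-fixme[CODE]` comment silences the numbered Pyre error on the line that follows, while leaving a searchable marker for a later proper fix:

from typing import Optional

def first_char(s: Optional[str]) -> str:
    # pyre-fixme[16]: `Optional` has no attribute `__getitem__`.
    return s[0]  # Pyre would otherwise flag indexing a possibly-None value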
@@ -135,4 +135,5 @@ class HelperWrapper(object):
         return helper_name in self._registry
 
 
+# pyre-fixme[6]: incompatible parameter type: expected ModuleType, got HelperWrapper
 sys.modules[__name__] = HelperWrapper(sys.modules[__name__])
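Editorial note: the annotated line replaces the module object in `sys.modules` with a wrapper instance, a known Python trick for intercepting module attribute access. A minimal sketch of the pattern with hypothetical names, showing why Pyre raises error [6] (the dict is declared to hold `ModuleType` values):

import sys
import types

class Wrapper:
    """Intercepts attribute access on the module (hypothetical example)."""

    def __init__(self, wrapped: types.ModuleType) -> None:
        self._wrapped = wrapped

    def __getattr__(self, name: str):
        # Delegate anything we don't handle to the real module.
        return getattr(self._wrapped, name)

# Assigning a non-module violates the declared value type of sys.modules,
# which is what the suppression in the hunk above acknowledges.
sys.modules[__name__] = Wrapper(sys.modules[__name__])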
@@ -21,31 +21,33 @@ from caffe2.python.net_builder import ops, NetBuilder
 from caffe2.proto import caffe2_pb2
 
 import unittest
+from typing import Optional
 
 
 if workspace.has_gpu_support and workspace.NumGpuDevices() > 0:
-    gpu_device_option = caffe2_pb2.DeviceOption()
-    gpu_device_option.device_type = workspace.GpuDeviceType
+    _gpu_dev_option = caffe2_pb2.DeviceOption()
+    _gpu_dev_option.device_type = workspace.GpuDeviceType
     cpu_device_option = caffe2_pb2.DeviceOption()
     gpu_device_checker = device_checker.DeviceChecker(
-        0.01, [gpu_device_option]
+        0.01, [_gpu_dev_option]
     )
     device_checker = device_checker.DeviceChecker(
-        0.01, [gpu_device_option, cpu_device_option]
+        0.01, [_gpu_dev_option, cpu_device_option]
     )
     gpu_gradient_checkers = [
         gradient_checker.GradientChecker(
-            0.005, 0.05, gpu_device_option, "gpu_checker_ws"
+            0.005, 0.05, _gpu_dev_option, "gpu_checker_ws"
         ),
     ]
     gradient_checkers = [
         gradient_checker.GradientChecker(
-            0.005, 0.05, gpu_device_option, "gpu_checker_ws"
+            0.005, 0.05, _gpu_dev_option, "gpu_checker_ws"
         ),
         gradient_checker.GradientChecker(
             0.01, 0.05, cpu_device_option, "cpu_checker_ws"
         ),
     ]
+    gpu_device_option: Optional[caffe2_pb2.DeviceOption] = _gpu_dev_option
 else:
     cpu_device_option = caffe2_pb2.DeviceOption()
     gpu_device_option = None
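Editorial note: the interesting change is the last added line. The GPU branch now builds the device option under a private name and then binds the public `gpu_device_option` with an explicit `Optional` annotation, so both branches (a real device option or `None`) satisfy one declared type. A condensed hypothetical sketch of the pattern:

from typing import Optional

class Device:
    """Stand-in for caffe2_pb2.DeviceOption in this sketch."""

def _has_accelerator() -> bool:
    return False  # stand-in for a real runtime capability check

if _has_accelerator():
    _dev = Device()
    accelerator: Optional[Device] = _dev
else:
    accelerator = None  # same name, consistent with the Optional annotation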
@@ -20,6 +20,8 @@ from caffe2.proto import caffe2_pb2
 import numpy as np
 
 
+# pyre-fixme[13]: Pyre can't detect attribute initialization through the
+# super().__new__ call
 class OpSpec(namedtuple("OpSpec", "type input output arg")):
 
     def __new__(cls, op_type, op_input, op_output, op_arg=None):
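Editorial note: `pyre-fixme[13]` is needed because the fields of a `namedtuple` subclass are populated by `super().__new__`, which Pyre cannot follow, so it reports the attributes as uninitialized. A runnable sketch of the same pattern with hypothetical names:

from collections import namedtuple

class Spec(namedtuple("Spec", "type input output arg")):
    def __new__(cls, op_type, op_input, op_output, op_arg=None):
        # The tuple fields are filled in here, invisibly to the checker.
        return super().__new__(cls, op_type, op_input, op_output, op_arg)

s = Spec("Relu", ["X"], ["Y"])
assert s.arg is None  # default applied by the custom __new__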
@@ -111,9 +111,12 @@ class Tags(object):
         return wrapper
 
 
+# pyre-fixme[16]: Tags has no attribute `TRAIN_ONLY`
 Tags.TRAIN_ONLY = [Tags.EXCLUDE_FROM_PREDICTION, Tags.EXCLUDE_FROM_EVAL,
                    Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
+# pyre-fixme[16]: Tags has no attribute `EVAL_ONLY`
 Tags.EVAL_ONLY = [Tags.EXCLUDE_FROM_PREDICTION, Tags.EXCLUDE_FROM_TRAIN,
                   Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
+# pyre-fixme[16]: Tags has no attribute `PREDICTION_ONLY`
 Tags.PREDICTION_ONLY = [Tags.EXCLUDE_FROM_TRAIN, Tags.EXCLUDE_FROM_EVAL,
                         Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
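Editorial note: `pyre-fixme[16]` covers attributes attached to a class after its body has executed; they exist at runtime but are invisible to the checker. A tiny hypothetical sketch:

class Flags:
    A = "exclude_from_train"
    B = "exclude_from_eval"

# Works at runtime, but the checker only knows attributes from the class
# body, hence the pyre-fixme[16] suppressions in the hunk above.
Flags.BOTH = [Flags.A, Flags.B]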
@@ -7,7 +7,9 @@ import numpy as np
 import nvd3
 import os
 import sys
+# pyre-fixme[21]: Could not find a module corresponding to import `tornado.httpserver`
 import tornado.httpserver
+# pyre-fixme[21]: Could not find a module corresponding to import `tornado.wsgi`
 import tornado.wsgi
 
 __folder__ = os.path.abspath(os.path.dirname(__file__))
@@ -53,6 +53,7 @@ if __name__ == "__main__":
     import unittest
     import random
     random.seed(2603)
+    # pyre-fixme[10]: Name `workspace` is used but not defined in the current scope
     workspace.GlobalInit([
         'caffe2',
         '--caffe2_log_level=0',
@@ -52,6 +52,7 @@ if __name__ == "__main__":
     import unittest
     import random
     random.seed(2006)
+    # pyre-fixme[10]: Name `workspace` is used but not defined in the current scope
    workspace.GlobalInit([
         'caffe2',
         '--caffe2_log_level=0',
@@ -7,6 +7,7 @@ from caffe2.python import brew, core, workspace
 from caffe2.python.model_helper import ModelHelper
 from functools import partial
 from hypothesis import given, settings
+from typing import Optional, Tuple
 
 import caffe2.python.hypothesis_test_util as hu
 import caffe2.python.serialized_test.serialized_test_util as serial
@@ -324,9 +325,14 @@ class TestLayerNormOp(serial.SerializedTestCase):
     @settings(deadline=1000)
     def test_layer_norm_op_jit(self, X, eps, elementwise_affine, gc, dc):
         @torch.jit.script
-        def jit_layer_norm(X, gamma=None, beta=None, axis=1, eps=1e-5,
-                           elementwise_affine=False):
-            # type: (Tensor, Optional[Tensor], Optional[Tensor], int, float, bool) -> Tuple[Tensor, Tensor, Tensor]
+        def jit_layer_norm(
+            X: torch.Tensor,
+            gamma: Optional[torch.Tensor] = None,
+            beta: Optional[torch.Tensor] = None,
+            axis: int = 1,
+            eps: float = 1e-5,
+            elementwise_affine: bool = False,
+        ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
             return torch.ops._caffe2.LayerNorm(
                 X, gamma, beta, axis, eps, elementwise_affine)
 
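Editorial note: the rewrite above swaps a `# type:` comment for real Python 3 annotations; `torch.jit.script` accepts either, but only the annotations are visible to Pyre. A small self-contained sketch (hypothetical function, not the test's operator):

from typing import Tuple

import torch

@torch.jit.script
def scaled_sum(x: torch.Tensor, y: torch.Tensor, alpha: float = 1.0) -> Tuple[torch.Tensor, torch.Tensor]:
    # TorchScript reads these annotations the same way it read the old
    # "# type: (...) -> ..." comments.
    z = x + alpha * y
    return z, x

z, x = scaled_sum(torch.ones(2), torch.ones(2), alpha=2.0)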
@@ -21,6 +21,7 @@ SIZE = 0
 
 def SetupMPI():
     try:
+        # pyre-fixme[21]: undefined import
         from mpi4py import MPI
         global _has_mpi, COMM, RANK, SIZE
         _has_mpi = core.IsOperatorWithEngine("CreateCommonWorld", "MPI")
@@ -61,4 +61,6 @@ class TestScaleOps(serial.SerializedTestCase):
         np.testing.assert_allclose(o, o_ref)
+
 if __name__ == '__main__':
     import unittest
+
     unittest.main()
@@ -33,10 +33,11 @@ def get_predictor_exporter_helper(submodelNetName):
     return pred_meta
 
 
+# pyre-fixme[13]: Pyre can't detect the attribute initialization via cls.super() here
 class PredictorExportMeta(collections.namedtuple(
     'PredictorExportMeta',
-    'predict_net, parameters, inputs, outputs, shapes, name, \
-    extra_init_net, global_init_net, net_type, num_workers, trainer_prefix')):
+    'predict_net, parameters, inputs, outputs, shapes, name, '
+    'extra_init_net, global_init_net, net_type, num_workers, trainer_prefix')):
     """
     Metadata to be used for serializaing a net.
 
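Editorial note: the field-name string was reflowed from a backslash line continuation to implicit concatenation of adjacent literals. With the backslash, the next line's leading indentation becomes part of the string; `namedtuple` tolerates the extra whitespace when splitting field names, but the concatenated form keeps the literal clean. A quick hypothetical demonstration:

with_backslash = 'predict_net, parameters, \
    inputs'
concatenated = ('predict_net, parameters, '
                'inputs')

assert '    ' in with_backslash      # indentation leaked into the string
assert '    ' not in concatenated    # adjacent literals join cleanly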
@@ -28,6 +28,7 @@ from past.builtins import basestring
 from future.utils import viewitems, viewkeys, viewvalues
 from itertools import islice
 from six import StringIO
+from typing import Sequence
 
 logger = logging.getLogger(__name__)
 
@@ -67,6 +68,7 @@ FeatureSpec = namedtuple(
     ]
 )
 
+# pyre-fixme[16]: `FeatureSpec.__new__` has no attribute `__defaults__`
 FeatureSpec.__new__.__defaults__ = (None, None, None, None, None, None)
 
 
@@ -87,9 +89,10 @@ class Metadata(
     `feature_specs` - information about the features that contained in this
     field. For example if field have more than 1 feature it can have list of
     feature names contained in this field."""
-    __slots__ = ()
+    __slots__: Sequence[str] = ()
 
 
+# pyre-fixme[16]: `Metadata.__new__` has no attribute `__defaults__`
 Metadata.__new__.__defaults__ = (None, None, None)
 
 
@@ -97,7 +100,7 @@ class Field(object):
     """Represents an abstract field type in a dataset.
     """
 
-    __slots__ = ("_parent", "_field_offsets")
+    __slots__: Sequence[str] = ("_parent", "_field_offsets")
 
     def __init__(self, children):
         """Derived classes must call this after their initialization."""
@@ -205,7 +208,7 @@ class List(Field):
     the parent domain.
     """
 
-    __slots__ = ("lengths", "_items")
+    __slots__: Sequence[str] = ("lengths", "_items")
 
     def __init__(self, values, lengths_blob=None):
         if isinstance(lengths_blob, Field):
@@ -285,7 +288,7 @@ class ListWithEvicted(List):
     LRU Hashing.
     """
 
-    __slots__ = ("_evicted_values",)
+    __slots__: Sequence[str] = ("_evicted_values",)
 
     def __init__(self, values, lengths_blob=None, evicted_values=None):
         if isinstance(evicted_values, Field):
@@ -368,7 +371,7 @@ class Struct(Field):
     """Represents a named list of fields sharing the same domain.
     """
 
-    __slots__ = ("fields", "_frozen")
+    __slots__: Sequence[str] = ("fields", "_frozen")
 
     def __init__(self, *fields):
         """ fields is a list of tuples in format of (name, field). The name is
@@ -718,7 +721,7 @@ class Scalar(Field):
     a conversion to numpy.ndarray is attempted.
     """
 
-    __slots__ = ("_metadata", "dtype", "_original_dtype", "_blob")
+    __slots__: Sequence[str] = ("_metadata", "dtype", "_original_dtype", "_blob")
 
     def __init__(self, dtype=None, blob=None, metadata=None):
         self._metadata = None
@@ -980,7 +983,7 @@ def from_dtype(dtype, _outer_shape=()):
 class _SchemaNode(object):
     """This is a private class used to represent a Schema Node"""
 
-    __slots__ = ("name", "children", "type_str", "field")
+    __slots__: Sequence[str] = ("name", "children", "type_str", "field")
 
     def __init__(self, name, type_str=''):
         self.name = name
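Editorial note: the repeated `__slots__` change annotates each assignment as `Sequence[str]` rather than letting Pyre infer a fixed-shape tuple type from the base class, which subclasses would then appear to violate by assigning tuples of different lengths. A minimal hypothetical sketch:

from typing import Sequence

class Base:
    # Without the annotation, the checker may infer a one-element tuple type
    # here and reject the differently-shaped tuple in the subclass below.
    __slots__: Sequence[str] = ("_parent",)

class Child(Base):
    __slots__: Sequence[str] = ("lengths", "_items")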
@@ -63,12 +63,16 @@ if has_cuda_support:
     GetDeviceProperties = C.get_device_properties
     GetGPUMemoryInfo = C.get_gpu_memory_info
 else:
+    # pyre-fixme[9]: incompatible type assignment
     NumCudaDevices = lambda: 0 # noqa
+    # pyre-fixme[9]: incompatible type assignment
     GetCUDAVersion = lambda: 0 # noqa
+    # pyre-fixme[9]: incompatible type assignment
     GetCuDNNVersion = lambda: 0 # noqa
 
 if has_hip_support:
     GpuDeviceType = caffe2_pb2.HIP
+    # pyre-fixme[9]: incompatible type assignment
     NumGpuDevices = C.num_hip_devices
     GetHIPVersion = C.get_hip_version
 
@@ -81,9 +85,11 @@ if not has_gpu_support:
     # setting cuda as the default GpuDeviceType as some tests
     # like core, scope tests use GpuDeviceType even without gpu support
     GpuDeviceType = caffe2_pb2.CUDA
+    # pyre-fixme[9]: incompatible type assignment
     NumGpuDevices = lambda: 0 # noqa
     GetDeviceProperties = lambda x: None # noqa
     GetGpuPeerAccessPattern = lambda: np.array([]) # noqa
+    # pyre-fixme[9]: incompatible type assignment
     GetGPUMemoryInfo = lambda: None # noqa
 
 IsNUMAEnabled = C.is_numa_enabled
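Editorial note: `pyre-fixme[9]` here marks the capability-fallback pattern: when the native extension lacks a feature, module-level callables are rebound to cheap stubs whose types differ from the real bindings. A hypothetical self-contained sketch:

import numpy as np

try:
    from _native_ext import num_devices, peer_access_pattern  # hypothetical compiled extension
except ImportError:
    # Stubs of a different type than the real bindings; this mismatch is
    # what pyre-fixme[9] (incompatible assignment) suppresses above.
    num_devices = lambda: 0  # noqa: E731
    peer_access_pattern = lambda: np.array([])  # noqa: E731

print(num_devices())  # 0 when the extension is unavailable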
@@ -710,9 +710,10 @@ class MyModule(torch.jit.ScriptModule):
     def forward(self, x):
         return self.mult.mm(x)
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
+    #  `torch.jit.script_method`
     @torch.jit.script_method
-    def multi_input(self, x, y, z=2):
-        # type: (Tensor, Tensor, int) -> Tensor
+    def multi_input(self, x: torch.Tensor, y: torch.Tensor, z: int = 2) -> torch.Tensor:
         return x + y + z
 
     @torch.jit.script_method
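Editorial note: the same type-comment-to-annotation migration applies inside `ScriptModule` methods; the extra `pyre-fixme[56]` is needed because Pyre cannot type the `torch.jit.script_method` decorator itself. A compact hypothetical sketch:

import torch

class Scaler(torch.jit.ScriptModule):
    # pyre-fixme[56]-style caveat: the checker cannot infer the decorator's
    # type, but TorchScript compiles the annotated method as before.
    @torch.jit.script_method
    def scale(self, x: torch.Tensor, k: int = 2) -> torch.Tensor:
        return x * k

m = Scaler()
print(m.scale(torch.ones(3)))  # tensor([2., 2., 2.])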