Add type annotations to conv-relu (#47680)

Summary: Fixes https://github.com/pytorch/pytorch/issues/47679

Pull Request resolved: https://github.com/pytorch/pytorch/pull/47680
Reviewed By: zhangguanheng66
Differential Revision: D25416628
Pulled By: malfet
fbshipit-source-id: 103bea1e8c300990f74689787a71b1cfe916cfef
commit 5375a479aa (parent e9ef1fe309)
committed by Facebook GitHub Bot
mypy.ini (15 lines changed)
@@ -101,15 +101,15 @@ ignore_errors = True
 [mypy-torch.nn.quantized.modules.conv]
 ignore_errors = True

-[mypy-torch._lobpcg]
-ignore_errors = True
-
 [mypy-torch._appdirs]
 ignore_errors = True

 [mypy-torch._utils]
 ignore_errors = True

 [mypy-torch._overrides]
 ignore_errors = True

 [mypy-torch.utils.tensorboard._caffe2_graph]
 ignore_errors = True
@@ -131,15 +131,6 @@ ignore_errors = True
 [mypy-torch.nn.quantized.modules.batchnorm]
 ignore_errors = True

-[mypy-torch.nn.intrinsic.quantized.modules.conv_relu]
-ignore_errors = True
-
-[mypy-torch.nn.intrinsic.quantized.modules.bn_relu]
-ignore_errors = True
-
-[mypy-torch.nn.intrinsic.quantized.modules.linear_relu]
-ignore_errors = True
-
 [mypy-torch.nn.intrinsic.qat.modules.conv_fused]
 ignore_errors = True
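Deleting a module's ignore_errors section re-enables type checking for it, so intentional mismatches must then be suppressed line by line with error-code-scoped comments, which is exactly what the file diffs below do. A minimal sketch of the difference (module and names hypothetical):

# Previously, mypy.ini hid every error in the module:
#   [mypy-mypkg.legacy]
#   ignore_errors = True

# With the section removed, only deliberate mismatches are silenced,
# scoped to one error code so unrelated new errors still surface.
def legacy_helper() -> str:
    return "42"

count: int = legacy_helper()  # type: ignore[assignment]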
torch/_lobpcg.py
@@ -262,7 +262,7 @@ def _symeig_backward(D_grad, U_grad, A, D, U, largest):
 class LOBPCGAutogradFunction(torch.autograd.Function):

     @staticmethod
-    def forward(ctx,
+    def forward(ctx,  # type: ignore[override]
                 A: Tensor,
                 k: Optional[int] = None,
                 B: Optional[Tensor] = None,
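The # type: ignore[override] is needed because torch.autograd.Function.forward is declared to accept arbitrary *args/**kwargs; a subclass that pins down concrete parameters is an incompatible override under mypy's Liskov check, and the comment marks that narrowing as deliberate. A minimal sketch of the same pattern (the Scale function is hypothetical):

import torch
from torch import Tensor

class Scale(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x: Tensor, alpha: float) -> Tensor:  # type: ignore[override]
        # Narrower than the base class's (ctx, *args, **kwargs) signature,
        # hence the suppressed override error.
        ctx.alpha = alpha
        return x * alpha

    @staticmethod
    def backward(ctx, grad_out: Tensor):
        # One gradient per forward input: d(x*alpha)/dx, and None for alpha.
        return grad_out * ctx.alpha, None

y = Scale.apply(torch.ones(3), 2.0)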
@@ -606,7 +606,7 @@ def _lobpcg(A: Tensor,
     bparams['ortho_use_drop'] = bparams.get('ortho_use_drop', False)

     if not torch.jit.is_scripting():
-        LOBPCG.call_tracker = LOBPCG_call_tracker
+        LOBPCG.call_tracker = LOBPCG_call_tracker  # type: ignore

     if len(A.shape) > 2:
         N = int(torch.prod(torch.tensor(A.shape[:-2])))
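The bare # type: ignore here covers a different complaint: call_tracker is monkey-patched onto the class for the duration of the call, and mypy rejects assigning to a method. A minimal sketch of the pattern (class and function names hypothetical):

class Solver:
    def call_tracker(self) -> None:
        pass  # no-op hook in normal runs

def call_tracker_debug(self: Solver) -> None:
    print("solver step recorded")

# Swap the hook in for eager-mode debugging; mypy reports
# "Cannot assign to a method" here, so the line opts out of checking.
Solver.call_tracker = call_tracker_debug  # type: ignore

The same suppression is repeated at the two restore sites in the hunks below.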
@@ -628,7 +628,7 @@ def _lobpcg(A: Tensor,
             bXret[i] = worker.X[:, :k]

         if not torch.jit.is_scripting():
-            LOBPCG.call_tracker = LOBPCG_call_tracker_orig
+            LOBPCG.call_tracker = LOBPCG_call_tracker_orig  # type: ignore

         return bE.reshape(A.shape[:-2] + (k,)), bXret.reshape(A.shape[:-2] + (m, k))
@@ -640,7 +640,7 @@ def _lobpcg(A: Tensor,
     worker.run()

     if not torch.jit.is_scripting():
-        LOBPCG.call_tracker = LOBPCG_call_tracker_orig
+        LOBPCG.call_tracker = LOBPCG_call_tracker_orig  # type: ignore

     return worker.E[:k], worker.X[:, :k]
torch/nn/intrinsic/quantized/modules/conv_relu.py
@@ -16,7 +16,7 @@ class ConvReLU1d(nnq.Conv1d):
     Same as torch.nn.quantized.Conv1d

     """
-    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU1d
+    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU1d  # type: ignore[assignment]

     def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                  padding=0, dilation=1, groups=1, bias=True,
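The base quantized conv classes bind _FLOAT_MODULE to their float counterpart, so mypy infers a concrete class type for the attribute; re-binding it to the fused ConvReLU wrapper in a subclass is then an incompatible assignment unless explicitly suppressed. A minimal sketch of the inference involved (classes hypothetical):

class FloatConv:
    pass

class FusedConvReLU:  # deliberately not a FloatConv subclass
    pass

class QuantizedConv:
    # mypy infers the attribute type as type[FloatConv] from this line...
    _FLOAT_MODULE = FloatConv

class QuantizedConvReLU(QuantizedConv):
    # ...so binding an unrelated class in a subclass is flagged as an
    # incompatible assignment and needs the explicit opt-out.
    _FLOAT_MODULE = FusedConvReLU  # type: ignore[assignment]

ConvReLU2d and ConvReLU3d below receive the identical treatment.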
@@ -55,7 +55,7 @@ class ConvReLU2d(nnq.Conv2d):
     Same as torch.nn.quantized.Conv2d

     """
-    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU2d
+    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU2d  # type: ignore[assignment]

     def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                  padding=0, dilation=1, groups=1, bias=True,
@@ -94,7 +94,7 @@ class ConvReLU3d(nnq.Conv3d):
     Attributes: Same as torch.nn.quantized.Conv3d

     """
-    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU3d
+    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU3d  # type: ignore[assignment]

     def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                  padding=0, dilation=1, groups=1, bias=True,