Add __all__ for torch.optim and torch.nn.modules modules (#80237)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/80237
Approved by: https://github.com/albanD
Author: anjali411
Date: 2022-06-24 18:56:55 +00:00
Committed by: PyTorch MergeBot
Parent: 84c0a308a1
Commit: bda04e9f5e
22 changed files with 28 additions and 142 deletions
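
Background note (not part of the commit itself): when a module defines __all__, `from <module> import *` exposes only the listed names, and public-API tooling can treat everything else as private. A minimal sketch with a toy module, not PyTorch code:

# Minimal sketch (toy module, not PyTorch code): __all__ declares the public
# surface, so helper imports such as typing names stop looking like public API.
import types

toy = types.ModuleType("toy")
exec(
    "from typing import Optional\n"   # helper import that would otherwise leak
    "__all__ = ['Widget']\n"          # declared public API
    "class Widget:\n"
    "    pass\n",
    toy.__dict__,
)

# `import *` and API checks honor __all__ when it is present.
public = getattr(toy, "__all__", [n for n in dir(toy) if not n.startswith("_")])
print(public)   # ['Widget'] -- 'Optional' is not part of the public surface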


@@ -2022,60 +2022,6 @@
"torch.nn.intrinsic.quantized.modules.conv_relu": [
"fuse_conv_bn_weights"
],
"torch.nn.modules.activation": [
"Module",
"NonDynamicallyQuantizableLinear",
"Optional",
"Parameter",
"Tensor",
"Tuple",
"constant_",
"xavier_normal_",
"xavier_uniform_"
],
"torch.nn.modules.adaptive": [
"Linear",
"List",
"Module",
"ModuleList",
"Sequence",
"Sequential",
"Tensor",
"log_softmax",
"namedtuple"
],
"torch.nn.modules.batchnorm": [
"Any",
"LazyModuleMixin",
"Module",
"Optional",
"Parameter",
"Tensor",
"UninitializedBuffer",
"UninitializedParameter",
"sync_batch_norm"
],
"torch.nn.modules.channelshuffle": [
"Module",
"Tensor"
],
"torch.nn.modules.container": [
"Any",
"Dict",
"Iterable",
"Iterator",
"Mapping",
"Module",
"Optional",
"OrderedDict",
"Parameter",
"Tuple",
"TypeVar",
"Union",
"chain",
"islice",
"overload"
],
"torch.nn.modules.conv": [
"LazyModuleMixin",
"List",
@@ -2405,94 +2351,6 @@
"TensorProtoDataType",
"TrainingMode"
],
"torch.optim.adadelta": [
"List",
"Optimizer",
"Optional",
"Tensor"
],
"torch.optim.adagrad": [
"List",
"Optimizer",
"Optional",
"Tensor"
],
"torch.optim.adam": [
"List",
"Optimizer",
"Optional",
"Tensor"
],
"torch.optim.adamax": [
"List",
"Optimizer",
"Optional",
"Tensor"
],
"torch.optim.adamw": [
"List",
"Optimizer",
"Optional",
"Tensor"
],
"torch.optim.asgd": [
"List",
"Optimizer",
"Optional",
"Tensor"
],
"torch.optim.lbfgs": [
"Optimizer",
"reduce"
],
"torch.optim.lr_scheduler": [
"Counter",
"Optimizer",
"bisect_right",
"wraps"
],
"torch.optim.nadam": [
"List",
"Optimizer",
"Optional",
"Tensor"
],
"torch.optim.optimizer": [
"chain",
"deepcopy",
"defaultdict"
],
"torch.optim.radam": [
"List",
"Optimizer",
"Optional",
"Tensor"
],
"torch.optim.rmsprop": [
"List",
"Optimizer",
"Optional",
"Tensor"
],
"torch.optim.rprop": [
"List",
"Optimizer",
"Optional",
"Tensor"
],
"torch.optim.sgd": [
"List",
"Optimizer",
"Optional",
"Tensor"
],
"torch.optim.sparse_adam": [
"Optimizer"
],
"torch.optim.swa_utils": [
"Module",
"deepcopy"
],
"torch.overrides": [
"BaseTorchFunctionMode",
"TorchFunctionMode",


@@ -9,6 +9,10 @@ from torch.nn.parameter import Parameter
from .module import Module
from .. import functional as F
__all__ = ['Threshold', 'ReLU', 'RReLU', 'Hardtanh', 'ReLU6', 'Sigmoid', 'Hardsigmoid', 'Tanh',
           'SiLU', 'Mish', 'Hardswish', 'ELU', 'CELU', 'SELU', 'GLU', 'GELU', 'Hardshrink', 'LeakyReLU',
           'LogSigmoid', 'Softplus', 'Softshrink', 'MultiheadAttention', 'PReLU', 'Softsign', 'Tanhshrink',
           'Softmin', 'Softmax', 'Softmax2d', 'LogSoftmax']
class Threshold(Module):
r"""Thresholds each element of the input Tensor.


@@ -11,6 +11,7 @@ from . import Sequential, ModuleList, Linear
from .module import Module
from ..functional import log_softmax
__all__ = ['AdaptiveLogSoftmaxWithLoss']
_ASMoutput = namedtuple('_ASMoutput', ['output', 'loss'])


@@ -10,6 +10,8 @@ from ._functions import SyncBatchNorm as sync_batch_norm
from .lazy import LazyModuleMixin
from .module import Module
__all__ = ['BatchNorm1d', 'LazyBatchNorm1d', 'BatchNorm2d', 'LazyBatchNorm2d', 'BatchNorm3d',
           'LazyBatchNorm3d', 'SyncBatchNorm']
class _NormBase(Module):
"""Common base of _InstanceNorm and _BatchNorm"""


@@ -3,6 +3,7 @@ from .. import functional as F
from torch import Tensor
__all__ = ['ChannelShuffle']
class ChannelShuffle(Module):
r"""Divide the channels in a tensor of shape :math:`(*, C , H, W)`


@@ -10,6 +10,8 @@ from torch._jit_internal import _copy_to_script_wrapper
from typing import Any, Dict, Iterable, Iterator, Mapping, Optional, overload, Tuple, TypeVar, Union
__all__ = ['Container', 'Sequential', 'ModuleList', 'ModuleDict', 'ParameterList', 'ParameterDict']
T = TypeVar('T', bound=Module)


@@ -4,6 +4,7 @@ from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional
__all__ = ['Adadelta', 'adadelta']
class Adadelta(Optimizer):
r"""Implements Adadelta algorithm.


@@ -4,6 +4,7 @@ from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional
__all__ = ['Adagrad', 'adagrad']
class Adagrad(Optimizer):
r"""Implements Adagrad algorithm.


@@ -4,6 +4,7 @@ from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional
__all__ = ['Adam', 'adam']
class Adam(Optimizer):
r"""Implements Adam algorithm.


@@ -4,6 +4,7 @@ from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional
__all__ = ['Adamax', 'adamax']
class Adamax(Optimizer):
r"""Implements Adamax algorithm (a variant of Adam based on infinity norm).


@@ -4,6 +4,7 @@ from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional
__all__ = ['AdamW', 'adamw']
class AdamW(Optimizer):
r"""Implements AdamW algorithm.


@@ -5,6 +5,7 @@ from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional
__all__ = ['ASGD', 'asgd']
class ASGD(Optimizer):
"""Implements Averaged Stochastic Gradient Descent.


@@ -2,6 +2,7 @@ import torch
from functools import reduce
from .optimizer import Optimizer
__all__ = ['LBFGS']
def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None):
    # ported from https://github.com/torch/optim/blob/master/polyinterp.lua


@@ -9,6 +9,9 @@ from bisect import bisect_right
from .optimizer import Optimizer
__all__ = ['LambdaLR', 'MultiplicativeLR', 'StepLR', 'MultiStepLR', 'ConstantLR', 'LinearLR',
           'ExponentialLR', 'SequentialLR', 'CosineAnnealingLR', 'ChainedScheduler', 'ReduceLROnPlateau',
           'CyclicLR', 'CosineAnnealingWarmRestarts', 'OneCycleLR']
EPOCH_DEPRECATION_WARNING = (
"The epoch parameter in `scheduler.step()` was not necessary and is being "


@@ -4,6 +4,7 @@ from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional
__all__ = ['NAdam', 'nadam']
class NAdam(Optimizer):
r"""Implements NAdam algorithm.


@@ -6,6 +6,7 @@ from itertools import chain
import warnings
import functools
__all__ = ['Optimizer']
class _RequiredParameter(object):
"""Singleton class representing a required parameter for an Optimizer."""


@@ -5,6 +5,7 @@ from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional
__all__ = ['RAdam', 'radam']
class RAdam(Optimizer):
r"""Implements RAdam algorithm.


@@ -3,6 +3,7 @@ from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional
__all__ = ['RMSprop', 'rmsprop']
class RMSprop(Optimizer):
r"""Implements RMSprop algorithm.


@@ -3,6 +3,7 @@ from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional
__all__ = ['Rprop', 'rprop']
class Rprop(Optimizer):
r"""Implements the resilient backpropagation algorithm.


@@ -3,6 +3,7 @@ from torch import Tensor
from .optimizer import Optimizer, required
from typing import List, Optional
__all__ = ['SGD', 'sgd']
class SGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).


@@ -2,6 +2,7 @@ import torch
from . import _functional as F
from .optimizer import Optimizer
__all__ = ['SparseAdam']
class SparseAdam(Optimizer):
r"""Implements lazy version of Adam algorithm suitable for sparse tensors.


@@ -7,6 +7,7 @@ import torch
from torch.nn import Module
from torch.optim.lr_scheduler import _LRScheduler
__all__ = ['AveragedModel', 'update_bn', 'SWALR']
class AveragedModel(Module):
r"""Implements averaged model for Stochastic Weight Averaging (SWA).