from functools import update_wrapper
from numbers import Number

import torch
import torch.nn.functional as F


# promote numbers to tensors of dtype torch.get_default_dtype()
def _default_promotion(v):
    return torch.tensor(v, dtype=torch.get_default_dtype())
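
# Illustrative sketch (not part of the original file): `_default_promotion`
# turns a plain Python number into a tensor of the current default dtype.
#
#     >>> torch.set_default_dtype(torch.float32)
#     >>> _default_promotion(2)
#     tensor(2.)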


def broadcast_all(*values):
    r"""
    Given a list of values (possibly containing numbers), returns a list where each
    value is broadcasted based on the following rules:
      - `torch.*Tensor` instances are broadcasted as per :ref:`_broadcasting-semantics`.
      - `numbers.Number` instances (scalars) are upcast to tensors having
        the same size and type as the first tensor passed to `values`. If all the
        values are scalars, then they are upcast to scalar Tensors.

    Args:
        values (list of `numbers.Number` or `torch.*Tensor`)

    Raises:
        ValueError: if any of the values is not a `numbers.Number` or
            `torch.*Tensor` instance
    """
    if not all(torch.is_tensor(v) or isinstance(v, Number) for v in values):
        raise ValueError('Input arguments must all be instances of numbers.Number or torch.Tensor.')
    if not all(map(torch.is_tensor, values)):
        new_tensor = _default_promotion
        for value in values:
            if torch.is_tensor(value):
                new_tensor = value.new_tensor
                break
        values = [v if torch.is_tensor(v) else new_tensor(v) for v in values]
    return torch.broadcast_tensors(*values)
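
# Illustrative sketch (not part of the original file): mixing a tensor and
# a Python scalar; the scalar is promoted via `new_tensor` and both values
# are broadcast to a common shape.
#
#     >>> x = torch.randn(3, 1)
#     >>> a, b = broadcast_all(x, 2.0)
#     >>> a.shape, b.shape
#     (torch.Size([3, 1]), torch.Size([3, 1]))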


def _standard_normal(shape, dtype, device):
    if torch._C._get_tracing_state():
        # [JIT WORKAROUND] lack of support for .normal_()
        return torch.normal(torch.zeros(shape, dtype=dtype, device=device),
                            torch.ones(shape, dtype=dtype, device=device))
    return torch.empty(shape, dtype=dtype, device=device).normal_()
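
# Illustrative sketch (not part of the original file): both branches draw
# from a standard normal; the eager path fills an uninitialized tensor
# in place, while the traced path builds the sample out of traceable ops.
#
#     >>> z = _standard_normal((2, 3), torch.float32, torch.device('cpu'))
#     >>> z.shape
#     torch.Size([2, 3])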


def _sum_rightmost(value, dim):
    r"""
    Sum out ``dim`` many rightmost dimensions of a given tensor.

    Args:
        value (Tensor): A tensor whose ``.dim()`` is at least ``dim``.
        dim (int): The number of rightmost dims to sum out.
    """
    if dim == 0:
        return value
    required_shape = value.shape[:-dim] + (-1,)
    return value.reshape(required_shape).sum(-1)
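
# Illustrative sketch (not part of the original file): summing out the two
# rightmost dimensions of a (2, 3, 4) tensor leaves shape (2,).
#
#     >>> t = torch.ones(2, 3, 4)
#     >>> _sum_rightmost(t, 2)
#     tensor([12., 12.])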


def logits_to_probs(logits, is_binary=False):
    r"""
    Converts a tensor of logits into probabilities. Note that for the
    binary case, each value denotes log odds, whereas for the
    multi-dimensional case, the values along the last dimension denote
    the log probabilities (possibly unnormalized) of the events.
    """
    if is_binary:
        return torch.sigmoid(logits)
    return F.softmax(logits, dim=-1)
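
# Illustrative sketch (not part of the original file): in the binary case
# a logit of 0 means even odds; in the multi-class case equal logits give
# a uniform distribution over the last dimension.
#
#     >>> logits_to_probs(torch.tensor(0.), is_binary=True)
#     tensor(0.5000)
#     >>> logits_to_probs(torch.zeros(4))
#     tensor([0.2500, 0.2500, 0.2500, 0.2500])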


def clamp_probs(probs):
    eps = torch.finfo(probs.dtype).eps
    return probs.clamp(min=eps, max=1 - eps)
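
# Illustrative sketch (not part of the original file): clamping keeps
# probabilities strictly inside (0, 1) so that a subsequent log() or
# log1p() cannot produce -inf.
#
#     >>> clamp_probs(torch.tensor([0., 1.]))
#     tensor([1.1921e-07, 1.0000e+00])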


def probs_to_logits(probs, is_binary=False):
    r"""
    Converts a tensor of probabilities into logits. For the binary case,
    this denotes the probability of occurrence of the event indexed by `1`.
    For the multi-dimensional case, the values along the last dimension
    denote the probabilities of occurrence of each of the events.
    """
    ps_clamped = clamp_probs(probs)
    if is_binary:
        return torch.log(ps_clamped) - torch.log1p(-ps_clamped)
    return torch.log(ps_clamped)
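
# Illustrative sketch (not part of the original file): probs_to_logits and
# logits_to_probs are inverses up to clamping, e.g. in the binary case:
#
#     >>> p = torch.tensor(0.75)
#     >>> logits_to_probs(probs_to_logits(p, is_binary=True), is_binary=True)
#     tensor(0.7500)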


class lazy_property(object):
    r"""
    Used as a decorator for lazy loading of class attributes. This uses a
    non-data descriptor that calls the wrapped method to compute the property
    on first call; thereafter the computed value replaces the wrapped method
    as an instance attribute.
    """
    def __init__(self, wrapped):
        self.wrapped = wrapped
        update_wrapper(self, wrapped)

    def __get__(self, instance, obj_type=None):
        if instance is None:
            return self
        with torch.enable_grad():
            value = self.wrapped(instance)
        setattr(instance, self.wrapped.__name__, value)
        return value
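
# Illustrative sketch (not part of the original file): the property body
# runs once on first access; later accesses hit the cached instance
# attribute, which shadows the non-data descriptor. `Foo` is hypothetical.
#
#     >>> class Foo(object):
#     ...     @lazy_property
#     ...     def value(self):
#     ...         print('computing')
#     ...         return torch.tensor(1.)
#     >>> f = Foo()
#     >>> f.value
#     computing
#     tensor(1.)
#     >>> f.value  # cached; the method is not called again
#     tensor(1.)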