Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
[pep8] Fix most lint automatically with autopep8
Here's the command I used to invoke autopep8 (in parallel!):

    git ls-files | grep '\.py$' | xargs -n1 -P`nproc` autopep8 -i

Several rules are ignored in setup.cfg. The goal is to let autopep8 handle everything which it can handle safely, and to disable any rules which are tricky or controversial to address. We may want to come back and re-enable some of these rules later, but I'm trying to make this patch as safe as possible.

Also configures flake8 to match pep8's behavior.

Also configures TravisCI to check the whole project for lint.
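As a rough sketch of the check TravisCI will now run, reproduced locally (assuming pep8 and flake8 are installed; both pick up their max-line-length and ignore settings from the setup.cfg sections added below):

    pep8 .     # style-only check over the whole tree
    flake8 .   # the same style rules plus the remaining pyflakes checks listed under [flake8]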
@@ -44,4 +44,4 @@ matrix:
 python: "2.7"
 addons: true
 install: pip install pep8
-script: pep8 setup.py
+script: pep8
@@ -201,6 +201,7 @@ from docutils import nodes
 from sphinx.util.docfields import TypedField
 from sphinx import addnodes
 
+
 def patched_make_field(self, types, domain, items):
     # type: (List, unicode, Tuple) -> nodes.field
     def handle_item(fieldarg, content):
@@ -1,2 +1,7 @@
 [pep8]
 max-line-length = 120
+ignore = E402,E721,E731
+
+[flake8]
+max-line-length = 120
+ignore = E305,E402,E721,E731,F401,F403,F405,F811,F812,F821,F841
[Hunks in the shared test helpers (torch.set_default_tensor_type, run_tests, TEST_NUMPY, get_cpu_type): a second blank line is inserted before each top-level function.]
[Hunks in the common NN test helpers (criterion_tests, TestBase, ModuleTest, CriterionTest), two small Net(nn.Module) fixtures, the storage error-message checks (check_error) and the legacy optim rosenbrock helpers: blank lines inserted before classes and functions, spaces added after commas and around operators, and one over-long reference_fn lambda rewrapped.]
[Hunks in the autograd tests (TestAutograd, index_variable/gather_variable, create_input and the generated do_test functions): blank lines inserted before nested Function subclasses, local hook functions and helper closures.]
[Hunks in the CUDA tests (the small_*/medium_* tensor constructors, get_cycles_per_ms, compare_cpu_gpu, TestCuda) and the dataset/dataloader tests: blank lines inserted between helpers and classes, and two over-long lines (an addcdiv test entry and an "Ignoring {} ..." print call) wrapped.]
[Hunks in the old NN tests (OldModuleTest, the unpooling-net builders, prepare_tests, TestNN) and the multiprocessing tests: blank lines inserted before classes and nested functions, and three over-long expressions (a gradInput2 list, a narrows tuple of slices, and a ParallelCriterion output2 sum) wrapped to stay under 120 characters.]
[Hunks in the new NN tests (default_tensor_type, InputVariableMixin, NewModuleTest, TestNN, UnpoolingNet): blank lines inserted before nested Net classes and helpers, and the two FractionalMaxPool2d constructor lambdas in new_module_tests wrapped.]
[Hunks in the sparse, torch, trainer and FFI tests: blank lines inserted before classes and helpers; stray semicolons dropped and operator spacing fixed, e.g. "return a + weight * (b-a);" becomes "return a + weight * (b - a)" and "ki = k.clone();" becomes "ki = k.clone()".]
[Hunks in the cwrap code generator and its plugins (ArgcountChecker, ArgcountSortPlugin, ArgumentReferences, AutoGPU, BeforeAfterCall, BoolOption, ConstantArguments, CuDNNPlugin, GILRelease, KwargsPlugin, NullableArguments, OptionalArguments, ReturnArguments, THPPlugin) and in the THNN/cuNN wrapper generator (wrap_nn, wrap_cunn): blank lines inserted before classes, trailing blank lines removed, and the over-long DEFAULT_PLUGIN_CLASSES list, kwargs lookup join, and extern "C" declaration string wrapped.]
[Hunks in the build utility check_env_flag and the main torch module: blank lines inserted around typename and between each of the Storage and Tensor class definitions (DoubleStorage through ByteStorage, DoubleTensor through ByteTensor); a trailing blank line dropped from the tensor pretty-printer (_str).]
[Hunks in the torch._thnn backend classes (Backends, Backend, THNNCudaBackendStateMixin, THNNBackendBase, Function, Argument): blank-line insertions only.]
[Hunks in the autograd package (backward, the basic/blas/linalg/reduce/stochastic/tensor Function classes, NestedIOFunction, StochasticFunction, Variable): blank lines inserted before classes and helpers, trailing blank lines removed, and the over-long "backward should be called only on a scalar (i.e. 1-element tensor) or with gradient w.r.t. the variable" RuntimeError message wrapped.]
[Hunks in the cuDNN bindings (library loading, CuDNNHandle, CuDNNError, the Tensor/Convolution/Filter/Dropout/RNN descriptor classes, the convolution algorithm and workspace helpers) and the cuDNN RNN support code: blank lines inserted between classes and module-level functions.]
[Hunks in the torch.cuda package (the CUDA Storage and Tensor class definitions, broadcast, the NCCL bindings, random seeding, and the CudaError class): blank lines inserted between classes and functions.]
|
||||
import torch
|
||||
from ._utils import _range
|
||||
|
||||
|
||||
def split(tensor, split_size, dim=0):
|
||||
if dim < 0:
|
||||
dim += tensor.dim()
|
||||
dim_size = tensor.size(dim)
|
||||
num_splits = (dim_size + split_size - 1) // split_size
|
||||
last_split_size = split_size - (split_size * num_splits - dim_size)
|
||||
|
||||
def get_split_size(i):
|
||||
return split_size if i < num_splits - 1 else last_split_size
|
||||
return tuple(tensor.narrow(int(dim), int(i * split_size), int(get_split_size(i))) for i
|
||||
|
@ -1,7 +1,9 @@
|
||||
import torch
|
||||
from .Module import Module
|
||||
|
||||
|
||||
class Abs(Module):
|
||||
|
||||
def __init__(self):
|
||||
super(Abs, self).__init__()
|
||||
|
||||
@ -21,4 +23,3 @@ class Abs(Module):
|
||||
self.gradInput
|
||||
)
|
||||
return self.gradInput
|
||||
|
||||
|
@ -1,6 +1,7 @@
|
||||
import torch
|
||||
from .Criterion import Criterion
|
||||
|
||||
|
||||
class AbsCriterion(Criterion):
|
||||
|
||||
def __init__(self, sizeAverage=True):
|
||||
@ -21,7 +22,6 @@ class AbsCriterion(Criterion):
|
||||
self.output = self.output_tensor[0]
|
||||
return self.output
|
||||
|
||||
|
||||
def updateGradInput(self, input, target):
|
||||
self._backend.AbsCriterion_updateGradInput(
|
||||
self._backend.library_state,
|
||||
@ -31,4 +31,3 @@ class AbsCriterion(Criterion):
|
||||
self.sizeAverage
|
||||
)
|
||||
return self.gradInput
|
||||
|
||||
|
@ -2,6 +2,7 @@ import math
|
||||
import torch
|
||||
from .Module import Module
|
||||
|
||||
|
||||
class Add(Module):
|
||||
|
||||
def __init__(self, inputSize, scalar=False):
|
||||
@ -28,7 +29,7 @@ class Add(Module):
|
||||
def updateOutput(self, input):
|
||||
self.output.resize_as_(input).copy_(input)
|
||||
if self.scalar:
|
||||
self.output.add_(self.bias[0]);
|
||||
self.output.add_(self.bias[0])
|
||||
else:
|
||||
batchSize = input.size(0)
|
||||
if self._ones.size(0) != batchSize:
|
||||
@ -47,11 +48,10 @@ class Add(Module):
|
||||
|
||||
def accGradParameters(self, input, gradOutput, scale=1):
|
||||
if self.gradBias.size(0) == 1:
|
||||
self.gradBias[0] = self.gradBias[0] + scale*gradOutput.sum();
|
||||
self.gradBias[0] = self.gradBias[0] + scale * gradOutput.sum()
|
||||
else:
|
||||
if input.is_same_size(self.bias):
|
||||
self.gradBias.add_(scale, gradOutput)
|
||||
else:
|
||||
gradOutput = gradOutput.view(input.size(0), -1)
|
||||
self.gradBias.view(-1).addmv_(scale, gradOutput.t(), self._ones)
|
||||
|
||||
|
@ -1,6 +1,7 @@
|
||||
import torch
|
||||
from .Module import Module
|
||||
|
||||
|
||||
class AddConstant(Module):
|
||||
|
||||
def __init__(self, constant_scalar, inplace=False):
|
||||
@ -29,4 +30,3 @@ class AddConstant(Module):
|
||||
self.gradInput.copy_(gradOutput)
|
||||
|
||||
return self.gradInput
|
||||
|
||||
|
@ -2,6 +2,8 @@ import torch
from .Criterion import Criterion

# TODO: use THNN


class BCECriterion(Criterion):
eps = 1e-12

@ -52,7 +54,6 @@ class BCECriterion(Criterion):

return self.output


def updateGradInput(self, input, target):
# - (target - input) / ( input (1 - input) )
# The gradient is slightly incorrect:
@ -72,7 +73,6 @@ class BCECriterion(Criterion):
if weights is not None and target.dim() != 1:
weights = self.weights.view(1, target.size(1)).expand_as(target)


buffer.resize_as_(input)
# - x ( 1 + self.eps -x ) + self.eps
torch.add(input, -1, out=buffer).add_(-self.eps).mul_(input).add_(-self.eps)
@ -90,4 +90,3 @@ class BCECriterion(Criterion):
gradInput.div_(target.nelement())

return gradInput

@ -32,6 +32,7 @@ import torch
from .Module import Module
from .utils import clear


class BatchNormalization(Module):
# expected dimension of input
nDim = 2

@ -74,7 +75,8 @@ class BatchNormalization(Module):

def _checkInputDim(self, input):
if input.dim() != self.nDim:
raise RuntimeError('only mini-batch supported ({}D tensor), got {}D tensor instead'.format(self.nDim, input.dim()))
raise RuntimeError(
'only mini-batch supported ({}D tensor), got {}D tensor instead'.format(self.nDim, input.dim()))
if input.size(1) != self.running_mean.nelement():
raise RuntimeError('got {}-feature tensor, expected {}'.format(input.size(1), self.running_mean.nelement()))

@ -124,7 +126,6 @@ class BatchNormalization(Module):

return self.output


def _backward(self, input, gradOutput, scale, gradInput=None, gradWeight=None, gradBias=None):
self._checkInputDim(input)
self._checkInputDim(gradOutput)
@ -137,7 +138,6 @@ class BatchNormalization(Module):
if gradInput is not None:
gradInput.resize_as_(gradOutput)


self._backend.BatchNormalization_backward(
self._backend.library_state,
input,
@ -188,4 +188,3 @@ class BatchNormalization(Module):
'save_std',
])
return super(BatchNormalization, self).clearState()
@ -3,6 +3,7 @@ import torch
from .Module import Module
from .utils import clear


class Bilinear(Module):

def _assertInput(self, input):
@ -23,7 +24,6 @@ class Bilinear(Module):
if gradOutput.size(1) != self.weight.size(0):
raise RuntimeError('number of columns in gradOutput does not match layer\'s output size')


def __init__(self, inputSize1, inputSize2, outputSize, bias=True):
# set up model:
super(Bilinear, self).__init__()
@ -53,7 +53,6 @@ class Bilinear(Module):
self.bias.uniform_(-stdv, stdv)
return self


def updateOutput(self, input):
self._assertInput(input)

@ -74,7 +73,6 @@ class Bilinear(Module):

return self.output


def updateGradInput(self, input, gradOutput):
if self.gradInput is None:
return
@ -111,8 +109,6 @@ class Bilinear(Module):

return self.gradInput


def accGradParameters(self, input, gradOutput, scale=1):
self._assertInputGradOutput(input, gradOutput)

@ -129,7 +125,6 @@ class Bilinear(Module):
if self.bias is not None:
self.gradBias.add_(scale, gradOutput.sum(0))


def __repr__(self):
return str(type(self)) + \
'({}x{} -> {}) {}'.format(
@ -140,4 +135,3 @@ class Bilinear(Module):
def clearState(self):
clear(self, 'buff1', 'buff2')
return super(Bilinear, self).clearState()
@ -1,13 +1,14 @@
import torch
from .Module import Module


class CAddTable(Module):

def __init__(self, inplace=False):
super(CAddTable, self).__init__()
self.inplace = inplace
self.gradInput = []


def updateOutput(self, input):
if self.inplace:
self.output.set_(input[0])
@ -19,7 +20,6 @@ class CAddTable(Module):

return self.output


def updateGradInput(self, input, gradOutput):
for i in range(len(input)):
if i >= len(self.gradInput):
@ -34,4 +34,3 @@ class CAddTable(Module):
del self.gradInput[len(input):]

return self.gradInput

@ -1,7 +1,9 @@
import torch
from .Module import Module


class CDivTable(Module):

def __init__(self, ):
super(CDivTable, self).__init__()
self.gradInput = []
@ -20,4 +22,3 @@ class CDivTable(Module):
del self.gradInput[len(input):]

return self.gradInput
@ -4,6 +4,7 @@ import torch
from .Module import Module
from .utils import clear, contiguousView


class CMul(Module):

def __init__(self, *args):
@ -37,7 +38,6 @@ class CMul(Module):

self.weight.uniform_(-stdv, stdv)


def updateOutput(self, input):
# lazy-initialize
if self._output is None:
@ -61,7 +61,6 @@ class CMul(Module):

return self.output


def updateGradInput(self, input, gradOutput):
if self.gradInput is None:
return
@ -85,7 +84,6 @@ class CMul(Module):

return self.gradInput


def accGradParameters(self, input, gradOutput, scale=1):
if self._input is None:
self._input = input.new()

@ -2,6 +2,7 @@ import torch
from .Module import Module
from .utils import clear


class CMulTable(Module):

def __init__(self, ):

@ -1,6 +1,7 @@
import torch
from .Module import Module


class CSubTable(Module):

def __init__(self, ):
@ -22,4 +23,3 @@ class CSubTable(Module):

self.gradInput = self.gradInput[:2]
return self.gradInput

@ -1,6 +1,8 @@
import torch
from .HardTanh import HardTanh


class Clamp(HardTanh):

def __init__(self, min_value, max_value):
super(Clamp, self,).__init__(min_value, max_value)
@ -1,7 +1,9 @@
import torch
from .Criterion import Criterion


class ClassNLLCriterion(Criterion):

def __init__(self, weights=None, sizeAverage=True):
super(ClassNLLCriterion, self).__init__()
self.sizeAverage = sizeAverage
@ -27,7 +29,6 @@ class ClassNLLCriterion(Criterion):
self.output = self.output_tensor[0]
return self.output


def updateGradInput(self, input, target):
self.gradInput.resize_as_(input).zero_()
target = target.long()

@ -12,6 +12,7 @@ from .MSECriterion import MSECriterion
Reference: http.//arxiv.org/abs/1506.08230
"""


class ClassSimplexCriterion(MSECriterion):

def __init__(self, nClasses):
@ -102,4 +103,3 @@ class ClassSimplexCriterion(MSECriterion):
prod = self.getPredictions(input)
_, maxs = prod.max(prod.ndimension() - 1)
return maxs.view(-1)
@ -1,6 +1,7 @@
import torch
from .Container import Container


class Concat(Container):

def __init__(self, dimension):
@ -34,9 +35,11 @@ class Concat(Container):
offset = 0
for i, module in enumerate(self.modules):
currentOutput = module.output
currentGradInput = module.updateGradInput(input, gradOutput.narrow(self.dimension, offset, currentOutput.size(self.dimension)))
currentGradInput = module.updateGradInput(input, gradOutput.narrow(
self.dimension, offset, currentOutput.size(self.dimension)))

if currentGradInput: # if the module does not produce a gradInput (for example first layer),: ignore it and move on.
# if the module does not produce a gradInput (for example first layer),: ignore it and move on.
if currentGradInput:
if i == 0:
self.gradInput.copy_(currentGradInput)
else:
@ -46,7 +49,6 @@ class Concat(Container):

return self.gradInput


def accGradParameters(self, input, gradOutput, scale=1):
offset = 0
for i, module in enumerate(self.modules):
@ -62,8 +64,10 @@ class Concat(Container):
offset = 0
for i, module in enumerate(self.modules):
currentOutput = module.output
currentGradInput = module.backward(input, gradOutput.narrow(self.dimension, offset, currentOutput.size(self.dimension)), scale)
if currentGradInput is not None: # if the module.es not produce a gradInput (for example first layer),: ignore it and move on.
currentGradInput = module.backward(input, gradOutput.narrow(
self.dimension, offset, currentOutput.size(self.dimension)), scale)
# if the module.es not produce a gradInput (for example first layer),: ignore it and move on.
if currentGradInput is not None:
if i == 0:
self.gradInput.copy_(currentGradInput)
else:
@ -1,6 +1,7 @@
import torch
from .Container import Container


class ConcatTable(Container):

def __init__(self, ):
@ -44,6 +45,7 @@ class ConcatTable(Container):

if i == 0:
self.gradInput = self.gradInput if wasTable else []

def fn(l, i, v):
if i >= len(l):
assert len(l) == i
@ -99,13 +101,12 @@ class ConcatTable(Container):
res = res + ' {' + line + tab + 'input'
for i in range(len(self.modules)):
if i == len(self.modules) - 1:
res = res + line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + extlast)
res = res + line + tab + next + '(' + str(i) + '): ' + \
str(self.modules[i]).replace(line, line + tab + extlast)
else:
res = res + line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)

res = res + line + tab + next + '(' + str(i) + '): ' + \
str(self.modules[i]).replace(line, line + tab + ext)

res = res + line + tab + last + 'output'
res = res + line + '}'
return res


@ -4,6 +4,7 @@ from .utils import clear
from functools import wraps
import sys


class Container(Module):

def __init__(self, *args):
@ -63,4 +64,3 @@ class Container(Module):
for module in self.modules:
module.clearState()
return self
@ -1,6 +1,7 @@
import torch
from .Module import Module


class Contiguous(Module):

def updateOutput(self, input):
@ -11,7 +12,6 @@ class Contiguous(Module):

return self.output


def updateGradInput(self, input, gradOutput):
if not gradOutput.is_contiguous():
self.gradInput.resize_as_(gradOutput).copy_(gradOutput)
@ -19,4 +19,3 @@ class Contiguous(Module):
self.gradInput.set_(gradOutput)

return self.gradInput

@ -1,6 +1,7 @@
import torch
from .Module import Module


class Copy(Module):

def __init__(self, intype, outtype, dontCast=False):
@ -13,15 +14,12 @@ class Copy(Module):
self.output.resize_(input.size()).copy_(input)
return self.output


def updateGradInput(self, input, gradOutput):
self.gradInput.resize_(gradOutput.size()).copy_(gradOutput)
return self.gradInput


def type(self, type=None, tensorCache=None):
if type and self.dontCast:
return self

return super(Copy, self).type(self, type, tensorCache)
@ -3,6 +3,7 @@ import torch
from .Module import Module
from .utils import clear


class Cosine(Module):

def __init__(self, inputSize, outputSize):
@ -53,7 +54,6 @@ class Cosine(Module):
self.output.div_(self._inputNorm.expand_as(self.output))
return self.output


def updateGradInput(self, input, gradOutput):
assert input.dim() == 2

@ -141,7 +141,6 @@ class Cosine(Module):

return super(Cosine, self).type(type, tensorCache)


def clearState(self):
clear(self, [
'_input',
@ -152,4 +151,3 @@ class Cosine(Module):
'_weightNorm',
])
return super(Cosine, self).clearState()

@ -2,6 +2,7 @@ import torch
from .Module import Module
from .utils import clear


class CosineDistance(Module):

def __init__(self, ):
@ -32,7 +33,6 @@ class CosineDistance(Module):

return input1, input2


def updateOutput(self, input):
input1, input2 = input[0], input[1]
input1, input2 = self._makeContiguous(input1, input2)
@ -65,7 +65,6 @@ class CosineDistance(Module):

return self.output


def updateGradInput(self, input, gradOutput):
v1 = input[0]
v2 = input[1]
@ -97,7 +96,6 @@ class CosineDistance(Module):

return self.gradInput


def clearState(self):
clear(self, [
'buffer',
@ -108,4 +106,3 @@ class CosineDistance(Module):
'ones',
])
return super(CosineDistance, self).clearState()
@ -1,6 +1,7 @@
import torch
from .Criterion import Criterion


class CosineEmbeddingCriterion(Criterion):

def __init__(self, margin=0, sizeAverage=True):
@ -16,7 +17,6 @@ class CosineEmbeddingCriterion(Criterion):
self._outputs = None
self._idx = None


def updateOutput(self, input, y):
input1, input2 = input[0], input[1]

@ -68,7 +68,6 @@ class CosineEmbeddingCriterion(Criterion):

return self.output


def updateGradInput(self, input, y):
v1 = input[0]
v2 = input[1]
@ -116,4 +115,3 @@ class CosineEmbeddingCriterion(Criterion):
self._idx = torch.ByteTensor()

return self

@ -3,6 +3,7 @@ from .Module import Module
from .utils import recursiveType
import torch._thnn


class Criterion(object):

def __init__(self):

@ -1,6 +1,7 @@
import torch
from .Module import Module


class CriterionTable(Module):

def __init__(self, criterion):
@ -15,4 +16,3 @@ class CriterionTable(Module):
def updateGradInput(self, input, grad_output):
self.criterion.updateGradInput(*input)
return self.gradInput

@ -3,6 +3,7 @@ from .Criterion import Criterion
from .LogSoftMax import LogSoftMax
from .ClassNLLCriterion import ClassNLLCriterion


class CrossEntropyCriterion(Criterion):

def __init__(self, weights=None):
@ -26,4 +27,3 @@ class CrossEntropyCriterion(Criterion):
self.lsm.updateGradInput(input, self.nll.gradInput)
self.gradInput = self.lsm.gradInput.view(size)
return self.gradInput
@ -14,6 +14,7 @@ import math
import torch
from .Concat import Concat


class DepthConcat(Concat):

def windowNarrow(self, output, currentOutput, offset):

@ -1,6 +1,7 @@
import torch
from .Criterion import Criterion


class DistKLDivCriterion(Criterion):

def __init__(self, sizeAverage=True):
@ -32,4 +33,3 @@ class DistKLDivCriterion(Criterion):
self.sizeAverage
)
return self.gradInput

@ -2,6 +2,7 @@ import torch
from .Module import Module
from .utils import clear


class DotProduct(Module):

def __init__(self):
@ -46,4 +47,3 @@ class DotProduct(Module):
def clearState(self):
clear(self, 'buffer')
return super(DotProduct, self).clearState()

@ -2,6 +2,7 @@ import torch
from .Module import Module
from .utils import clear


class Dropout(Module):

def __init__(self, p=0.5, inplace=False):
@ -45,4 +46,3 @@ class Dropout(Module):
def clearState(self):
clear(self, 'noise')
return super(Dropout, self).clearState()

@ -2,6 +2,7 @@
import torch
from .Module import Module


class ELU(Module):
"""
Djork-Arné Clevert, Thomas Unterthiner, Sepp Hochreiter

@ -39,4 +40,3 @@ class ELU(Module):

def __repr__(self):
return '{}(alpha={:.3f})'.format(str(type(self)), self.alpha)
@ -3,6 +3,7 @@ import torch
from .Module import Module
from .utils import clear


class Euclidean(Module):

def __init__(self, inputSize, outputSize):
@ -126,13 +127,11 @@ class Euclidean(Module):
else:
torch.mul(self._repeat, self._expand3, out=self._repeat2)


torch.sum(self._repeat2, 2, out=self.gradInput)
self.gradInput.resize_as_(input)

return self.gradInput


def accGradParameters(self, input, gradOutput, scale=1):
inputSize, outputSize = self.weight.size(0), self.weight.size(1)

@ -156,7 +155,6 @@ class Euclidean(Module):

return super(Euclidean, self).type(type, tensorCache)


def clearState(self):
clear(self, [
'_input',
@ -172,4 +170,3 @@ class Euclidean(Module):
'_repeat2',
])
return super(Euclidean, self).clearState()

@ -1,6 +1,7 @@
import torch
from .Module import Module


class Exp(Module):

def updateOutput(self, input):
@ -8,4 +9,3 @@ class Exp(Module):

def updateGradInput(self, input, gradOutput):
return torch.mul(self.output, gradOutput, out=self.gradInput)
@ -1,6 +1,7 @@
import torch
from .Module import Module


class FlattenTable(Module):

def __init__(self):
@ -59,7 +60,6 @@ class FlattenTable(Module):

return self.output


def updateGradInput(self, input, gradOutput):
assert isinstance(input, list)
assert isinstance(gradOutput, list)
@ -73,7 +73,6 @@ class FlattenTable(Module):

return self.gradInput


def type(self, type=None, tensorCache=None):
if not type:
return self._type
@ -81,8 +80,6 @@ class FlattenTable(Module):
# conversions. Just force the tables to be empty.
self.clearState()


def clearState(self):
self.input_map = []
return super(FlattenTable, self).clearState()

@ -1,6 +1,7 @@
import torch
from .Module import Module


class GradientReversal(Module):

def __init__(self, lambd=1):
@ -19,4 +20,3 @@ class GradientReversal(Module):
self.gradInput.copy_(gradOutput)
self.gradInput.mul_(-self.lambd)
return self.gradInput

@ -1,6 +1,7 @@
import torch
from .Module import Module


class HardShrink(Module):

def __init__(self, lambd=0.5):
@ -26,4 +27,3 @@ class HardShrink(Module):
self.lambd
)
return self.gradInput

@ -1,6 +1,7 @@
import torch
from .Module import Module


class HardTanh(Module):

def __init__(self, min_value=-1, max_value=1, inplace=False):
@ -32,4 +33,3 @@ class HardTanh(Module):
self.inplace
)
return self.gradInput
@ -1,6 +1,7 @@
import torch
from .Criterion import Criterion


class HingeEmbeddingCriterion(Criterion):

def __init__(self, margin=1, sizeAverage=True):
@ -34,4 +35,3 @@ class HingeEmbeddingCriterion(Criterion):
self.gradInput.mul_(1. / input.nelement())

return self.gradInput

@ -2,6 +2,7 @@ import torch
from .Module import Module
from .utils import clear


class Identity(Module):

def updateOutput(self, input):
@ -18,4 +19,3 @@ class Identity(Module):
'gradInput',
])
return super(Identity, self).clearState()

@ -1,6 +1,7 @@
import torch
from .Module import Module


class Index(Module):

def __init__(self, dimension):
@ -22,4 +23,3 @@ class Index(Module):
gradInput.resize_as_(t).zero_()
gradInput.index_add_(self.dimension, index, gradOutput)
return self.gradInput

@ -2,6 +2,7 @@ import torch
from .Criterion import Criterion
from .utils import clear


class L1Cost(Criterion):

def __init__(self):
@ -33,4 +34,3 @@ class L1Cost(Criterion):
def clearState(self):
clear(self, 'output_tensor')
return super(L1Cost, self).clearState()
Some files were not shown because too many files have changed in this diff.