Add random-related Tensor methods

This commit is contained in:
Adam Paszke
2016-06-18 21:36:10 +02:00
parent 857c32bc21
commit 4f66ea42af
10 changed files with 325 additions and 1 deletion

View File

@ -63,6 +63,7 @@ if platform.system() == 'Darwin':
sources = [
"torch/csrc/Module.cpp",
"torch/csrc/Generator.cpp",
"torch/csrc/Tensor.cpp",
"torch/csrc/Storage.cpp",
"torch/csrc/utils.cpp",

View File

@ -118,7 +118,10 @@ TYPE_TRANSFORMS = {
'THTensor': 'THPTensor*',
'THByteTensor': 'THPByteTensor*',
'THLongTensor': 'THPLongTensor*',
'THFloatTensor': 'THPFloatTensor*',
'THDoubleTensor': 'THPDoubleTensor*',
'THLongStorage': 'THPLongStorage*',
'THGenerator': 'THPGenerator*',
}
# Code that will be used to generate each of argument options
@ -133,19 +136,27 @@ FORMAT_STR_MAP = {
'THPTensor*': 'O!',
'THPLongTensor*': 'O!',
'THPByteTensor*': 'O!',
'THPFloatTensor*': 'O!',
'THPDoubleTensor*': 'O!',
'THPLongStorage*': 'O!',
'THPGenerator*': 'O!',
'real': 'O&',
'long': 'l',
'double': 'd',
'bool': 'p',
}
# If O! is specified for any type in FORMAT_STR_MAP you should specify it's
# type here
# TODO: change to THP*Class or use a parser function
ARGPARSE_TYPE_CHECK = {
'THPTensor*': 'THPTensorType',
'THPLongTensor*': 'THPLongTensorType',
'THPByteTensor*': 'THPByteTensorType',
'THPFloatTensor*': 'THPFloatTensorType',
'THPDoubleTensor*': 'THPDoubleTensorType',
'THPLongStorage*': 'THPLongStorageType',
'THPGenerator*': 'THPGeneratorType',
'real': 'THPUtils_(parseReal)',
}
@ -220,7 +231,16 @@ ADDITIONAL_ARGS = {
}
# Types for which it's necessary to extract cdata
CDATA_TYPES = set(('THPTensor*', 'THPByteTensor*', 'THPLongTensor*', 'THPStorage*', 'THPLongStorage*'))
CDATA_TYPES = set((
'THPTensor*',
'THPByteTensor*',
'THPLongTensor*',
'THPFloatTensor*',
'THPDoubleTensor*',
'THPStorage*',
'THPLongStorage*',
'THPGenerator*',
))
def generate_function(lines, stateless):
assert len(lines) > 1

View File

@ -137,3 +137,39 @@ class RealTensor(RealTensorBase):
result.set(src.storage(), src.storageOffset(),
src_size, src_stride)
return result
def repeatTensor(self, src, *args):
    # Repeat a tensor along each dimension a given number of times
    # (port of Lua torch's `repeatTensor`).
    #
    # Supported call forms:
    #   t.repeatTensor(n0, n1, ...)       -> new tensor, repeats of `self`
    #   t.repeatTensor(long_storage)      -> new tensor, counts from storage
    #   t.repeatTensor(src, n0, n1, ...)  -> repeats `src`, written into `self`
    if not isTensor(src):
        # First argument is a repeat count (or a storage of counts), not a
        # tensor: repeat `self` into a freshly allocated result.
        if isStorage(src) and len(args) == 0:
            repeats = src.tolist()
        else:
            repeats = [src] + list(args)
        src = self
        result = self.new()
    else:
        # Explicit source tensor: `self` receives the repeated data.
        repeats = list(args)
        result = self
    if not src.isContiguous():
        # The unfold/copy scheme below assumes contiguous source memory;
        # clone() yields a contiguous copy.
        src = src.clone()
    if len(repeats) < src.dim():
        raise ValueError('Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor')
    # View the source with leading singleton dimensions prepended so it has
    # exactly len(repeats) dimensions.
    xtensor = src.new().set(src)
    xsize = xtensor.size().tolist()
    for i in range(len(repeats)-src.dim()):
        xsize = [1] + xsize
    # Element-wise product of padded source size and repeat counts gives the
    # final result size.
    size = LongStorage([a * b for a, b in zip(xsize, repeats)])
    xtensor.resize(LongStorage(xsize))
    result.resize(size)
    # Unfold the result so that each window has xtensor's shape, then
    # broadcast-copy xtensor into every window.
    urtensor = result.new(result)
    for i in range(xtensor.dim()):
        urtensor = urtensor.unfold(i,xtensor.size(i),xtensor.size(i))
    # Pad xtensor with further singleton dims so expandAs can broadcast it
    # over the unfolded view.
    for i in range(urtensor.dim()-xtensor.dim()):
        xsize = [1] + xsize
    xtensor.resize(LongStorage(xsize))
    xxtensor = xtensor.expandAs(urtensor)
    urtensor.copy(xxtensor)
    return result

98
torch/csrc/Generator.cpp Normal file
View File

@ -0,0 +1,98 @@
#include <Python.h>
#include <structmember.h>
#include <stdbool.h>
#include <TH/TH.h>
#include "THP.h"
extern PyObject *THPGeneratorClass;
// tp_dealloc for torch.Generator: release the wrapped TH generator, then
// hand the Python wrapper back to its type's allocator.
static void THPGenerator_dealloc(THPGenerator* self)
{
  PyObject *obj = (PyObject *)self;
  THGenerator_free(self->cdata);
  Py_TYPE(obj)->tp_free(obj);
}
// tp_new for torch.Generator: accepts no positional or keyword arguments
// and wraps a freshly allocated THGenerator.
// Returns a new reference, or NULL with a Python error set on failure.
static PyObject * THPGenerator_pynew(PyTypeObject *type, PyObject *args, PyObject *kwargs)
{
  HANDLE_TH_ERRORS
  if ((args && PyTuple_Size(args) != 0) || kwargs) {
    // Fixed garbled message: was "torch.Generator doesn't constructor
    // doesn't accept any arguments".
    THPUtils_setError("torch.Generator constructor doesn't accept any arguments");
    return NULL;
  }
  // THPGeneratorPtr keeps the object alive across a potential TH error
  // thrown by THGenerator_new (HANDLE_TH_ERRORS converts it).
  THPGeneratorPtr self = (THPGenerator *)type->tp_alloc(type, 0);
  if (!self)
    // tp_alloc failed; it has already set a Python exception.
    return NULL;
  self->cdata = THGenerator_new();
  return (PyObject*)self.release();
  END_HANDLE_TH_ERRORS
}
// Python type object backing torch.C.Generator.  Slots are positional, so
// the order below must match PyTypeObject's declaration order exactly.
// NOTE(review): the tp_methods/tp_members slots say "will be assigned in
// init", but THPGenerator_init currently leaves those assignments commented
// out — confirm whether methods/members are intentionally absent.
PyTypeObject THPGeneratorType = {
  PyVarObject_HEAD_INIT(NULL, 0)
  "torch.C.Generator",                       /* tp_name */
  sizeof(THPGenerator),                      /* tp_basicsize */
  0,                                         /* tp_itemsize */
  (destructor)THPGenerator_dealloc,          /* tp_dealloc */
  0,                                         /* tp_print */
  0,                                         /* tp_getattr */
  0,                                         /* tp_setattr */
  0,                                         /* tp_reserved */
  0,                                         /* tp_repr */
  0,                                         /* tp_as_number */
  0,                                         /* tp_as_sequence */
  0,                                         /* tp_as_mapping */
  0,                                         /* tp_hash */
  0,                                         /* tp_call */
  0,                                         /* tp_str */
  0,                                         /* tp_getattro */
  0,                                         /* tp_setattro */
  0,                                         /* tp_as_buffer */
  Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,  /* tp_flags */
  NULL,                                      /* tp_doc */
  0,                                         /* tp_traverse */
  0,                                         /* tp_clear */
  0,                                         /* tp_richcompare */
  0,                                         /* tp_weaklistoffset */
  0,                                         /* tp_iter */
  0,                                         /* tp_iternext */
  0,   /* will be assigned in init */        /* tp_methods */
  0,   /* will be assigned in init */        /* tp_members */
  0,                                         /* tp_getset */
  0,                                         /* tp_base */
  0,                                         /* tp_dict */
  0,                                         /* tp_descr_get */
  0,                                         /* tp_descr_set */
  0,                                         /* tp_dictoffset */
  0,                                         /* tp_init */
  0,                                         /* tp_alloc */
  THPGenerator_pynew,                        /* tp_new */
};
// Returns true iff `obj` is exactly a torch.Generator instance
// (an exact type match; subclasses are not accepted).
bool THPGenerator_Check(PyObject *obj)
{
  PyTypeObject *tp = Py_TYPE(obj);
  return tp == &THPGeneratorType;
}
// Allocates a new torch.Generator instance by calling its Python type.
// Returns a new reference, or NULL with a Python error set on failure.
PyObject * THPGenerator_newObject()
{
  // THPObjectPtr releases the empty args tuple on every exit path.
  THPObjectPtr args = PyTuple_New(0);
  if (!args)
    // PyObject_Call requires a non-NULL args tuple; propagate the
    // allocation failure (exception already set by PyTuple_New).
    return NULL;
  return PyObject_Call((PyObject*)&THPGeneratorType, args, NULL);
}
//static struct PyMemberDef THPStorage_(members)[] = {
//{(char*)"_cdata", T_ULONGLONG, offsetof(THPGenerator, cdata), READONLY, NULL},
//{NULL}
//};
// Readies the Generator type and registers it on `module` as
// "Generator".  Returns true on success, false on failure (with a
// Python error set).
bool THPGenerator_init(PyObject *module)
{
  THPGeneratorClass = (PyObject*)&THPGeneratorType;
  if (PyType_Ready(&THPGeneratorType) < 0)
    return false;
  Py_INCREF(&THPGeneratorType);
  // PyModule_AddObject steals the reference only on success; on failure
  // we must drop the reference we just took and report the error.
  if (PyModule_AddObject(module, "Generator", (PyObject *)&THPGeneratorType) < 0) {
    Py_DECREF(&THPGeneratorType);
    return false;
  }
  return true;
}

10
torch/csrc/Generator.h Normal file
View File

@ -0,0 +1,10 @@
#ifndef THP_GENERATOR_H
#define THP_GENERATOR_H

// Python wrapper around a TH random number generator.
struct THPGenerator {
  PyObject_HEAD
  // Owned THGenerator; released in the type's tp_dealloc.
  THGenerator *cdata;
};

extern PyTypeObject THPGeneratorType;

// Registers torch.Generator on `module`; returns true on success.
bool THPGenerator_init(PyObject *module);
// Creates a new Generator instance (new reference, NULL on error).
PyObject *THPGenerator_newObject();
// True iff `obj` is exactly a Generator instance.
bool THPGenerator_Check(PyObject *obj);

#endif

View File

@ -31,6 +31,11 @@ PyObject *THPCharTensorClass = NULL;
PyObject *THPByteTensorClass = NULL;
PyObject *THPDefaultTensorClass = NULL;
PyObject *THPGeneratorClass = NULL;
// Used if no other generator is provided
// TODO: this won't be thread-safe anymore
THPGenerator *THPDefaultGenerator = NULL;
static bool THPModule_loadClasses(PyObject *self)
{
@ -223,6 +228,17 @@ IMPLEMENT_STATELESS(addcmul)
IMPLEMENT_STATELESS(addcdiv)
IMPLEMENT_STATELESS(mm)
IMPLEMENT_STATELESS(bmm)
// TODO: this doesn't implement options that return numbers!
IMPLEMENT_STATELESS(multinomial)
IMPLEMENT_STATELESS(uniform)
IMPLEMENT_STATELESS(normal)
IMPLEMENT_STATELESS(cauchy)
IMPLEMENT_STATELESS(logNormal)
IMPLEMENT_STATELESS(exponential)
IMPLEMENT_STATELESS(random)
IMPLEMENT_STATELESS(geometric)
IMPLEMENT_STATELESS(bernoulli)
IMPLEMENT_STATELESS(randperm)
// In nonzero, the first argument might be a LongTensor that will be used
// for indices output, so we should pick a function based on second
@ -349,6 +365,16 @@ static PyMethodDef TorchMethods[] = {
{"addcdiv", (PyCFunction)THPModule_addcdiv, METH_VARARGS, NULL},
{"mm", (PyCFunction)THPModule_mm, METH_VARARGS, NULL},
{"bmm", (PyCFunction)THPModule_bmm, METH_VARARGS, NULL},
{"multinomial", (PyCFunction)THPModule_multinomial, METH_VARARGS, NULL},
{"uniform", (PyCFunction)THPModule_uniform, METH_VARARGS, NULL},
{"normal", (PyCFunction)THPModule_normal, METH_VARARGS, NULL},
{"cauchy", (PyCFunction)THPModule_cauchy, METH_VARARGS, NULL},
{"logNormal", (PyCFunction)THPModule_logNormal, METH_VARARGS, NULL},
{"exponential", (PyCFunction)THPModule_exponential, METH_VARARGS, NULL},
{"random", (PyCFunction)THPModule_random, METH_VARARGS, NULL},
{"geometric", (PyCFunction)THPModule_geometric, METH_VARARGS, NULL},
{"bernoulli", (PyCFunction)THPModule_bernoulli, METH_VARARGS, NULL},
{"randperm", (PyCFunction)THPModule_randperm, METH_VARARGS, NULL},
{NULL, NULL, 0, NULL}
};
@ -394,6 +420,8 @@ PyMODINIT_FUNC PyInit_C()
ASSERT_TRUE(tensor_classes = PySet_New(NULL));
ASSERT_TRUE(PyObject_SetAttrString(module, "_tensorclasses", tensor_classes) == 0);
ASSERT_TRUE(THPGenerator_init(module));
ASSERT_TRUE(THPDoubleStorage_init(module));
ASSERT_TRUE(THPFloatStorage_init(module));
ASSERT_TRUE(THPLongStorage_init(module));
@ -410,6 +438,9 @@ PyMODINIT_FUNC PyInit_C()
ASSERT_TRUE(THPCharTensor_init(module));
ASSERT_TRUE(THPByteTensor_init(module));
THPDefaultGenerator = (THPGenerator*)THPGenerator_newObject();
ASSERT_TRUE(THPDefaultGenerator != nullptr);
updateErrorHandlers();
#if PY_MAJOR_VERSION == 2

View File

@ -12,8 +12,12 @@
#include "Exceptions.h"
#include "Generator.h"
#include "Storage.h"
#include "Tensor.h"
// TODO: this won't be thread-safe anymore
extern THPGenerator *THPDefaultGenerator;
// This requires defined Storage and Tensor types
#include "utils.h"

View File

@ -1809,6 +1809,101 @@ static PyObject * THPTensor_(map2)(THPTensor *self, PyObject *args)
- THTensor mat2
]]
[[
randperm
randperm -> self OPTIONAL_SELF
- self
- THGenerator generator OPTIONAL THPDefaultGenerator->cdata
- long n
]]
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
[[
multinomial
multinomial -> new THLongTensor
- THGenerator generator OPTIONAL THPDefaultGenerator->cdata
- self
- long n
- bool replacement OPTIONAL false
]]
[[
uniform
uniform -> self
- self
- THGenerator generator OPTIONAL THPDefaultGenerator->cdata
- real a OPTIONAL 0
- real b OPTIONAL 1
]]
[[
normal
normal -> self
- self
- THGenerator generator OPTIONAL THPDefaultGenerator->cdata
- real a OPTIONAL 0
- real b OPTIONAL 1
]]
[[
cauchy
cauchy -> self
- self
- THGenerator generator OPTIONAL THPDefaultGenerator->cdata
- real a OPTIONAL 0
- real b OPTIONAL 1
]]
[[
logNormal
logNormal -> self
- self
- THGenerator generator OPTIONAL THPDefaultGenerator->cdata
- real a OPTIONAL 1
- real b OPTIONAL 2
]]
[[
exponential
exponential -> self
- self
- THGenerator generator OPTIONAL THPDefaultGenerator->cdata
- real lambda OPTIONAL 1
]]
#endif
// TODO: can't handle sampling from [a, b]
[[
random
random -> self
- self
- THGenerator generator OPTIONAL THPDefaultGenerator->cdata
]]
[[
geometric
geometric -> self
- self
- THGenerator generator OPTIONAL THPDefaultGenerator->cdata
- double p
]]
[[
bernoulli
bernoulli -> self
- self
- THGenerator generator OPTIONAL THPDefaultGenerator->cdata
- double p OPTIONAL 0.5
bernoulli_FloatTensor -> self
- self
- THGenerator generator OPTIONAL THPDefaultGenerator->cdata
- THFloatTensor float_probabilities
bernoulli_DoubleTensor -> self
- self
- THGenerator generator OPTIONAL THPDefaultGenerator->cdata
- THDoubleTensor double_probabilities
]]
// Declared in TensorCopy.cpp
static PyObject * THPTensor_(copy)(THPTensor *self, PyObject *other);
@ -1874,7 +1969,17 @@ static PyMethodDef THPTensor_(methods)[] = {
{"atan2", (PyCFunction)THPTensor_(atan2), METH_VARARGS, NULL},
{"pow", (PyCFunction)THPTensor_(pow), METH_VARARGS, NULL},
{"lerp", (PyCFunction)THPTensor_(lerp), METH_VARARGS, NULL},
{"multinomial", (PyCFunction)THPTensor_(multinomial), METH_VARARGS, NULL},
{"uniform", (PyCFunction)THPTensor_(uniform), METH_VARARGS, NULL},
{"normal", (PyCFunction)THPTensor_(normal), METH_VARARGS, NULL},
{"cauchy", (PyCFunction)THPTensor_(cauchy), METH_VARARGS, NULL},
{"logNormal", (PyCFunction)THPTensor_(logNormal), METH_VARARGS, NULL},
{"exponential", (PyCFunction)THPTensor_(exponential), METH_VARARGS, NULL},
#endif
{"randperm", (PyCFunction)THPTensor_(randperm), METH_VARARGS, NULL},
{"random", (PyCFunction)THPTensor_(random), METH_VARARGS, NULL},
{"geometric", (PyCFunction)THPTensor_(geometric), METH_VARARGS, NULL},
{"bernoulli", (PyCFunction)THPTensor_(bernoulli), METH_VARARGS, NULL},
{"add", (PyCFunction)THPTensor_(add), METH_VARARGS, NULL},
{"csub", (PyCFunction)THPTensor_(csub), METH_VARARGS, NULL},
{"mul", (PyCFunction)THPTensor_(mul), METH_VARARGS, NULL},
@ -1992,7 +2097,17 @@ static PyMethodDef THPTensorStatelessMethods[] = {
{"atan2", (PyCFunction)THPTensor_stateless_(atan2), METH_VARARGS, NULL},
{"pow", (PyCFunction)THPTensor_stateless_(pow), METH_VARARGS, NULL},
{"lerp", (PyCFunction)THPTensor_stateless_(lerp), METH_VARARGS, NULL},
{"multinomial", (PyCFunction)THPTensor_stateless_(multinomial), METH_VARARGS, NULL},
{"uniform", (PyCFunction)THPTensor_stateless_(uniform), METH_VARARGS, NULL},
{"normal", (PyCFunction)THPTensor_stateless_(normal), METH_VARARGS, NULL},
{"cauchy", (PyCFunction)THPTensor_stateless_(cauchy), METH_VARARGS, NULL},
{"logNormal", (PyCFunction)THPTensor_stateless_(logNormal), METH_VARARGS, NULL},
{"exponential", (PyCFunction)THPTensor_stateless_(exponential), METH_VARARGS, NULL},
#endif
{"randperm", (PyCFunction)THPTensor_stateless_(randperm), METH_VARARGS, NULL},
{"random", (PyCFunction)THPTensor_stateless_(random), METH_VARARGS, NULL},
{"geometric", (PyCFunction)THPTensor_stateless_(geometric), METH_VARARGS, NULL},
{"bernoulli", (PyCFunction)THPTensor_stateless_(bernoulli), METH_VARARGS, NULL},
{"add", (PyCFunction)THPTensor_stateless_(add), METH_VARARGS, NULL},
{"csub", (PyCFunction)THPTensor_stateless_(csub), METH_VARARGS, NULL},
{"mul", (PyCFunction)THPTensor_stateless_(mul), METH_VARARGS, NULL},

View File

@ -5,6 +5,14 @@
#include "generic/utils.cpp"
#include <TH/THGenerateAllTypes.h>
// Smart-pointer release specialization for generator wrappers: dropping
// the pointer drops one Python reference on the wrapped object.
template<>
void THPPointer<THPGenerator>::free() {
  // Py_XDECREF is the canonical "decref if non-NULL" idiom, replacing the
  // hand-written `if (ptr) Py_DECREF(ptr);`.
  Py_XDECREF(ptr);
}

template class THPPointer<THPGenerator>;
bool THPUtils_checkLong(PyObject *index) {
return PyLong_Check(index) || PyInt_Check(index);
}

View File

@ -39,6 +39,7 @@ private:
#include <TH/THGenerateAllTypes.h>
typedef THPPointer<PyObject> THPObjectPtr;
typedef THPPointer<THPGenerator> THPGeneratorPtr;
#define THPUtils_assert(cond, ...) \
if (!(cond)) { THPUtils_setError(__VA_ARGS__); return NULL; }