Separate cuda-ness from dtype. (#6470)

* Separate cuda-ness from dtype.

There is no longer torch.cuda.int64, etc.; there is only torch.int64, which corresponds to at::ScalarType.
At the Python arg parser level, the corresponding ATen type is selected from the combination of (ScalarType, Layout, Device).
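
As an illustration of the resulting behavior (a sketch, not part of this commit's diff; it assumes the dtype=/device= keyword arguments this line of work introduced):

    import torch

    # The dtype now carries only the scalar type (at::ScalarType::Long);
    # the device selects the backend. Previously these were fused into
    # per-backend dtypes such as torch.cuda.int64.
    t_cpu = torch.zeros(3, dtype=torch.int64)
    assert t_cpu.dtype == torch.int64 and not t_cpu.is_cuda

    if torch.cuda.is_available():
        t_gpu = torch.zeros(3, dtype=torch.int64, device='cuda')
        # Same dtype object, different backend.
        assert t_gpu.dtype == torch.int64 and t_gpu.is_cuda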

There is also currently unused code in here for supporting ScalarType in native_functions; this will be used for specifying aggregate types
on reduction functions.
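
For context, a hedged sketch of what that might enable, assuming a dtype= argument later lands on reductions (hypothetical at the time of this commit):

    import torch

    # Accumulate a half-precision tensor in float32: the reduction's
    # aggregate type is wider than the input's storage type.
    x = torch.ones(1024, dtype=torch.float16)
    s = x.sum(dtype=torch.float32)
    assert s.dtype == torch.float32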

* Fix test_autograd.

* Add defaults to randint_like.

* Track is_cuda in py tensor types.

* Fix test_sparse.

* Fix multiprocessing.

* Fix rnn.

* Fix test_nn.

* Fix flake8.
Author:    gchanan
Date:      2018-04-12 14:05:44 -04:00
Committed: GitHub
Parent:    8995ddda05
Commit:    749d51414a

30 changed files with 298 additions and 253 deletions


@@ -30,6 +30,7 @@ struct PyTensorType {
   at::Type* aten_type;
   THPDtype* dtype;
   THPLayout* layout;
+  bool is_cuda;
   char name[64];
 };
@@ -51,7 +52,7 @@ static PyObject* Tensor_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
   if (!tensor_type.aten_type) {
     throw unavailable_type(tensor_type);
   }
-  if (tensor_type.dtype->is_cuda) {
+  if (tensor_type.aten_type->is_cuda()) {
     torch::utils::cuda_lazy_init();
   }
   return THPVariable_Wrap(torch::utils::legacy_tensor_ctor(*tensor_type.aten_type, args, kwargs));
@@ -79,7 +80,7 @@ PyObject *Tensor_layout(PyTensorType* self) {
 }

 PyObject *Tensor_is_cuda(PyTensorType* self) {
-  if (self->dtype->is_cuda) {
+  if (self->is_cuda) {
     Py_RETURN_TRUE;
   } else {
     Py_RETURN_FALSE;
@@ -178,7 +179,8 @@ static void set_type(PyTensorType& type_obj, Backend backend, ScalarType scalarType) {
   auto baseType = globalContext().type_registry[static_cast<int>(backend)][static_cast<int>(scalarType)].get();
   type_obj.aten_type = baseType ? torch::autograd::VariableType::getType(*baseType) : nullptr;
   type_obj.layout = torch::getLayout(backend);
-  type_obj.dtype = torch::getDtype(scalarType, backend == kCUDA || backend == kSparseCUDA);
+  type_obj.dtype = torch::getDtype(scalarType);
+  type_obj.is_cuda = (backend == at::Backend::CUDA || backend == at::Backend::SparseCUDA);
 }

 static void set_name(PyTensorType& type_obj, const std::string& name) {