Revert D18171156: Merge Tensor and Variable.

Test Plan: revert-hammer

Differential Revision: D18171156

Original commit changeset: 5b6a045beba3

fbshipit-source-id: f5581d902c2305018ea49f8473592be2a465560b
Author: Edward Yang
Date: 2019-11-06 10:54:26 -08:00
Committed by: Facebook Github Bot
Parent: 6a4b51aec1
Commit: 9c43b16df9
40 changed files with 293 additions and 214 deletions
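For context, the hunks below restore test assertions for the pre-merge split: torch:: factories return autograd-aware Variables, while at:: factories return plain Tensors whose autograd entry points throw "<name> is not implemented for Tensor". The following is a minimal sketch of that distinction, assuming the pre-merge libtorch API; it is illustrative only and not taken from this diff.

#include <torch/torch.h>

// Sketch of the Tensor/Variable split the restored tests rely on
// (illustrative; not part of the commit).
void pre_merge_distinction() {
  // torch:: factories return Variables, so autograd calls succeed.
  auto v = torch::tensor({5.0}, torch::requires_grad());
  auto vy = v * v;
  vy.backward();  // populates v.grad()

  // at:: factories return plain Tensors; the same calls throw c10::Error,
  // e.g. "backward is not implemented for Tensor".
  auto t = at::tensor({5.0});
  auto ty = t * t;
  try {
    ty.backward();
  } catch (const c10::Error&) {
    // expected before Tensor and Variable were merged
  }
}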

@@ -731,6 +731,9 @@ TEST(TensorTest, DataPtr) {
 TEST(TensorTest, Data) {
   const auto tensor = torch::rand({3, 3});
   ASSERT_TRUE(torch::equal(tensor, tensor.data()));
+
+  const auto tensor2 = at::rand({3, 3});
+  ASSERT_THROW(tensor2.data(), c10::Error);
 }
 
 TEST(TensorTest, BackwardAndGrad) {
@@ -738,6 +741,11 @@ TEST(TensorTest, BackwardAndGrad) {
   auto y = x * x;
   y.backward();
   ASSERT_EQ(x.grad().item<float>(), 10.0);
+
+  x = at::tensor({5});
+  y = x * x;
+  ASSERT_THROWS_WITH(y.backward(), "backward is not implemented for Tensor");
+  ASSERT_THROWS_WITH(x.grad(), "grad is not implemented for Tensor");
 }
 
 TEST(TensorTest, BackwardCreatesOnesGrad) {
@@ -759,6 +767,12 @@ TEST(TensorTest, IsLeaf) {
   auto y = x * x;
   ASSERT_TRUE(x.is_leaf());
   ASSERT_FALSE(y.is_leaf());
+
+  x = at::tensor({5});
+  y = x * x;
+  const auto message = "is_leaf is not implemented for Tensor";
+  ASSERT_THROWS_WITH(y.is_leaf(), message);
+  ASSERT_THROWS_WITH(x.is_leaf(), message);
 }
 
 TEST(TensorTest, OutputNr) {
@@ -766,6 +780,12 @@ TEST(TensorTest, OutputNr) {
   auto y = x * x;
   ASSERT_EQ(x.output_nr(), 0);
   ASSERT_EQ(y.output_nr(), 0);
+
+  x = at::tensor({5});
+  y = x * x;
+  const auto message = "output_nr is not implemented for Tensor";
+  ASSERT_THROWS_WITH(y.output_nr(), message);
+  ASSERT_THROWS_WITH(x.output_nr(), message);
 }
 
 TEST(TensorTest, Version) {
@@ -775,6 +795,14 @@ TEST(TensorTest, Version) {
   ASSERT_EQ(x._version(), 1);
   x.add_(1);
   ASSERT_EQ(x._version(), 2);
+
+  x = at::ones(3);
+  const auto message = "version is not implemented for Tensor";
+  ASSERT_THROWS_WITH(x._version(), message);
+  x.mul_(2);
+  ASSERT_THROWS_WITH(x._version(), message);
+  x.add_(1);
+  ASSERT_THROWS_WITH(x._version(), message);
 }
 
 TEST(TensorTest, Detach) {
@@ -784,6 +812,12 @@ TEST(TensorTest, Detach) {
   ASSERT_FALSE(y.is_leaf());
   ASSERT_TRUE(y_detached.is_leaf());
   ASSERT_FALSE(y_detached.requires_grad());
+
+  x = at::tensor({5}, at::TensorOptions().requires_grad(false));
+  y = x * x;
+  const auto message = "detach is not implemented for Tensor";
+  ASSERT_THROWS_WITH(x.detach(), message);
+  ASSERT_THROWS_WITH(y.detach(), message);
 }
 
 TEST(TensorTest, DetachInplace) {
@@ -794,6 +828,12 @@ TEST(TensorTest, DetachInplace) {
   ASSERT_FALSE(y.requires_grad());
   ASSERT_TRUE(y_detached.is_leaf());
   ASSERT_FALSE(y_detached.requires_grad());
+
+  x = at::tensor({5}, at::TensorOptions().requires_grad(false));
+  y = x * x;
+  const auto message = "detach_ is not implemented for Tensor";
+  ASSERT_THROWS_WITH(x.detach_(), message);
+  ASSERT_THROWS_WITH(y.detach_(), message);
 }
 
 TEST(TensorTest, SetData) {
@@ -805,6 +845,10 @@ TEST(TensorTest, SetData) {
   x.set_data(y);
   ASSERT_TRUE(torch::equal(x, y));
   ASSERT_EQ(x.data_ptr<float>(), y.data_ptr<float>());
+
+  x = at::tensor({5});
+  y = at::tensor({5});
+  ASSERT_THROWS_WITH(x.set_data(y), "set_data is not implemented for Tensor");
 }
 
 TEST(TensorTest, RequiresGradInplace) {
@@ -822,4 +866,11 @@ TEST(TensorTest, RequiresGradInplace) {
   const auto int_tensor = torch::tensor({5}, at::TensorOptions().dtype(torch::kInt));
   ASSERT_THROWS_WITH(int_tensor.requires_grad_(true),
     "Only Tensors of floating point dtype can require gradients");
+
+  x = at::tensor({5}, at::TensorOptions().requires_grad(false));
+  y = x * x;
+  ASSERT_THROWS_WITH(x.requires_grad_(false),
+    "requires_grad_ is not implemented for Tensor");
+  ASSERT_THROWS_WITH(y.requires_grad_(false),
+    "requires_grad_ is not implemented for Tensor");
 }