Remove template parameter from Tensor (#9939)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/9939

Pull Request resolved: https://github.com/facebookresearch/weakly-supervised-action-detection/pull/13

Pull Request resolved: https://github.com/pytorch/translate/pull/166

Pull Request resolved: https://github.com/pytorch/pytorch/pull/9125

Closes https://github.com/pytorch/pytorch/pull/9125

Use inheritance for polymorphism, and remove template parameter
This is to change the templating in call sites, the core implementations will change later

Before Caffe2 Tensor class was compile-time fixed to bind to a particular device/context. With this change, we're making it a runtime property (stored inside the tensor), but preserve the same semantics. For example, one has to specify device type in order to create a Tensor - there are no uninitialized tensors. More specifically the changes are:

1. We added an extra *DeviceType* argument to most of the Tensor constructors, e.g. `Tensor(DeviceType type)`,
2. The semantics of the constructor `Tensor(const Tensor<SrcContext>& src, ContextForCopy* context);` have changed. The second context is passed in so that we can call the templated Copy function. Previously it could be in a different context from the source and target; now, if it is provided, we enforce that the context has the same device type as `src`.
3. To preserve the 'get-or-construct' semantics of Blob, we added a specialized getter, Blob::GetMutableTensor, that verifies both that the Blob contains a Tensor and that the Tensor is of the correct type.
4. Specifically, the Tensor type is no longer default-constructible (since there are no tensors with an unknown device), so some of the code handling STL containers needs to change.

Note: Some changes are postponed just to keep this diff a bit smaller. Please see `TODO`s.

Reviewed By: ezyang, houseroad

Differential Revision: D9024330

fbshipit-source-id: e0b8295d2dc6ebe2963383ded5af799ad17164ba
This commit is contained in:
Jerry Zhang
2018-07-27 10:50:54 -07:00
committed by Facebook Github Bot
parent 94439d7df4
commit aebf3b47ae
365 changed files with 4187 additions and 3515 deletions

View File

@ -68,7 +68,7 @@ class GroupSpatialSoftmaxGradientOp final : public Operator<Context> {
protected:
int num_classes_;
StorageOrder order_;
Tensor<Context> sum_probs_;
Tensor sum_probs_{Context::GetDeviceType()};
};
} // namespace caffe2

View File

@ -45,7 +45,7 @@ class SelectSmoothL1LossOp final : public Operator<Context> {
float beta_; // Transition point from L1 to L2 loss
float scale_; // Scale the loss by scale_
int dim_; // dimension for 1 anchor prediction
Tensor<Context> buff_; // Buffer for element-wise differences
Tensor buff_{Context::GetDeviceType()}; // Buffer for element-wise differences
};
template <typename T, class Context>
@ -69,7 +69,7 @@ class SelectSmoothL1LossGradientOp final : public Operator<Context> {
float beta_; // Transition point from L1 to L2 loss
float scale_; // Scale the loss by scale_
int dim_; // dimension for 1 anchor prediction
Tensor<Context> buff_; // Buffer for element-wise differences
Tensor buff_{Context::GetDeviceType()}; // Buffer for element-wise differences
};
} // namespace caffe2

View File

@ -44,9 +44,9 @@ class SigmoidCrossEntropyLossOp final : public Operator<Context> {
protected:
float scale_;
int normalize_;
Tensor<Context> losses_;
Tensor<Context> counts_;
Tensor<Context> normalizer_;
Tensor losses_{Context::GetDeviceType()};
Tensor counts_{Context::GetDeviceType()};
Tensor normalizer_{Context::GetDeviceType()};
};
template <typename T, class Context>
@ -69,8 +69,8 @@ class SigmoidCrossEntropyLossGradientOp final : public Operator<Context> {
protected:
float scale_;
int normalize_;
Tensor<Context> counts_;
Tensor<Context> normalizer_;
Tensor counts_{Context::GetDeviceType()};
Tensor normalizer_{Context::GetDeviceType()};
};
} // namespace caffe2

View File

@ -47,8 +47,8 @@ class SigmoidFocalLossOp final : public Operator<Context> {
int num_classes_;
float gamma_;
float alpha_;
Tensor<Context> losses_;
Tensor<Context> counts_;
Tensor losses_{Context::GetDeviceType()};
Tensor counts_{Context::GetDeviceType()};
};
template <typename T, class Context>
@ -74,8 +74,8 @@ class SigmoidFocalLossGradientOp final : public Operator<Context> {
int num_classes_;
float gamma_;
float alpha_;
Tensor<Context> counts_;
Tensor<Context> weights_; // unignored weights
Tensor counts_{Context::GetDeviceType()};
Tensor weights_{Context::GetDeviceType()}; // unignored weights
};
} // namespace caffe2

View File

@ -44,7 +44,7 @@ class SmoothL1LossOp final : public Operator<Context> {
protected:
float beta_; // Transition point from L1 to L2 loss
float scale_; // Scale the loss by scale_
Tensor<Context> buff_; // Buffer for element-wise differences
Tensor buff_{Context::GetDeviceType()}; // Buffer for element-wise differences
};
template <typename T, class Context>
@ -67,7 +67,7 @@ class SmoothL1LossGradientOp final : public Operator<Context> {
protected:
float beta_; // Transition point from L1 to L2 loss
float scale_; // Scale the loss by scale_
Tensor<Context> buff_; // Buffer for element-wise differences
Tensor buff_{Context::GetDeviceType()}; // Buffer for element-wise differences
};
} // namespace caffe2

View File

@ -52,7 +52,7 @@ class SoftmaxFocalLossOp final : public Operator<Context> {
float alpha_;
int num_classes_;
StorageOrder order_;
Tensor<Context> losses_;
Tensor losses_{Context::GetDeviceType()};
};
template <typename T, class Context>
@ -83,7 +83,7 @@ class SoftmaxFocalLossGradientOp final : public Operator<Context> {
float alpha_;
int num_classes_;
StorageOrder order_;
Tensor<Context> buff_;
Tensor buff_{Context::GetDeviceType()};
};
} // namespace caffe2