Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
* PyObject* <--> at::Tensor no longer unwraps variables; instead we expect end users to always work with variable types, and we will only unwrap the variables when we optimize.
* Add torch::CPU, torch::CUDA and torch::getType.
* at::CPU -> torch::CPU in extensions.
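For illustration, a minimal sketch (not part of the file below) of the at::CPU -> torch::CPU migration named above; the make_ones helper and the torch::getType signature shown in the comments are assumptions, not code from this commit:

#include <torch/torch.h>

// Hypothetical helper showing the rename described in the commit message.
at::Tensor make_ones() {
  // Before this change an extension would typically write:
  //   return at::ones(at::CPU(at::kDouble), {3, 3});
  // Afterwards it goes through the torch:: helpers exported by torch/torch.h,
  // so the result stays a variable rather than being unwrapped:
  return at::ones(torch::CPU(at::kDouble), {3, 3});
  // Assuming a torch::getType(at::Backend, at::ScalarType) signature, an
  // equivalent form would be:
  //   return at::ones(torch::getType(at::Backend::CPU, at::kDouble), {3, 3});
}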
30 lines · 742 B · C++
#include <torch/torch.h>

// sigmoid(x) + sigmoid(y), computed element-wise on the inputs.
at::Tensor sigmoid_add(at::Tensor x, at::Tensor y) {
  return x.sigmoid() + y.sigmoid();
}

struct MatrixMultiplier {
  MatrixMultiplier(int A, int B) {
    // An A x B matrix of ones on the CPU double type, tracked by autograd.
    tensor_ = at::ones(torch::CPU(at::kDouble), {A, B});
    torch::set_requires_grad(tensor_, true);
  }

  at::Tensor forward(at::Tensor weights) {
    return tensor_.mm(weights);
  }

  at::Tensor get() const {
    return tensor_;
  }

 private:
  at::Tensor tensor_;
};

// Python bindings for the extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("sigmoid_add", &sigmoid_add, "sigmoid(x) + sigmoid(y)");
  py::class_<MatrixMultiplier>(m, "MatrixMultiplier")
      .def(py::init<int, int>())
      .def("forward", &MatrixMultiplier::forward)
      .def("get", &MatrixMultiplier::get);
}
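Because the PyObject* <--> at::Tensor conversion no longer unwraps variables, the tensors this extension receives from and returns to Python stay variables throughout; with requires_grad set on tensor_, the mm in MatrixMultiplier::forward is recorded by autograd without any special handling in the extension itself.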