mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-22 14:15:01 +08:00
Summary:
/cc goldsborough
Working on #14582
The corresponding Python implementations are at: [pytorch/torch/nn/init.py (L261–L327)](https://github.com/pytorch/pytorch/blob/6302e4001a/torch/nn/init.py#L261-L327)
Here is my initial implementation of Kaiming Initialization. I have not been able to figure out how to successfully run tests locally so I haven't added any yet.
A couple questions:
- Are the enums defined in the right place? I copied their names from Python, but do you prefer different naming conventions for C++?
- To run tests locally do I use `python setup.py test`? Can I run just a subset of the tests somehow?
- Should I add my tests at [test/cpp/api/misc.cpp](https://github.com/pytorch/pytorch/blob/master/test/cpp/api/misc.cpp#L47-L54)?
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14718
Differential Revision: D14049159
Pulled By: goldsborough
fbshipit-source-id: 966ac5126875936e69b185b5041f16476ed4cf70
44 lines
1.0 KiB
C++
#include <gtest/gtest.h>
|
|
|
|
#include <torch/nn/init.h>
|
|
#include <torch/nn/modules/linear.h>
|
|
#include <torch/types.h>
|
|
#include <torch/utils.h>
|
|
|
|
#include <test/cpp/api/support.h>
|
|
|
|
// Verifies that torch::NoGradGuard disables gradient tracking for the
// duration of its scope: after a forward + backward pass performed under
// the guard, the model's weight must have no gradient defined.
TEST(NoGradTest, SetsGradModeCorrectly) {
  torch::manual_seed(0);
  torch::NoGradGuard guard;
  torch::nn::Linear model(5, 2);
  auto input = torch::randn({10, 5}, torch::requires_grad());
  auto output = model->forward(input);
  torch::Tensor loss = output.sum();

  loss.backward();
  // Because grad mode was off, no gradient was accumulated into the weight.
  ASSERT_FALSE(model->weight.grad().defined());
}
|
|
|
|
struct AutogradTest : torch::test::SeedingFixture {
|
|
AutogradTest() {
|
|
x = torch::randn({3, 3}, torch::requires_grad());
|
|
y = torch::randn({3, 3});
|
|
z = x * y;
|
|
}
|
|
torch::Tensor x, y, z;
|
|
};
|
|
|
|
// For z = x * y, d(z)/d(x) is y, so x.grad() must match y after backward().
TEST_F(AutogradTest, CanTakeDerivatives) {
  z.backward();
  const auto gradient_of_x = x.grad();
  ASSERT_TRUE(gradient_of_x.allclose(y));
}
|
|
|
|
// Reducing z to a zero-dim (scalar) tensor and calling backward() on it
// must still propagate the expected gradient back to x.
TEST_F(AutogradTest, CanTakeDerivativesOfZeroDimTensors) {
  auto scalar = z.sum();
  scalar.backward();
  ASSERT_TRUE(x.grad().allclose(y));
}
|
|
|
|
// Passing an explicit upstream gradient of 2 to backward() should scale
// the gradient flowing into x by the same factor.
TEST_F(AutogradTest, CanPassCustomGradientInputs) {
  const auto upstream = torch::ones({}) * 2;
  z.sum().backward(upstream);
  ASSERT_TRUE(x.grad().allclose(y * 2));
}