Files
pytorch/test/cpp/api/enum.cpp
Will Feng bdd7dbfd4b [C++ API] RNN / GRU / LSTM layer refactoring (#34322)
Summary:
This PR refactors RNN / GRU / LSTM layers in C++ API to exactly match the implementation in Python API.

**BC-breaking changes:**
- Instead of returning `RNNOutput`, RNN / GRU forward method now returns `std::tuple<Tensor, Tensor>`, and LSTM forward method now returns `std::tuple<Tensor, std::tuple<Tensor, Tensor>>`, matching Python API.
- RNN / LSTM / GRU forward method now accepts the same inputs (input tensor and optionally hidden state), matching Python API.
- RNN / LSTM / GRU layers now have `forward_with_packed_input` method which accepts `PackedSequence` as input and optionally hidden state, matching the `forward(PackedSequence, ...)` variant in Python API.
- RNN / LSTM / GRU layers no longer have these fields: `w_ih` / `w_hh` / `b_ih` / `b_hh`. Instead, to access the weights and biases of the gates, users should do e.g. `rnn->named_parameters()["weight_ih_l0"]`, which mirrors the Python API `rnn.weight_ih_l0`.
- In `RNNOptions`
    - `tanh()` / `relu()` / `activation` are removed. Instead, `nonlinearity` is added which takes either `torch::kTanh` or `torch::kReLU`
    - `layers` -> `num_layers`
    - `with_bias` -> `bias`
- In `LSTMOptions`
    - `layers` -> `num_layers`
    - `with_bias` -> `bias`
- In `GRUOptions`
    - `layers` -> `num_layers`
    - `with_bias` -> `bias`

The majority of the changes in this PR focused on refactoring the implementations in `torch/csrc/api/src/nn/modules/rnn.cpp` to match the Python API. RNN tests are then changed to reflect the revised API design.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/34322

Differential Revision: D20458302

Pulled By: yf225

fbshipit-source-id: ffff2ae1ddb1c742c966956f6ad4d7fba03dc54d
2020-03-15 17:48:29 -07:00

88 lines
2.7 KiB
C++

#include <gtest/gtest.h>
#include <torch/torch.h>
#include <test/cpp/api/support.h>
// Checks that assigning `torch::k<name>` to the variant `v` (declared by the
// enclosing test) makes `torch::enumtype::get_enum_name(v)` yield the literal
// string "k<name>". Wrapped in a block so each expansion gets its own scope.
#define TORCH_ENUM_PRETTY_PRINT_TEST(name) \
{ \
  v = torch::k##name; \
  const std::string pretty_print_name = std::string("k") + #name; \
  ASSERT_EQ(torch::enumtype::get_enum_name(v), pretty_print_name); \
}
// Exhaustively verifies that every enum type in torch::enumtype pretty-prints
// as "k" + its type name (e.g. torch::kReLU -> "kReLU") when queried through
// torch::enumtype::get_enum_name on a variant holding it.
TEST(EnumTest, AllEnums) {
// A variant covering every enum alternative checked below; each
// TORCH_ENUM_PRETTY_PRINT_TEST expansion assigns into `v` and asserts on it.
c10::variant<
torch::enumtype::kLinear,
torch::enumtype::kConv1D,
torch::enumtype::kConv2D,
torch::enumtype::kConv3D,
torch::enumtype::kConvTranspose1D,
torch::enumtype::kConvTranspose2D,
torch::enumtype::kConvTranspose3D,
torch::enumtype::kSigmoid,
torch::enumtype::kTanh,
torch::enumtype::kReLU,
torch::enumtype::kLeakyReLU,
torch::enumtype::kFanIn,
torch::enumtype::kFanOut,
torch::enumtype::kConstant,
torch::enumtype::kReflect,
torch::enumtype::kReplicate,
torch::enumtype::kCircular,
torch::enumtype::kNearest,
torch::enumtype::kBilinear,
torch::enumtype::kBicubic,
torch::enumtype::kTrilinear,
torch::enumtype::kArea,
torch::enumtype::kSum,
torch::enumtype::kMean,
torch::enumtype::kMax,
torch::enumtype::kNone,
torch::enumtype::kBatchMean,
torch::enumtype::kZeros,
torch::enumtype::kBorder,
torch::enumtype::kReflection,
torch::enumtype::kRNN_TANH,
torch::enumtype::kRNN_RELU,
torch::enumtype::kLSTM,
torch::enumtype::kGRU
> v;
// NOTE(review): keep this list of checks in sync with the variant alternatives
// above — each alternative should have exactly one corresponding check.
TORCH_ENUM_PRETTY_PRINT_TEST(Linear)
TORCH_ENUM_PRETTY_PRINT_TEST(Conv1D)
TORCH_ENUM_PRETTY_PRINT_TEST(Conv2D)
TORCH_ENUM_PRETTY_PRINT_TEST(Conv3D)
TORCH_ENUM_PRETTY_PRINT_TEST(ConvTranspose1D)
TORCH_ENUM_PRETTY_PRINT_TEST(ConvTranspose2D)
TORCH_ENUM_PRETTY_PRINT_TEST(ConvTranspose3D)
TORCH_ENUM_PRETTY_PRINT_TEST(Sigmoid)
TORCH_ENUM_PRETTY_PRINT_TEST(Tanh)
TORCH_ENUM_PRETTY_PRINT_TEST(ReLU)
TORCH_ENUM_PRETTY_PRINT_TEST(LeakyReLU)
TORCH_ENUM_PRETTY_PRINT_TEST(FanIn)
TORCH_ENUM_PRETTY_PRINT_TEST(FanOut)
TORCH_ENUM_PRETTY_PRINT_TEST(Constant)
TORCH_ENUM_PRETTY_PRINT_TEST(Reflect)
TORCH_ENUM_PRETTY_PRINT_TEST(Replicate)
TORCH_ENUM_PRETTY_PRINT_TEST(Circular)
TORCH_ENUM_PRETTY_PRINT_TEST(Nearest)
TORCH_ENUM_PRETTY_PRINT_TEST(Bilinear)
TORCH_ENUM_PRETTY_PRINT_TEST(Bicubic)
TORCH_ENUM_PRETTY_PRINT_TEST(Trilinear)
TORCH_ENUM_PRETTY_PRINT_TEST(Area)
TORCH_ENUM_PRETTY_PRINT_TEST(Sum)
TORCH_ENUM_PRETTY_PRINT_TEST(Mean)
TORCH_ENUM_PRETTY_PRINT_TEST(Max)
TORCH_ENUM_PRETTY_PRINT_TEST(None)
TORCH_ENUM_PRETTY_PRINT_TEST(BatchMean)
TORCH_ENUM_PRETTY_PRINT_TEST(Zeros)
TORCH_ENUM_PRETTY_PRINT_TEST(Border)
TORCH_ENUM_PRETTY_PRINT_TEST(Reflection)
TORCH_ENUM_PRETTY_PRINT_TEST(RNN_TANH)
TORCH_ENUM_PRETTY_PRINT_TEST(RNN_RELU)
TORCH_ENUM_PRETTY_PRINT_TEST(LSTM)
TORCH_ENUM_PRETTY_PRINT_TEST(GRU)
}