Add include path to Doxygen preprocessing and add some documentation (#11313)

Summary:
1. Add documentation to Linear and improve documentation for RNNs
2. Fix preprocessing in C++ docs by adding correct include path
3. Make myself and ebetica codeowners of docs/cpp to improve development speed

ebetica ezyang soumith
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11313

Differential Revision: D9683615

Pulled By: goldsborough

fbshipit-source-id: 84ea32f9ea6b4060744aabbf5db368776a30f0b5
Peter Goldsborough
2018-09-06 12:11:22 -07:00
committed by Facebook Github Bot
parent f98bd53b01
commit ed8849b640
5 changed files with 93 additions and 14 deletions

View File

@@ -4,6 +4,7 @@
/aten/ @apaszke @soumith @colesbury @gchanan @zdevito @ezyang
/torch/ @apaszke @soumith @colesbury @gchanan @zdevito @ezyang
/docs/source @apaszke @soumith @colesbury @gchanan @zdevito @ezyang @ssnl @zou3519
/docs/cpp @goldsborough @ebetica @apaszke @soumith @colesbury @gchanan @zdevito @ezyang
/test @apaszke @soumith @colesbury @gchanan @zdevito @ezyang
/tools @apaszke @soumith @colesbury @gchanan @zdevito @ezyang
/README.md @apaszke @soumith @colesbury @gchanan @zdevito @ezyang

View File

@@ -1645,7 +1645,10 @@ SEARCH_INCLUDES = YES
# preprocessor.
# This tag requires that the tag SEARCH_INCLUDES is set to YES.
INCLUDE_PATH =
INCLUDE_PATH = ../../ \
../../torch/csrc/api/include/ \
../../aten/src/ \
../../build/aten/src/
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
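
For context on why these include paths matter: Doxygen's preprocessor only expands macros such as `TORCH_ARG` (used throughout the documented headers below) when it can resolve the headers that define them via `INCLUDE_PATH`. A minimal sketch of an argument macro in that spirit (an illustrative assumption, not the actual PyTorch definition):

#include <cstdint>

// Illustrative sketch only: an options-argument macro that generates a
// chainable setter, a getter, and a member. Doxygen can only render the
// generated members if its preprocessor finds the defining header through
// INCLUDE_PATH; otherwise the macro call is left unexpanded in the docs.
#define EXAMPLE_ARG(T, name)                          \
  auto name(const T& new_##name) -> decltype(*this) { \
    this->name##_ = new_##name;                       \
    return *this;                                     \
  }                                                   \
  const T& name() const noexcept {                    \
    return this->name##_;                             \
  }                                                   \
  T name##_

struct ExampleOptions {
  // Expands into `hidden(...)`, `hidden()`, and the member `hidden_ = 64`.
  EXAMPLE_ARG(int64_t, hidden) = 64;
};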

View File

@@ -17,6 +17,7 @@ cat original-doxygen-log.txt
# Filter out some warnings.
ignore_warning "warning: no uniquely matching class member found for"
ignore_warning "warning: source ../../build/aten/src/ is not a readable file"
ignore_warning "warning: source ../../build/aten/src/ATen/Tensor.h is not a readable file"
ignore_warning "warning: source ../../build/aten/src/ATen/Functions.h is not a readable file"
@@ -24,7 +25,8 @@ ignore_warning "warning: source ../../build/aten/src/ATen/Functions.h is not a r
warnings=$(grep 'warning:' doxygen-log.txt | wc -l)
if [[ $warnings != 0 ]]; then
cat original-doxygen-log.txt
echo "Filtered output"
cat doxygen-log.txt
rm -f doxygen-log.txt original-doxygen-log.txt
exit 1
fi

View File

@@ -12,24 +12,41 @@ namespace torch {
namespace nn {
struct LinearOptions {
LinearOptions(int64_t in, int64_t out);
/// The number of input features (columns of the input matrix).
TORCH_ARG(int64_t, in);
/// The number of output features to produce (columns of the output matrix).
TORCH_ARG(int64_t, out);
/// Whether to learn and add a bias after the linear transformation.
TORCH_ARG(bool, with_bias) = true;
};
/// Applies a linear transformation with optional bias.
class LinearImpl : public Cloneable<LinearImpl> {
public:
LinearImpl(int64_t in, int64_t out) : LinearImpl(LinearOptions(in, out)) {}
explicit LinearImpl(LinearOptions options);
void reset() override;
Tensor forward(Tensor);
/// Transforms the `input` tensor by multiplying with the `weight` and
/// optionally adding the `bias`, if `with_bias` is true in the options.
Tensor forward(Tensor input);
/// The options used to configure this module.
LinearOptions options;
/// The learned weight.
Tensor weight;
/// The learned bias. If `with_bias` is false in the `options`, this tensor is
/// undefined.
Tensor bias;
};
/// A `ModuleHolder` subclass for `LinearImpl`.
/// See the documentation for `LinearImpl` class to learn what methods it
/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
/// module storage semantics.
TORCH_MODULE(Linear);
} // namespace nn
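
A minimal usage sketch for the `Linear` module documented above; the umbrella header `torch/torch.h` and the tensor shapes are assumptions for illustration:

#include <torch/torch.h>  // assumed C++ frontend umbrella header
#include <iostream>

int main() {
  // 4 input features -> 3 output features, bias disabled via LinearOptions.
  torch::nn::Linear linear(torch::nn::LinearOptions(4, 3).with_bias(false));

  // A batch of 2 samples with 4 features each.
  torch::Tensor input = torch::randn({2, 4});

  // forward() multiplies by `weight`; `bias` would be added if with_bias were true.
  torch::Tensor output = linear->forward(input);
  std::cout << output.sizes() << std::endl;  // expected: [2, 3]
}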

View File

@@ -17,29 +17,47 @@
namespace torch {
namespace nn {
/// The output of a single invocation of an RNN module's `forward()` method.
struct RNNOutput {
/// The result of applying the specific RNN algorithm
/// to the input tensor and input state.
Tensor output;
/// The new, updated state that can be fed into the RNN
/// in the next forward step.
Tensor state;
};
namespace detail {
/// Common options for LSTM and GRU modules.
struct RNNOptionsBase {
RNNOptionsBase(int64_t input_size, int64_t hidden_size);
virtual ~RNNOptionsBase() = default;
/// The number of features of a single sample in the input sequence `x`.
TORCH_ARG(int64_t, input_size);
/// The number of features in the hidden state `h`.
TORCH_ARG(int64_t, hidden_size);
/// The number of recurrent layers (cells) to use.
TORCH_ARG(int64_t, layers) = 1;
/// Whether a bias term should be added to all linear operations.
TORCH_ARG(bool, with_bias) = true;
/// If non-zero, adds dropout with the given probability to the output of each
/// RNN layer, except the final layer.
TORCH_ARG(double, dropout) = 0.0;
/// Whether to make the RNN bidirectional.
TORCH_ARG(bool, bidirectional) = false;
/// If true, the input sequence should be provided as `(batch, sequence,
/// features)`. If false (default), the expected layout is `(sequence, batch,
/// features)`.
TORCH_ARG(bool, batch_first) = false;
};
/// Base class for all RNN implementations (intended for code sharing).
template <typename Derived>
class RNNImplBase : public torch::nn::Cloneable<Derived> {
public:
// These must line up with the CUDNN mode codes:
// https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnRNNMode_t
/// These must line up with the CUDNN mode codes:
/// https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnRNNMode_t
enum class CuDNNMode { RNN_RELU = 0, RNN_TANH = 1, LSTM = 2, GRU = 3 };
explicit RNNImplBase(
@@ -121,72 +139,110 @@ class RNNImplBase : public torch::nn::Cloneable<Derived> {
enum class RNNActivation { ReLU, Tanh };
/// Options for RNN modules.
struct RNNOptions {
RNNOptions(int64_t input_size, int64_t hidden_size);
/// Sets the activation after linear operations to `tanh`.
RNNOptions& tanh();
/// Sets the activation after linear operations to `relu`.
RNNOptions& relu();
/// The number of features of a single sample in the input sequence `x`.
TORCH_ARG(int64_t, input_size);
/// The number of features in the hidden state `h`.
TORCH_ARG(int64_t, hidden_size);
/// The number of recurrent layers (cells) to use.
TORCH_ARG(int64_t, layers) = 1;
/// Whether a bias term should be added to all linear operations.
TORCH_ARG(bool, with_bias) = true;
/// If non-zero, adds dropout with the given probability to the output of each
/// RNN layer, except the final layer.
TORCH_ARG(double, dropout) = 0.0;
/// Whether to make the RNN bidirectional.
TORCH_ARG(bool, bidirectional) = false;
/// If true, the input sequence should be provided as `(batch, sequence,
/// features)`. If false (default), the expected layout is `(sequence, batch,
/// features)`.
TORCH_ARG(bool, batch_first) = false;
/// The activation to use after linear operations.
TORCH_ARG(RNNActivation, activation) = RNNActivation::ReLU;
};
/// A multi-layer Elman RNN module with Tanh or ReLU activation.
/// See https://pytorch.org/docs/master/nn.html#torch.nn.RNN for more
/// documentation.
class RNNImpl : public detail::RNNImplBase<RNNImpl> {
public:
RNNImpl(int64_t input_size, int64_t hidden_size)
: RNNImpl(RNNOptions(input_size, hidden_size)) {}
explicit RNNImpl(RNNOptions options);
/// Applies the `RNN` module to an input sequence and input state.
/// The `input` should follow a `(sequence, batch, features)` layout unless
/// `batch_first` is true, in which case the layout should be `(batch,
/// sequence, features)`.
RNNOutput forward(Tensor input, Tensor state = {});
RNNOptions options;
};
/// A multi-layer Elman RNN module with Tanh or ReLU activation.
/// See https://pytorch.org/docs/master/nn.html#torch.nn.RNN for more
/// documentation.
/// A `ModuleHolder` subclass for `RNNImpl`.
/// See the documentation for `RNNImpl` class to learn what methods it provides,
/// or the documentation for `ModuleHolder` to learn about PyTorch's module
/// storage semantics.
TORCH_MODULE(RNN);
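
A usage sketch for the `RNN` module above; option values and tensor shapes are illustrative assumptions:

#include <torch/torch.h>  // assumed C++ frontend umbrella header

int main() {
  // 10 input features, 20 hidden features, tanh activation.
  torch::nn::RNN rnn(torch::nn::RNNOptions(10, 20).tanh());

  // Default layout is (sequence, batch, features): 5 steps, batch of 3, 10 features.
  torch::Tensor input = torch::randn({5, 3, 10});

  // With no explicit state argument, the module starts from its default initial state.
  torch::nn::RNNOutput out = rnn->forward(input);
  // out.output holds the per-step outputs; out.state is the updated hidden
  // state, which can be passed as the `state` argument of the next forward() call.
}
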
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LSTM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
using LSTMOptions = detail::RNNOptionsBase;
/// A multi-layer long-short-term-memory (LSTM) module.
/// See https://pytorch.org/docs/master/nn.html#torch.nn.LSTM for more
/// documentation.
class LSTMImpl : public detail::RNNImplBase<LSTMImpl> {
public:
LSTMImpl(int64_t input_size, int64_t hidden_size)
: LSTMImpl(LSTMOptions(input_size, hidden_size)) {}
explicit LSTMImpl(LSTMOptions options);
/// Applies the `LSTM` module to an input sequence and input state.
/// The `input` should follow a `(sequence, batch, features)` layout unless
/// `batch_first` is true, in which case the layout should be `(batch,
/// sequence, features)`.
RNNOutput forward(Tensor input, Tensor state = {});
};
/// A multi-layer long-short-term-memory (LSTM) module.
/// See https://pytorch.org/docs/master/nn.html#torch.nn.LSTM for more
/// documentation.
/// A `ModuleHolder` subclass for `LSTMImpl`.
/// See the documentation for `LSTMImpl` class to learn what methods it
/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
/// module storage semantics.
TORCH_MODULE(LSTM);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GRU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
using GRUOptions = detail::RNNOptionsBase;
/// A multi-layer gated recurrent unit (GRU) module.
/// See https://pytorch.org/docs/master/nn.html#torch.nn.GRU for more
/// documentation.
class GRUImpl : public detail::RNNImplBase<GRUImpl> {
public:
GRUImpl(int64_t input_size, int64_t hidden_size)
: GRUImpl(GRUOptions(input_size, hidden_size)) {}
explicit GRUImpl(GRUOptions options);
/// Applies the `GRU` module to an input sequence and input state.
/// The `input` should follow a `(sequence, batch, features)` layout unless
/// `batch_first` is true, in which case the layout should be `(batch,
/// sequence, features)`.
RNNOutput forward(Tensor input, Tensor state = {});
};
/// A multi-layer gated recurrent unit (GRU) module.
/// See https://pytorch.org/docs/master/nn.html#torch.nn.GRU for more
/// documentation.
/// A `ModuleHolder` subclass for `GRUImpl`.
/// See the documentation for `GRUImpl` class to learn what methods it provides,
/// or the documentation for `ModuleHolder` to learn about PyTorch's module
/// storage semantics.
TORCH_MODULE(GRU);
} // namespace nn
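
Because `LSTMOptions` and `GRUOptions` are both aliases for `detail::RNNOptionsBase`, configuration and the state hand-off look the same for both modules. A sketch with assumed shapes and option values, showing how `RNNOutput::state` feeds the next `forward()` call:

#include <torch/torch.h>  // assumed C++ frontend umbrella header

int main() {
  // Two stacked LSTM layers with dropout between them, 10 -> 20 features.
  torch::nn::LSTM lstm(torch::nn::LSTMOptions(10, 20).layers(2).dropout(0.5));

  // (sequence, batch, features) layout, as documented above.
  torch::Tensor chunk1 = torch::randn({5, 3, 10});
  torch::Tensor chunk2 = torch::randn({5, 3, 10});

  // Feed the state returned for the first chunk into the call for the second,
  // so the recurrence continues across chunks.
  torch::nn::RNNOutput out1 = lstm->forward(chunk1);
  torch::nn::RNNOutput out2 = lstm->forward(chunk2, out1.state);
}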