Concatting multiple linear layers with same input Tensor (different weight/bias) (#63198)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/63198

Linear layers that share the same input tensor can be concatenated into a single linear layer as long as their weights and biases are compatible.

Test Plan: Imported from OSS
Reviewed By: albanD
Differential Revision: D31240642
fbshipit-source-id: 1e78daa6b89822412ba2513d326ee0e072ceff1e
Committed by: Facebook GitHub Bot
Parent: 94845fc44e
Commit: 3bad54069b
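The pass rests on a simple identity: several linear layers applied to the same input are equivalent to one linear layer whose weight and bias are the row-wise concatenation of the originals, with the fused output split back apart afterwards. A minimal eager-mode sketch of that equivalence (the shapes and names here are illustrative, not taken from the pass itself):

import torch
import torch.nn.functional as F

x = torch.randn(8, 16)

# Two linear layers sharing the same input but with different
# (yet compatible: same in_features, dtype, device) weights and biases.
w1, b1 = torch.randn(32, 16), torch.randn(32)
w2, b2 = torch.randn(64, 16), torch.randn(64)
y1 = F.linear(x, w1, b1)
y2 = F.linear(x, w2, b2)

# One fused linear: concatenate along the output dimension (dim 0 of the
# weight matrices), run a single matmul, then split the result back apart.
y = F.linear(x, torch.cat([w1, w2]), torch.cat([b1, b2]))
z1, z2 = y.split([32, 64], dim=1)

assert torch.allclose(y1, z1) and torch.allclose(y2, z2)

One larger GEMM generally uses the hardware better than several small ones, which is the motivation for performing this rewrite on frozen graphs.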
torch/csrc/jit/python/init.cpp
@@ -24,6 +24,7 @@
 #include <torch/csrc/jit/passes/erase_number_types.h>
 #include <torch/csrc/jit/passes/fold_conv_bn.h>
 #include <torch/csrc/jit/passes/freeze_module.h>
+#include <torch/csrc/jit/passes/frozen_concat_linear.h>
 #include <torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h>
 #include <torch/csrc/jit/passes/frozen_conv_folding.h>
 #include <torch/csrc/jit/passes/frozen_graph_optimizations.h>
@@ -352,6 +353,7 @@ void initJITBindings(PyObject* module) {
           py::arg("preservedAttrs") = std::vector<std::string>(),
           py::arg("freezeInterfaces") = true,
           py::arg("preserveParameters") = false)
+      .def("_jit_pass_concat_frozen_linear", &FrozenConcatLinear)
       .def("_jit_pass_fold_frozen_conv_bn", &FoldFrozenConvBatchnorm)
       .def("_jit_pass_fold_frozen_conv_add_or_sub", &FoldFrozenConvAddOrSub)
       .def("_jit_pass_fold_frozen_conv_mul_or_div", &FoldFrozenConvMulOrDiv)
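The new binding makes the pass callable from Python as torch._C._jit_pass_concat_frozen_linear. A hedged usage sketch follows; the module is hypothetical, the pass is assumed to take the frozen module's graph (matching the C++ function bound above), and whether the two linears are actually merged depends on the pass's compatibility checks:

import torch

class TwoHeads(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Two heads reading the same input: candidates for concatenation.
        self.fc1 = torch.nn.Linear(16, 32)
        self.fc2 = torch.nn.Linear(16, 64)

    def forward(self, x):
        return self.fc1(x), self.fc2(x)

frozen = torch.jit.freeze(torch.jit.script(TwoHeads().eval()))
# Run the pass on the frozen graph; it rewrites the graph in place.
torch._C._jit_pass_concat_frozen_linear(frozen.graph)
print(frozen.graph)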