It should be safe to remove the old torch::make_unique functions.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/108866
Approved by: https://github.com/albanD
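The commit message refers to torch::make_unique which, assuming it was a thin pre-C++14 stand-in for the standard helper, can now be replaced by std::make_unique everywhere. A minimal sketch of that substitution, using a hypothetical Widget type purely for illustration:

#include <memory>

struct Widget {
  explicit Widget(int size) : size_(size) {}
  int size_;
};

std::unique_ptr<Widget> makeWidget(int size) {
  // Before the cleanup this would have been: return torch::make_unique<Widget>(size);
  return std::make_unique<Widget>(size);
}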
#include <c10/util/irange.h>
#include <torch/csrc/jit/ir/alias_analysis.h>
#include <torch/csrc/jit/ir/ir_views.h>
#include <torch/csrc/jit/passes/frozen_concat_linear.h>
#include <torch/csrc/jit/passes/frozen_conv_folding.h>
#include <torch/csrc/jit/passes/frozen_graph_optimizations.h>
#include <torch/csrc/jit/passes/frozen_linear_folding.h>
#include <torch/csrc/jit/passes/remove_dropout.h>
#include <torch/csrc/jit/runtime/graph_executor.h>

namespace torch {
namespace jit {

void OptimizeFrozenGraph(
    std::shared_ptr<Graph>& graph,
    bool optimize_numerics) {
  // Dropout is a no-op at inference time, so strip it from the frozen graph.
  removeDropout(graph);
  // Merge linear layers that share the same input into a single linear op.
  FrozenConcatLinear(graph);
  // Run the folding passes to a fixed point to capture chains such as
  // Conv -> Mul -> Add.
  if (optimize_numerics) {
    bool changed = false;
    do {
      changed = false;
      changed |= FoldFrozenConvBatchnorm(graph);
      changed |= FoldFrozenConvAddOrSub(graph);
      changed |= FoldFrozenConvMulOrDiv(graph);
      changed |= FoldFrozenLinearBatchnorm(graph);
    } while (changed);
  }
}

} // namespace jit
} // namespace torch
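A rough usage sketch for this pass (not part of the file): load a scripted module, freeze it, and run OptimizeFrozenGraph on its forward graph. The model path, the freeze_module call, and the main() wrapper are illustrative assumptions; the higher-level freeze APIs may already invoke these optimizations on their own.

#include <torch/csrc/jit/passes/freeze_module.h>
#include <torch/csrc/jit/passes/frozen_graph_optimizations.h>
#include <torch/script.h>

int main() {
  // Load a TorchScript module and put it in inference mode before freezing.
  torch::jit::Module module = torch::jit::load("model.pt"); // illustrative path
  module.eval();

  // Inline parameters and attributes as constants, then optimize the graph.
  torch::jit::Module frozen = torch::jit::freeze_module(module);
  auto graph = frozen.get_method("forward").graph();
  torch::jit::OptimizeFrozenGraph(graph, /*optimize_numerics=*/true);
  return 0;
}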