mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-21 13:44:15 +08:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/12178 Fisher GAN calls processor_util.add_mlp, which injects the layer norm through the normalizer. This change allows an alternative implementation of LayerNorm to be used in the normalizer. Reviewed By: Wakeupbuddy Differential Revision: D9235528 fbshipit-source-id: 88c126c658102926613242ef84a481f6de1676ed
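A minimal sketch of what this change enables, assuming only the LayerNormalizer class defined in the file below (the epsilon value and the wiring through processor_util.add_mlp are illustrative, not taken from this diff): constructing the normalizer with use_layer_norm_op=False asks the layer model to use the alternative LayerNorm implementation instead of the LayerNorm operator.

    from caffe2.python.normalizer import LayerNormalizer

    # use_layer_norm_op=False selects the alternative LayerNorm implementation
    # in the layer model; epsilon here is an arbitrary illustrative value.
    layer_norm = LayerNormalizer(epsilon=1e-4, use_layer_norm_op=False)
    # The instance would then be handed to the MLP builder (e.g. via
    # processor_util.add_mlp), which injects layer norm through it.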
43 lines
1.2 KiB
Python
# @package normalizer
# Module caffe2.python.normalizer
from __future__ import absolute_import, division, print_function, unicode_literals


class Normalizer(object):
    """
    Adds normalization to train_net for the given parameter. The factor
    applied ahead of regularization is given at initialization.
    The param should be a BlobReference.
    """

    def __init__(self):
        pass

    def __call__(self, net, param):
        return self._run(net, param)

    def _run(self, net, param):
        raise NotImplementedError("Subclasses must implement _run")


class BatchNormalizer(Normalizer):
    def __init__(self, momentum):
        super(BatchNormalizer, self).__init__()
        self._momentum = float(momentum)

    def _run(self, layer_model, param):
        return layer_model.BatchNormalization(
            param, momentum=self._momentum
        )


class LayerNormalizer(Normalizer):
    def __init__(self, epsilon, use_layer_norm_op=True):
        super(LayerNormalizer, self).__init__()
        self._epsilon = float(epsilon)
        self._use_layer_norm_op = use_layer_norm_op

    def _run(self, layer_model, param):
        return layer_model.LayerNormalization(
            param, epsilon=self._epsilon, use_layer_norm_op=self._use_layer_norm_op
        )
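Usage sketch (illustrative only, not part of the original file): subclasses of Normalizer override _run, and the base class's __call__ forwards the net (or layer model) and the parameter blob to it. The NoOpNormalizer class and the layer_model / param names below are hypothetical placeholders.

    from caffe2.python.normalizer import BatchNormalizer, Normalizer

    # Hypothetical subclass, shown only to illustrate the protocol: __call__
    # dispatches to _run with the net/layer model and the parameter blob.
    class NoOpNormalizer(Normalizer):
        def _run(self, net, param):
            return param  # leave the parameter untouched

    # The built-in normalizers take their hyperparameters at construction time
    # and are applied by calling the instance:
    #   batch_norm = BatchNormalizer(momentum=0.9)
    #   batch_norm(layer_model, param)
    #   # -> layer_model.BatchNormalization(param, momentum=0.9)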