mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/76637 The previous names `default_affine_fixed_qparams_observer` and `default_symmetric_fixed_qparams_observer` were uninformative, and users had to read the definition in order to understand what these observers are. The new naming convention reveals information about the range of the observers. The analogous changes were also made for `default_symmetric_fixed_qparams_fake_quant` and `default_affine_fixed_qparams_fake_quant`. Test Plan: ``` python test/test_quantization.py ``` Differential Revision: D36054169 Reviewed By: vkuzo Pulled By: dzdang fbshipit-source-id: 215f7786a4b7abda7327f17cc61735697ec5cca9 (cherry picked from commit 21a4e6eda4467c8adca7fd534a506a14e975f9cf)
33 lines
1015 B
Python
# flake8: noqa: F401
r"""Compatibility shim for ``torch.quantization.fake_quantize``.

This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/fake_quantize.py`, while adding an import statement
here.
"""

# Re-export everything from the new location so existing callers of
# ``torch.quantization.fake_quantize`` keep working unchanged.
from torch.ao.quantization.fake_quantize import (
    _is_per_channel,
    _is_per_tensor,
    _is_symmetric_quant,
    FakeQuantizeBase,
    FakeQuantize,
    FixedQParamsFakeQuantize,
    FusedMovingAvgObsFakeQuantize,
    default_fake_quant,
    default_weight_fake_quant,
    default_fixed_qparams_range_neg1to1_fake_quant,
    default_fixed_qparams_range_0to1_fake_quant,
    default_per_channel_weight_fake_quant,
    default_histogram_fake_quant,
    default_fused_act_fake_quant,
    default_fused_wt_fake_quant,
    default_fused_per_channel_wt_fake_quant,
    _is_fake_quant_script_module,
    disable_fake_quant,
    enable_fake_quant,
    disable_observer,
    enable_observer,
)