# Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-21 05:34:18 +08:00).
# File listing: 29 lines, 1.4 KiB, Python.
#
# Original commit summary:
#   Pull Request resolved: https://github.com/pytorch/pytorch/pull/64910
#   This bled through from the original location. Removing it is not just
#   refactoring, but also prevents potential recursive imports.
#   ghstack-source-id: 138112663
#   Test Plan: `buck test mode/dev //caffe2/test:quantization`
#   Reviewed By: vkuzo
#   Differential Revision: D30882924
#   fbshipit-source-id: 8652a334a5186c635761ea5e50f978d1f1078c12
# flake8: noqa: F401
|
|
r"""
|
|
This file is in the process of migration to `torch/ao/quantization`, and
|
|
is kept here for compatibility while the migration process is ongoing.
|
|
If you are adding a new entry/functionality, please, add it to the
|
|
`torch/ao/quantization/quantize.py`, while adding an import statement
|
|
here.
|
|
"""
|
|
|
|
from torch.ao.quantization.quantize import _convert
|
|
from torch.ao.quantization.quantize import _observer_forward_hook
|
|
from torch.ao.quantization.quantize import _propagate_qconfig_helper
|
|
from torch.ao.quantization.quantize import _remove_activation_post_process
|
|
from torch.ao.quantization.quantize import _remove_qconfig
|
|
from torch.ao.quantization.quantize import add_observer_
|
|
from torch.ao.quantization.quantize import add_quant_dequant
|
|
from torch.ao.quantization.quantize import convert
|
|
from torch.ao.quantization.quantize import get_observer_dict
|
|
from torch.ao.quantization.quantize import get_unique_devices_
|
|
from torch.ao.quantization.quantize import is_activation_post_process
|
|
from torch.ao.quantization.quantize import prepare
|
|
from torch.ao.quantization.quantize import prepare_qat
|
|
from torch.ao.quantization.quantize import propagate_qconfig_
|
|
from torch.ao.quantization.quantize import quantize
|
|
from torch.ao.quantization.quantize import quantize_dynamic
|
|
from torch.ao.quantization.quantize import quantize_qat
|
|
from torch.ao.quantization.quantize import register_activation_post_process_hook
|
|
from torch.ao.quantization.quantize import swap_module
|