# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.

If you are adding a new entry/functionality, please add it to
`torch/ao/quantization/quantize.py`, while adding an import statement
here.
"""

# Re-export everything from the new AO location so legacy imports keep working.
from torch.ao.quantization.quantize import _convert
from torch.ao.quantization.quantize import _observer_forward_hook
from torch.ao.quantization.quantize import _propagate_qconfig_helper
from torch.ao.quantization.quantize import _remove_activation_post_process
from torch.ao.quantization.quantize import _remove_qconfig
from torch.ao.quantization.quantize import _add_observer_
from torch.ao.quantization.quantize import add_quant_dequant
from torch.ao.quantization.quantize import convert
from torch.ao.quantization.quantize import _get_observer_dict
from torch.ao.quantization.quantize import _get_unique_devices_
from torch.ao.quantization.quantize import is_activation_post_process
from torch.ao.quantization.quantize import prepare
from torch.ao.quantization.quantize import prepare_qat
from torch.ao.quantization.quantize import propagate_qconfig_
from torch.ao.quantization.quantize import quantize
from torch.ao.quantization.quantize import quantize_dynamic
from torch.ao.quantization.quantize import quantize_qat
from torch.ao.quantization.quantize import _register_activation_post_process_hook
from torch.ao.quantization.quantize import swap_module
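
Because every name above is simply re-exported from `torch.ao.quantization.quantize`, the legacy path `torch.quantization.quantize` and the AO path expose the same objects. A minimal sketch of how to check this, assuming a PyTorch build that still ships both module paths (variable names are illustrative):

    import importlib

    # Resolve both module paths; the legacy one is this compatibility shim.
    legacy_mod = importlib.import_module("torch.quantization.quantize")
    ao_mod = importlib.import_module("torch.ao.quantization.quantize")

    # The shim re-exports the AO implementation, so both paths expose the
    # very same function objects and legacy call sites keep working.
    assert legacy_mod.convert is ao_mod.convert
    assert legacy_mod.prepare is ao_mod.prepare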