Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/64919

AO Team is migrating the existing torch.quantization into torch.ao.quantization. We are doing it one file at a time to make sure that the internal callsites are updated properly. This migrates the quantization utilities.

ghstack-source-id: 138303325

Test Plan: `buck test mode/dev //caffe2/test:quantization`

Reviewed By: jerryzh168

Differential Revision: D30899082

fbshipit-source-id: 85eb38c419e417147e71758b682cd095308dd0c9
30 lines · 833 B · Python
# flake8: noqa: F401
r"""
Utils shared by different modes of quantization (eager/graph)

This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please add it to the
`torch/ao/quantization/utils.py`, while adding an import statement
here.
"""

from torch.ao.quantization.utils import (
    activation_dtype,
    activation_is_int8_quantized,
    activation_is_statically_quantized,
    calculate_qmin_qmax,
    check_min_max_valid,
    get_combined_dict,
    get_qconfig_dtypes,
    get_qparam_dict,
    get_quant_type,
    get_swapped_custom_module_class,
    getattr_from_fqn,
    is_per_channel,
    is_per_tensor,
    weight_dtype,
    weight_is_quantized,
    weight_is_statically_quantized,
)
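Because the legacy module re-imports names rather than copying them, both import paths resolve to the same function objects. A minimal sanity check of that behavior (a sketch, assuming a PyTorch build that still ships this compatibility shim alongside torch.ao.quantization):

# Sketch: confirm the legacy path re-exports the new module's objects.
# Assumes both torch.quantization.utils (this shim) and
# torch.ao.quantization.utils are importable in the installed build.
import torch.ao.quantization.utils as ao_utils
import torch.quantization.utils as legacy_utils

# The shim re-imports rather than redefines, so the names are
# identical objects, not copies.
assert legacy_utils.get_combined_dict is ao_utils.get_combined_dict
assert legacy_utils.get_quant_type is ao_utils.get_quant_type
print("legacy torch.quantization.utils re-exports torch.ao.quantization.utils")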