[BE] document some quantization public apis (#165160)

This PR documents some APIs in `torch.ao.quantization.utils`.

![Screenshot 2025-10-10 at 4 38 10 PM](https://github.com/user-attachments/assets/4323a6f5-ac3a-4f2e-ba00-35f3b208bef4)
![Screenshot 2025-10-10 at 4 38 14 PM](https://github.com/user-attachments/assets/164822c3-9740-46f9-953d-bb20c77bcf69)
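
For context, a minimal sketch of how two of the newly documented helpers are typically called. This is not part of the diff; the keyword names and argument order are assumptions based on how the observers use these utilities, so the generated docs remain the authoritative reference:

```python
import torch
from torch.ao.quantization.utils import calculate_qmin_qmax, validate_qmin_qmax

# Sanity-check a user-supplied integer range before building a custom observer
# (raises if quant_min/quant_max do not form a usable range).
validate_qmin_qmax(quant_min=0, quant_max=255)

# Resolve the effective integer range for an int8 observer; with
# reduce_range=False this should stay at the full [-128, 127] span.
qmin, qmax = calculate_qmin_qmax(
    quant_min=-128,
    quant_max=127,
    has_customized_qrange=False,
    dtype=torch.qint8,
    reduce_range=False,
)
print(qmin, qmax)
```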

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165160
Approved by: https://github.com/janeyx99
commit 70ec464c16 (parent 2c600bb665)
Author: Angel Li
Date: 2025-10-13 17:24:40 +00:00
Committed by: PyTorch MergeBot
3 changed files with 21 additions and 22 deletions


@@ -217,9 +217,7 @@ coverage_ignore_functions = [
"is_available",
# torch.distributed.checkpoint.state_dict
"gc_context",
"state_dict",
# torch.distributed.elastic.events
"construct_and_record_rdzv_event",
"record_rdzv_event",
# torch.distributed.elastic.metrics
"initialize_metrics",
@@ -430,7 +428,6 @@ coverage_ignore_functions = [
"get_default_qconfig_dict",
"qconfig_equals",
# torch.ao.quantization.quantization_mappings
"get_default_compare_output_module_list",
"get_default_dynamic_quant_module_mappings",
"get_default_dynamic_sparse_quant_module_mappings",
"get_default_float_to_quantized_operator_mappings",
@@ -473,29 +470,13 @@ coverage_ignore_functions = [
"get_weight_qspec",
"propagate_annotation",
"register_annotator",
# torch.ao.quantization.utils
"activation_dtype",
"activation_is_dynamically_quantized",
"activation_is_int32_quantized",
"activation_is_int8_quantized",
"activation_is_statically_quantized",
"calculate_qmin_qmax",
"check_min_max_valid",
"check_node",
"determine_qparams",
"get_combined_dict",
"get_fqn_to_example_inputs",
"get_qconfig_dtypes",
"get_qparam_dict",
"get_quant_type",
"get_swapped_custom_module_class",
"getattr_from_fqn",
"has_no_children_ignoring_parametrizations",
"is_per_channel",
"is_per_tensor",
"op_is_int8_dynamically_quantized",
"to_underlying_dtype",
"validate_qmin_qmax",
"weight_dtype",
"weight_is_quantized",
"weight_is_statically_quantized",


@@ -52,6 +52,26 @@ This module contains Eager mode quantization APIs.
default_eval_fn
```

## torch.ao.quantization.utils

```{eval-rst}
.. automodule:: torch.ao.quantization.utils
.. autosummary::
    :toctree: generated
    :nosignatures:
    :template: classtemplate.rst

    activation_is_dynamically_quantized
    activation_is_int32_quantized
    activation_is_int8_quantized
    activation_is_statically_quantized
    determine_qparams
    check_min_max_valid
    calculate_qmin_qmax
    validate_qmin_qmax
```

## torch.ao.quantization.quantize_fx
This module contains FX graph mode quantization APIs (prototype).
@@ -150,7 +170,7 @@ This module contains a few CustomConfig classes that's used in both eager mode a
## torch.ao.quantization.pt2e.export_utils
```{eval-rst}
.. currentmodule:: torch.ao.quantization.pt2e.export_utils
.. automodule:: torch.ao.quantization.pt2e.export_utils
```
```{eval-rst}


@@ -134,7 +134,6 @@ and supported quantized modules and functions.
.. py:module:: torch.ao.quantization.fx.utils
.. py:module:: torch.ao.quantization.observer
.. py:module:: torch.ao.quantization.pt2e.duplicate_dq_pass
.. py:module:: torch.ao.quantization.pt2e.export_utils
.. py:module:: torch.ao.quantization.pt2e.graph_utils
.. py:module:: torch.ao.quantization.pt2e.port_metadata_pass
.. py:module:: torch.ao.quantization.pt2e.prepare
@@ -158,7 +157,6 @@ and supported quantized modules and functions.
.. py:module:: torch.ao.quantization.quantizer.xnnpack_quantizer
.. py:module:: torch.ao.quantization.quantizer.xnnpack_quantizer_utils
.. py:module:: torch.ao.quantization.stubs
.. py:module:: torch.ao.quantization.utils
.. py:module:: torch.nn.intrinsic.modules.fused
.. py:module:: torch.nn.intrinsic.qat.modules.conv_fused
.. py:module:: torch.nn.intrinsic.qat.modules.linear_fused