[quant][graphmode] docstrings for top level APIs (#40328)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/40328

Test Plan: Imported from OSS

Differential Revision: D22149708

fbshipit-source-id: 63a1cd229d9e4668fba0ef3977e894cb8984318b
Committed by: Facebook GitHub Bot
Parent: 7a837019a4
Commit: 59ca1d31ca
@@ -20,14 +20,28 @@ def _check_forward_method(model):
        raise ValueError('input script module does not have forward method')

def script_qconfig(qconfig):
    r"""Instantiate the activation and weight observer modules and script
    them. These observer module instances will be deep-copied during the
    `prepare_jit` step.
    """
    return QConfig(
        activation=torch.jit.script(qconfig.activation())._c,
        weight=torch.jit.script(qconfig.weight())._c)
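A minimal usage sketch for `script_qconfig`, assuming the default `'fbgemm'` backend qconfig; the import path of `script_qconfig` is an assumption based on the file this diff touches:

```python
import torch
from torch.quantization import get_default_qconfig
from torch.quantization.quantize_jit import script_qconfig  # assumed import path

# get_default_qconfig returns a QConfig of observer factories for the backend
qconfig = get_default_qconfig('fbgemm')

# instantiate and script the activation/weight observers; the result wraps
# the scripted observers' C++ module handles, ready for the prepare_jit step
scripted_qconfig = script_qconfig(qconfig)
```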

def script_qconfig_dict(qconfig_dict):
    r"""Helper function used by `prepare_jit`.
    Apply `script_qconfig` to all entries in `qconfig_dict` that are
    not None.
    """
    return {k: script_qconfig(v) if v else None for k, v in qconfig_dict.items()}
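To sketch the `qconfig_dict` shape this helper consumes; the submodule name `'fc'` is hypothetical:

```python
from torch.quantization import get_default_qconfig
from torch.quantization.quantize_jit import script_qconfig_dict  # assumed import path

qconfig = get_default_qconfig('fbgemm')
qconfig_dict = {
    '': qconfig,  # empty key: applied to the whole model by default
    'fc': None,   # hypothetical submodule name, left unquantized
}
# None entries pass through unchanged; all other entries are scripted
scripted = script_qconfig_dict(qconfig_dict)
```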

def fuse_conv_bn_jit(model):
    r"""Fuse conv - bn modules.
    Works for eval models only.

    Args:
        model: TorchScript model from scripting or tracing
    """
    return torch.jit._recursive.wrap_cpp_module(torch._C._jit_pass_fold_convbn(model._c))

def _prepare_jit(model, qconfig_dict, inplace=False, quant_type=QuantType.STATIC):
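A minimal sketch of calling `fuse_conv_bn_jit`; `MyModel` is a hypothetical float model containing conv - bn pairs, and the import path is assumed:

```python
import torch
from torch.quantization.quantize_jit import fuse_conv_bn_jit  # assumed import path

float_model = MyModel()  # hypothetical model with Conv2d -> BatchNorm2d pairs

# script in eval mode first: fusion works for eval models only
ts_model = torch.jit.script(float_model.eval())
fused_model = fuse_conv_bn_jit(ts_model)  # folds bn statistics into conv weights
```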

@@ -87,7 +101,93 @@ def _quantize_jit(model, qconfig_dict, run_fn=None, run_args=None, inplace=False
    return model

def quantize_jit(model, qconfig_dict, run_fn, run_args, inplace=False, debug=False):
    r"""Quantize the input float TorchScript model with
    post training static quantization.

    First it will prepare the model for calibration, then it calls
    `run_fn`, which will run the calibration step. After that we will
    convert the model to a quantized model.

    Args:
        `model`: input float TorchScript model
        `qconfig_dict`: dictionary with the names of sub modules as keys and
            the qconfig for that module as value; the empty key means the
            qconfig will be applied to the whole model unless it's overridden
            by a more specific configuration; the qconfig for each module is
            either found in the dictionary or falls back to the qconfig of
            the parent module.

            Right now qconfig_dict is the only way to configure how the model
            is quantized, and it is done at the granularity of modules, that
            is, we only support one type of qconfig for each torch.nn.Module;
            the qconfig for a sub module overrides the qconfig of its parent
            module, and the empty string means global configuration.
        `run_fn`: a calibration function for calibrating the prepared model
        `run_args`: positional arguments for `run_fn`
        `inplace`: carry out model transformations in-place; the original
            module is mutated
        `debug`: flag for producing a debug-friendly model (preserves the
            weight attribute)

    Return:
        Quantized TorchScript model.

    Example:
    ```python
    import torch
    from torch.quantization import get_default_qconfig
    from torch.quantization import quantize_jit

    ts_model = torch.jit.script(float_model.eval())  # or torch.jit.trace(float_model, input)
    qconfig = get_default_qconfig('fbgemm')
    def calibrate(model, data_loader):
        model.eval()
        with torch.no_grad():
            for image, target in data_loader:
                model(image)

    quantized_model = quantize_jit(
        ts_model,
        {'': qconfig},
        calibrate,
        [data_loader_test])
    ```
    """
    return _quantize_jit(model, qconfig_dict, run_fn, run_args, inplace, debug, quant_type=QuantType.STATIC)
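The returned module is an ordinary TorchScript module, so one natural follow-up, sketched here with a placeholder path, is serialization:

```python
import torch

# save and reload the quantized TorchScript model; the path is a placeholder
torch.jit.save(quantized_model, 'quantized.pt')
loaded = torch.jit.load('quantized.pt')
```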

def quantize_dynamic_jit(model, qconfig_dict, inplace=False, debug=False):
    r"""Quantize the input float TorchScript model with
    post training dynamic quantization.
    Currently only qint8 quantization of torch.nn.Linear is supported.

    Args:
        `model`: input float TorchScript model
        `qconfig_dict`: dictionary with the names of sub modules as keys and
            the qconfig for that module as value; please see the detailed
            description in :func:`~torch.quantization.quantize_jit`
        `inplace`: carry out model transformations in-place; the original
            module is mutated
        `debug`: flag for producing a debug-friendly model (preserves the
            weight attribute)

    Return:
        Quantized TorchScript model.

    Example:
    ```python
    import torch
    from torch.quantization import per_channel_dynamic_qconfig
    from torch.quantization import quantize_dynamic_jit

    ts_model = torch.jit.script(float_model.eval())  # or torch.jit.trace(float_model, input)

    # dynamic quantization needs no calibration step, so no run_fn is taken
    quantized_model = quantize_dynamic_jit(
        ts_model,
        {'': per_channel_dynamic_qconfig})
    ```
    """
    return _quantize_jit(model, qconfig_dict, inplace=inplace, debug=debug, quant_type=QuantType.DYNAMIC)