Apply UFMT to low traffic torch modules (#106249)

Signed-off-by: Edward Z. Yang <ezyang@meta.com>

Pull Request resolved: https://github.com/pytorch/pytorch/pull/106249
Approved by: https://github.com/Skylion007
Edward Z. Yang
2023-07-29 10:51:26 -04:00
committed by PyTorch MergeBot
parent a4ebc61f15
commit 3bf922a6ce
163 changed files with 8472 additions and 4412 deletions
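
UFMT is the combination of two tools: µsort, which sorts (and merges) import
statements, and black, which normalizes the surrounding code style. Every hunk
below is mechanical fallout from running that pair over these modules:
single-quoted strings become double-quoted, the names inside an import are
re-ordered case-insensitively, and lines that overrun black's default width
are wrapped. As a minimal before/after sketch, taken from the stubs shim in
the last hunk of this commit:

    # Before UFMT:
    from torch.ao.quantization.stubs import (
        QuantStub,
        DeQuantStub,
        QuantWrapper
    )

    # After UFMT: usort orders the names case-insensitively, and black
    # collapses the import onto one line because it fits in 88 columns.
    from torch.ao.quantization.stubs import DeQuantStub, QuantStub, QuantWrapper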


@@ -6,10 +6,12 @@ from .fuse_modules import fuse_modules
from .stubs import * # noqa: F403
from .quant_type import * # noqa: F403
from .quantize_jit import * # noqa: F403
# from .quantize_fx import *
from .quantization_mappings import * # noqa: F403
from .fuser_method_mappings import * # noqa: F403
def default_eval_fn(model, calib_data):
r"""
Default evaluation function takes a torch.utils.data.Dataset or a list of
@@ -18,45 +20,68 @@ def default_eval_fn(model, calib_data):
for data, target in calib_data:
model(data)
__all__ = [
'QuantWrapper', 'QuantStub', 'DeQuantStub',
"QuantWrapper",
"QuantStub",
"DeQuantStub",
# Top level API for eager mode quantization
'quantize', 'quantize_dynamic', 'quantize_qat',
'prepare', 'convert', 'prepare_qat',
"quantize",
"quantize_dynamic",
"quantize_qat",
"prepare",
"convert",
"prepare_qat",
# Top level API for graph mode quantization on TorchScript
'quantize_jit', 'quantize_dynamic_jit', '_prepare_ondevice_dynamic_jit',
'_convert_ondevice_dynamic_jit', '_quantize_ondevice_dynamic_jit',
"quantize_jit",
"quantize_dynamic_jit",
"_prepare_ondevice_dynamic_jit",
"_convert_ondevice_dynamic_jit",
"_quantize_ondevice_dynamic_jit",
# Top level API for graph mode quantization on GraphModule(torch.fx)
# 'fuse_fx', 'quantize_fx', # TODO: add quantize_dynamic_fx
# 'prepare_fx', 'prepare_dynamic_fx', 'convert_fx',
'QuantType', # quantization type
"QuantType", # quantization type
# custom module APIs
'get_default_static_quant_module_mappings', 'get_static_quant_module_class',
'get_default_dynamic_quant_module_mappings',
'get_default_qat_module_mappings',
'get_default_qconfig_propagation_list',
'get_default_compare_output_module_list',
'get_quantized_operator',
'get_fuser_method',
"get_default_static_quant_module_mappings",
"get_static_quant_module_class",
"get_default_dynamic_quant_module_mappings",
"get_default_qat_module_mappings",
"get_default_qconfig_propagation_list",
"get_default_compare_output_module_list",
"get_quantized_operator",
"get_fuser_method",
# Sub functions for `prepare` and `swap_module`
'propagate_qconfig_', 'add_quant_dequant', 'swap_module',
'default_eval_fn',
"propagate_qconfig_",
"add_quant_dequant",
"swap_module",
"default_eval_fn",
# Observers
'ObserverBase', 'WeightObserver', 'HistogramObserver',
'observer', 'default_observer',
'default_weight_observer', 'default_placeholder_observer',
'default_per_channel_weight_observer',
"ObserverBase",
"WeightObserver",
"HistogramObserver",
"observer",
"default_observer",
"default_weight_observer",
"default_placeholder_observer",
"default_per_channel_weight_observer",
# FakeQuantize (for qat)
'default_fake_quant', 'default_weight_fake_quant',
'default_fixed_qparams_range_neg1to1_fake_quant',
'default_fixed_qparams_range_0to1_fake_quant',
'default_per_channel_weight_fake_quant',
'default_histogram_fake_quant',
"default_fake_quant",
"default_weight_fake_quant",
"default_fixed_qparams_range_neg1to1_fake_quant",
"default_fixed_qparams_range_0to1_fake_quant",
"default_per_channel_weight_fake_quant",
"default_histogram_fake_quant",
# QConfig
'QConfig', 'default_qconfig', 'default_dynamic_qconfig', 'float16_dynamic_qconfig',
'float_qparams_weight_only_qconfig',
"QConfig",
"default_qconfig",
"default_dynamic_qconfig",
"float16_dynamic_qconfig",
"float_qparams_weight_only_qconfig",
# QAT utilities
'default_qat_qconfig', 'prepare_qat', 'quantize_qat',
"default_qat_qconfig",
"prepare_qat",
"quantize_qat",
# module transformations
'fuse_modules',
"fuse_modules",
]
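
The one-string-per-line rewrite of __all__ above is black's "magic trailing
comma" behavior: once a collection is too long for a single line (or already
ends with a trailing comma), black puts exactly one element per line rather
than packing several per line. A minimal sketch, assuming black's default
88-column limit:

    # A list that fits on one line and has no trailing comma stays collapsed:
    short = ["a", "b"]

    # One that does not fit (or that keeps a trailing comma) is exploded,
    # one element per line, with a trailing comma added:
    exploded = [
        "QuantWrapper",
        "QuantStub",
        "DeQuantStub",
    ]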


@@ -8,21 +8,21 @@ here.
"""
from torch.ao.ns._numeric_suite import (
NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST,
_find_match,
compare_weights,
_get_logger_dict_helper,
get_logger_dict,
Logger,
ShadowLogger,
OutputLogger,
_convert_tuple_to_list,
_dequantize_tensor_list,
Shadow,
prepare_model_with_stubs,
_find_match,
_get_logger_dict_helper,
_is_identical_module_type,
compare_model_stub,
get_matching_activations,
prepare_model_outputs,
compare_model_outputs,
compare_model_stub,
compare_weights,
get_logger_dict,
get_matching_activations,
Logger,
NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST,
OutputLogger,
prepare_model_outputs,
prepare_model_with_stubs,
Shadow,
ShadowLogger,
)
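
The re-ordering in this hunk (and in most that follow) is µsort's: the names
inside a from-import are sorted case-insensitively, so underscore-prefixed
helpers such as _find_match float to the top while Logger and ShadowLogger
sink toward the bottom. A rough approximation of that ordering, assuming a
plain sort without µsort's additional tie-breaking rules:

    names = [
        "NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST",
        "_find_match",
        "compare_weights",
        "Logger",
        "ShadowLogger",
    ]
    print(sorted(names, key=str.casefold))
    # ['_find_match', 'compare_weights', 'Logger',
    #  'NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST', 'ShadowLogger']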


@@ -8,19 +8,19 @@ here.
"""
from torch.ao.ns._numeric_suite_fx import (
RNNReturnType,
OutputLogger,
NSTracer,
_extract_weights_one_model,
_extract_weights_impl,
extract_weights,
_add_loggers_one_model,
_add_loggers_impl,
add_loggers,
_extract_logger_info_one_model,
extract_logger_info,
_add_loggers_one_model,
_add_shadow_loggers_impl,
_extract_logger_info_one_model,
_extract_weights_impl,
_extract_weights_one_model,
add_loggers,
add_shadow_loggers,
extract_shadow_logger_info,
extend_logger_results_with_comparison,
extract_logger_info,
extract_shadow_logger_info,
extract_weights,
NSTracer,
OutputLogger,
RNNReturnType,
)


@@ -8,25 +8,25 @@ here.
"""
from torch.ao.quantization.fake_quantize import (
_is_fake_quant_script_module,
_is_per_channel,
_is_per_tensor,
_is_symmetric_quant,
FakeQuantizeBase,
default_fake_quant,
default_fixed_qparams_range_0to1_fake_quant,
default_fixed_qparams_range_neg1to1_fake_quant,
default_fused_act_fake_quant,
default_fused_per_channel_wt_fake_quant,
default_fused_wt_fake_quant,
default_histogram_fake_quant,
default_per_channel_weight_fake_quant,
default_weight_fake_quant,
disable_fake_quant,
disable_observer,
enable_fake_quant,
enable_observer,
FakeQuantize,
FakeQuantizeBase,
FixedQParamsFakeQuantize,
FusedMovingAvgObsFakeQuantize,
default_fake_quant,
default_weight_fake_quant,
default_fixed_qparams_range_neg1to1_fake_quant,
default_fixed_qparams_range_0to1_fake_quant,
default_per_channel_weight_fake_quant,
default_histogram_fake_quant,
default_fused_act_fake_quant,
default_fused_wt_fake_quant,
default_fused_per_channel_wt_fake_quant,
_is_fake_quant_script_module,
disable_fake_quant,
enable_fake_quant,
disable_observer,
enable_observer,
)


@@ -7,18 +7,16 @@ If you are adding a new entry/functionality, please, add it to the
here.
"""
from torch.ao.quantization.fuse_modules import fuse_modules
from torch.ao.quantization.fuse_modules import fuse_known_modules
from torch.ao.quantization.fuse_modules import get_fuser_method
# for backward compatiblity
from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn
from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn_relu
# TODO: These functions are not used outside the `fuse_modules.py`
# Keeping here for now, need to remove them later.
from torch.ao.quantization.fuse_modules import (
_fuse_modules,
_get_module,
_set_module,
fuse_known_modules,
fuse_modules,
get_fuser_method,
)
# for backward compatiblity
from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn, fuse_conv_bn_relu
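
Beyond sorting, this hunk shows imports from the same module being merged
under UFMT: the three separate torch.ao.quantization.fuse_modules imports
collapse into one parenthesized statement, and the two fuser_method_mappings
imports onto one line. A minimal sketch of the merge:

    # Before: the same module imported on consecutive lines
    # from torch.ao.quantization.fuse_modules import fuse_modules
    # from torch.ao.quantization.fuse_modules import fuse_known_modules

    # After: one merged import, names sorted case-insensitively
    from torch.ao.quantization.fuse_modules import fuse_known_modules, fuse_modules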


@@ -7,9 +7,9 @@ If you are adding a new entry/functionality, please, add it to the
here.
"""
from torch.ao.quantization.fuser_method_mappings import (
_DEFAULT_OP_LIST_TO_FUSER_METHOD,
fuse_conv_bn,
fuse_conv_bn_relu,
fuse_linear_bn,
_DEFAULT_OP_LIST_TO_FUSER_METHOD,
get_fuser_method,
)


@@ -7,8 +7,9 @@ appropriate files under `torch/ao/quantization/fx/`, while adding an import stat
here.
"""
from torch.ao.quantization.fx.convert import convert
from torch.ao.quantization.fx.fuse import fuse
# omitting files that's unlikely to be used right now, for example
# the newly added lower_to_fbgemm etc.
from torch.ao.quantization.fx.prepare import prepare
from torch.ao.quantization.fx.convert import convert
from torch.ao.quantization.fx.fuse import fuse


@@ -7,32 +7,32 @@ appropriate files under `torch/ao/quantization/fx/`, while adding an import stat
here.
"""
from torch.ao.quantization.fx._equalize import (
reshape_scale,
_convert_equalization_ref,
_InputEqualizationObserver,
_WeightEqualizationObserver,
calculate_equalization_scale,
EqualizationQConfig,
input_equalization_observer,
weight_equalization_observer,
default_equalization_qconfig,
fused_module_supports_equalization,
nn_module_supports_equalization,
custom_module_supports_equalization,
node_supports_equalization,
is_equalization_observer,
get_op_node_and_weight_eq_obs,
maybe_get_weight_eq_obs_node,
maybe_get_next_input_eq_obs,
maybe_get_next_equalization_scale,
scale_input_observer,
scale_weight_node,
scale_weight_functional,
clear_weight_quant_obs_node,
remove_node,
update_obs_for_equalization,
convert_eq_obs,
_convert_equalization_ref,
get_layer_sqnr_dict,
get_equalization_qconfig_dict,
CUSTOM_MODULE_SUPP_LIST,
custom_module_supports_equalization,
default_equalization_qconfig,
EqualizationQConfig,
fused_module_supports_equalization,
get_equalization_qconfig_dict,
get_layer_sqnr_dict,
get_op_node_and_weight_eq_obs,
input_equalization_observer,
is_equalization_observer,
maybe_get_next_equalization_scale,
maybe_get_next_input_eq_obs,
maybe_get_weight_eq_obs_node,
nn_module_supports_equalization,
node_supports_equalization,
remove_node,
reshape_scale,
scale_input_observer,
scale_weight_functional,
scale_weight_node,
update_obs_for_equalization,
weight_equalization_observer,
)


@@ -6,7 +6,4 @@ If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.fx.fuse_handler import (
FuseHandler,
DefaultFuseHandler,
)
from torch.ao.quantization.fx.fuse_handler import DefaultFuseHandler, FuseHandler


@@ -7,11 +7,11 @@ appropriate files under `torch/ao/quantization/fx/`, while adding an import stat
here.
"""
from torch.ao.quantization.fx.graph_module import (
GraphModule,
FusedGraphModule,
ObservedGraphModule,
_is_observed_module,
ObservedStandaloneGraphModule,
_is_observed_standalone_module,
QuantizedGraphModule
FusedGraphModule,
GraphModule,
ObservedGraphModule,
ObservedStandaloneGraphModule,
QuantizedGraphModule,
)


@@ -7,8 +7,8 @@ appropriate files under `torch/ao/quantization/fx/`, while adding an import stat
here.
"""
from torch.ao.quantization.fx.match_utils import (
_find_matches,
_is_match,
_MatchResult,
MatchAllNode,
_is_match,
_find_matches
)


@@ -7,12 +7,12 @@ appropriate files under `torch/ao/quantization/fx/`, while adding an import stat
here.
"""
from torch.ao.quantization.fx.pattern_utils import (
QuantizeHandler,
_register_fusion_pattern,
get_default_fusion_patterns,
_register_quant_pattern,
get_default_fusion_patterns,
get_default_output_activation_post_process_map,
get_default_quant_patterns,
get_default_output_activation_post_process_map
QuantizeHandler,
)
# QuantizeHandler.__module__ = _NAMESPACE
@@ -20,7 +20,9 @@ _register_fusion_pattern.__module__ = "torch.ao.quantization.fx.pattern_utils"
get_default_fusion_patterns.__module__ = "torch.ao.quantization.fx.pattern_utils"
_register_quant_pattern.__module__ = "torch.ao.quantization.fx.pattern_utils"
get_default_quant_patterns.__module__ = "torch.ao.quantization.fx.pattern_utils"
get_default_output_activation_post_process_map.__module__ = "torch.ao.quantization.fx.pattern_utils"
get_default_output_activation_post_process_map.__module__ = (
"torch.ao.quantization.fx.pattern_utils"
)
# __all__ = [
# "QuantizeHandler",


@@ -6,6 +6,4 @@ If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.fx.prepare import (
prepare
)
from torch.ao.quantization.fx.prepare import prepare


@@ -7,20 +7,20 @@ appropriate files under `torch/ao/quantization/fx/`, while adding an import stat
here.
"""
from torch.ao.quantization.fx.quantize_handler import (
QuantizeHandler,
BatchNormQuantizeHandler,
BinaryOpQuantizeHandler,
CatQuantizeHandler,
ConvReluQuantizeHandler,
LinearReLUQuantizeHandler,
BatchNormQuantizeHandler,
EmbeddingQuantizeHandler,
RNNDynamicQuantizeHandler,
DefaultNodeQuantizeHandler,
FixedQParamsOpQuantizeHandler,
CopyNodeQuantizeHandler,
CustomModuleQuantizeHandler,
DefaultNodeQuantizeHandler,
EmbeddingQuantizeHandler,
FixedQParamsOpQuantizeHandler,
GeneralTensorShapeOpQuantizeHandler,
StandaloneModuleQuantizeHandler
LinearReLUQuantizeHandler,
QuantizeHandler,
RNNDynamicQuantizeHandler,
StandaloneModuleQuantizeHandler,
)
QuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
@@ -32,8 +32,16 @@ BatchNormQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_pat
EmbeddingQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
RNNDynamicQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
DefaultNodeQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
FixedQParamsOpQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
FixedQParamsOpQuantizeHandler.__module__ = (
"torch.ao.quantization.fx.quantization_patterns"
)
CopyNodeQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
CustomModuleQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
GeneralTensorShapeOpQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
StandaloneModuleQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
CustomModuleQuantizeHandler.__module__ = (
"torch.ao.quantization.fx.quantization_patterns"
)
GeneralTensorShapeOpQuantizeHandler.__module__ = (
"torch.ao.quantization.fx.quantization_patterns"
)
StandaloneModuleQuantizeHandler.__module__ = (
"torch.ao.quantization.fx.quantization_patterns"
)
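
The parenthesized rewrites above are black's standard handling of an
assignment that overruns the line limit: instead of a backslash continuation
or an over-long line, the right-hand side is wrapped in parentheses and
indented. Sketched on one of the lines above, assuming the default 88-column
limit:

    # 93 characters on one line, so black wraps the right-hand side:
    StandaloneModuleQuantizeHandler.__module__ = (
        "torch.ao.quantization.fx.quantization_patterns"
    )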


@@ -6,7 +6,4 @@ If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.utils import (
Pattern,
QuantizerCls
)
from torch.ao.quantization.utils import Pattern, QuantizerCls


@@ -7,14 +7,14 @@ appropriate files under `torch/ao/quantization/fx/`, while adding an import stat
here.
"""
from torch.ao.quantization.fx.utils import (
get_custom_module_class_keys,
get_linear_prepack_op_for_dtype,
get_qconv_prepack_op,
get_new_attr_name_with_prefix,
graph_module_from_producer_nodes,
all_node_args_have_no_tensors,
assert_and_get_unique_device,
create_getattr_from_value,
all_node_args_have_no_tensors,
get_custom_module_class_keys,
get_linear_prepack_op_for_dtype,
get_new_attr_name_with_prefix,
get_non_observable_arg_indexes_and_types,
maybe_get_next_module
get_qconv_prepack_op,
graph_module_from_producer_nodes,
maybe_get_next_module,
)


@@ -7,30 +7,30 @@ If you are adding a new entry/functionality, please, add it to the
here.
"""
from torch.ao.quantization.observer import (
_is_activation_post_process,
_is_per_channel_script_obs_instance,
_ObserverBase,
_PartialWrapper,
_with_args,
_with_callable_args,
ABC,
ObserverBase,
_ObserverBase,
MinMaxObserver,
MovingAverageMinMaxObserver,
PerChannelMinMaxObserver,
MovingAveragePerChannelMinMaxObserver,
HistogramObserver,
PlaceholderObserver,
RecordingObserver,
NoopObserver,
_is_activation_post_process,
_is_per_channel_script_obs_instance,
get_observer_state_dict,
load_observer_state_dict,
default_observer,
default_placeholder_observer,
default_debug_observer,
default_weight_observer,
default_histogram_observer,
default_per_channel_weight_observer,
default_dynamic_quant_observer,
default_float_qparams_observer,
default_histogram_observer,
default_observer,
default_per_channel_weight_observer,
default_placeholder_observer,
default_weight_observer,
get_observer_state_dict,
HistogramObserver,
load_observer_state_dict,
MinMaxObserver,
MovingAverageMinMaxObserver,
MovingAveragePerChannelMinMaxObserver,
NoopObserver,
ObserverBase,
PerChannelMinMaxObserver,
PlaceholderObserver,
RecordingObserver,
)


@@ -7,24 +7,24 @@ If you are adding a new entry/functionality, please, add it to the
here.
"""
from torch.ao.quantization.qconfig import (
QConfig,
default_qconfig,
_add_module_to_qconfig_obs_ctr,
_assert_valid_qconfig,
default_activation_only_qconfig,
default_debug_qconfig,
default_per_channel_qconfig,
QConfigDynamic,
default_dynamic_qconfig,
default_per_channel_qconfig,
default_qat_qconfig,
default_qat_qconfig_v2,
default_qconfig,
default_weight_only_qconfig,
float16_dynamic_qconfig,
float16_static_qconfig,
per_channel_dynamic_qconfig,
float_qparams_weight_only_qconfig,
default_qat_qconfig,
default_weight_only_qconfig,
default_activation_only_qconfig,
default_qat_qconfig_v2,
get_default_qconfig,
get_default_qat_qconfig,
_assert_valid_qconfig,
get_default_qconfig,
per_channel_dynamic_qconfig,
QConfig,
qconfig_equals,
QConfigAny,
_add_module_to_qconfig_obs_ctr,
qconfig_equals
QConfigDynamic,
)


@@ -7,5 +7,4 @@ If you are adding a new entry/functionality, please, add it to the
here.
"""
from torch.ao.quantization.quant_type import QuantType
from torch.ao.quantization.quant_type import _get_quant_type_to_str
from torch.ao.quantization.quant_type import _get_quant_type_to_str, QuantType


@@ -7,23 +7,23 @@ If you are adding a new entry/functionality, please, add it to the
here.
"""
from torch.ao.quantization.quantization_mappings import (
DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS,
DEFAULT_STATIC_QUANT_MODULE_MAPPINGS,
DEFAULT_QAT_MODULE_MAPPINGS,
DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS,
_INCLUDE_QCONFIG_PROPAGATE_LIST,
DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS,
DEFAULT_MODULE_TO_ACT_POST_PROCESS,
no_observer_set,
get_default_static_quant_module_mappings,
get_static_quant_module_class,
get_dynamic_quant_module_class,
get_default_qat_module_mappings,
get_default_dynamic_quant_module_mappings,
get_default_qconfig_propagation_list,
get_default_compare_output_module_list,
get_default_float_to_quantized_operator_mappings,
get_quantized_operator,
_get_special_act_post_process,
_has_special_act_post_process,
_INCLUDE_QCONFIG_PROPAGATE_LIST,
DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS,
DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS,
DEFAULT_MODULE_TO_ACT_POST_PROCESS,
DEFAULT_QAT_MODULE_MAPPINGS,
DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS,
DEFAULT_STATIC_QUANT_MODULE_MAPPINGS,
get_default_compare_output_module_list,
get_default_dynamic_quant_module_mappings,
get_default_float_to_quantized_operator_mappings,
get_default_qat_module_mappings,
get_default_qconfig_propagation_list,
get_default_static_quant_module_mappings,
get_dynamic_quant_module_class,
get_quantized_operator,
get_static_quant_module_class,
no_observer_set,
)


@@ -7,22 +7,24 @@ If you are adding a new entry/functionality, please, add it to the
here.
"""
from torch.ao.quantization.quantize import _convert
from torch.ao.quantization.quantize import _observer_forward_hook
from torch.ao.quantization.quantize import _propagate_qconfig_helper
from torch.ao.quantization.quantize import _remove_activation_post_process
from torch.ao.quantization.quantize import _remove_qconfig
from torch.ao.quantization.quantize import _add_observer_
from torch.ao.quantization.quantize import add_quant_dequant
from torch.ao.quantization.quantize import convert
from torch.ao.quantization.quantize import _get_observer_dict
from torch.ao.quantization.quantize import _get_unique_devices_
from torch.ao.quantization.quantize import _is_activation_post_process
from torch.ao.quantization.quantize import prepare
from torch.ao.quantization.quantize import prepare_qat
from torch.ao.quantization.quantize import propagate_qconfig_
from torch.ao.quantization.quantize import quantize
from torch.ao.quantization.quantize import quantize_dynamic
from torch.ao.quantization.quantize import quantize_qat
from torch.ao.quantization.quantize import _register_activation_post_process_hook
from torch.ao.quantization.quantize import swap_module
from torch.ao.quantization.quantize import (
_add_observer_,
_convert,
_get_observer_dict,
_get_unique_devices_,
_is_activation_post_process,
_observer_forward_hook,
_propagate_qconfig_helper,
_register_activation_post_process_hook,
_remove_activation_post_process,
_remove_qconfig,
add_quant_dequant,
convert,
prepare,
prepare_qat,
propagate_qconfig_,
quantize,
quantize_dynamic,
quantize_qat,
swap_module,
)


@@ -7,23 +7,20 @@ If you are adding a new entry/functionality, please, add it to the
here.
"""
from torch.ao.quantization.fx.graph_module import ObservedGraphModule
from torch.ao.quantization.quantize_fx import (
_check_is_graph_module,
_swap_ff_with_fxff,
_convert_fx,
_convert_standalone_module_fx,
_fuse_fx,
Scope,
ScopeContextManager,
QuantizationTracer,
_prepare_fx,
_prepare_standalone_module_fx,
_swap_ff_with_fxff,
convert_fx,
fuse_fx,
prepare_fx,
prepare_qat_fx,
_convert_fx,
convert_fx,
_convert_standalone_module_fx,
)
from torch.ao.quantization.fx.graph_module import (
ObservedGraphModule,
QuantizationTracer,
Scope,
ScopeContextManager,
)


@@ -8,19 +8,19 @@ here.
"""
from torch.ao.quantization.quantize_jit import (
_check_is_script_module,
_check_forward_method,
_check_is_script_module,
_convert_jit,
_prepare_jit,
_prepare_ondevice_dynamic_jit,
_quantize_jit,
convert_dynamic_jit,
convert_jit,
fuse_conv_bn_jit,
prepare_dynamic_jit,
prepare_jit,
quantize_dynamic_jit,
quantize_jit,
script_qconfig,
script_qconfig_dict,
fuse_conv_bn_jit,
_prepare_jit,
prepare_jit,
prepare_dynamic_jit,
_prepare_ondevice_dynamic_jit,
_convert_jit,
convert_jit,
convert_dynamic_jit,
_quantize_jit,
quantize_jit,
quantize_dynamic_jit
)


@@ -7,8 +7,4 @@ If you are adding a new entry/functionality, please, add it to the
here.
"""
from torch.ao.quantization.stubs import (
QuantStub,
DeQuantStub,
QuantWrapper
)
from torch.ao.quantization.stubs import DeQuantStub, QuantStub, QuantWrapper