[quant] Rename _convert_do_not_use.py to convert.py (#74322)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/74322

As titled; also update all references to _convert_do_not_use.
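
For reference, the import-path change implied by the rename, taken from the hunks below (a summary sketch, not part of the original message):

# before this commit
from torch.ao.quantization.fx._convert_do_not_use import _convert_do_not_use as convert
# after this commit
from torch.ao.quantization.fx.convert import convert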

Test Plan:
python test/test_quantization.py TestQuantizeFx
python test/test_quantization.py TestAOMigrationQuantizationFx

Imported from OSS

Reviewed By: andrewor14

Differential Revision: D34936430

fbshipit-source-id: c96fb887847383bf47f0ec4219127e96e2b63b2d
(cherry picked from commit 8ad5a9e031e6ca4ede2656d9b2f7906a82b57c1c)
Authored by: Jerry Zhang, 2022-03-17 11:46:19 -07:00
Committed by: PyTorch MergeBot
Parent: a6bed4deaa
Commit: 975c9f15bd
7 changed files with 12 additions and 46 deletions


@@ -142,15 +142,14 @@ class TestAOMigrationQuantizationFx(AOMigrationTestCase):
         ]
         self._test_function_import('fx.prepare', function_list)
 
-    # skipping for now, will enable in next PR
-    # def test_package_import_fx_convert(self):
-    #     self._test_package_import('fx.convert')
+    def test_package_import_fx_convert(self):
+        self._test_package_import('fx.convert')
 
-    # def test_function_import_fx_convert(self):
-    #     function_list = [
-    #         'convert'
-    #     ]
-    #     self._test_function_import('fx.convert', function_list)
+    def test_function_import_fx_convert(self):
+        function_list = [
+            'convert'
+        ]
+        self._test_function_import('fx.convert', function_list)
 
     def test_package_import_fx_fuse(self):
         self._test_package_import('fx.fuse')
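
The re-enabled tests use helpers from AOMigrationTestCase. A rough sketch of what a helper like _test_function_import plausibly checks (the helper name and the namespace pair come from the diff; the body below is an assumption, not the actual implementation):

import importlib

def _test_function_import(package_name, function_list):
    # Assumed behavior: each function should be importable from both the
    # legacy torch.quantization namespace and the new torch.ao.quantization
    # namespace, and resolve to the same object.
    old_module = importlib.import_module('torch.quantization.' + package_name)
    new_module = importlib.import_module('torch.ao.quantization.' + package_name)
    for fn_name in function_list:
        assert getattr(old_module, fn_name) is getattr(new_module, fn_name)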


@@ -1,33 +0,0 @@
-import torch
-from torch.fx import GraphModule
-from typing import Dict, Any, Optional
-
-from .quantize_fx import (
-    _check_is_graph_module,
-    check_is_valid_convert_custom_config_dict
-)
-from .fx._convert_do_not_use import _convert_do_not_use
-
-def _convert_fx_do_not_use(
-        graph_module: GraphModule, is_reference: bool = False,
-        convert_custom_config_dict: Dict[str, Any] = None,
-        _remove_qconfig: bool = True,
-        backend_config_dict: Optional[Dict[str, Any]] = None) -> torch.nn.Module:
-    """
-    Please do not use, this is a temporary function to migrate convert_fx
-    to a new implementation
-    """
-    if convert_custom_config_dict is None:
-        convert_custom_config_dict = {}
-
-    _check_is_graph_module(graph_module)
-    check_is_valid_convert_custom_config_dict(convert_custom_config_dict)
-    quantized = _convert_do_not_use(
-        graph_module, is_reference, convert_custom_config_dict,
-        False, _remove_qconfig_flag=_remove_qconfig,
-        backend_config_dict=backend_config_dict)
-
-    preserved_attributes = convert_custom_config_dict.get("preserved_attributes", [])
-    for attr_name in preserved_attributes:
-        setattr(quantized, attr_name, getattr(graph_module, attr_name))
-    return quantized


@@ -1,4 +1,4 @@
 from .prepare import prepare
-from ._convert_do_not_use import _convert_do_not_use as convert
+from .convert import convert
 from .fuse import fuse
 from .backend_config import get_tensorrt_backend_config_dict


@@ -525,7 +525,7 @@ def convert_custom_module(
     parent_name, name = _parent_name(node.target)
     setattr(modules[parent_name], name, quantized_custom_module)
 
-def _convert_do_not_use(
+def convert(
         model: GraphModule, is_reference: bool = False,
         convert_custom_config_dict: Dict[str, Any] = None,
         is_standalone_module: bool = False,
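
With the rename, callers reach the function under its new name. A minimal usage sketch, assuming the renamed convert can be called directly on an observed module the way the deleted _convert_fx_do_not_use wrapper did (the model and qconfig_dict below are illustrative, using the prepare_fx API of this era):

import torch
from torch.ao.quantization import get_default_qconfig
from torch.ao.quantization.quantize_fx import prepare_fx
from torch.ao.quantization.fx.convert import convert

# Illustrative model; prepare_fx produces the observed GraphModule
# that convert expects as its first argument.
model = torch.nn.Sequential(torch.nn.Linear(4, 4)).eval()
qconfig_dict = {"": get_default_qconfig("fbgemm")}
prepared = prepare_fx(model, qconfig_dict)
prepared(torch.randn(1, 4))  # calibration pass
quantized = convert(prepared, is_reference=True)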


@@ -7,7 +7,7 @@ from torch.fx.node import Target, Node, Argument
 from torch.nn.intrinsic import _FusedModule
 from .fx import fuse  # noqa: F401
 from .fx import prepare  # noqa: F401
-from .fx._convert_do_not_use import _convert_do_not_use as convert
+from .fx.convert import convert
 from .fx import get_tensorrt_backend_config_dict  # noqa: F401
 from .fx.graph_module import ObservedGraphModule
 from .fx.qconfig_utils import (


@@ -10,5 +10,5 @@ here.
 # omitting files that's unlikely to be used right now, for example
 # the newly added lower_to_fbgemm etc.
 from torch.ao.quantization.fx.prepare import prepare
-from torch.ao.quantization.fx._convert_do_not_use import _convert_do_not_use as convert
+from torch.ao.quantization.fx.convert import convert
 from torch.ao.quantization.fx.fuse import fuse


@@ -6,4 +6,4 @@ If you are adding a new entry/functionality, please, add it to the
 appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
 here.
 """
-from torch.ao.quantization.fx._convert_do_not_use import _convert_do_not_use as convert
+from torch.ao.quantization.fx.convert import convert