Revert "[quant][pt2e][bc-breaking] Remove fold_quantize flag (#118701)"

This reverts commit 482d952e880cf78c103a06f2d483556ab0a89138.

Reverted https://github.com/pytorch/pytorch/pull/118701 on behalf of https://github.com/facebook-github-bot due to Diff reverted internally ([comment](https://github.com/pytorch/pytorch/pull/118701#issuecomment-1932866964))
PyTorch MergeBot
2024-02-07 20:56:16 +00:00
parent a6e16fe202
commit 81abc2b249
11 changed files with 35 additions and 25 deletions

@@ -1181,7 +1181,7 @@ class PT2EQuantizationTestCase(QuantizationTestCase):
         m = prepare_pt2e(m, quantizer)
         # Calibrate
         m(*example_inputs)
-        m = convert_pt2e(m)
+        m = convert_pt2e(m, fold_quantize=True)
         pt2_quant_output = m(*example_inputs)
         ns = NodeSpec
@ -1228,7 +1228,7 @@ class PT2EQuantizationTestCase(QuantizationTestCase):
)
m = prepare_pt2e(m, quantizer)
m(*example_inputs)
m = convert_pt2e(m)
m = convert_pt2e(m, fold_quantize=True)
return m
def _get_pt2e_quantized_linear(self, is_per_channel=False) -> torch.fx.GraphModule:
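For context, here is a minimal sketch of the PT2E quantization flow these hunks exercise, ending in the `convert_pt2e(m, fold_quantize=True)` call this revert restores. The XNNPACKQuantizer, `capture_pre_autograd_graph`, and the toy module are illustrative assumptions, not part of the diff:

```python
# Sketch only: assumes the XNNPACKQuantizer workflow and the pre-autograd
# export API available around this commit; names may differ in other releases.
import torch
from torch._export import capture_pre_autograd_graph
from torch.ao.quantization.quantize_pt2e import prepare_pt2e, convert_pt2e
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(8, 8)

    def forward(self, x):
        return self.linear(x)

example_inputs = (torch.randn(1, 8),)
m = capture_pre_autograd_graph(M().eval(), example_inputs)

quantizer = XNNPACKQuantizer().set_global(get_symmetric_quantization_config())
m = prepare_pt2e(m, quantizer)
m(*example_inputs)  # calibrate with representative inputs
# With this revert, fold_quantize is an explicit flag again; passing True
# folds quantize ops into the weight constants, as the updated tests expect.
m = convert_pt2e(m, fold_quantize=True)
pt2_quant_output = m(*example_inputs)
```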