Revert "[quant][pt2e][bc-breaking] Remove fold_quantize flag (#118701)"
This reverts commit 482d952e880cf78c103a06f2d483556ab0a89138. Reverted https://github.com/pytorch/pytorch/pull/118701 on behalf of https://github.com/facebook-github-bot due to Diff reverted internally ([comment](https://github.com/pytorch/pytorch/pull/118701#issuecomment-1932866964))
@@ -1181,7 +1181,7 @@ class PT2EQuantizationTestCase(QuantizationTestCase):
         m = prepare_pt2e(m, quantizer)
         # Calibrate
         m(*example_inputs)
-        m = convert_pt2e(m)
+        m = convert_pt2e(m, fold_quantize=True)

         pt2_quant_output = m(*example_inputs)
         ns = NodeSpec
@@ -1228,7 +1228,7 @@ class PT2EQuantizationTestCase(QuantizationTestCase):
         )
         m = prepare_pt2e(m, quantizer)
         m(*example_inputs)
-        m = convert_pt2e(m)
+        m = convert_pt2e(m, fold_quantize=True)
         return m

     def _get_pt2e_quantized_linear(self, is_per_channel=False) -> torch.fx.GraphModule:
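For context, the hunks above exercise the PT2E export quantization flow with the fold_quantize flag that this revert restores. The sketch below is a minimal, hypothetical end-to-end example of that flow, not code from this diff: the module M, example_inputs, and the helper names capture_pre_autograd_graph, XNNPACKQuantizer, and get_symmetric_quantization_config are assumptions based on the PyTorch 2.2-era PT2E quantization docs and may differ in other releases.

# Hypothetical sketch of the PT2E quantization flow used in the test hunks above.
import torch
from torch._export import capture_pre_autograd_graph  # assumed 2.2-era capture API
from torch.ao.quantization.quantize_pt2e import prepare_pt2e, convert_pt2e
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(8, 8)

    def forward(self, x):
        return self.linear(x)

example_inputs = (torch.randn(1, 8),)

# Capture the eager model into a graph ahead of quantization.
m = capture_pre_autograd_graph(M().eval(), example_inputs)

# Annotate the graph with a quantizer, then insert observers.
quantizer = XNNPACKQuantizer().set_global(get_symmetric_quantization_config())
m = prepare_pt2e(m, quantizer)

# Calibrate with representative inputs.
m(*example_inputs)

# Convert to a quantized graph; fold_quantize=True folds weight quantize ops
# into constants (the flag whose removal this commit reverts).
m = convert_pt2e(m, fold_quantize=True)

pt2_quant_output = m(*example_inputs)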