mirror of
https://github.com/huggingface/transformers.git
synced 2025-10-20 17:13:56 +08:00
FIX: Broken repr of TorchAoConfig (#34560)
FIX Broken repr of TorchAoConfig
The __repr__ method references a non-existent self.kwargs. This is now
fixed.
There does not appear to be a uniform way of defining __repr__ for
quantization configs. I copied the method as implemented for HQQ:
See the HQQ implementation in `src/transformers/utils/quantization_config.py`, lines 285–287 (commit e2ac16b28a).
This commit is contained in:
@ -1309,7 +1309,8 @@ class TorchAoConfig(QuantizationConfigMixin):
|
||||
return _STR_TO_METHOD[self.quant_type](**self.quant_type_kwargs)
|
||||
|
||||
def __repr__(self):
    """Return a readable multi-line representation of this config.

    The previous implementation referenced ``self.kwargs``, an attribute
    that does not exist on ``TorchAoConfig``, so calling ``repr()`` raised
    ``AttributeError``. Following the convention used by other quantization
    configs (e.g. HQQ), we serialize the full config dict instead.
    """
    config_dict = self.to_dict()
    # sort_keys=True keeps the output deterministic across runs
    return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}\n"
|
||||
|
||||
|
||||
@dataclass
|
||||
|
@ -74,6 +74,13 @@ class TorchAoConfigTest(unittest.TestCase):
|
||||
with self.assertRaisesRegex(ValueError, "Unexpected keyword arg"):
|
||||
_ = TorchAoConfig("int4_weight_only", group_size1=32)
|
||||
|
||||
def test_repr(self):
    """
    Check that repr() does not raise and produces meaningful output.
    """
    quantization_config = TorchAoConfig("int4_weight_only", modules_to_not_convert=["conv"], group_size=8)
    output = repr(quantization_config)
    # __repr__ embeds self.__class__.__name__, so the class name must appear
    self.assertIsInstance(output, str)
    self.assertIn("TorchAoConfig", output)
|
||||
|
||||
|
||||
@require_torch_gpu
|
||||
@require_torchao
|
||||
|
Reference in New Issue
Block a user