Mirror of https://github.com/huggingface/peft.git (synced 2025-10-20 15:33:48 +08:00)
Compare commits: 2410f458c8...smangrul/f (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 4525e18e58 | |
| | 6e43bd53ba | |
@@ -40,7 +40,7 @@ class ModelArguments:
         metadata={"help": "Compute dtype for 4bit base models"},
     )
     bnb_4bit_quant_storage_dtype: Optional[str] = field(
-        default="float32",
+        default="uint8",
         metadata={"help": "Quantization storage dtype for 4bit base models"},
     )
     bnb_4bit_quant_type: Optional[str] = field(
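For readers unfamiliar with this field, here is a minimal, illustrative sketch (not this file's actual surrounding code) of how a string-valued `bnb_4bit_quant_storage_dtype` is typically resolved to a `torch.dtype` and handed to `BitsAndBytesConfig`; the literal values below are assumptions chosen for the example:

```python
import torch
from transformers import BitsAndBytesConfig

# Assumed resolution step: map the string field (e.g. "uint8", "bfloat16")
# to the corresponding torch dtype.
quant_storage_dtype = getattr(torch, "uint8")  # the new default above

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",                   # illustrative value
    bnb_4bit_compute_dtype=torch.bfloat16,       # illustrative value
    bnb_4bit_quant_storage=quant_storage_dtype,  # dtype used to pack the 4-bit weights
)
```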
@@ -125,12 +125,15 @@ def create_and_prepare_model(args, data_args, training_args):
             load_in_4bit=args.use_4bit_quantization,
         )
     else:
+        torch_dtype = (
+            quant_storage_dtype if quant_storage_dtype and quant_storage_dtype.is_floating_point else torch.float32
+        )
         model = AutoModelForCausalLM.from_pretrained(
             args.model_name_or_path,
             quantization_config=bnb_config,
             trust_remote_code=True,
             attn_implementation="flash_attention_2" if args.use_flash_attn else "eager",
-            torch_dtype=quant_storage_dtype or torch.float32,
+            torch_dtype=torch_dtype,
         )
 
     peft_config = None
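The added `torch_dtype` selection loads the model weights in the quantization storage dtype only when that dtype is floating point (useful, e.g., when FSDP needs uniformly typed parameters to shard), and falls back to `torch.float32` otherwise. A standalone sketch of just that logic, with `resolve_torch_dtype` as a hypothetical helper name:

```python
import torch

def resolve_torch_dtype(quant_storage_dtype):
    # Hypothetical helper mirroring the selection above: use the storage
    # dtype only when it is floating point (e.g. bfloat16); integer storage
    # such as uint8, or None, falls back to float32.
    if quant_storage_dtype is not None and quant_storage_dtype.is_floating_point:
        return quant_storage_dtype
    return torch.float32

assert resolve_torch_dtype(torch.bfloat16) is torch.bfloat16
assert resolve_torch_dtype(torch.uint8) is torch.float32
assert resolve_torch_dtype(None) is torch.float32
```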