Fix code style with make precommit (#4119)

Albert Villanova del Moral
2025-09-22 21:19:54 +02:00
committed by GitHub
parent 9f0ed8b130
commit a68b4af50f
3 changed files with 7 additions and 8 deletions

View File

@@ -338,7 +338,7 @@ def main(test_size, push_to_hub, repo_id):
"Namespaces are one",
"Although practicality sometimes beats purity,",
],
"completions":[
"completions": [
[", let me think...", " ugly."],
[", of course,", " implicit.", " because clarity matters."],
["... let's keep it basic,", " complex."],
@@ -350,7 +350,7 @@ def main(test_size, push_to_hub, repo_id):
[" some theoretical elegance,", " purity."],
[" silently,", " unless explicitly silenced."],
[" the temptation to guess."],
[" way to do it,"," but sometimes it's not obvious.", " especially when there's more than one possibility."],
[" way to do it,", " but sometimes it's not obvious.", " especially when there's more than one possibility."],
[" clear at first,", " it will eventually emerge."],
[" later."],
[" problematic fixes."],

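The hunk above is from a dataset-generation script whose entry point is main(test_size, push_to_hub, repo_id). As a rough, hypothetical illustration of how such a prompt/completions dict typically becomes a Hub dataset, here is a minimal sketch; the "prompt" column name, the arbitrary prompt/completion pairing, the train/test split, and the push_to_hub call are assumptions for illustration only. Only the completion fragments and the main() signature come from the hunk itself.

# Minimal sketch (not the actual script body): build a datasets.Dataset from a
# prompt/completions dict like the one in the hunk, split it, and optionally push it.
from datasets import Dataset

def main(test_size, push_to_hub, repo_id):
    data = {
        "prompt": [  # hypothetical column name; only "completions" is visible in the hunk
            "Namespaces are one",
            "Although practicality sometimes beats purity,",
        ],
        "completions": [
            # excerpted from the hunk; the pairing with the prompts above is arbitrary here
            [", let me think...", " ugly."],
            [", of course,", " implicit.", " because clarity matters."],
        ],
    }
    dataset = Dataset.from_dict(data).train_test_split(test_size=test_size)
    if push_to_hub:
        dataset.push_to_hub(repo_id)

# Example invocation with a hypothetical repo id; no upload is performed here.
main(test_size=0.5, push_to_hub=False, repo_id="my-org/zen-completions")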
View File

@@ -1411,10 +1411,9 @@ class SFTTrainerTester(TrlTestCase):
def test_peft_model_with_quantization(self):
"""SFTTrainer should not freeze layers of existing PeftModel.
-This test simulates a realistic QLoRA scenario where a quantized base model
-is first converted to a PeftModel, then passed to SFTTrainer. The issue was
-that prepare_model_for_kbit_training would freeze all parameters including
-the LoRA adapters, making training impossible.
+This test simulates a realistic QLoRA scenario where a quantized base model is first converted to a PeftModel,
+then passed to SFTTrainer. The issue was that prepare_model_for_kbit_training would freeze all parameters
+including the LoRA adapters, making training impossible.
"""
# Get the base model
model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"

View File

@@ -271,7 +271,7 @@ class PPOTrainer(Trainer):
) # note that we are calling `self.lr_scheduler.step()` manually only at the batch level
#########
-### trainer specifics
+# trainer specifics
#########
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
self.callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
@@ -303,7 +303,7 @@ class PPOTrainer(Trainer):
self.model.add_model_tags(self._tag_names)
#########
-### setup dataloader
+# setup dataloader
#########
self.dataloader = DataLoader(
self.train_dataset,