# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
|
|
Run the KTO training script with the commands below. In general, the optimal configuration for KTO will be similar to that of DPO.
|
|
|
|
# Full training:
|
|
python trl/scripts/kto.py \
|
|
--dataset_name trl-lib/kto-mix-14k \
|
|
--model_name_or_path=trl-lib/qwen1.5-1.8b-sft \
|
|
--per_device_train_batch_size 16 \
|
|
--num_train_epochs 1 \
|
|
--learning_rate 5e-7 \
|
|
--lr_scheduler_type=cosine \
|
|
--gradient_accumulation_steps 1 \
|
|
--logging_steps 10 \
|
|
--eval_steps 500 \
|
|
--output_dir=kto-aligned-model \
|
|
--warmup_ratio 0.1 \
|
|
--report_to wandb \
|
|
--bf16 \
|
|
--logging_first_step
|
|
|
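
# Multi-GPU (a sketch, not part of the original examples: assumes 🤗 Accelerate has been
# set up with `accelerate config`; the same flags as above can be appended):
accelerate launch trl/scripts/kto.py \
    --dataset_name trl-lib/kto-mix-14k \
    --model_name_or_path trl-lib/qwen1.5-1.8b-sft \
    --output_dir kto-aligned-model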

# QLoRA:
python trl/scripts/kto.py \
    --dataset_name trl-lib/kto-mix-14k \
    --model_name_or_path trl-lib/qwen1.5-1.8b-sft \
    --per_device_train_batch_size 8 \
    --num_train_epochs 1 \
    --learning_rate 5e-7 \
    --lr_scheduler_type cosine \
    --gradient_accumulation_steps 1 \
    --logging_steps 10 \
    --eval_steps 500 \
    --output_dir kto-aligned-model-lora \
    --warmup_ratio 0.1 \
    --report_to wandb \
    --bf16 \
    --logging_first_step \
    --use_peft \
    --load_in_4bit \
    --lora_target_modules all-linear \
    --lora_r 16 \
    --lora_alpha 16
"""

import argparse

from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

from trl import (
    KTOConfig,
    KTOTrainer,
    ModelConfig,
    ScriptArguments,
    TrlParser,
    get_peft_config,
    setup_chat_format,
)


def main(script_args, training_args, model_args):
    # Load the policy model to be trained
    model = AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
    )
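    # A second, frozen copy serves as the reference model: KTO, like DPO, compares the
    # policy's log-probabilities against the reference's when computing its loss, so
    # this copy is never updated during training.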
    ref_model = AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
    )

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
    )
    if tokenizer.pad_token is None:
        # Batched training needs a padding token; fall back to EOS when none is defined
        tokenizer.pad_token = tokenizer.eos_token

    # If we are aligning a base model, we use ChatML as the default template
    if tokenizer.chat_template is None:
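        # setup_chat_format applies the ChatML template, adds its special tokens to the
        # tokenizer, and resizes the model embeddings to match the enlarged vocabulary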
        model, tokenizer = setup_chat_format(model, tokenizer)

    # Load the dataset
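    # (expected in TRL's unpaired-preference format: each example carries a "prompt",
    # a "completion", and a boolean "label" marking the completion as desirable or not)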
    dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config)

    # Initialize the KTO trainer
    trainer = KTOTrainer(
        model,
        ref_model,
        args=training_args,
        train_dataset=dataset[script_args.dataset_train_split],
        eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
        processing_class=tokenizer,
        peft_config=get_peft_config(model_args),
    )

    # Train the model
    trainer.train()

    # Save and, if requested, push to the Hub
    trainer.save_model(training_args.output_dir)
    if training_args.push_to_hub:
        trainer.push_to_hub(dataset_name=script_args.dataset_name)
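        # (the dataset name is recorded in the model card generated on the Hub)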


def make_parser(subparsers: argparse._SubParsersAction = None):
    dataclass_types = (ScriptArguments, KTOConfig, ModelConfig)
    if subparsers is not None:
        # Called from the `trl` CLI: register this script as the `kto` subcommand
        parser = subparsers.add_parser("kto", help="Run the KTO training script", dataclass_types=dataclass_types)
    else:
        # Called directly: build a standalone parser
        parser = TrlParser(dataclass_types)
    return parser
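

# Note: TrlParser can also read arguments from a YAML file passed via `--config`,
# e.g. `python trl/scripts/kto.py --config kto_config.yaml` (the file name here is
# illustrative, not part of the repository).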
if __name__ == "__main__":
    parser = make_parser()
    script_args, training_args, model_args = parser.parse_args_and_config()
    main(script_args, training_args, model_args)