# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# /// script
# dependencies = [
#     "trl",
#     "trackio",
#     "kernels",
# ]
# ///

"""
|
|
Full training:
|
|
python examples/scripts/reward_modeling.py \
|
|
--model_name_or_path Qwen/Qwen2-0.5B-Instruct \
|
|
--dataset_name trl-lib/ultrafeedback_binarized \
|
|
--output_dir Qwen2-0.5B-Reward \
|
|
--per_device_train_batch_size 8 \
|
|
--num_train_epochs 1 \
|
|
--gradient_checkpointing True \
|
|
--learning_rate 1.0e-5 \
|
|
--eval_strategy steps \
|
|
--eval_steps 50 \
|
|
--max_length 2048
|
|
|
|
LoRA:
|
|
python examples/scripts/reward_modeling.py \
|
|
--model_name_or_path Qwen/Qwen2-0.5B-Instruct \
|
|
--dataset_name trl-lib/ultrafeedback_binarized \
|
|
--output_dir Qwen2-0.5B-Reward-LoRA \
|
|
--per_device_train_batch_size 8 \
|
|
--num_train_epochs 1 \
|
|
--gradient_checkpointing True \
|
|
--learning_rate 1.0e-4 \
|
|
--eval_strategy steps \
|
|
--eval_steps 50 \
|
|
--max_length 2048 \
|
|
--use_peft \
|
|
--lora_task_type SEQ_CLS \
|
|
--lora_r 32 \
|
|
--lora_alpha 16
|
|
"""
|
|
|
|
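
# How it works: the model below is a sequence classifier with a single scalar output
# (the reward). RewardTrainer scores the "chosen" and "rejected" completion of each
# preference pair and minimizes the Bradley-Terry pairwise loss,
#
#     loss = -log(sigmoid(r_chosen - r_rejected))
#
# so that preferred completions receive higher scores.
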
import os

import torch
from accelerate import logging
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, HfArgumentParser

from trl import (
    ModelConfig,
    RewardConfig,
    RewardTrainer,
    ScriptArguments,
    get_kbit_device_map,
    get_peft_config,
    get_quantization_config,
)


logger = logging.get_logger(__name__)

# Enable logging in a Hugging Face Space
os.environ.setdefault("TRACKIO_SPACE_ID", "trl-trackio")


if __name__ == "__main__":
    parser = HfArgumentParser((ScriptArguments, RewardConfig, ModelConfig))
    script_args, training_args, model_args = parser.parse_args_into_dataclasses()
    # Use non-reentrant activation checkpointing: the reentrant variant can fail when
    # parts of the model are frozen (e.g. the base weights under PEFT).
    training_args.gradient_checkpointing_kwargs = dict(use_reentrant=False)

    ################
    # Model & Tokenizer
    ################
    dtype = model_args.dtype if model_args.dtype in ["auto", None] else getattr(torch, model_args.dtype)
    model_kwargs = dict(
        revision=model_args.model_revision,
        use_cache=False if training_args.gradient_checkpointing else True,
        dtype=dtype,
    )
    quantization_config = get_quantization_config(model_args)
    if quantization_config is not None:
        # Passing None would not be treated the same as omitting the argument, so we include it only when valid.
        model_kwargs["device_map"] = get_kbit_device_map()
        model_kwargs["quantization_config"] = quantization_config

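    # Note: `get_quantization_config` builds a quantization config from the ModelConfig
    # flags (`--load_in_4bit` / `--load_in_8bit`), and `get_kbit_device_map` maps the
    # model onto the current device for k-bit training. Both assume `bitsandbytes` is
    # installed; without those flags, `quantization_config` is None and this is a no-op.
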
    # A reward model is a sequence classifier with a single output: `num_labels=1`
    # yields one scalar score (the reward) per sequence.
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, num_labels=1, trust_remote_code=model_args.trust_remote_code, **model_kwargs
    )

    if model_args.use_peft and model_args.lora_task_type != "SEQ_CLS":
        logger.warning(
            "You are using a `task_type` different from `SEQ_CLS` for PEFT. This will lead to silent bugs."
            " Make sure to pass --lora_task_type SEQ_CLS when using this script with PEFT.",
        )

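    # Why SEQ_CLS matters (assuming PEFT's standard task mapping): with
    # `task_type="SEQ_CLS"`, PEFT keeps the freshly initialized classification head
    # (`score`) trainable and saves it alongside the adapter; with another task type
    # the head would stay frozen at its random initialization. The LoRA command in
    # the docstring builds roughly this config:
    #
    #     from peft import LoraConfig
    #     peft_config = LoraConfig(task_type="SEQ_CLS", r=32, lora_alpha=16)
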
    ##############
    # Load dataset
    ##############
    dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config)

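    # The trainer expects preference pairs: each row of trl-lib/ultrafeedback_binarized
    # holds two conversations sharing the same prompt, e.g. (abridged):
    #
    #     {"chosen":   [{"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}],
    #      "rejected": [{"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}]}
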
    ##########
    # Training
    ##########
    trainer = RewardTrainer(
        model=model,
        args=training_args,
        train_dataset=dataset[script_args.dataset_train_split],
        eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
        peft_config=get_peft_config(model_args),
    )
    trainer.train()

    ##########
    # Evaluate
    ##########
    if training_args.eval_strategy != "no":
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    ############################
    # Save model and push to Hub
    ############################
    trainer.save_model(training_args.output_dir)
    if training_args.push_to_hub:
        trainer.push_to_hub(dataset_name=script_args.dataset_name)
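
# Using the trained reward model afterwards (a minimal sketch, run separately; the
# path assumes the default --output_dir from the docstring):
#
#     import torch
#     from transformers import AutoModelForSequenceClassification, AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("Qwen2-0.5B-Reward")
#     reward_model = AutoModelForSequenceClassification.from_pretrained("Qwen2-0.5B-Reward")
#     messages = [
#         {"role": "user", "content": "What is the capital of France?"},
#         {"role": "assistant", "content": "Paris."},
#     ]
#     inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", return_dict=True)
#     with torch.no_grad():
#         reward = reward_model(**inputs).logits[0][0].item()  # scalar score for this reply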