# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
python examples/scripts/reward_modeling.py \
    --model_name_or_path=facebook/opt-350m \
    --output_dir="reward_modeling_anthropic_hh" \
    --per_device_train_batch_size=64 \
    --num_train_epochs=1 \
    --gradient_accumulation_steps=16 \
    --gradient_checkpointing=True \
    --learning_rate=1.41e-5 \
    --report_to="wandb" \
    --remove_unused_columns=False \
    --optim="adamw_torch" \
    --logging_steps=10 \
    --evaluation_strategy="steps" \
    --max_length=512
"""

import torch
from datasets import load_dataset
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser

from trl import (
    ModelConfig,
    RewardConfig,
    RewardTrainer,
    get_kbit_device_map,
    get_peft_config,
    get_quantization_config,
)

tqdm.pandas()


if __name__ == "__main__":
    parser = HfArgumentParser((RewardConfig, ModelConfig))
    reward_config, model_config = parser.parse_args_into_dataclasses()
    reward_config.gradient_checkpointing_kwargs = dict(use_reentrant=False)

    ################
    # Model & Tokenizer
    ################
    torch_dtype = (
        model_config.torch_dtype
        if model_config.torch_dtype in ["auto", None]
        else getattr(torch, model_config.torch_dtype)
    )
    quantization_config = get_quantization_config(model_config)
    model_kwargs = dict(
        revision=model_config.model_revision,
        trust_remote_code=model_config.trust_remote_code,
        torch_dtype=torch_dtype,  # forward the dtype resolved above so --torch_dtype takes effect
        device_map=get_kbit_device_map() if quantization_config is not None else None,
        quantization_config=quantization_config,
    )
    tokenizer = AutoTokenizer.from_pretrained(model_config.model_name_or_path, use_fast=True)
    # num_labels=1 gives a single-logit head whose output is used as the scalar reward
    model = AutoModelForSequenceClassification.from_pretrained(
        model_config.model_name_or_path, num_labels=1, **model_kwargs
    )

    ################
    # Dataset
    ################
    raw_datasets = load_dataset("Anthropic/hh-rlhf")

    # Tokenize chosen/rejected pairs of inputs; the four columns produced here
    # are the ones RewardTrainer's data collator expects.
    # Adapt this section to your needs for custom datasets.
    def preprocess_function(examples):
        new_examples = {
            "input_ids_chosen": [],
            "attention_mask_chosen": [],
            "input_ids_rejected": [],
            "attention_mask_rejected": [],
        }
        for chosen, rejected in zip(examples["chosen"], examples["rejected"]):
            tokenized_chosen = tokenizer(chosen)
            tokenized_rejected = tokenizer(rejected)

            new_examples["input_ids_chosen"].append(tokenized_chosen["input_ids"])
            new_examples["attention_mask_chosen"].append(tokenized_chosen["attention_mask"])
            new_examples["input_ids_rejected"].append(tokenized_rejected["input_ids"])
            new_examples["attention_mask_rejected"].append(tokenized_rejected["attention_mask"])

        return new_examples

    # Preprocess the dataset and filter out examples that are longer than reward_config.max_length
    raw_datasets = raw_datasets.map(
        preprocess_function,
        batched=True,
        num_proc=4,
    )
    raw_datasets = raw_datasets.filter(
        lambda x: len(x["input_ids_chosen"]) <= reward_config.max_length
        and len(x["input_ids_rejected"]) <= reward_config.max_length
    )
    train_dataset = raw_datasets["train"]
    eval_dataset = raw_datasets["test"]

    ################
    # Training
    ################
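    # RewardTrainer fits the classification head as a pairwise reward model,
    # minimizing -log(sigmoid(reward_chosen - reward_rejected)) over each pair.
    # get_peft_config returns None unless PEFT is requested (e.g. --use_peft),
    # in which case the base model is wrapped with a LoRA adapter first.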
    trainer = RewardTrainer(
        model=model,
        tokenizer=tokenizer,
        args=reward_config,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        peft_config=get_peft_config(model_config),
    )
    trainer.train()
    trainer.save_model(reward_config.output_dir)
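    # Optional: run a final evaluation pass with the standard Trainer helpers;
    # RewardTrainer's default metrics report how often the chosen response
    # receives a higher reward than the rejected one.
    metrics = trainer.evaluate()
    trainer.log_metrics("eval", metrics)
    trainer.save_metrics("eval", metrics)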