Mirror of https://github.com/volcengine/verl.git, synced 2025-10-20 21:53:50 +08:00
[trainer] feat: ReMax support using reward model for baseline (#3780)
### What does this PR do?

> Add **concise** overview of what this PR aims to achieve or accomplish. Reference related GitHub issues and PRs that help with the review.

Not limited to reward functions only: we should also support using the reward model (RM) to calculate the reward baseline.

### Checklist Before Starting

- [X] Search for similar PRs. Paste at least one query link here: ...
- [X] Format the PR title as `[{modules}] {type}: {description}` (This will be checked by the CI)
  - `{modules}` include `fsdp`, `megatron`, `sglang`, `vllm`, `rollout`, `trainer`, `ci`, `training_utils`, `recipe`, `hardware`, `deployment`, `ray`, `worker`, `single_controller`, `misc`, `perf`, `model`, `algo`, `env`, `tool`, `ckpt`, `doc`, `data`
  - If this PR involves multiple modules, separate them with `,`, like `[megatron, fsdp, doc]`
  - `{type}` is in `feat`, `fix`, `refactor`, `chore`, `test`
  - If this PR breaks any API (CLI arguments, config, function signature, etc.), add `[BREAKING]` to the beginning of the title.
  - Example: `[BREAKING][fsdp, megatron] feat: dynamic batching`

### Test

> For changes that can not be tested by CI (e.g., algorithm implementation, new model support), validate by experiment(s) and show results like training curve plots, evaluation results, etc.

### API and Usage Example

> Demonstrate how the API changes if any, and provide usage example(s) if possible.

```python
# Add code snippet or script demonstrating how to use this
```

### Design & Code Changes

> Demonstrate the high-level design if this PR is complex, and list the specific changes.

### Checklist Before Submitting

> [!IMPORTANT]
> Please check all the following items before requesting a review, otherwise the reviewer might deprioritize this PR for review.

- [X] Read the [Contribute Guide](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md).
- [X] Apply [pre-commit checks](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md#code-linting-and-formatting): `pre-commit install && pre-commit run --all-files --show-diff-on-failure --color=always`
- [X] Add / Update [the documentation](https://github.com/volcengine/verl/tree/main/docs).
- [X] Add unit or end-to-end test(s) to [the CI workflow](https://github.com/volcengine/verl/tree/main/.github/workflows) to cover all the code. If not feasible, explain why: ...
- [X] Once your PR is ready for CI, send a message in [the `ci-request` channel](https://verl-project.slack.com/archives/C091TCESWB1) in [the `verl` Slack workspace](https://join.slack.com/t/verl-project/shared_invite/zt-3855yhg8g-CTkqXu~hKojPCmo7k_yXTQ). (If not accessible, please try [the Feishu group (飞书群)](https://applink.larkoffice.com/client/chat/chatter/add_by_link?link_token=772jd4f1-cd91-441e-a820-498c6614126a).)

Signed-off-by: Hollow Man <hollowman@opensuse.org>
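For orientation before the diff, the refactored baseline path can be sketched with toy tensors. This is a minimal illustration only: the plain-dict `batch`, `rm_score_fn`, and the simplified `compute_reward` are stand-ins, not verl's `DataProto` or worker-group API; only the data flow mirrors the change (compute RM scores if configured and not already present, merge them into the batch, combine with any rule-based reward, then sum token-level rewards into the sequence-level baseline used by ReMax).

```python
# Toy illustration of the new ReMax baseline flow (stand-in names, not verl APIs).
import torch

def rm_score_fn(responses: torch.Tensor) -> torch.Tensor:
    # stand-in for the reward-model worker: one score per response token
    return torch.randn_like(responses, dtype=torch.float32)

def compute_reward(batch: dict) -> torch.Tensor:
    # stand-in for combining RM scores with a (here trivial) rule-based reward
    rule_based = torch.zeros_like(batch["responses"], dtype=torch.float32)
    return batch.get("rm_scores", rule_based) + rule_based

use_rm = True
batch = {"responses": torch.randint(0, 1000, (4, 16))}   # 4 greedy baseline rollouts

if use_rm and "rm_scores" not in batch:                   # reuse scores if already present
    batch["rm_scores"] = rm_score_fn(batch["responses"])

token_level = compute_reward(batch)                       # (num_prompts, response_len)
batch["reward_baselines"] = token_level.sum(dim=-1)       # sequence-level baseline per prompt
print(batch["reward_baselines"].shape)                    # torch.Size([4])
```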
@@ -31,6 +31,7 @@ from verl.trainer.ppo.ray_trainer import (
     compute_timing_metrics,
     marked_timer,
 )
+from verl.trainer.ppo.reward import compute_reward
 from verl.utils.metric import reduce_metrics


@@ -95,14 +96,22 @@ def fit(self):
                 gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch)

                 batch = batch.union(gen_baseline_output)
-                reward_baseline_tensor = self.reward_fn(batch)
+                # compute reward model score on batch
+                rm_scores = None
+                if self.use_rm and "rm_scores" not in batch.batch.keys():
+                    rm_scores = self.rm_wg.compute_rm_score(batch)
+                    batch = batch.union(rm_scores)
+                reward_baseline_tensor, _ = compute_reward(batch, self.reward_fn)
                 reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)

-                batch.pop(batch_keys=list(gen_baseline_output.batch.keys()))
+                keys_to_pop = set(gen_baseline_output.batch.keys())
+                if rm_scores is not None:
+                    keys_to_pop.update(rm_scores.batch.keys())
+                batch.pop(batch_keys=list(keys_to_pop))

                 batch.batch["reward_baselines"] = reward_baseline_tensor

-                del gen_baseline_batch, gen_baseline_output
+                del rm_scores, gen_baseline_batch, gen_baseline_output

                 batch.non_tensor_batch["uid"] = np.array(
                     [str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object
@@ -142,13 +151,13 @@ def fit(self):
                 # compute scores. Support both model and function-based.
                 # We first compute the scores using reward model. Then, we call reward_fn to combine
                 # the results from reward model and rule-based results.
-                if self.use_rm:
+                if self.use_rm and "rm_scores" not in batch.batch.keys():
                     # we first compute reward model score
                     reward_tensor = self.rm_wg.compute_rm_score(batch)
                     batch = batch.union(reward_tensor)

                 # we combine with rule-based rm
-                reward_tensor = self.reward_fn(batch)
+                reward_tensor, _ = compute_reward(batch, self.reward_fn)
                 batch.batch["token_level_scores"] = reward_tensor

                 # compute rewards. apply_kl_penalty if available
@@ -41,6 +41,7 @@ from verl.trainer.ppo.ray_trainer import (
     compute_advantage,
     compute_response_mask,
 )
+from verl.trainer.ppo.reward import compute_reward
 from verl.utils.profiler import marked_timer
 from verl.utils.rollout_skip import RolloutSkip

@@ -152,14 +153,22 @@ class RayDAPOTrainer(RayPPOTrainer):
                 gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch)

                 new_batch = new_batch.union(gen_baseline_output)
-                reward_baseline_tensor = self.reward_fn(new_batch)
+                # compute reward model score on new_batch
+                rm_scores = None
+                if self.use_rm and "rm_scores" not in new_batch.batch.keys():
+                    rm_scores = self.rm_wg.compute_rm_score(new_batch)
+                    new_batch = new_batch.union(rm_scores)
+                reward_baseline_tensor, _ = compute_reward(new_batch, self.reward_fn)
                 reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)

-                new_batch.pop(batch_keys=list(gen_baseline_output.batch.keys()))
+                keys_to_pop = set(gen_baseline_output.batch.keys())
+                if rm_scores is not None:
+                    keys_to_pop.update(rm_scores.batch.keys())
+                new_batch.pop(batch_keys=list(keys_to_pop))

                 new_batch.batch["reward_baselines"] = reward_baseline_tensor

-                del gen_baseline_batch, gen_baseline_output
+                del rm_scores, gen_baseline_batch, gen_baseline_output

                 new_batch.non_tensor_batch["uid"] = np.array(
                     [str(uuid.uuid4()) for _ in range(len(new_batch.batch))], dtype=object
@@ -172,21 +181,13 @@ class RayDAPOTrainer(RayPPOTrainer):
                     # compute scores. Support both model and function-based.
                     # We first compute the scores using reward model. Then, we call reward_fn to combine
                     # the results from reward model and rule-based results.
-                    if self.use_rm:
+                    if self.use_rm and "rm_scores" not in new_batch.batch.keys():
                         # we first compute reward model score
                         reward_tensor = self.rm_wg.compute_rm_score(new_batch)
                         new_batch = new_batch.union(reward_tensor)

                     # we combine with rule-based rm
-                    reward_extra_infos_dict: dict[str, list]
-                    try:
-                        reward_result = self.reward_fn(new_batch, return_dict=True)
-                        reward_tensor = reward_result["reward_tensor"]
-                        reward_extra_infos_dict = reward_result.get("reward_extra_info", {})
-                    except Exception as e:
-                        print(f"Error in reward_fn: {e}")
-                        reward_tensor = self.reward_fn(new_batch)
-                        reward_extra_infos_dict = {}
+                    reward_tensor, reward_extra_infos_dict = compute_reward(new_batch, self.reward_fn)

                     new_batch.batch["token_level_scores"] = reward_tensor

@@ -39,6 +39,7 @@ from verl.trainer.ppo.ray_trainer import (
     compute_advantage,
     compute_response_mask,
 )
+from verl.trainer.ppo.reward import compute_reward
 from verl.utils.profiler import simple_timer


@@ -129,14 +130,22 @@ class RayEntropyTrainer(RayPPOTrainer):
                 gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch)

                 new_batch = new_batch.union(gen_baseline_output)
-                reward_baseline_tensor = self.reward_fn(new_batch)
+                # compute reward model score on new_batch
+                rm_scores = None
+                if self.use_rm and "rm_scores" not in new_batch.batch.keys():
+                    rm_scores = self.rm_wg.compute_rm_score(new_batch)
+                    new_batch = new_batch.union(rm_scores)
+                reward_baseline_tensor, _ = compute_reward(new_batch, self.reward_fn)
                 reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)

-                new_batch.pop(batch_keys=list(gen_baseline_output.batch.keys()))
+                keys_to_pop = set(gen_baseline_output.batch.keys())
+                if rm_scores is not None:
+                    keys_to_pop.update(rm_scores.batch.keys())
+                new_batch.pop(batch_keys=list(keys_to_pop))

                 new_batch.batch["reward_baselines"] = reward_baseline_tensor

-                del gen_baseline_batch, gen_baseline_output
+                del rm_scores, gen_baseline_batch, gen_baseline_output

                 new_batch.non_tensor_batch["uid"] = np.array(
                     [str(uuid.uuid4()) for _ in range(len(new_batch.batch))], dtype=object
@@ -149,21 +158,13 @@ class RayEntropyTrainer(RayPPOTrainer):
                     # compute scores. Support both model and function-based.
                     # We first compute the scores using reward model. Then, we call reward_fn to combine
                     # the results from reward model and rule-based results.
-                    if self.use_rm:
+                    if self.use_rm and "rm_scores" not in new_batch.batch.keys():
                         # we first compute reward model score
                         reward_tensor = self.rm_wg.compute_rm_score(new_batch)
                         new_batch = new_batch.union(reward_tensor)

                     # we combine with rule-based rm
-                    reward_extra_infos_dict: dict[str, list]
-                    try:
-                        reward_result = self.reward_fn(new_batch, return_dict=True)
-                        reward_tensor = reward_result["reward_tensor"]
-                        reward_extra_infos_dict = reward_result["reward_extra_info"]
-                    except Exception as e:
-                        print(f"Error in reward_fn: {e}")
-                        reward_tensor = self.reward_fn(new_batch)
-                        reward_extra_infos_dict = {}
+                    reward_tensor, reward_extra_infos_dict = compute_reward(new_batch, self.reward_fn)

                     new_batch.batch["token_level_scores"] = reward_tensor

@@ -329,6 +329,45 @@ class RayPRIMETrainer(RayPPOTrainer):
         if isinstance(self.train_dataloader.dataset, RLHFDataset):
             self.train_dataloader.dataset.resume_dataset_state()

+    def compute_reward(self, batch: DataProto, n_samples: int):
+        update_style = self.config.reward_model.model.get("update", "none")
+        reward_output_metrics = {}
+        if update_style == "none":  # only run forward
+            reward_output = self.rm_wg.compute_rm_score(batch)
+        elif update_style == "after":  # update and directly return the reward
+            reward_output = self.rm_wg.update_rm(batch)
+        elif update_style == "before":  # update reward model, and then run forward
+            reward_output = self.rm_wg.update_rm(batch)
+            if "metrics" in reward_output.meta_info.keys():
+                reward_output_metrics = reduce_metrics(reward_output.meta_info["metrics"])
+
+            reward_output = self.rm_wg.compute_rm_score(batch)
+        elif update_style == "reverse":  # run forward to calculate statistics, then update reward model
+            reward_output = self.rm_wg.compute_rm_score(batch)
+
+            # broadcast q and acc tensor to each result
+            bc_td = DataProto.from_dict(
+                tensors={
+                    "Q_bc": reward_output.batch["q"]
+                    .sum(dim=-1)
+                    .view(-1, n_samples)
+                    .unsqueeze(1)
+                    .expand(-1, n_samples, -1)
+                    .reshape(-1, n_samples),
+                    "acc_bc": batch.batch["acc"]
+                    .view(-1, n_samples)
+                    .unsqueeze(1)
+                    .expand(-1, n_samples, -1)
+                    .reshape(-1, n_samples),
+                }
+            )
+            batch = batch.union(bc_td)
+            reward_output = self.rm_wg.update_rm(batch)
+        else:
+            raise NotImplementedError
+
+        return reward_output, reward_output_metrics
+
     def fit(self):
         """
         The training loop of PPO.
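The control flow of the new `RayPRIMETrainer.compute_reward` helper above is easier to see in isolation. Below is a toy sketch under stated assumptions: plain dicts replace `DataProto`, and `score`/`update` are hypothetical stand-ins for `rm_wg.compute_rm_score` / `rm_wg.update_rm`; the branch is selected by the existing `reward_model.model.update` setting (`none`, `after`, `before`, `reverse`).

```python
# Toy sketch of the update-style dispatch consolidated by the PRIME compute_reward helper.
def score(batch: dict) -> dict:
    # stand-in for a reward-model forward pass: one score per response
    return {"rm_scores": [0.0] * len(batch["responses"])}

def update(batch: dict) -> dict:
    # stand-in for one reward-model update step, returning scores plus metrics
    return {"rm_scores": [0.1] * len(batch["responses"]), "metrics": {"rm/loss": 0.5}}

def compute_reward(batch: dict, update_style: str = "none"):
    metrics: dict = {}
    if update_style == "none":      # only run forward
        out = score(batch)
    elif update_style == "after":   # update and directly return the reward
        out = update(batch)
    elif update_style == "before":  # update the RM first, then score with the updated RM
        metrics = update(batch).get("metrics", {})
        out = score(batch)
    else:                           # "reverse" also broadcasts Q/acc statistics; omitted in this toy
        raise NotImplementedError(update_style)
    return out, metrics

rewards, rm_metrics = compute_reward({"responses": ["a", "b"]}, update_style="before")
print(rewards["rm_scores"], rm_metrics)  # [0.0, 0.0] {'rm/loss': 0.5}
```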
@@ -391,10 +430,19 @@
                     gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch)

                     batch = batch.union(gen_baseline_output)
-                    reward_baseline_tensor = self.reward_fn(batch)
+                    rm_scores, _ = self.compute_reward(batch, 1)
+                    reward_baseline_tensor = rm_scores.batch.get(
+                        "rm_scores", rm_scores.batch.get("acc_bc", None)
+                    )
+                    if reward_baseline_tensor is None:
+                        raise ValueError(
+                            "Neither 'rm_scores' nor 'acc_bc' found in reward model output for baseline."
+                        )
                     reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)

-                    batch.pop(batch_keys=list(gen_baseline_output.batch.keys()))
+                    keys_to_pop = set(gen_baseline_output.batch.keys())
+                    keys_to_pop.update(rm_scores.batch.keys())
+                    batch.pop(batch_keys=list(keys_to_pop))

                     batch.batch["reward_baselines"] = reward_baseline_tensor

@@ -450,46 +498,11 @@

                 with simple_timer("adv", timing_raw):
                     if self.use_rm:
-                        update_style = self.config.reward_model.model.get("update", "none")
-                        if update_style == "none":  # only run forward
-                            reward_output = self.rm_wg.compute_rm_score(batch)
-                        elif update_style == "after":  # update and directly return the reward
-                            reward_output = self.rm_wg.update_rm(batch)
-                        elif update_style == "before":  # update reward model, and then run forward
-                            reward_output = self.rm_wg.update_rm(batch)
-                            if "metrics" in reward_output.meta_info.keys():
-                                reward_output_metrics = reduce_metrics(reward_output.meta_info["metrics"])
-                                metrics.update(reward_output_metrics)
-
-                            reward_output = self.rm_wg.compute_rm_score(batch)
-                        elif (
-                            update_style == "reverse"
-                        ):  # run forward to calculate statistics, then update reward model
-                            reward_output = self.rm_wg.compute_rm_score(batch)
-                            # broadcast q and acc tensor to each result
-                            bc_td = DataProto.from_dict(
-                                tensors={
-                                    "Q_bc": reward_output.batch["q"]
-                                    .sum(dim=-1)
-                                    .view(-1, n_samples)
-                                    .unsqueeze(1)
-                                    .expand(-1, n_samples, -1)
-                                    .reshape(-1, n_samples),
-                                    "acc_bc": batch.batch["acc"]
-                                    .view(-1, n_samples)
-                                    .unsqueeze(1)
-                                    .expand(-1, n_samples, -1)
-                                    .reshape(-1, n_samples),
-                                }
-                            )
-                            batch = batch.union(bc_td)
-                            reward_output = self.rm_wg.update_rm(batch)
-                        else:
-                            raise NotImplementedError
+                        reward_output, reward_output_metrics = self.compute_reward(batch, n_samples)
                         batch = batch.union(reward_output)
                         if "metrics" in reward_output.meta_info.keys():
-                            reward_output_metrics = reduce_metrics(reward_output.meta_info["metrics"])
-                            metrics.update(reward_output_metrics)
+                            reward_output_metrics.update(reduce_metrics(reward_output.meta_info["metrics"]))
+                        metrics.update(reward_output_metrics)

                 # compute advantages, executed on the driver process
                 batch = compute_advantage(
@@ -205,14 +205,22 @@ class RaySPPOTrainer(RayPPOTrainer):
                 gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch)

                 batch = batch.union(gen_baseline_output)
-                reward_baseline_tensor = self.reward_fn(batch)
+                # compute reward model score on batch
+                rm_scores = None
+                if self.use_rm and "rm_scores" not in batch.batch.keys():
+                    rm_scores = self.rm_wg.compute_rm_score(batch)
+                    batch = batch.union(rm_scores)
+                reward_baseline_tensor, _ = compute_reward(batch, self.reward_fn)
                 reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)

-                batch.pop(batch_keys=list(gen_baseline_output.batch.keys()))
+                keys_to_pop = set(gen_baseline_output.batch.keys())
+                if rm_scores is not None:
+                    keys_to_pop.update(rm_scores.batch.keys())
+                batch.pop(batch_keys=list(keys_to_pop))

                 batch.batch["reward_baselines"] = reward_baseline_tensor

-                del gen_baseline_batch, gen_baseline_output
+                del rm_scores, gen_baseline_batch, gen_baseline_output

                 batch.non_tensor_batch["uid"] = np.array(
                     [str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object
@@ -235,7 +243,7 @@ class RaySPPOTrainer(RayPPOTrainer):

                 with simple_timer("reward", timing_raw):
                     # compute reward model score
-                    if self.use_rm:
+                    if self.use_rm and "rm_scores" not in batch.batch.keys():
                         reward_tensor = self.rm_wg.compute_rm_score(batch)
                         batch = batch.union(reward_tensor)

@@ -1065,14 +1065,22 @@ class RayPPOTrainer:
                     else:
                         gen_baseline_output = self.async_rollout_manager.generate_sequences(gen_baseline_batch)
                     batch = batch.union(gen_baseline_output)
-                    reward_baseline_tensor = self.reward_fn(batch)
+                    # compute reward model score on batch
+                    rm_scores = None
+                    if self.use_rm and "rm_scores" not in batch.batch.keys():
+                        rm_scores = self.rm_wg.compute_rm_score(batch)
+                        batch = batch.union(rm_scores)
+                    reward_baseline_tensor, _ = compute_reward(batch, self.reward_fn)
                     reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)

-                    batch.pop(batch_keys=list(gen_baseline_output.batch.keys()))
+                    keys_to_pop = set(gen_baseline_output.batch.keys())
+                    if rm_scores is not None:
+                        keys_to_pop.update(rm_scores.batch.keys())
+                    batch.pop(batch_keys=list(keys_to_pop))

                     batch.batch["reward_baselines"] = reward_baseline_tensor

-                    del gen_baseline_batch, gen_baseline_output
+                    del rm_scores, gen_baseline_batch, gen_baseline_output
                     # repeat to align with repeated responses in rollout
                     batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
                     batch = batch.union(gen_batch_output)