[trainer] fix: address serialization issues when using async reward function and ray ppo trainer (#3769)

ben
2025-10-17 17:22:59 -07:00
committed by GitHub
parent e0e352b566
commit f0539a5121

@@ -1105,7 +1105,9 @@ class RayPPOTrainer:
     batch = batch.union(reward_tensor)
     if self.config.reward_model.launch_reward_fn_async:
-        future_reward = compute_reward_async.remote(data=batch, reward_fn=self.reward_fn)
+        future_reward = compute_reward_async.remote(
+            data=batch, config=self.config, tokenizer=self.tokenizer
+        )
     else:
         reward_tensor, reward_extra_infos_dict = compute_reward(batch, self.reward_fn)
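
Why this change helps: Ray serializes the arguments of a `.remote(...)` call (via cloudpickle) before shipping them to the worker, and a fully constructed `self.reward_fn` can carry non-picklable state. Passing only the picklable `config` and `tokenizer` and rebuilding the reward function inside the remote task avoids that serialization failure. Below is a minimal sketch of the receiving side of this pattern; the `load_reward_manager` / `compute_reward` helpers and their exact signatures are assumptions modeled on verl's `trainer.ppo.reward` module, not a verbatim copy of it.

```python
import ray

# Assumed helpers, mirroring verl's trainer.ppo.reward module (signatures are
# an approximation for illustration):
#   load_reward_manager(config, tokenizer, num_examine, **reward_kwargs) -> reward_fn
#   compute_reward(data, reward_fn) -> (reward_tensor, reward_extra_infos_dict)
from verl.trainer.ppo.reward import compute_reward, load_reward_manager


@ray.remote(num_cpus=1)
def compute_reward_async(data, config=None, tokenizer=None, reward_fn=None):
    """Compute rewards inside a Ray task.

    If reward_fn is not supplied, rebuild it on the worker from config and
    tokenizer, which are plain picklable objects and therefore cross the Ray
    serialization boundary safely.
    """
    if reward_fn is None:
        assert config is not None and tokenizer is not None, (
            "config and tokenizer are required to reconstruct the reward function"
        )
        reward_fn = load_reward_manager(
            config,
            tokenizer,
            num_examine=0,
            **config.reward_model.get("reward_kwargs", {}),
        )
    return compute_reward(data, reward_fn)
```

On the trainer side, the call site in the diff above simply does `future_reward = compute_reward_async.remote(data=batch, config=self.config, tokenizer=self.tokenizer)` and later resolves it with `ray.get(future_reward)`, so only lightweight, picklable objects ever cross the Ray boundary.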