### What does this PR do?

#1858 added DAPO support on Ascend NPU, but the example `qwen2.5-7b-instruct` training script was not included; this PR adds it. The script is borrowed from https://gitee.com/ascend/ModelZoo-PyTorch/blob/master/PyTorch/built-in/rl/VeRL_for_PyTorch/test/train_qwen2_5_7b_instruct_DAPO_full_16p.sh

### Checklist Before Starting

- [x] Search for similar PRs. Paste at least one query link here: ...
- [x] Format the PR title as `[{modules}] {type}: {description}` (this will be checked by the CI)
  - `{modules}` include `fsdp`, `megatron`, `sglang`, `vllm`, `rollout`, `trainer`, `ci`, `training_utils`, `recipe`, `hardware`, `deployment`, `ray`, `worker`, `single_controller`, `misc`, `perf`, `model`, `algo`, `env`, `tool`, `ckpt`, `doc`, `data`
  - If this PR involves multiple modules, separate them with `,`, e.g. `[megatron, fsdp, doc]`
  - `{type}` is one of `feat`, `fix`, `refactor`, `chore`, `test`
  - If this PR breaks any API (CLI arguments, config, function signature, etc.), add `[BREAKING]` to the beginning of the title.
  - Example: `[BREAKING][fsdp, megatron] feat: dynamic batching`

### Test

Not related.

### API and Usage Example

Not related.

### Design & Code Changes

Not related.

### Checklist Before Submitting

> [!IMPORTANT]
> Please check all the following items before requesting a review, otherwise the reviewer might deprioritize this PR for review.

- [x] Read the [Contribute Guide](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md).
- [x] Apply [pre-commit checks](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md#code-linting-and-formatting): `pre-commit install && pre-commit run --all-files --show-diff-on-failure --color=always`
- [x] Add / update [the documentation](https://github.com/volcengine/verl/tree/main/docs).
- [x] Add unit or end-to-end test(s) to [the CI workflow](https://github.com/volcengine/verl/tree/main/.github/workflows) to cover all the code. If not feasible, explain why: ...
- [x] Once your PR is ready for CI, send a message in [the `ci-request` channel](https://verl-project.slack.com/archives/C091TCESWB1) in [the `verl` Slack workspace](https://join.slack.com/t/verl-project/shared_invite/zt-3855yhg8g-CTkqXu~hKojPCmo7k_yXTQ). (If not accessible, please try [the Feishu group (飞书群)](https://applink.larkoffice.com/client/chat/chatter/add_by_link?link_token=772jd4f1-cd91-441e-a820-498c6614126a).)
#!/usr/bin/env bash
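# -x: trace each command; -e: exit on first error; -u: error on unset variables;
# -o pipefail: a pipeline fails if any of its stages fails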
set -xeuo pipefail

project_name='DAPO-Qwen2.5-32B'
exp_name='Qwen2.5-32B-npu-32rank-gbs128'

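# Advantage estimation: DAPO computes group-relative (GRPO-style) advantages
# over the n responses sampled for each prompt.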
adv_estimator=grpo

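# DAPO drops the KL term entirely: no KL penalty in the reward and no KL loss.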
use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0

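# Clip-Higher: decoupled clip ratios; the raised upper bound (0.28 vs. 0.2)
# leaves more room for low-probability tokens to grow, countering entropy collapse.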
clip_ratio_low=0.2
clip_ratio_high=0.28

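# Overlong reward shaping: responses whose length falls inside the final
# overlong_buffer_len tokens receive a soft, linearly scaled penalty instead
# of a hard truncation penalty.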
max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 20))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0

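# Token-level (rather than sample-level) loss averaging, so long responses
# are not down-weighted.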
loss_agg_mode="token-mean"

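# Dynamic sampling: prompt groups whose responses are all correct or all
# incorrect yield zero advantage and are filtered out; generation repeats
# (up to max_num_gen_batches batches) until a full train batch remains.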
enable_filter_groups=True
filter_groups_metric=acc
max_num_gen_batches=10

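# 2 nodes x 16 NPUs per node = 32 ranks, matching the "32rank" experiment name.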
NNODES=2

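# gen_prompt_bsz oversamples prompts 3x so that, after dynamic-sampling
# filtering, enough groups survive to fill train_prompt_bsz; the mini-batch
# size of 32 gives 128 / 32 = 4 gradient updates per training step.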
train_prompt_bsz=128
gen_prompt_bsz=$((train_prompt_bsz * 3))
n_resp_per_prompt=16
train_prompt_mini_bsz=32

# Ray
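# NOTE: this shadows the shell's built-in PWD, so WORKING_DIR defaults to the
# relative path "./" (the directory the script is launched from).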
PWD=./
RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}

# Paths
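# All of the following can be overridden from the environment.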
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-32B"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
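# Training rollouts sample essentially unfiltered (temperature 1.0, top_p 1.0,
# top_k -1); validation uses top_p 0.7 for more deterministic evaluation.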
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
val_top_p=0.7

# Performance-related parameters
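# Ulysses sequence parallelism shards each sequence across sp_size ranks,
# so the per-NPU token budget is (2048 + 20480) / 8 = 2816 tokens.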
sp_size=8
use_dynamic_bsz=True
actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) / sp_size))
infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) / sp_size))
offload=True
gen_tp=4
enable_chunked_prefill=True

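# Submit the job to the Ray cluster at RAY_ADDRESS; --no-wait returns
# immediately (follow progress with `ray job logs <submission_id> --follow`).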
ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \
    --working-dir "${WORKING_DIR}" \
    --address "${RAY_ADDRESS}" \
    -- python3 -m recipe.dapo.main_dapo \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.gen_batch_size=${gen_prompt_bsz} \
    data.train_batch_size=${train_prompt_bsz} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    algorithm.filter_groups.enable=${enable_filter_groups} \
    algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \
    algorithm.filter_groups.metric=${filter_groups_metric} \
    actor_rollout_ref.actor.use_torch_compile=False \
    actor_rollout_ref.ref.use_torch_compile=False \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    +actor_rollout_ref.model.override_config.attention_dropout=0. \
    +actor_rollout_ref.model.override_config.embd_pdrop=0. \
    +actor_rollout_ref.model.override_config.resid_pdrop=0. \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
    actor_rollout_ref.actor.optim.weight_decay=0.1 \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.90 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \
    actor_rollout_ref.rollout.enable_chunked_prefill=${enable_chunked_prefill} \
    actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k="${top_k}" \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=True \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \
    reward_model.reward_manager=dapo \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
    reward_model.overlong_buffer.len=${overlong_buffer_len} \
    reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
    trainer.logger="['console','wandb']" \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node=16 \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=True \
    trainer.test_freq=5 \
    trainer.save_freq=20 \
    trainer.total_epochs=1 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.device=npu \
    trainer.resume_mode=auto \
    actor_rollout_ref.actor.fsdp_config.forward_prefetch=True \
    actor_rollout_ref.ref.fsdp_config.forward_prefetch=True