Mirror of https://github.com/volcengine/verl.git (synced 2025-10-20 13:43:50 +08:00)
### What does this PR do?

- Add an open math reasoning recipe that uses the SFT trainer with the model engine
- Support setting the validation dataset to `None` in the SFT trainer
- Fix `main_eval`
- Use aiohttp in `main_generation_server` to avoid hangs in AsyncOpenAI

### Checklist Before Starting

- [ ] Search for similar PRs. Paste at least one query link here: ...
- [ ] Format the PR title as `[{modules}] {type}: {description}` (This will be checked by the CI)
  - `{modules}` include `fsdp`, `megatron`, `sglang`, `vllm`, `rollout`, `trainer`, `ci`, `training_utils`, `recipe`, `hardware`, `deployment`, `ray`, `worker`, `single_controller`, `misc`, `perf`, `model`, `algo`, `env`, `tool`, `ckpt`, `doc`, `data`
  - If this PR involves multiple modules, separate them with `,`, like `[megatron, fsdp, doc]`
  - `{type}` is in `feat`, `fix`, `refactor`, `chore`, `test`
  - If this PR breaks any API (CLI arguments, config, function signature, etc.), add `[BREAKING]` to the beginning of the title.
  - Example: `[BREAKING][fsdp, megatron] feat: dynamic batching`

### Test

> For changes that can not be tested by CI (e.g., algorithm implementation, new model support), validate by experiment(s) and show results like training curve plots, evaluation results, etc.

### API and Usage Example

> Demonstrate how the API changes if any, and provide usage example(s) if possible.

```python
# Add code snippet or script demonstrating how to use this
```

### Design & Code Changes

> Demonstrate the high-level design if this PR is complex, and list the specific changes.

### Checklist Before Submitting

> [!IMPORTANT]
> Please check all the following items before requesting a review, otherwise the reviewer might deprioritize this PR for review.

- [ ] Read the [Contribute Guide](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md).
- [ ] Apply [pre-commit checks](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md#code-linting-and-formatting): `pre-commit install && pre-commit run --all-files --show-diff-on-failure --color=always`
- [ ] Add / Update [the documentation](https://github.com/volcengine/verl/tree/main/docs).
- [ ] Add unit or end-to-end test(s) to [the CI workflow](https://github.com/volcengine/verl/tree/main/.github/workflows) to cover all the code. If not feasible, explain why: ...
- [ ] Once your PR is ready for CI, send a message in [the `ci-request` channel](https://verl-project.slack.com/archives/C091TCESWB1) in [the `verl` Slack workspace](https://join.slack.com/t/verl-project/shared_invite/zt-3855yhg8g-CTkqXu~hKojPCmo7k_yXTQ). (If not accessible, please try [the Feishu group (飞书群)](https://applink.larkoffice.com/client/chat/chatter/add_by_link?link_token=772jd4f1-cd91-441e-a820-498c6614126a).)

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
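The recipe is driven by the environment variables defined in the script below, so a run can be customized without editing the file. A minimal launch sketch follows; the script filename and dataset path are placeholders for illustration, not taken from this PR:

```bash
# Hypothetical invocation; the script name run_sft_qwen3_8b.sh and the dataset path are assumed.
# Any env-var default in the recipe (BACKEND, MODEL_ID, SP_SIZE, TP_SIZE, ...) can be overridden here.
TRAIN_FILES=/data/open_math_reasoning/cot_dataset.parquet \
MODEL_ID=Qwen/Qwen3-8B-Base \
BACKEND=megatron TP_SIZE=2 PP_SIZE=2 \
bash run_sft_qwen3_8b.sh
```

With the default `BACKEND=fsdp`, the FSDP engine config is used instead; per this PR, the SFT trainer no longer requires a validation dataset, so none is supplied here.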
94 lines · 2.8 KiB · Bash
#!/usr/bin/env bash
set -xeuo pipefail
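
# Entry point, dataset, backend, and model; most of these can be overridden via environment variables.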
ENTRYPOINT=${ENTRYPOINT:-"-m verl.trainer.sft_trainer"}
TRAIN_FILES=${TRAIN_FILES:-/path/to/cot_dataset.parquet}
backend=${BACKEND:-fsdp}

project_name=verl_sft_test
RESUME_MODE=auto
MODEL_ID=${MODEL_ID:-Qwen/Qwen3-8B-Base}
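
# Parallelism for the FSDP engine: Ulysses sequence parallel size and FSDP sharding group size.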
SP_SIZE=${SP_SIZE:-8}
FSDP_SIZE=${FSDP_SIZE:-16}
FSDP_STRATEGY=${FSDP_STRATEGY:-"fsdp2"}
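
# Parallelism for the Megatron engine: tensor, pipeline, virtual pipeline, and context parallel sizes.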
TP_SIZE=${TP_SIZE:-1}
PP_SIZE=${PP_SIZE:-1}
VPP_SIZE=${VPP_SIZE:-null}
CP_SIZE=${CP_SIZE:-1}
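
# Padding behaviour: pad mode for the data pipeline and whether padding tokens are removed in the model forward.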
PAD_MODE=${PAD_MODE:-no_padding}
USE_REMOVE_PADDING=${USE_REMOVE_PADDING:-True}
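
# Engine and optimizer overrides used when backend=fsdp.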
FSDP_ENGINE_CONFIG="\
    engine=${backend} \
    optim=${backend} \
    optim.lr=2e-5 \
    optim.lr_warmup_steps_ratio=0.01 \
    optim.weight_decay=0.1 \
    optim.betas="[0.9,0.95]" \
    optim.clip_grad=1.0 \
    optim.min_lr_ratio=0.1 \
    optim.warmup_style=cosine \
    engine.ulysses_sequence_parallel_size=${SP_SIZE} \
    engine.strategy=${FSDP_STRATEGY} \
    engine.fsdp_size=${FSDP_SIZE}"
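
# Engine and optimizer overrides used when backend=megatron.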
MEGATRON_ENGINE_CONFIG="\
    engine=${backend} \
    optim=${backend} \
    optim.lr=1e-5 \
    optim.lr_warmup_steps_ratio=0.2 \
    optim.weight_decay=0.1 \
    optim.betas="[0.9,0.95]" \
    optim.clip_grad=1.0 \
    optim.lr_warmup_init=0 \
    optim.lr_decay_style=cosine \
    optim.min_lr=1e-6 \
    engine.tensor_model_parallel_size=${TP_SIZE} \
    engine.pipeline_model_parallel_size=${PP_SIZE} \
    engine.virtual_pipeline_model_parallel_size=${VPP_SIZE} \
    engine.context_parallel_size=${CP_SIZE}"
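
# Select the engine config and build the experiment name from the chosen backend and its parallelism settings.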
if [ "$backend" = "fsdp" ]; then
|
|
ENGINE_CONFIG="$FSDP_ENGINE_CONFIG"
|
|
echo "Using fsdp engine"
|
|
exp_name=nvidia-openmathreasoning-qwen3-8b-${backend}-${FSDP_STRATEGY}-sp${SP_SIZE}-fsdp-1008a1
|
|
else
|
|
ENGINE_CONFIG="$MEGATRON_ENGINE_CONFIG"
|
|
echo "Using megatron engine"
|
|
exp_name=nvidia-openmathreasoning-${backend}-tp${TP_SIZE}-pp${PP_SIZE}-vpp${VPP_SIZE}-cp${CP_SIZE}-pad-${PAD_MODE}-use_remove_padding-${USE_REMOVE_PADDING}
|
|
fi
|
|
|
|
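
# Checkpoints go to $HOME/open_verl/sft/<project>/<experiment> unless CKPT_HOME is set.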
CKPT_HOME=${CKPT_HOME:-$HOME/open_verl/sft/${project_name}/${exp_name}}
mkdir -p "${CKPT_HOME}"
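
# Single-node launch: one process per GPU (NUM_TRAINERS, default 8), running the SFT trainer entry point.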
torchrun --standalone --nnodes=1 --nproc-per-node=${NUM_TRAINERS:-8} \
    ${ENTRYPOINT} \
    data.train_files="${TRAIN_FILES}" \
    data.train_batch_size=96 \
    data.max_length=32768 \
    data.pad_mode=${PAD_MODE} \
    data.truncation=error \
    data.use_dynamic_bsz=True \
    data.max_token_len_per_gpu=65536 \
    data.messages_key=messages \
    model.path=$MODEL_ID \
    model.use_remove_padding=${USE_REMOVE_PADDING} \
    ${ENGINE_CONFIG} \
    trainer.test_freq=-1 \
    trainer.save_freq=4000 \
    trainer.logger=['console','wandb'] \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.total_epochs=1 \
    trainer.default_local_dir="${CKPT_HOME}" \
    trainer.resume_mode=${RESUME_MODE} \
    trainer.max_ckpt_to_keep=5 \
    checkpoint.save_contents=[model,optimizer,extra]