### What does this PR do?

Since we already expose the `override_transformer_config` option, we use it directly to enable activation recomputation. The default settings are the same as in `megatron.training`.

### Checklist Before Starting

- [x] Search for similar PRs. Paste at least one query link here: ...
- [x] Format the PR title as `[{modules}] {type}: {description}` (This will be checked by the CI)
  - `{modules}` include `fsdp`, `megatron`, `sglang`, `vllm`, `rollout`, `trainer`, `ci`, `training_utils`, `recipe`, `hardware`, `deployment`, `ray`, `worker`, `single_controller`, `misc`, `perf`, `model`, `algo`, `env`, `tool`, `ckpt`, `doc`, `data`
  - If this PR involves multiple modules, separate them with `,`, like `[megatron, fsdp, doc]`
  - `{type}` is in `feat`, `fix`, `refactor`, `chore`, `test`
  - If this PR breaks any API (CLI arguments, config, function signature, etc.), add `[BREAKING]` to the beginning of the title.
  - Example: `[BREAKING][fsdp, megatron] feat: dynamic batching`

### Test

> For changes that can not be tested by CI (e.g., algorithm implementation, new model support), validate by experiment(s) and show results like training curve plots, evaluation results, etc.

### API and Usage Example

> Demonstrate how the API changes if any, and provide usage example(s) if possible.

```python
# Add code snippet or script demonstrating how to use this
```

### Design & Code Changes

> Demonstrate the high-level design if this PR is complex, and list the specific changes.

### Checklist Before Submitting

> [!IMPORTANT]
> Please check all the following items before requesting a review, otherwise the reviewer might deprioritize this PR for review.

- [ ] Read the [Contribute Guide](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md).
- [ ] Apply [pre-commit checks](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md#code-linting-and-formatting): `pre-commit install && pre-commit run --all-files --show-diff-on-failure --color=always`
- [ ] Add / Update [the documentation](https://github.com/volcengine/verl/tree/main/docs).
- [ ] Add unit or end-to-end test(s) to [the CI workflow](https://github.com/volcengine/verl/tree/main/.github/workflows) to cover all the code. If not feasible, explain why: ...
- [ ] Once your PR is ready for CI, send a message in [the `ci-request` channel](https://verl-project.slack.com/archives/C091TCESWB1) in [the `verl` Slack workspace](https://join.slack.com/t/verl-project/shared_invite/zt-3855yhg8g-CTkqXu~hKojPCmo7k_yXTQ).
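As a usage sketch of the option this PR exercises: recompute settings can be passed through `override_transformer_config`, with keys mirroring Megatron-Core's `TransformerConfig` recompute fields (`recompute_granularity`, `recompute_method`, `recompute_num_layers`). The values below are illustrative, not the defaults:

```bash
# A minimal sketch, assuming the standard Megatron-Core recompute fields; the
# values are illustrative, and the defaults follow megatron.training.
python3 -m verl.trainer.main_ppo --config-name='ppo_megatron_trainer' \
    +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_granularity=full \
    +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_method=uniform \
    +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_num_layers=1
```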
set -x

export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping

# 0. download the model
huggingface-cli download moonshotai/Moonlight-16B-A3B-Instruct

# 1. convert the model to mcore format
# change HF_MODEL_PATH and DIST_CKPT_PATH to your own paths
HF_MODEL_PATH=/data/models/moonshotai/Moonlight-16B-A3B-Instruct
DIST_CKPT_PATH=/data/mcore_ckpt/Moonlight-16B-A3B-Instruct
python scripts/converter_hf_to_mcore.py --hf_model_path $HF_MODEL_PATH --output_path $DIST_CKPT_PATH
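# The converter emits a Megatron-Core distributed checkpoint, which the workers
# below load via use_dist_checkpointing=True and dist_checkpointing_path.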
# 2. run the script
gsm8k_train_path=$HOME/data/gsm8k/train.parquet
gsm8k_test_path=$HOME/data/gsm8k/test.parquet
train_files=$gsm8k_train_path
test_files=$gsm8k_test_path

# HF model path consumed by actor_rollout_ref.model.path and critic.model.path below
LLM=$HF_MODEL_PATH
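# Offload switches cascade: ALL_OFFLOAD seeds the COMMON_* defaults, which in
# turn seed the per-role (ACTOR_* / REF_* / CRITIC_* / RM_*) settings; any of
# them can be overridden from the environment, e.g. ALL_OFFLOAD=True.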
ALL_OFFLOAD=${ALL_OFFLOAD:-False}
COMMON_PARAM_OFFLOAD=${COMMON_PARAM_OFFLOAD:-$ALL_OFFLOAD}
COMMON_GRAD_OFFLOAD=${COMMON_GRAD_OFFLOAD:-$ALL_OFFLOAD}
COMMON_OPTIMIZER_OFFLOAD=${COMMON_OPTIMIZER_OFFLOAD:-$ALL_OFFLOAD}

ACTOR_PARAM_OFFLOAD=${ACTOR_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD}
ACTOR_GRAD_OFFLOAD=${ACTOR_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD}
ACTOR_OPTIMIZER_OFFLOAD=${ACTOR_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD}
REF_PARAM_OFFLOAD=${REF_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD}
CRITIC_PARAM_OFFLOAD=${CRITIC_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD}
CRITIC_GRAD_OFFLOAD=${CRITIC_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD}
CRITIC_OPTIMIZER_OFFLOAD=${CRITIC_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD}
RM_PARAM_OFFLOAD=${RM_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD}
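# Parallelism layout: TP=8 x PP=2 gives a 16-way model-parallel group, so the
# NODES*8 = 32 GPUs run with data parallel size 2; EP=8 shards the MoE experts
# (with ETP=1), while VLLM_TP sets the rollout engine's tensor parallel size
# separately from training.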
NODES=4
PP=2
TP=8
EP=8
ETP=1
VLLM_TP=4

# RAY_ADDRESS='auto' ray job submit --working-dir . --
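# The num_layers_in_last_pipeline_stage override below pins 13 decoder layers to
# the last pipeline stage, presumably because Moonlight's layer count does not
# split evenly across PP=2; the trust_remote_code flags are set because the
# model relies on custom modeling code from the Hub.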
python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer' \
    algorithm.adv_estimator=gae \
    data.train_files="$train_files" \
    data.val_files="$test_files" \
    data.train_batch_size=1024 \
    data.max_prompt_length=1024 \
    data.max_response_length=512 \
    data.filter_overlong_prompts=True \
    data.truncation='error' \
    data.trust_remote_code=True \
    actor_rollout_ref.model.path=$LLM \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.actor.use_kl_loss=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \
    critic.optim.lr=1e-5 \
    critic.model.path=$LLM \
    critic.ppo_micro_batch_size_per_gpu=4 \
    algorithm.use_kl_in_reward=False \
    trainer.critic_warmup=0 \
    trainer.logger='["console","wandb"]' \
    trainer.project_name='verl_megatron_gsm8k_examples' \
    trainer.experiment_name='moonlight_16b_a3b_instruct_4nodes' \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=$NODES \
    trainer.save_freq=-1 \
    trainer.test_freq=5 \
    actor_rollout_ref.model.trust_remote_code=True \
    critic.model.trust_remote_code=True \
    +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_last_pipeline_stage=13 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=$VLLM_TP \
    actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=$PP \
    actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=$PP \
    critic.megatron.pipeline_model_parallel_size=$PP \
    actor_rollout_ref.actor.megatron.tensor_model_parallel_size=$TP \
    actor_rollout_ref.ref.megatron.tensor_model_parallel_size=$TP \
    critic.megatron.tensor_model_parallel_size=$TP \
    actor_rollout_ref.actor.megatron.expert_model_parallel_size=$EP \
    actor_rollout_ref.ref.megatron.expert_model_parallel_size=$EP \
    critic.megatron.expert_model_parallel_size=$EP \
    actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=$ETP \
    actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=$ETP \
    critic.megatron.expert_tensor_parallel_size=$ETP \
    actor_rollout_ref.actor.megatron.param_offload=${ACTOR_PARAM_OFFLOAD} \
    actor_rollout_ref.actor.megatron.optimizer_offload=${ACTOR_OPTIMIZER_OFFLOAD} \
    actor_rollout_ref.actor.megatron.grad_offload=${ACTOR_GRAD_OFFLOAD} \
    actor_rollout_ref.ref.megatron.param_offload=${REF_PARAM_OFFLOAD} \
    critic.megatron.param_offload=${CRITIC_PARAM_OFFLOAD} \
    critic.megatron.optimizer_offload=${CRITIC_OPTIMIZER_OFFLOAD} \
    critic.megatron.grad_offload=${CRITIC_GRAD_OFFLOAD} \
    actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \
    actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \
    critic.megatron.use_dist_checkpointing=True \
    actor_rollout_ref.actor.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \
    actor_rollout_ref.ref.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \
    critic.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \
    trainer.val_before_train=False \
    trainer.total_epochs=100 "$@"
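# Extra Hydra overrides pass straight through via "$@", e.g. (script name hypothetical):
#   ALL_OFFLOAD=True bash run_moonlight_megatron_ppo.sh trainer.total_epochs=1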