Mirror of https://github.com/volcengine/verl.git (synced 2025-10-20 13:43:50 +08:00)
### What does this PR do?

This PR updates the base config in verl:

- support `+extra.any_key` usage for the base config in verl
- allow selective subfields to be frozen
- add an auto-generated config yaml file `verl/trainer/config/_generated_ppo_trainer.yaml` for reference purposes, in case the nested inheritance structure makes the config information too scattered

### Checklist Before Starting

- [x] Search for similar PRs. Paste at least one query link here: ...
- [x] Format the PR title as `[{modules}] {type}: {description}` (This will be checked by the CI)
  - `{modules}` include `fsdp`, `megatron`, `sglang`, `vllm`, `rollout`, `trainer`, `ci`, `training_utils`, `recipe`, `hardware`, `deployment`, `ray`, `worker`, `single_controller`, `misc`, `perf`, `model`, `algo`, `env`, `tool`, `ckpt`, `doc`, `data`
  - If this PR involves multiple modules, separate them with `,`, like `[megatron, fsdp, doc]`
  - `{type}` is in `feat`, `fix`, `refactor`, `chore`, `test`
  - If this PR breaks any API (CLI arguments, config, function signature, etc.), add `[BREAKING]` to the beginning of the title.
  - Example: `[BREAKING][fsdp, megatron] feat: dynamic batching`

### Test

- added frozen-field tests

### API and Usage Example

> Demonstrate how the API changes if any, and provide usage example(s) if possible.

You can now pass `--xx.profiler.extra.any_new_key=any_plain_value` on the command line to a dataclass inheriting `verl.BaseConfig`. This way we can still pass dataclass configs inside verl while allowing some flexibility in accepting new keys from users' ad-hoc usage; see the sketch below.

### Checklist Before Submitting

> [!IMPORTANT]
> Please check all the following items before requesting a review, otherwise the reviewer might deprioritize this PR for review.

- [x] Read the [Contribute Guide](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md).
- [x] Apply [pre-commit checks](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md#code-linting-and-formatting): `pre-commit install && pre-commit run --all-files --show-diff-on-failure --color=always`
- [ ] Add / Update [the documentation](https://github.com/volcengine/verl/tree/main/docs).
- [ ] Add unit or end-to-end test(s) to [the CI workflow](https://github.com/volcengine/verl/tree/main/.github/workflows) to cover all the code. If not feasible, explain why: ...
- [ ] Once your PR is ready for CI, send a message in [the `ci-request` channel](https://verl-project.slack.com/archives/C091TCESWB1) in [the `verl` Slack workspace](https://join.slack.com/t/verl-project/shared_invite/zt-3855yhg8g-CTkqXu~hKojPCmo7k_yXTQ).

---------

Co-authored-by: Lin <haibin@Lins-Laptop.hsd1.wa.comcast.net>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
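To make the new behavior concrete, here is a minimal, self-contained Python sketch of the frozen-field and `extra`-key semantics described above. It is not verl's actual `BaseConfig` implementation: the class names and the `_frozen_fields` / `extra` attributes are illustrative assumptions that only mirror the described behavior, where unknown keys are collected into a catch-all mapping and selected subfields reject reassignment after construction.

```python
from dataclasses import dataclass, field


@dataclass
class BaseConfigSketch:
    """Hypothetical stand-in for verl.BaseConfig (illustrative only)."""

    _frozen_fields: tuple = ()                 # subfields that may not be reassigned after init
    extra: dict = field(default_factory=dict)  # catch-all for `+extra.any_key`-style overrides

    def __post_init__(self):
        # Mark construction as finished so later writes hit the frozen-field check.
        object.__setattr__(self, "_initialized", True)

    def __setattr__(self, name, value):
        if getattr(self, "_initialized", False) and name in self._frozen_fields:
            raise AttributeError(f"field '{name}' is frozen and cannot be modified")
        super().__setattr__(name, value)


@dataclass
class ProfilerConfigSketch(BaseConfigSketch):
    """Example subclass: `discrete` is frozen; anything new still fits into `extra`."""

    discrete: bool = False
    _frozen_fields: tuple = ("discrete",)


if __name__ == "__main__":
    cfg = ProfilerConfigSketch()
    # Mimics passing --xx.profiler.extra.any_new_key=any_plain_value on the command line.
    cfg.extra["any_new_key"] = "any_plain_value"
    try:
        cfg.discrete = True  # frozen subfield: raises AttributeError
    except AttributeError as err:
        print(err)
```

Under this sketch, an override such as `--xx.profiler.extra.any_new_key=any_plain_value` would land in `cfg.extra` instead of being rejected, while overwriting a frozen subfield like `discrete` raises an error; verl's real `BaseConfig` may differ in its details.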
428 lines · 20 KiB · YAML
name: e2e_ppo_trainer

on:
  # Trigger the workflow on push or pull request,
  # but only for the main branch
  # For push, for now only anti-patterns are specified so it is more conservative
  # and achieves higher coverage.
  push:
    branches:
      - main
      - v0.*
    paths:
      - "**/*.py"
      # Other entrypoints
      - "!verl/trainer/fsdp_sft_trainer.py"
      # Recipes
      - "!recipe/**"
      # Megatron
      - "!verl/workers/**/megatron_*.py"

  pull_request:
    branches:
      - main
      - v0.*
    paths:
      - "**/*.py"
      # Other entrypoints
      - "!**/*.md"
      - "!docker/**"
      - "!examples/**"
      - "!tests/**"
      - "!verl/trainer/main_*.py"
      - "!verl/trainer/fsdp_sft_trainer.py"
      # Docs
      - "!docs/**"
      # Recipes
      - "!recipe/**"
      # Megatron
      - "!verl/workers/**/megatron_*.py"
      # Entrypoints
      - ".github/workflows/e2e_ppo_trainer.yml"
      - "examples/data_preprocess/gsm8k.py"
      - "examples/data_preprocess/geo3k.py"
      - "tests/special_e2e/ppo_trainer"
      - "verl/trainer/main_ppo.py"
      - "verl/trainer/config/ppo_trainer.yaml"

# Cancel jobs on the same ref if a new one is triggered
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

# Declare permissions just read content.
permissions:
  contents: read

jobs:
  pre_commit_for_ppo:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.12"]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install the current repository
        run: |
          pip install -e .
      - name: Set ruff --output-format=github
        run: |
          sed -i 's/--output-format=full/--output-format=github/' .pre-commit-config.yaml
          git add .pre-commit-config.yaml
      - uses: pre-commit/action@v3.0.1
        with:
          extra_args: "" # Overriding default "--all-files"

  e2e_ppo_trainer_vllm:
    runs-on: [L20x8]
    timeout-minutes: 60 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    container:
      image: verlai/verl:app-verl0.4-vllm0.8.5-mcore0.12.1
      options: --gpus all --shm-size=10g
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install --no-deps -e .[test,vllm]
      - name: Prepare GSM8K dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/gsm8k.py
      # HF sanity
      - name: Running GSM8K E2E training tests on 1 L20 GPU with hf for sanity
        run: |
          ray stop --force
          bash tests/special_e2e/ppo_trainer/run_single_gpu.sh
      # Function RM
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm with validation and saving (FSDP_SIZE=8)
        run: |
          ray stop --force
          VAL_BEFORE_TRAIN=True TEST_FREQ=1 SAVE_FREQ=1 SAVE_HF_MODEL=True VERL_EXP_NAME="qwen2.5-0.5b-function-reward-minimal-fsdp-size8" bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm after resuming
        run: |
          ray stop --force
          RESUME_MODE=auto VERL_EXP_NAME="qwen2.5-0.5b-function-reward-minimal-fsdp-size8" bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Test merging FSDP checkpoints (Qwen Actor)
        run: |
          exp_name="qwen2.5-0.5b-function-reward-minimal-fsdp-size8"
          python -m verl.model_merger test --backend fsdp --local_dir checkpoints/verl-test/${exp_name}/global_step_1/actor --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/actor/huggingface
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm with validation and saving (DDP_SIZE=2, FSDP_SIZE=4)
        run: |
          ray stop --force
          VAL_BEFORE_TRAIN=True TEST_FREQ=1 SAVE_FREQ=1 SAVE_HF_MODEL=True FSDP_SIZE=4 VERL_EXP_NAME="qwen2.5-0.5b-function-reward-minimal-ddp-size2-fsdp-size4" bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Test merging DDP+FSDP checkpoints (Qwen Actor)
        run: |
          exp_name="qwen2.5-0.5b-function-reward-minimal-ddp-size2-fsdp-size4"
          python -m verl.model_merger test --backend fsdp --local_dir checkpoints/verl-test/${exp_name}/global_step_1/actor --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/actor/huggingface
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm with validation and saving (FSDP2)
        run: |
          ray stop --force
          VAL_BEFORE_TRAIN=True TEST_FREQ=1 SAVE_FREQ=1 SAVE_HF_MODEL=True VERL_EXP_NAME="qwen2.5-0.5b-function-reward-minimal-fsdp2-size8" STRATEGY=fsdp2 bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Test merging FSDP2 checkpoints (Qwen Actor)
        run: |
          exp_name="qwen2.5-0.5b-function-reward-minimal-fsdp2-size8"
          python -m verl.model_merger test --backend fsdp --local_dir checkpoints/verl-test/${exp_name}/global_step_1/actor --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/actor/huggingface
      - name: Running GSM8K E2E without rmpad using function rm
        run: |
          ray stop --force
          RM_PAD=False bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm (GRPO)
        run: |
          ray stop --force
          ADV_ESTIMATOR=grpo USE_KL=True bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm (ReMax)
        run: |
          ray stop --force
          ADV_ESTIMATOR=remax USE_KL=True bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using customized reward function
        run: |
          ray stop --force
          CUSTOM_REWARD_FN=True bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm with in-reward kl and kl loss
        run: |
          ray stop --force
          USE_KL=True bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      # LoRA tests
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with grpo lora using function rm with use_shm
        run: |
          ray stop --force
          ADV_ESTIMATOR=grpo USE_SHM=True LORA_RANK=32 LOAD_FORMAT=safetensors bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with grpo lora using function rm with use_shm and layered_summon
        run: |
          ray stop --force
          ADV_ESTIMATOR=grpo USE_SHM=True LORA_RANK=32 LOAD_FORMAT=safetensors LAYERED_SUMMON=True TOTAL_TRAIN_STEPS=1 SAVE_FREQ=1 FSDP_SIZE=4 VERL_EXP_NAME="qwen2.5-0.5b-function-reward-minimal" bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Test GRPO LoRA checkpoints merging function
        run: |
          export EXP_NAME="qwen2.5-0.5b-function-reward-minimal"
          ls checkpoints/verl-test/${EXP_NAME}/global_step_1/actor
          cat checkpoints/verl-test/${EXP_NAME}/global_step_1/actor/huggingface/config.json
          python3 -m verl.model_merger merge --backend fsdp --local_dir checkpoints/verl-test/${EXP_NAME}/global_step_1/actor/ --target_dir checkpoints/verl-test/${EXP_NAME}/global_step_1/actor/huggingface
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with grpo lora using function rm with use_shm and layered_summon with fsdp2
        run: |
          ray stop --force
          ADV_ESTIMATOR=grpo USE_SHM=True LORA_RANK=32 LOAD_FORMAT=safetensors LAYERED_SUMMON=True STRATEGY=fsdp2 bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      # Model RM
      - name: Running GRPO GSM8K E2E training tests with FSDP on 8 L20 GPUs (DeepSeek)
        run: |
          ray stop --force
          MODEL_ID=deepseek-ai/deepseek-coder-1.3b-instruct bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm
        run: |
          ray stop --force
          bash tests/special_e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E without rmpad using model rm
        run: |
          ray stop --force
          RM_PAD=False bash tests/special_e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm and ulysses sp=2
        run: |
          ray stop --force
          SP_SIZE=2 bash tests/special_e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm and dynamic batch size
        run: |
          ray stop --force
          SEQ_BALANCE=True bash tests/special_e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm with Liger Kernel enabled
        run: |
          ray stop --force
          LIGER=True bash tests/special_e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm with Fused Kernel enabled
        run: |
          ray stop --force
          FUSED_KERNELS=True bash tests/special_e2e/ppo_trainer/run_model_reward.sh
      - name: Running GSM8K E2E with rmpad using model rm with Fused Kernel enabled
        run: |
          ray stop --force
          FUSED_KERNEL=True FUSED_KERNEL_BACKEND=triton bash tests/special_e2e/ppo_trainer/run_model_reward.sh

  e2e_ppo_trainer_vllm_vlm:
    runs-on: [L20x8]
    needs: pre_commit_for_ppo
    timeout-minutes: 40 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    container:
      image: verlai/verl:app-verl0.4-vllm0.8.5-mcore0.12.1
      options: --gpus all --shm-size=50g # Visual dataloader requires large memory
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -e .[test,gpu,vllm,geo,trl]
          pip install "transformers[hf_xet]<4.53.0" # Fix for transformers 4.53.0
      # Geo3k
      - name: Prepare GEO3K dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/geo3k.py
      - name: Running GEO3K VLM GRPO E2E training tests on 8 L20 GPUs with rmpad using function rm
        run: |
          ray stop --force
          TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2-VL-2B-Instruct \
          ADV_ESTIMATOR=grpo RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          SP_SIZE=2 \
          bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GEO3K VLM PPO E2E training tests on 8 L20 GPUs with rmpad using function rm
        run: |
          ray stop --force
          TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2-VL-2B-Instruct \
          ADV_ESTIMATOR=gae RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          SP_SIZE=2 \
          bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GEO3K VLM GRPO E2E lora training tests on 8 L20 GPUs with rmpad using function rm
        run: |
          ray stop --force
          TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2-VL-2B-Instruct \
          ADV_ESTIMATOR=grpo RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          SP_SIZE=2 \
          LORA_RANK=32 LORA_EXCLUDE=".*visual.*" \
          bash tests/special_e2e/ppo_trainer/run_function_reward.sh

  e2e_ppo_trainer_sglang:
    runs-on: [L20x8]
    needs: pre_commit_for_ppo
    timeout-minutes: 40 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    container:
      image: verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.12.1
      options: --gpus all --shm-size=10g
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -e .[test,gpu,sglang] --no-deps
      - name: Prepare gsm8k dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/gsm8k.py
      - name: Running GSM8K E2E training tests on 8 L20 GPUs with rmpad using function rm and save ckpt
        run: |
          ray stop --force
          ENGINE=sglang bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on sglang async
        run: |
          ray stop --force
          TOTAL_TRAIN_STEPS=2 ENGINE=sglang ROLLOUT_MODE=async bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GSM8K E2E training tests on vllm async
        run: |
          ray stop --force
          export VLLM_USE_V1=1
          ray start --head
          TOTAL_TRAIN_STEPS=2 ENGINE=vllm ROLLOUT_MODE=async bash tests/special_e2e/ppo_trainer/run_function_reward.sh

  e2e_ppo_trainer_sglang_multiturn_with_tool:
    runs-on: [L20x8]
    needs: pre_commit_for_ppo
    timeout-minutes: 40 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    container:
      image: verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.12.1
      options: --gpus all --shm-size=10g
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -e .[test,gpu,sglang] --no-deps
      - name: Prepare gsm8k dataset with tool
        run: |
          ray stop --force
          python3 examples/data_preprocess/gsm8k_multiturn_w_tool.py --local_dir $HOME/data/gsm8k_verl_sgl_multi_turn_preprocessed
      - name: Running GSM8K with tool E2E training tests on 8 L20 GPUs with rmpad using function rm and save ckpt with sglang
        run: |
          ray stop --force
          bash tests/special_e2e/run_gsm8k_fsdp_sgl_multiturn_w_tool.sh
      - name: Running GSM8K with tool E2E training tests with FSDP2
        run: |
          ray stop --force
          FSDP_STRATEGY=fsdp2 bash tests/special_e2e/run_gsm8k_fsdp_sgl_multiturn_w_tool.sh

  e2e_ppo_trainer_sglang_vlm:
    runs-on: [L20x8]
    needs: pre_commit_for_ppo
    timeout-minutes: 60 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    container:
      image: verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.12.1
      options: --gpus all --shm-size=50g # Visual dataloader requires large memory
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -e .[test,geo,gpu,sglang]
      # Geo3k
      - name: Prepare GEO3K dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/geo3k.py
      - name: Running GEO3K VLM E2E training tests on 8 L20 GPUs with rmpad using function rm
        run: |
          ray stop --force
          TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2-VL-2B-Instruct \
          ADV_ESTIMATOR=grpo RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          ENGINE=sglang GPU_MEMORY_UTILIZATION=0.6 ACTOR_FSDP_PARAM_OFFLOAD=True \
          ACTOR_FSDP_OPTIMIZER_OFFLOAD=True REF_FSDP_PARAM_OFFLOAD=True \
          bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GEO3K VLM E2E with rmpad using torch fused kernel (Qwen2.5-VL)
        run: |
          ray stop --force
          FUSED_KERNELS=True TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2.5-VL-3B-Instruct \
          ADV_ESTIMATOR=grpo RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          ENGINE=sglang GPU_MEMORY_UTILIZATION=0.6 ACTOR_FSDP_PARAM_OFFLOAD=True \
          ACTOR_FSDP_OPTIMIZER_OFFLOAD=True REF_FSDP_PARAM_OFFLOAD=True \
          bash tests/special_e2e/ppo_trainer/run_function_reward.sh
      - name: Running GEO3K VLM E2E with rmpad using triton fused kernel (Qwen2.5-VL)
        run: |
          ray stop --force
          FUSED_KERNELS=True FUSED_KERNEL_BACKEND=triton \
          TRAIN_FILES=$HOME/data/geo3k/train.parquet VAL_FILES=$HOME/data/geo3k/test.parquet \
          MAX_PROMPT_LEN=1536 MAX_RESPONSE_LEN=1536 \
          MODEL_ID=Qwen/Qwen2.5-VL-3B-Instruct \
          ADV_ESTIMATOR=grpo RM_PAD=True USE_KL=True ENABLE_CHUNKED_PREFILL=False \
          ENGINE=sglang GPU_MEMORY_UTILIZATION=0.6 ACTOR_FSDP_PARAM_OFFLOAD=True \
          ACTOR_FSDP_OPTIMIZER_OFFLOAD=True REF_FSDP_PARAM_OFFLOAD=True \
          bash tests/special_e2e/ppo_trainer/run_function_reward.sh

  e2e_ppo_trainer_sglang_vlm_multiturn_with_tool:
    runs-on: [L20x8]
    needs: pre_commit_for_ppo
    timeout-minutes: 40 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    container:
      image: verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.12.1
      options: --gpus all --shm-size=10g
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -e .[test,geo,gpu,sglang]
      - name: Prepare geo3k dataset with tool
        run: |
          ray stop --force
          python3 examples/data_preprocess/geo3k_multiturn_w_tool.py --local_dir $HOME/data/geo3k_verl_sgl_multi_turn_preprocessed
      - name: Running GEO3K with tool E2E training tests on 8 L20 GPUs with rmpad using function rm and save ckpt with sglang
        run: |
          ray stop --force
          bash tests/special_e2e/run_geo3k_fsdp_sgl_multiturn_w_tool.sh
      - name: Running GEO3K with tool E2E training tests with FSDP2
        run: |
          ray stop --force
          FSDP_STRATEGY=fsdp2 bash tests/special_e2e/run_geo3k_fsdp_sgl_multiturn_w_tool.sh