# # Tests layout
# Each folder under tests/ corresponds to a test category for a sub-namespace in verl. For instance:
# - `tests/trainer` for testing functionality related to `verl/trainer`
# - `tests/models` for testing functionality related to `verl/models`
# - ...
# There are a few folders with the `special_` prefix, created for special purposes:
# - `special_distributed`: unit tests that must run with multiple GPUs
# - `special_e2e`: end-to-end tests with training/generation scripts
# - `special_npu`: tests for NPUs
# - `special_sanity`: a suite of quick sanity tests
# - `special_standalone`: a set of tests designed to run in dedicated environments
#
# # Accelerators for tests
# - By default, tests are run with a GPU available, except for those under `special_npu` and any test script whose name ends with `on_cpu.py`.
# - Test scripts with the `on_cpu.py` suffix are run on CPU resources in a Linux environment.
#
# # Workflow layout
# All CI tests are configured by yaml files in `.github/workflows/`. Here's an overview of all test configs:
# 1. A list of always-triggered CPU sanity tests: `check-pr-title.yml`, `secrets_scan.yml`, `pre-commit.yml`, `doc.yml`
# 2. Some heavy multi-GPU unit tests, such as `model.yml`, `vllm.yml`, `sgl.yml`
# 3. End-to-end tests: `e2e_*.yml`
# 4. Unit tests
#   - `cpu_unit_tests.yml`: run pytest on all scripts matching the file name pattern `tests/**/test_*_on_cpu.py`
#   - `gpu_unit_tests.yml`: run pytest on all test scripts whose file names do not have the `on_cpu.py` suffix
#   - Since CPU/GPU unit tests by default run all tests under `tests`, please make sure tests are manually excluded from them when
#     - a new workflow yaml is added to `.github/workflows`
#     - new tests are added to the workflows mentioned in 2.

name: sgl

on:
  # workflow_dispatch:  # Manual
  # Trigger the workflow on push or pull request,
  # but only for the main branch
  push:
    branches:
      - main
      - v0.*
    paths:
      - "**/*.py"
      - .github/workflows/sgl.yml
  pull_request:
    branches:
      - main
      - v0.*
    paths:
      - "**/*.py"
      # Other entrypoints
      - "!examples/**"
      - "!tests/**"
      - "!verl/trainer/main_*.py"
      - "!verl/trainer/fsdp_sft_trainer.py"
      # FSDP
      - "!verl/workers/**/*dp_*.py"
      # Megatron
      - "!verl/workers/**/megatron_*.py"
      # vLLM
      - "!**/*vllm*"
      # Recipes
      - "!recipe/**"
      # Entrypoints
      - ".github/workflows/sgl.yml"
      - "tests/rollout/*sglang*"
      - "tests/rollout/async_rollout_utils.py"
      - "tests/workers/rollout/*interaction*"

# Cancel jobs on the same ref if a new one is triggered
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

# Declare permissions: just read contents.
permissions:
  contents: read

env:
  IMAGE: "verl-ci-cn-beijing.cr.volces.com/verlai/verl:app-verl0.6-transformers4.56.1-sglang0.5.2-mcore0.13.0-te2.2"
  DYNAMIC_RUNNER_ENDPOINT: "https://sd10g3clalm04ug7alq90.apigateway-cn-beijing.volceapi.com/runner"

jobs:
  setup:
    if: github.repository_owner == 'volcengine'
    runs-on: ubuntu-latest
    outputs:
      runner-label: ${{ steps.create-runner.outputs.runner-label }}
      mlp-task-id: ${{ steps.create-runner.outputs.mlp-task-id }}
    steps:
      - uses: actions/checkout@v4
      - id: create-runner
        uses: volcengine/vemlp-github-runner@v1
        with:
          mode: "create"
          faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
          mlp-image: "${{ env.IMAGE }}"

  sgl:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 35 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: 1
      SGL_DISABLE_TP_MEMORY_INBALANCE_CHECK: "True"
      NCCL_SHM_DISABLE: "1"
      NCCL_P2P_DISABLE: "1"
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install hf_transfer fastmcp
          pip3 install -e .[test]
      # - name: Download Model to Use
      #   run: |
      #     huggingface-cli download Qwen/Qwen2.5-0.5B --local-dir ${HOME}/models/Qwen/Qwen2.5-0.5B
      #     huggingface-cli download Qwen/Qwen2.5-1.5B-Instruct --local-dir ${HOME}/models/Qwen/Qwen2.5-1.5B-Instruct
      #     huggingface-cli download Qwen/Qwen2.5-VL-3B-Instruct --local-dir ${HOME}/models/Qwen/Qwen2.5-VL-3B-Instruct
      #     export HF_HUB_OFFLINE=1
      - name: Prepare gsm8k dataset
        run: |
          ray stop --force
          python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k
      - name: Test the latest SGLang Rollout async with agent loop
        run: |
          ROLLOUT_NAME=sglang pytest -svvv tests/experimental/agent_loop
          # huggingface-cli download verl-team/gsm8k-v0.4.1 --repo-type dataset --local-dir ~/verl-data/gsm8k
      - name: Test the latest SGLang
        run: |
          cd tests/workers/rollout
          torchrun --nnodes=1 --nproc_per_node=2 $(which pytest) -s test_sglang_spmd.py
      - name: Test the latest SGLang Rollout async with interaction
        run: |
          cd tests/workers/rollout
          torchrun --nnodes=1 --nproc_per_node=2 $(which pytest) -s test_sglang_async_rollout_w_interaction.py
      - name: Test the latest SGLang Multi Interaction
        run: |
          cd tests/workers/rollout
          torchrun --nnodes=1 --nproc_per_node=2 $(which pytest) -s test_sglang_multi_interaction.py
      - name: Test the latest SGLang Rollout async with tool
        run: |
          cd tests/workers/rollout
          torchrun --nnodes=1 --nproc_per_node=2 $(which pytest) -s test_sglang_async_rollout_w_tools.py
      - name: Test the latest SGLang Rollout async with sandbox fusion tool
        run: |
          cd tests/workers/rollout
          pytest -s test_sglang_async_rollout_sf_tools.py
      - name: Test the latest SGLang Rollout async with search tool
        run: |
          cd tests/workers/rollout
          pytest -s test_sglang_async_rollout_search_tools.py
      - name: Test the latest SGLang Rollout async with mcp search tool
        run: |
          cd tests/workers/rollout
          pytest -s test_sglang_async_rollout_mcp_tools.py
      # Note(haibin.lin): for any new test, please update gpu_unit_tests.yaml to avoid repeated tests
      - name: Test the latest SGLang Rollout async with multimodal delta
        run: |
          cd tests/workers/rollout
          pytest -s test_sglang_async_rollout_multimodal_delta.py

  cleanup:
    runs-on: ubuntu-latest
    needs: [setup, sgl]
    if: always()
    steps:
      - id: destroy-runner
        uses: volcengine/vemlp-github-runner@v1
        with:
          mode: "destroy"
          faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
          mlp-task-id: "${{ needs.setup.outputs.mlp-task-id }}"
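# Local reproduction sketch (illustrative only, not executed by CI): assuming a machine with at least
# 2 GPUs and an environment equivalent to the IMAGE above, the SPMD rollout test from this workflow
# can be reproduced with roughly the same commands the `sgl` job runs:
#   pip3 install hf_transfer fastmcp && pip3 install -e .[test]
#   python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k
#   cd tests/workers/rollout && torchrun --nnodes=1 --nproc_per_node=2 $(which pytest) -s test_sglang_spmd.py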