[ci] fix: use local models/configs/datasets to increase stability (#3616)

### What does this PR do?

- Switch CI jobs to use locally cached models, config files, and datasets
instead of downloading them from Hugging Face at test time, to increase CI
stability.

### Checklist Before Starting

- [ ] Search for similar PRs. Paste at least one query link here: ...
- [ ] Format the PR title as `[{modules}] {type}: {description}` (This
will be checked by the CI)
- `{modules}` include `fsdp`, `megatron`, `sglang`, `vllm`, `rollout`,
`trainer`, `ci`, `training_utils`, `recipe`, `hardware`, `deployment`,
`ray`, `worker`, `single_controller`, `misc`, `perf`, `model`, `algo`,
`env`, `tool`, `ckpt`, `doc`, `data`
- If this PR involves multiple modules, separate them with `,` like
`[megatron, fsdp, doc]`
  - `{type}` is in `feat`, `fix`, `refactor`, `chore`, `test`
- If this PR breaks any API (CLI arguments, config, function signature,
etc.), add `[BREAKING]` to the beginning of the title.
  - Example: `[BREAKING][fsdp, megatron] feat: dynamic batching`

### Test

> For changes that cannot be tested by CI (e.g., algorithm
implementation, new model support), validate by experiment(s) and show
results like training curve plots, evaluation results, etc.

### API and Usage Example

> Demonstrate how the API changes if any, and provide usage example(s)
if possible.

```python
# Add code snippet or script demonstrating how to use this
```

### Design & Code Changes

> Demonstrate the high-level design if this PR is complex, and list the
specific changes.

### Checklist Before Submitting

> [!IMPORTANT]
> Please check all the following items before requesting a review,
otherwise the reviewer might deprioritize this PR for review.

- [ ] Read the [Contribute
Guide](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md).
- [ ] Apply [pre-commit
checks](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md#code-linting-and-formatting):
`pre-commit install && pre-commit run --all-files --show-diff-on-failure
--color=always`
- [ ] Add / Update [the
documentation](https://github.com/volcengine/verl/tree/main/docs).
- [ ] Add unit or end-to-end test(s) to [the CI
workflow](https://github.com/volcengine/verl/tree/main/.github/workflows)
to cover all the code. If not feasible, explain why: ...
- [ ] Once your PR is ready for CI, send a message in [the `ci-request`
channel](https://verl-project.slack.com/archives/C091TCESWB1) in [the
`verl` Slack
workspace](https://join.slack.com/t/verl-project/shared_invite/zt-3855yhg8g-CTkqXu~hKojPCmo7k_yXTQ).
(If not accessible, please try [the Feishu group
(飞书群)](https://applink.larkoffice.com/client/chat/chatter/add_by_link?link_token=772jd4f1-cd91-441e-a820-498c6614126a).)
This commit is contained in:
Chi Zhang
2025-09-25 22:14:56 +08:00
committed by GitHub
parent bf7aac2fa7
commit 515f2255ac
11 changed files with 37 additions and 18 deletions

View File

@ -95,7 +95,7 @@ jobs:
pip3 install -e .[test,gpu,sglang]
- name: Prepare MATH dataset
run: |
python3 examples/data_preprocess/math_dataset.py
python3 examples/data_preprocess/math_dataset.py --local_dataset_path $HOME/models/hf_data/DigitalLearningGmbH/MATH-lighteval
- name: Running the E2E test with the SPPO algorithm
run: |
ray stop --force

View File

@ -171,14 +171,14 @@ jobs:
run: |
pip3 install --no-deps -e .[test]
pip install --upgrade "huggingface_hub[cli]"
- name: Download model config files
run: |
hf download Qwen/Qwen2.5-7B config.json --local-dir $HOME/configs/Qwen/Qwen2.5-7B
hf download Qwen/Qwen3-8B config.json --local-dir $HOME/configs/Qwen/Qwen3-8B
hf download deepseek-ai/deepseek-coder-1.3b-instruct config.json --local-dir $HOME/configs/deepseek-ai/deepseek-coder-1.3b-instruct
hf download Qwen/Qwen2-57B-A14B config.json --local-dir $HOME/configs/Qwen/Qwen2-57B-A14B
hf download Qwen/Qwen3-30B-A3B config.json --local-dir $HOME/configs/Qwen/Qwen3-30B-A3B
hf download deepseek-ai/DeepSeek-V3-Base config.json --local-dir $HOME/configs/deepseek-ai/DeepSeek-V3-Base
# - name: Download model config files
# run: |
# hf download Qwen/Qwen2.5-7B config.json --local-dir $HOME/configs/Qwen/Qwen2.5-7B
# hf download Qwen/Qwen3-8B config.json --local-dir $HOME/configs/Qwen/Qwen3-8B
# hf download deepseek-ai/deepseek-coder-1.3b-instruct config.json --local-dir $HOME/configs/deepseek-ai/deepseek-coder-1.3b-instruct
# hf download Qwen/Qwen2-57B-A14B config.json --local-dir $HOME/configs/Qwen/Qwen2-57B-A14B
# hf download Qwen/Qwen3-30B-A3B config.json --local-dir $HOME/configs/Qwen/Qwen3-30B-A3B
# hf download deepseek-ai/DeepSeek-V3-Base config.json --local-dir $HOME/configs/deepseek-ai/DeepSeek-V3-Base
- name: Running mcore config converter tests on 8 L20 GPUs
run: |
torchrun --nproc_per_node=8 tests/special_distributed/test_mcore_config_converter.py

View File

@ -129,8 +129,8 @@ jobs:
python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k
- name: Test the latest SGLang Rollout async with agent loop
run: |
huggingface-cli download verl-team/gsm8k-v0.4.1 --repo-type dataset --local-dir ~/verl-data/gsm8k
ROLLOUT_NAME=sglang pytest -svvv tests/experimental/agent_loop
# huggingface-cli download verl-team/gsm8k-v0.4.1 --repo-type dataset --local-dir ~/verl-data/gsm8k
- name: Test the latest SGLang
run: |
cd tests/workers/rollout

View File

@ -20,8 +20,9 @@ jobs:
- name: Install dependencies
run: |
pip install gitpython
pip install -e .[sglang]
pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cpu
pip3 install -r requirements.txt
pip3 install -e . --no-deps
- name: Run type annotation coverage check
run: |
python3 tests/special_sanity/type_coverage_check.py

View File

@ -31,16 +31,28 @@ def extract_solution(solution_str):
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--local_dir", default="~/data/math")
parser.add_argument("--local_dir", default=None)
parser.add_argument("--hdfs_dir", default=None)
parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.")
parser.add_argument(
"--local_save_dir", default="~/data/math", help="The save directory for the preprocessed dataset."
)
args = parser.parse_args()
local_dataset_path = args.local_dataset_path
# 'lighteval/MATH' is no longer available on huggingface.
# Use mirror repo: DigitalLearningGmbH/MATH-lighteval
data_source = "DigitalLearningGmbH/MATH-lighteval"
print(f"Loading the {data_source} dataset from huggingface...", flush=True)
dataset = datasets.load_dataset(data_source, trust_remote_code=True)
if local_dataset_path is not None:
dataset = datasets.load_dataset(
local_dataset_path,
)
else:
dataset = datasets.load_dataset(
data_source,
)
train_dataset = dataset["train"]
test_dataset = dataset["test"]
@ -70,7 +82,13 @@ if __name__ == "__main__":
train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True)
test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True)
local_dir = os.path.expanduser(args.local_dir)
local_save_dir = args.local_dir
if local_save_dir is not None:
print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.")
else:
local_save_dir = args.local_save_dir
local_dir = os.path.expanduser(local_save_dir)
hdfs_dir = args.hdfs_dir
train_dataset.to_parquet(os.path.join(local_dir, "train.parquet"))

1
requirements-cuda.txt Normal file
View File

@ -0,0 +1 @@
flash-attn

View File

@ -3,7 +3,6 @@ accelerate
codetiming
datasets
dill
flash-attn
hydra-core
liger-kernel
numpy<2.0.0

View File

@ -49,7 +49,7 @@ def test_fsdp_ckpt(strategy="fsdp"):
local_rank, rank, world_size = initialize_global_process_group()
device_mesh = init_device_mesh("cuda", mesh_shape=(world_size,), mesh_dim_names=("dp",))
model_name = "Qwen/Qwen2.5-0.5B-Instruct"
model_name = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B-Instruct")
config = Qwen2Config(num_hidden_layers=1)
with torch.device("cuda"):

View File

@ -89,7 +89,7 @@ def test_mcore_config_converter():
)
for model_name in TEST_MODELS:
print(f"testing {model_name}")
hf_config = AutoConfig.from_pretrained(os.path.expanduser(f"~/configs/{model_name}/config.json"))
hf_config = AutoConfig.from_pretrained(os.path.expanduser(f"~/models/configs/{model_name}/config.json"))
hf_config = modify_hf_config(model_name, hf_config)
tf_config = hf_to_mcore_config(hf_config, torch.bfloat16)
check_config_converter_results(tf_config, hf_config)