Mirror of https://github.com/volcengine/verl.git (synced 2025-10-20 13:43:50 +08:00)
### What does this PR do?

Refactor the profiler CI into a unified scheme.

TODO:
- nsys: use `save_path`
- nsys discrete tests are disabled
- torch profiler

cc: @davidmlw

### Checklist Before Starting

- [ ] Search for similar PRs. Paste at least one query link here: ...
- [ ] Format the PR title as `[{modules}] {type}: {description}` (This will be checked by the CI)
  - `{modules}` include `fsdp`, `megatron`, `sglang`, `vllm`, `rollout`, `trainer`, `ci`, `training_utils`, `recipe`, `hardware`, `deployment`, `ray`, `worker`, `single_controller`, `misc`, `perf`, `model`, `algo`, `env`, `tool`, `ckpt`, `doc`, `data`
  - If this PR involves multiple modules, separate them with `,` like `[megatron, fsdp, doc]`
  - `{type}` is in `feat`, `fix`, `refactor`, `chore`, `test`
  - If this PR breaks any API (CLI arguments, config, function signature, etc.), add `[BREAKING]` to the beginning of the title.
  - Example: `[BREAKING][fsdp, megatron] feat: dynamic batching`

### Test

> For changes that cannot be tested by CI (e.g., algorithm implementation, new model support), validate by experiment(s) and show results like training curve plots, evaluation results, etc.

### API and Usage Example

Global profiler config:

```yaml
global_profiler:
  _target_: verl.utils.profiler.ProfilerConfig
  tool: null
  steps: null
  profile_continuous_steps: false
  save_path: outputs/profile
  tool_config:
    nsys:
      _target_: verl.utils.profiler.config.NsightToolConfig
      discrete: false
    npu:
      _target_: verl.utils.profiler.config.NPUToolConfig
      discrete: false
      contents: []
      level: level1
      analysis: true
    torch:
      _target_: verl.utils.profiler.config.TorchProfilerToolConfig
      step_start: 0
      step_end: null
```

Local profiler config:

```yaml
profiler:
  # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs
  _target_: verl.utils.profiler.ProfilerConfig

  # profiler tool, default same as profiler.tool in global config
  # choices: nsys, npu, torch
  tool: ${oc.select:global_profiler.tool,null}

  # whether to enable profiling on critic
  enable: False

  # Whether to profile all ranks.
  all_ranks: False

  # The ranks that will be profiled. [] or [0,1,...]
  ranks: []

  # profile results saving path
  save_path: ${oc.select:global_profiler.save_path,null}

  # specific tool config
  tool_config: ${oc.select:global_profiler.tool_config,null}
```

A short Python sketch of how such a block is converted into a `ProfilerConfig` dataclass is included after the checklists below.

### Design & Code Changes

> Demonstrate the high-level design if this PR is complex, and list the specific changes.

### Checklist Before Submitting

> [!IMPORTANT]
> Please check all the following items before requesting a review, otherwise the reviewer might deprioritize this PR for review.

- [ ] Read the [Contribute Guide](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md).
- [ ] Apply [pre-commit checks](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md#code-linting-and-formatting): `pre-commit install && pre-commit run --all-files --show-diff-on-failure --color=always`
- [ ] Add / Update [the documentation](https://github.com/volcengine/verl/tree/main/docs).
- [ ] Add unit or end-to-end test(s) to [the CI workflow](https://github.com/volcengine/verl/tree/main/.github/workflows) to cover all the code. If not feasible, explain why: ...
- [ ] Once your PR is ready for CI, send a message in [the `ci-request` channel](https://verl-project.slack.com/archives/C091TCESWB1) in [the `verl` Slack workspace](https://join.slack.com/t/verl-project/shared_invite/zt-3855yhg8g-CTkqXu~hKojPCmo7k_yXTQ).
(If not accessible, please try [the Feishu group (飞书群)](https://applink.larkoffice.com/client/chat/chatter/add_by_link?link_token=772jd4f1-cd91-441e-a820-498c6614126a).)
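For reference, here is a minimal sketch (not part of this PR's diff) of how a role-local `profiler` block like the one above is materialized into a `ProfilerConfig` via `verl.utils.omega_conf_to_dataclass`, the helper exercised by the test file below. The literal field values are illustrative, and it is assumed that omitted fields such as `tool_config` fall back to their dataclass defaults.

```python
# Illustrative only: the values below are made up, and omitted fields are
# assumed to take their ProfilerConfig defaults.
from omegaconf import OmegaConf

from verl.utils import omega_conf_to_dataclass
from verl.utils.profiler.config import ProfilerConfig

local_cfg = OmegaConf.create(
    {
        "_target_": "verl.utils.profiler.ProfilerConfig",
        "tool": "nsys",            # normally resolved from global_profiler.tool
        "enable": True,
        "all_ranks": False,
        "ranks": [0],
        "save_path": "outputs/profile",
    }
)

# Convert the OmegaConf node into the frozen ProfilerConfig dataclass.
profiler_config = omega_conf_to_dataclass(local_cfg)
assert isinstance(profiler_config, ProfilerConfig)
print(profiler_config.tool, profiler_config.ranks, profiler_config.save_path)
```

In the real config, the `${oc.select:global_profiler....}` interpolations pull the corresponding global values (or `null` when absent), so the plain dict above simply stands in for an already-resolved config.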
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from unittest.mock import MagicMock, patch

from verl.utils import omega_conf_to_dataclass
from verl.utils.profiler.config import NsightToolConfig, ProfilerConfig
from verl.utils.profiler.nvtx_profile import NsightSystemsProfiler


class TestProfilerConfig(unittest.TestCase):
    def test_config_init(self):
        import os

        from hydra import compose, initialize_config_dir

        with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
            cfg = compose(config_name="ppo_trainer")
            for config in [
                cfg.actor_rollout_ref.actor.profiler,
                cfg.actor_rollout_ref.rollout.profiler,
                cfg.actor_rollout_ref.ref.profiler,
                cfg.critic.profiler,
                cfg.reward_model.profiler,
            ]:
                profiler_config = omega_conf_to_dataclass(config)
                self.assertEqual(profiler_config.tool, config.tool)
                self.assertEqual(profiler_config.enable, config.enable)
                self.assertEqual(profiler_config.all_ranks, config.all_ranks)
                self.assertEqual(profiler_config.ranks, config.ranks)
                self.assertEqual(profiler_config.save_path, config.save_path)
                assert isinstance(profiler_config, ProfilerConfig)
                with self.assertRaises(AttributeError):
                    _ = profiler_config.non_existing_key
                assert config.get("non_existing_key") == profiler_config.get("non_existing_key")
                assert config.get("non_existing_key", 1) == profiler_config.get("non_existing_key", 1)

    def test_frozen_config(self):
        """Test that modifying frozen keys in ProfilerConfig raises exceptions."""
        from dataclasses import FrozenInstanceError

        from verl.utils.profiler.config import ProfilerConfig

        # Create a new ProfilerConfig instance
        config = ProfilerConfig(all_ranks=False, ranks=[0], extra={"key": "value"})

        # Frozen dataclass fields cannot be reassigned via attribute access...
        with self.assertRaises(FrozenInstanceError):
            config.all_ranks = True

        with self.assertRaises(FrozenInstanceError):
            config.ranks = [1, 2, 3]

        # ...nor via item assignment.
        with self.assertRaises(TypeError):
            config["all_ranks"] = True

        with self.assertRaises(TypeError):
            config["ranks"] = [1, 2, 3]

        # The `extra` dict itself remains mutable even though the dataclass is frozen.
        assert config["extra"]["key"] == "value"
        config["extra"]["key"] = "value2"
        assert config["extra"]["key"] == "value2"


class TestNsightSystemsProfiler(unittest.TestCase):
    """Test suite for NsightSystemsProfiler functionality.

    Test Plan:
    1. Initialization: Verify profiler state after creation
    2. Basic Profiling: Test start/stop functionality
    3. Discrete Mode: TODO: Test discrete profiling behavior
    4. Annotation: Test the annotate decorator in both normal and discrete modes
    5. Config Validation: Verify proper config initialization from OmegaConf
    """

    def setUp(self):
        self.config = ProfilerConfig(enable=True, all_ranks=True)
        self.rank = 0
        self.profiler = NsightSystemsProfiler(self.rank, self.config, tool_config=NsightToolConfig(discrete=False))

    def test_initialization(self):
        self.assertEqual(self.profiler.this_rank, True)
        self.assertEqual(self.profiler.this_step, False)

    def test_start_stop_profiling(self):
        with patch("torch.cuda.profiler.start") as mock_start, patch("torch.cuda.profiler.stop") as mock_stop:
            # Test start
            self.profiler.start()
            self.assertTrue(self.profiler.this_step)
            mock_start.assert_called_once()

            # Test stop
            self.profiler.stop()
            self.assertFalse(self.profiler.this_step)
            mock_stop.assert_called_once()

    # def test_discrete_profiling(self):
    #     discrete_config = ProfilerConfig(discrete=True, all_ranks=True)
    #     profiler = NsightSystemsProfiler(self.rank, discrete_config)
    #
    #     with patch("torch.cuda.profiler.start") as mock_start, patch("torch.cuda.profiler.stop") as mock_stop:
    #         profiler.start()
    #         self.assertTrue(profiler.this_step)
    #         mock_start.assert_not_called()  # Shouldn't start immediately in discrete mode
    #
    #         profiler.stop()
    #         self.assertFalse(profiler.this_step)
    #         mock_stop.assert_not_called()  # Shouldn't stop immediately in discrete mode

    def test_annotate_decorator(self):
        mock_self = MagicMock()
        mock_self.profiler = self.profiler
        mock_self.profiler.this_step = True

        @NsightSystemsProfiler.annotate(message="test")
        def test_func(self, *args, **kwargs):
            return "result"

        with (
            patch("torch.cuda.profiler.start") as mock_start,
            patch("torch.cuda.profiler.stop") as mock_stop,
            patch("verl.utils.profiler.nvtx_profile.mark_start_range") as mock_start_range,
            patch("verl.utils.profiler.nvtx_profile.mark_end_range") as mock_end_range,
        ):
            result = test_func(mock_self)
            self.assertEqual(result, "result")
            mock_start_range.assert_called_once()
            mock_end_range.assert_called_once()
            mock_start.assert_not_called()  # Not discrete mode
            mock_stop.assert_not_called()  # Not discrete mode

    # def test_annotate_discrete_mode(self):
    #     discrete_config = ProfilerConfig(discrete=True, all_ranks=True)
    #     profiler = NsightSystemsProfiler(self.rank, discrete_config)
    #     mock_self = MagicMock()
    #     mock_self.profiler = profiler
    #     mock_self.profiler.this_step = True
    #
    #     @NsightSystemsProfiler.annotate(message="test")
    #     def test_func(self, *args, **kwargs):
    #         return "result"
    #
    #     with (
    #         patch("torch.cuda.profiler.start") as mock_start,
    #         patch("torch.cuda.profiler.stop") as mock_stop,
    #         patch("verl.utils.profiler.nvtx_profile.mark_start_range") as mock_start_range,
    #         patch("verl.utils.profiler.nvtx_profile.mark_end_range") as mock_end_range,
    #     ):
    #         result = test_func(mock_self)
    #         self.assertEqual(result, "result")
    #         mock_start_range.assert_called_once()
    #         mock_end_range.assert_called_once()
    #         mock_start.assert_called_once()  # Should start in discrete mode
    #         mock_stop.assert_called_once()  # Should stop in discrete mode


if __name__ == "__main__":
    unittest.main()