Mirror of https://github.com/huggingface/accelerate.git (synced 2025-10-20 18:13:46 +08:00)

Compare commits: v1.5.1...context-pa (122 commits)
Commits (SHA1):
f251a0ad9c
7fae694f87
de3938f701
1b7e224f1c
a70304abcf
14b552fa63
3323b148b8
76dcecf807
4361b0c634
d4ef46c265
baa6670b3b
f451355ba9
4f97d75b43
e5acf573b1
8411d03d51
16fb4c0fca
dbf9f696ee
5adc8cd0e5
46f9366fb8
d4aec10f6d
90f7856f64
c653e86347
2f8fd72e51
d2e6b0313d
b9fee48c85
3a82b056cf
6b61a373a2
682691deac
791055b484
16bf1d8901
ab3c604e48
273799c85d
43526c5c08
07f2392f40
ee2f48c2c3
4f3abb73a7
db536cbfeb
4e9d0deba6
8cb3ace894
b6d97cb856
33967d4733
5b1fcda371
f55f0533b5
1ec99f0b58
417bc52965
97c93c4809
cd37bbb629
7aa3b56c80
14f4306ca6
e6e717589e
1f6efcea0b
9fa97f9600
764eee4a48
202e6c178a
32874257f3
281314b479
3524a504c8
f48d95c493
f76208f5a8
ae0499ea96
ddc49f1e9a
9b2d6eaf32
7b5774ac55
7013365791
8d8fd83672
3a941d4b4e
d02e51cc21
c5caa11e85
39e2bebb12
0af45bf1e8
806ac848c9
23b092507a
8fb073536a
4f35cf713c
ada21cfbbd
b451956fd6
6a9a61520d
423fbbfdea
34c1779828
54496571fd
4a3cbcb63c
583b26db3c
7812d979c3
67adb473a4
ee4cab96ed
73c2378c55
b2f937faec
3b89987710
a43e4170fc
334d6ab957
650b6659c0
fb90996365
32b2e1606f
8c0a29626d
63168b151f
3cf5e4c802
9642a1ac81
3169339f5b
67a768be07
531643436e
83e09a9331
9c4eeb9ba8
a0edc8dcf2
11a3c0001d
8b31a2fe2c
3f636d6260
803b6648b4
17f9c19f48
d7c741a6bc
8ab01d32cf
140acb356e
8576112bc8
806f661cd3
9015a26f09
6de900e10a
ffb27138f7
4b6be89910
a702364256
a31bd767c1
71036329f7
f648feba97
14fc61eeac
.github/PULL_REQUEST_TEMPLATE.md — 12 changed lines

@@ -37,11 +37,11 @@ members/contributors who may be interested in your PR.
 If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
 - Big modeling: @SunMarc
-- Fully-Sharded Data Parallism: @muellerzr
-- DeepSpeed: @muellerzr
-- Command Line Interface: @muellerzr
-- Documentation: @muellerzr
-- Core parts of the library: @muellerzr @BenjaminBossan @SunMarc
-- Maintained examples: @muellerzr or @SunMarc
+- Fully-Sharded Data Parallism: @SunMarc @zach-huggingface
+- DeepSpeed: @SunMarc @zach-huggingface
+- Command Line Interface: @SunMarc @zach-huggingface
+- Documentation: @SunMarc @zach-huggingface
+- Core parts of the library: @BenjaminBossan @SunMarc @zach-huggingface
+- Maintained examples: @SunMarc or @zach-huggingface
 -->
@@ -15,7 +15,7 @@ jobs:
 outputs:
 version: ${{ steps.step1.outputs.version }}
 steps:
-- uses: actions/checkout@v3.1.0
+- uses: actions/checkout@4
 - id: step1
 run: echo "version=$(python setup.py --version)" >> $GITHUB_OUTPUT
.github/workflows/build_and_run_tests.yml — 6 changed lines

@@ -16,13 +16,13 @@ jobs:
 outputs:
 changed: ${{ steps.was_changed.outputs.changed }}
 steps:
-- uses: actions/checkout@v3.1.0
+- uses: actions/checkout@v4
 with:
 fetch-depth: "2"
 - name: Get changed files
 id: changed-files
-uses: tj-actions/changed-files@v41
+uses: tj-actions/changed-files@3f54ebb830831fc121d3263c1857cfbdc310cdb9 #v42
 - name: Was setup changed
 id: was_changed

@@ -47,4 +47,4 @@ jobs:
 run-integration-tests:
 needs: build-docker-containers
 if: always()
-uses: ./.github/workflows/self_hosted_integration_tests.yml
+uses: ./.github/workflows/self_hosted_integration_tests.yml
.github/workflows/build_docker_images.yml — 8 changed lines

@@ -102,9 +102,15 @@ jobs:
 id: date
 run: |
 echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
+# Get the previous month
+echo "base_year=$(date -d 'last month' '+%y')" >> $GITHUB_ENV
+echo "base_month=$(date -d 'last month' '+%m')" >> $GITHUB_ENV
 - name: Build and Push GPU
 uses: docker/build-push-action@v4
 with:
 file: benchmarks/fp8/transformer_engine/Dockerfile
 push: true
-tags: huggingface/accelerate:gpu-fp8-transformerengine-nightly-${{ env.date }}
+tags: huggingface/accelerate:gpu-fp8-transformerengine-nightly-${{ env.date }}
+build-args: |
+BASE_YEAR=${{ env.base_year }}
+BASE_MONTH=${{ env.base_month }}
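The `base_year` / `base_month` values computed above feed the `BASE_YEAR` / `BASE_MONTH` build args that select the parametrized `nvcr.io/nvidia/pytorch` base image in the Dockerfile changed later in this comparison. A rough Python sketch of the same "previous month" computation, for illustration only (GNU `date -d 'last month'` can behave slightly differently at month ends):

```py
from datetime import date, timedelta

# Step back to the last day of the previous calendar month, then format the
# two-digit year and month the same way the workflow's shell commands do.
last_month = date.today().replace(day=1) - timedelta(days=1)
base_year = last_month.strftime("%y")   # e.g. "25"
base_month = last_month.strftime("%m")  # e.g. "03"
print(base_year, base_month)
```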
.github/workflows/fp8_runner.yml — new file (37 lines)

@@ -0,0 +1,37 @@
+name: Test FP8 Runner
+on:
+workflow_dispatch:
+env:
+GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+jobs:
+set-prev-day:
+runs-on: ubuntu-latest
+outputs:
+prev-day: ${{ steps.set-prev-day.outputs.prev-day }}
+steps:
+- name: Set PREV_DAY
+id: set-prev-day
+run: |
+PREV_DAY=$(date -d "yesterday" '+%Y-%m-%d')
+echo "prev-day=$PREV_DAY" >> $GITHUB_OUTPUT
+run-fp8-tests:
+needs: set-prev-day
+runs-on:
+group: aws-g6e-12xlarge
+container:
+image: huggingface/accelerate:gpu-fp8-transformerengine-nightly-${{ needs.set-prev-day.outputs.prev-day }}
+options: --gpus all --shm-size "16gb"
+steps:
+- uses: actions/checkout@v3
+- name: Install the library
+run: |
+pip install -e .[test_prod,test_fp8]
+- name: Show installed libraries
+run: |
+pip freeze
+- name: Run TE FP8 tests
+run: |
+python -m pytest -s -v ./tests/test_fp8.py
@@ -1,23 +1,22 @@
-name: Gaudi1 tests (scheduled)
+name: Gaudi3 tests (scheduled)
 on:
 workflow_dispatch:
-schedule:
-- cron: "0 2 * * *"
+schedule: # every day at 6 AM UTC
+- cron: "0 6 * * *"
 concurrency:
 group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
 cancel-in-progress: true
 jobs:
-run_gaudi1_tests:
-name: Test on Gaudi1
+run-gaudi3-tests:
 runs-on:
-group: aws-dl1-24xlarge
+group: itac-bm-emr-gaudi3-dell-2gaudi
 container:
 image: docker://vault.habana.ai/gaudi-docker/1.20.0/ubuntu22.04/habanalabs/pytorch-installer-2.6.0:latest
-options: --runtime=habana --shm-size=64G --cap-add=sys_nice --env HABANA_VISIBLE_DEVICES=0,1
+options: --runtime=habana --shm-size=64G --cap-add=sys_nice --env HABANA_VISIBLE_DEVICES
 env:
 OMPI_MCA_btl_vader_single_copy_mechanism: none
 PT_ENABLE_INT64_SUPPORT: 1

@@ -50,28 +49,34 @@ jobs:
 run: |
 pip install -e .[testing] \
 git+https://github.com/HabanaAI/DeepSpeed.git@1.20.0 \
-git+https://github.com/huggingface/transformers.git@hpu-support
+git+https://github.com/huggingface/transformers.git
 - name: Run CLI tests
+if: ${{ !cancelled() && (success() || failure()) }}
 run: |
 make test_cli
 - name: Run Core tests
+if: ${{ !cancelled() && (success() || failure()) }}
 run: |
 make test_core
 - name: Run Big Modeling tests
+if: ${{ !cancelled() && (success() || failure()) }}
 run: |
 make test_big_modeling
 - name: Run FSDP integration tests
+if: ${{ !cancelled() && (success() || failure()) }}
 run: |
 make test_fsdp
 - name: Run DeepSpeed integration tests
+if: ${{ !cancelled() && (success() || failure()) }}
 run: |
 make test_deepspeed
 - name: Run Examples tests
+if: ${{ !cancelled() && (success() || failure()) }}
 run: |
 make test_examples
.github/workflows/integration_tests.yml — 4 changed lines

@@ -26,9 +26,9 @@ jobs:
 strategy:
 fail-fast: false
 steps:
-- uses: actions/checkout@v3.1.0
+- uses: actions/checkout@v4
 - name: Set up python 3.9
-uses: actions/setup-python@v3
+uses: actions/setup-python@v5
 with:
 python-version: 3.9
 cache: 'pip'
.github/workflows/pr_style_bot.yml — new file (19 lines)

@@ -0,0 +1,19 @@
+# To run this bot, comment "@bot /style" on a PR
+name: Style Bot
+on:
+issue_comment:
+types: [created]
+permissions:
+contents: write
+pull-requests: write
+jobs:
+style:
+uses: huggingface/huggingface_hub/.github/workflows/style-bot-action.yml@main
+with:
+python_quality_dependencies: "[quality]"
+style_command_type: "default"
+secrets:
+bot_token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/quality.yml — 4 changed lines

@@ -6,9 +6,9 @@ jobs:
 quality:
 runs-on: ubuntu-latest
 steps:
-- uses: actions/checkout@v3.1.0
+- uses: actions/checkout@v4
 - name: Set up Python 3.9
-uses: actions/setup-python@v3
+uses: actions/setup-python@v5
 with:
 python-version: 3.9
 cache: 'pip'
.github/workflows/stale.yml — 4 changed lines

@@ -16,10 +16,10 @@ jobs:
 env:
 GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 steps:
-- uses: actions/checkout@v3.1.0
+- uses: actions/checkout@v4
 - name: Setup Python
-uses: actions/setup-python@v3
+uses: actions/setup-python@v5
 with:
 python-version: 3.9
 cache: 'pip'
.github/workflows/test.yml — 6 changed lines

@@ -38,9 +38,9 @@ jobs:
 test_rest
 ]
 steps:
-- uses: actions/checkout@v3.1.0
+- uses: actions/checkout@v4
 - name: Set up python 3.9
-uses: actions/setup-python@v3
+uses: actions/setup-python@v5
 with:
 python-version: 3.9
 cache: 'pip'

@@ -52,7 +52,7 @@ jobs:
 if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi
 if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi
 if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torchvision==0.18.1 torch==2.3.1; fi
-pip install pytest-reportlog tabulate setuptools
+pip install pytest-reportlog tabulate setuptools importlib_metadata
 - name: Show installed libraries
 run: |
.github/workflows/test_imports.yml — 4 changed lines

@@ -26,9 +26,9 @@ jobs:
 minimum,
 ]
 steps:
-- uses: actions/checkout@v3.1.0
+- uses: actions/checkout@v4
 - name: Set up python 3.9
-uses: actions/setup-python@v3
+uses: actions/setup-python@v5
 with:
 python-version: 3.9
 cache: 'pip'
Makefile — 2 changed lines

@@ -83,7 +83,7 @@ prepare_release:
 # Make sure this is ran in a fresh venv of some form
 install_test_release:
 pip uninstall accelerate -y
-pip install -i https://testpypi.python.org/pypi --extra-index-url https://pypi.org/simple accelerate
+pip install -i https://testpypi.python.org/pypi --extra-index-url https://pypi.org/simple accelerate$(if $(version),==$(version),)
 # Run as `make target=testpypi upload_release`
 upload_release:
@@ -13,7 +13,7 @@ pip install transformers
 To reproduce or test a new setup, run
 ```py
-python inference_acc.py model_name
+python big_model_inference.py model_name
 ```
 This script supports `gpt-j-6b`, `gpt-neox`, `opt` (30B version) and `T0pp` out of the box, but you can specify any valid checkpoint for `model_name`.

@@ -43,4 +43,4 @@ Note on the results:
 You will also note that Accelerate does not use anymore GPU and CPU RAM than necessary:
 - peak GPU memory is exactly the size of the model put on a given GPU
-- peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded on CPU, whichever is bigger.
+- peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded on CPU, whichever is bigger.
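To make the peak-memory rule quoted above concrete, here is a small worked example; the model size, shard size, and offload split are hypothetical numbers chosen only for illustration:

```py
# Hypothetical sizing: a 6B-parameter model in fp16 (2 bytes per parameter),
# saved as checkpoint shards of at most 4 GB, with half of the weights offloaded to CPU.
params = 6e9
model_size_gb = params * 2 / 1e9          # ~12 GB of weights in total
biggest_shard_gb = 4.0                    # largest checkpoint shard
cpu_offloaded_gb = model_size_gb / 2      # ~6 GB of weights kept on CPU

# Peak CPU RAM is whichever is bigger: the largest shard being loaded,
# or the part of the model that ends up offloaded on CPU.
expected_peak_cpu_gb = max(biggest_shard_gb, cpu_offloaded_gb)
print(f"expected peak CPU RAM: ~{expected_peak_cpu_gb:.0f} GB")  # ~6 GB
```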
@@ -18,6 +18,12 @@ import time
 import psutil
 import torch
+from accelerate.test_utils.testing import get_backend
+torch_device_type, _, _ = get_backend()
+torch_accelerator_module = getattr(torch, torch_device_type, torch.cuda)
 class PeakCPUMemory:
 def __init__(self):

@@ -54,16 +60,16 @@ def start_measure():
 measures = {"time": time.time()}
 gc.collect()
-torch.cuda.empty_cache()
+torch_accelerator_module.empty_cache()
 # CPU mem
 measures["cpu"] = psutil.Process().memory_info().rss
 cpu_peak_tracker.start()
 # GPU mem
-for i in range(torch.cuda.device_count()):
-measures[str(i)] = torch.cuda.memory_allocated(i)
-torch.cuda.reset_peak_memory_stats()
+for i in range(torch_accelerator_module.device_count()):
+measures[str(i)] = torch_accelerator_module.memory_allocated(i)
+torch_accelerator_module.reset_peak_memory_stats()
 return measures

@@ -73,16 +79,16 @@ def end_measure(start_measures):
 measures = {"time": time.time() - start_measures["time"]}
 gc.collect()
-torch.cuda.empty_cache()
+torch_accelerator_module.empty_cache()
 # CPU mem
 measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
 measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
 # GPU mem
-for i in range(torch.cuda.device_count()):
-measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
-measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
+for i in range(torch_accelerator_module.device_count()):
+measures[str(i)] = (torch_accelerator_module.memory_allocated(i) - start_measures[str(i)]) / 2**20
+measures[f"{i}-peak"] = (torch_accelerator_module.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
 return measures

@@ -90,9 +96,9 @@ def end_measure(start_measures):
 def log_measures(measures, description):
 print(f"{description}:")
 print(f"- Time: {measures['time']:.2f}s")
-for i in range(torch.cuda.device_count()):
-print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
+for i in range(torch_accelerator_module.device_count()):
+print(f"- {torch_device_type} {i} allocated: {measures[str(i)]:.2f}MiB")
 peak = measures[f"{i}-peak"]
-print(f"- GPU {i} peak: {peak:.2f}MiB")
+print(f"- {torch_device_type} {i} peak: {peak:.2f}MiB")
 print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
 print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
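The change above resolves the active accelerator backend once and then reuses the matching `torch` submodule wherever the script previously hard-coded `torch.cuda`. A minimal standalone sketch of the same idea, assuming a backend that exposes the CUDA-like memory APIs used here:

```py
import torch
from accelerate.test_utils.testing import get_backend

# get_backend() returns the device type string first (e.g. "cuda", "xpu", "hpu");
# only that first element is needed to pick the matching torch submodule.
device_type, _, _ = get_backend()
accelerator_module = getattr(torch, device_type, torch.cuda)  # fall back to torch.cuda

accelerator_module.empty_cache()
for i in range(accelerator_module.device_count()):
    allocated_mib = accelerator_module.memory_allocated(i) / 2**20
    print(f"{device_type} {i}: {allocated_mib:.2f} MiB allocated")
```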
@@ -62,12 +62,12 @@ def train_baseline(opt_level="O2"):
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -95,12 +95,12 @@ def train_integration(opt_level="O2"):
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -109,15 +109,15 @@ if __name__ == "__main__":
 for opt_level in ["O1", "O2"]:
 baseline_not_trained, baseline_trained = train_baseline(opt_level)
 accelerator_not_trained, accelerator_trained = train_integration(opt_level)
-assert (
-baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
-), f'Accuracy not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
-assert (
-baseline_not_trained["f1"] == accelerator_not_trained["f1"]
-), f'F1 not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
-assert (
-baseline_trained["accuracy"] == accelerator_trained["accuracy"]
-), f'Accuracy not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
-assert (
-baseline_trained["f1"] == accelerator_trained["f1"]
-), f'F1 not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
+assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
+f"Accuracy not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
+)
+assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
+f"F1 not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
+)
+assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
+f"Accuracy not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
+)
+assert baseline_trained["f1"] == accelerator_trained["f1"], (
+f"F1 not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained['f1']} == {accelerator_trained['f1']}"
+)
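The remaining benchmark files in this comparison repeat the same mechanical reformatting of their assertions: the condition moves up onto the `assert` line, only the failure message stays wrapped in parentheses, and the f-strings switch to double quotes. A minimal before/after illustration with hypothetical values:

```py
old_score, new_score = 0.80, 0.85  # hypothetical metric values

# Old layout: the condition is wrapped in parentheses and the message follows the closing paren.
assert (
    new_score > old_score
), f"Score should be higher after training: {new_score} > {old_score}"

# New layout: the condition stays on the assert line; only the message is parenthesized.
assert new_score > old_score, (
    f"Score should be higher after training: {new_score} > {old_score}"
)
```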
@@ -90,12 +90,12 @@ def train_baseline(zero_stage: int = 1, opt_level: str = "O1"):
 model.destroy()
 torch.cuda.empty_cache()
 AcceleratorState()._reset_state(True)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -129,12 +129,12 @@ def train_integration(zero_stage: int = 1, opt_level: str = "O1"):
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
 model.destroy()
 torch.cuda.empty_cache()
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 AcceleratorState()._reset_state(True)
 return base_model_results, trained_model_results

@@ -145,17 +145,17 @@ if __name__ == "__main__":
 for opt_level in ["O1", "O2", "O3"]:
 baseline_not_trained, baseline_trained = train_baseline(zero_stage, opt_level)
 accelerator_not_trained, accelerator_trained = train_integration(zero_stage, opt_level)
-assert (
-baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
-), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
-assert (
-baseline_not_trained["f1"] == accelerator_not_trained["f1"]
-), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
-assert (
-baseline_trained["accuracy"] == accelerator_trained["accuracy"]
-), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
-assert (
-baseline_trained["f1"] == accelerator_trained["f1"]
-), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
+assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
+f"ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
+)
+assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
+f"ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
+)
+assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
+f"ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
+)
+assert baseline_trained["f1"] == accelerator_trained["f1"], (
+f"ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
+)
 torch.distributed.destroy_process_group()
@@ -56,12 +56,12 @@ def train_baseline(opt_level="O2"):
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -89,12 +89,12 @@ def train_integration(opt_level="O2"):
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -104,15 +104,15 @@ if __name__ == "__main__":
 baseline_not_trained, baseline_trained = train_baseline(opt_level)
 accelerator_not_trained, accelerator_trained = train_integration(opt_level)
-assert (
-baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
-), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
-assert (
-baseline_not_trained["f1"] == accelerator_not_trained["f1"]
-), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
-assert (
-baseline_trained["accuracy"] == accelerator_trained["accuracy"]
-), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
-assert (
-baseline_trained["f1"] == accelerator_trained["f1"]
-), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
+assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
+f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
+)
+assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
+f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
+)
+assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
+f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
+)
+assert baseline_trained["f1"] == accelerator_trained["f1"], (
+f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
+)
@@ -96,12 +96,12 @@ def train_baseline():
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -128,12 +128,12 @@ def train_integration():
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -142,17 +142,17 @@ if __name__ == "__main__":
 baseline_not_trained, baseline_trained = train_baseline()
 accelerator_not_trained, accelerator_trained = train_integration()
-assert (
-baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
-), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
-assert (
-baseline_not_trained["f1"] == accelerator_not_trained["f1"]
-), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
-assert (
-baseline_trained["accuracy"] == accelerator_trained["accuracy"]
-), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
-assert (
-baseline_trained["f1"] == accelerator_trained["f1"]
-), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
+assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
+f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
+)
+assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
+f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
+)
+assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
+f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
+)
+assert baseline_trained["f1"] == accelerator_trained["f1"], (
+f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
+)
 torch.distributed.destroy_process_group()
@@ -126,12 +126,12 @@ def train_baseline(zero_stage: int = 1):
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
 model.destroy()
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 del config
 return base_model_results, trained_model_results, model_outputs, data

@@ -180,12 +180,12 @@ def train_integration(zero_stage: int = 1):
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
 model.destroy()
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 del config
 return base_model_results, trained_model_results, model_outputs, data

@@ -197,17 +197,17 @@ if __name__ == "__main__":
 accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration(
 zero_stage
 )
-assert (
-baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
-), f'ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
-assert (
-baseline_not_trained["f1"] == accelerator_not_trained["f1"]
-), f'ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
-assert (
-baseline_trained["accuracy"] == accelerator_trained["accuracy"]
-), f'ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
-assert (
-baseline_trained["f1"] == accelerator_trained["f1"]
-), f'ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
+assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
+f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
+)
+assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
+f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
+)
+assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
+f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
+)
+assert baseline_trained["f1"] == accelerator_trained["f1"], (
+f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
+)
 AcceleratorState()._reset_state(True)
 torch.distributed.destroy_process_group()
@@ -106,12 +106,12 @@ def train_baseline():
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -143,12 +143,12 @@ def train_integration():
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -157,17 +157,17 @@ if __name__ == "__main__":
 baseline_not_trained, baseline_trained = train_baseline()
 accelerator_not_trained, accelerator_trained = train_integration()
-assert (
-baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
-), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
-assert (
-baseline_not_trained["f1"] == accelerator_not_trained["f1"]
-), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
-assert (
-baseline_trained["accuracy"] == accelerator_trained["accuracy"]
-), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
-assert (
-baseline_trained["f1"] == accelerator_trained["f1"]
-), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
+assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
+f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
+)
+assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
+f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
+)
+assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
+f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
+)
+assert baseline_trained["f1"] == accelerator_trained["f1"], (
+f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
+)
 torch.distributed.destroy_process_group()
@@ -87,12 +87,12 @@ def train_baseline():
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -117,12 +117,12 @@ def train_integration():
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -131,15 +131,15 @@ if __name__ == "__main__":
 baseline_not_trained, baseline_trained = train_baseline()
 AcceleratorState._reset_state(True)
 accelerator_not_trained, accelerator_trained = train_integration()
-assert (
-baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
-), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
-assert (
-baseline_not_trained["f1"] == accelerator_not_trained["f1"]
-), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
-assert (
-baseline_trained["accuracy"] == accelerator_trained["accuracy"]
-), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
-assert (
-baseline_trained["f1"] == accelerator_trained["f1"]
-), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
+assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
+f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
+)
+assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
+f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
+)
+assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
+f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
+)
+assert baseline_trained["f1"] == accelerator_trained["f1"], (
+f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
+)
@@ -1,4 +1,7 @@
-FROM nvcr.io/nvidia/pytorch:24.07-py3
+ARG BASE_YEAR=25
+ARG BASE_MONTH=03
+FROM nvcr.io/nvidia/pytorch:${BASE_YEAR}.${BASE_MONTH}-py3
 RUN pip install transformers evaluate datasets
 RUN git clone https://github.com/huggingface/accelerate.git
@@ -79,12 +79,12 @@ def train_baseline():
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -114,12 +114,12 @@ def train_integration():
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -128,17 +128,17 @@ if __name__ == "__main__":
 baseline_not_trained, baseline_trained = train_baseline()
 accelerator_not_trained, accelerator_trained = train_integration()
-assert (
-baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
-), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
-assert (
-baseline_not_trained["f1"] == accelerator_not_trained["f1"]
-), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
-assert (
-baseline_trained["accuracy"] == accelerator_trained["accuracy"]
-), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
-assert (
-baseline_trained["f1"] == accelerator_trained["f1"]
-), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
+assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
+f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
+)
+assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
+f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
+)
+assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
+f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
+)
+assert baseline_trained["f1"] == accelerator_trained["f1"], (
+f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
+)
 torch.distributed.destroy_process_group()
@@ -113,12 +113,12 @@ def train_baseline(zero_stage: int = 1):
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
 model.destroy()
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results, model_outputs, data

@@ -159,12 +159,12 @@ def train_integration(zero_stage: int = 1):
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
 model.destroy()
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results, model_outputs, data

@@ -175,17 +175,17 @@ if __name__ == "__main__":
 accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration(
 zero_stage
 )
-assert (
-baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
-), f'ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
-assert (
-baseline_not_trained["f1"] == accelerator_not_trained["f1"]
-), f'ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
-assert (
-baseline_trained["accuracy"] == accelerator_trained["accuracy"]
-), f'ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
-assert (
-baseline_trained["f1"] == accelerator_trained["f1"]
-), f'ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
+assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
+f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
+)
+assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
+f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
+)
+assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
+f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
+)
+assert baseline_trained["f1"] == accelerator_trained["f1"], (
+f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
+)
 torch.distributed.destroy_process_group()
@@ -91,12 +91,12 @@ def train_baseline():
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -131,12 +131,12 @@ def train_integration():
 trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
-assert (
-trained_model_results["accuracy"] > base_model_results["accuracy"]
-), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
-assert (
-trained_model_results["f1"] > base_model_results["f1"]
-), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
+assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
+f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
+)
+assert trained_model_results["f1"] > base_model_results["f1"], (
+f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
+)
 return base_model_results, trained_model_results

@@ -145,17 +145,17 @@ if __name__ == "__main__":
 baseline_not_trained, baseline_trained = train_baseline()
 accelerator_not_trained, accelerator_trained = train_integration()
-assert (
-baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
-), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
-assert (
-baseline_not_trained["f1"] == accelerator_not_trained["f1"]
-), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
-assert (
-baseline_trained["accuracy"] == accelerator_trained["accuracy"]
-), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
-assert (
-baseline_trained["f1"] == accelerator_trained["f1"]
-), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
+assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
+f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
+)
+assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
+f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
+)
+assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
+f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
+)
+assert baseline_trained["f1"] == accelerator_trained["f1"], (
+f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
+)
 torch.distributed.destroy_process_group()
@ -70,12 +70,12 @@ def train_baseline():
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
|
||||
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
@ -104,12 +104,12 @@ def train_integration():
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
|
||||
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
@ -118,15 +118,15 @@ if __name__ == "__main__":
|
||||
baseline_not_trained, baseline_trained = train_baseline()
|
||||
accelerator_not_trained, accelerator_trained = train_integration()
|
||||
|
||||
assert (
|
||||
baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_not_trained["f1"] == accelerator_not_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
|
||||
assert (
|
||||
baseline_trained["accuracy"] == accelerator_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_trained["f1"] == accelerator_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
|
||||
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
|
||||
f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
|
||||
f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
|
||||
)
|
||||
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
|
||||
f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_trained["f1"] == accelerator_trained["f1"], (
|
||||
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
|
||||
)
|
||||
|
74
benchmarks/fsdp2/README.md
Normal file
@ -0,0 +1,74 @@
|
||||
# FSDP2 Benchmarks
|
||||
|
||||
This benchmark showcases `FSDP2` in 🤗 `accelerate` and compares it to `torch` baseline.
|
||||
|
||||
## Overview
|
||||
|
||||
This benchmark consists of two parts:
|
||||
- `main.py` is the main script that runs the benchmark
|
||||
- `visualize.py` is the script that visualizes the results (if `--output_dir` was specified for the previous command)
|
||||
|
||||
## Motivation
|
||||
|
||||
We want to showcase that 🤗 `accelerate`'s integration of `FSDP2` is on par with raw PyTorch, and highlight a "broken" part of PyTorch: creating an optimizer before applying `FSDP2` **doesn't result in a working training loop** (more on this later).
This script showcases **matching memory usage and convergence between `accelerate` and `torch`'s baseline.**
To deal with this breaking change (and keep the API backward compatible with FSDP1), `accelerate` had to come up with a workaround, since `accelerate` assumes the user will nearly always create the model, optimizer, scheduler, etc. beforehand and bring them themselves. This led to a stark increase in memory, and to the model not even training, if the user creates an optimizer beforehand.
To work around this, we replace the parameters inside the optimizer with the newly created FSDP2 sharded ones, as sketched below. More about this can be found in this [blog post (TBD)](TODO)
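A minimal sketch of the idea, assuming a `fully_shard`-style API (the real implementation lives in `utils.py` in this folder):

```python
# Sketch only: match the optimizer's parameter slots to the new sharded parameters
# via the original data pointers, relying on `replace_optimizer_params` from utils.py.
from torch.distributed.fsdp import fully_shard

old_ptrs = {name: p.data_ptr() for name, p in model.named_parameters()}
replace_optimizer_params(optimizer)  # empties the optimizer's param slots so fully_shard reallocates
fully_shard(model)                   # parameters are re-created as sharded DTensors

new_params = dict(model.named_parameters())
mapping = {ptr: new_params[name] for name, ptr in old_ptrs.items()}
for group in optimizer.param_groups:
    group["params"] = [mapping[p.data_ptr] for p in group["params"]]
```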
|
||||
> [!WARNING]
|
||||
> This script is intended to fit on 2x 24GB GPUs, though with so few GPUs it is not possible to see the memory difference (discrepancies in grad allocation result in lower memory usage in the non-fixed case), only the difference in convergence. Results from 8x H100 GPUs, where the difference is visible, are attached below.
|
||||
> TLDR: more GPUs = bigger memory difference between fixed and non-fixed cases.
|
||||
|
||||
## Results
|
||||
|
||||
Here are the results from running the benchmark on 8x H100 GPUs:
|
||||
|
||||
<p align="center">
|
||||
<img src="imgs/allocated_memory.png" width="80%" alt="Allocated Memory Usage">
|
||||
</p>
|
||||
<p align="center">
|
||||
<img src="imgs/reserved_memory.png" width="80%" alt="Reserved Memory Usage">
|
||||
</p>
|
||||
|
||||
As you can see, the memory usage of `accelerate` and `torch_post_shard` (the **intended** way) are very similar, while `torch_pre_shard_not_fixed` uses significantly more memory. Our fix in `torch_pre_shard_fixed` brings the memory usage back in line with the **intended** approach.
|
||||
|
||||
> [!WARNING]
|
||||
> Timing discrepancies are due to the benchmarks being run in a single script.
|
||||
|
||||
|
||||
## Running
|
||||
|
||||
To run the benchmark, you can either use `accelerate launch` or `torchrun`:
|
||||
```bash
|
||||
accelerate launch main.py
|
||||
```
|
||||
```bash
|
||||
# For two GPUs
|
||||
torchrun --nproc_per_node 2 main.py
|
||||
```
|
||||
|
||||
This supports multiple configurable options, you can learn about them by running:
|
||||
```bash
|
||||
python3 main.py --help
|
||||
```
|
||||
|
||||
This script will run 4 different benchmarks:
|
||||
- `torch_optimizer_after_fsdp`: `torch` baseline where optimizer is created after applying `FSDP2`, this is the **intended** way to do it
|
||||
- `torch_optimizer_before_fsdp_not_fixed`: `torch` baseline where optimizer is created before applying `FSDP2` without fixing the optimizer parameters
|
||||
- `torch_optimizer_before_fsdp_fixed`: `torch` baseline where optimizer is created before applying `FSDP2` with our fix to the optimizer
|
||||
- `accelerate`: `accelerate`'s own integration of `FSDP2` where optimizer is created before applying `FSDP2`, but we apply our fix to the optimizer
|
||||
|
||||
Memory results are saved in a folder specified by the `--output_dir` argument.
Optionally, you can specify `--save_memory_snapshot` to also save the torch memory snapshot, which can then be viewed using [`torch memory viz`](https://pytorch.org/memory_viz).
|
||||
|
||||
## Visualizing results
|
||||
|
||||
To visualize the results, you can run:
|
||||
|
||||
```bash
|
||||
python3 visualize.py --dir <path_to_output_dir>
|
||||
```
|
||||
|
||||
This will then create two plots, showcasing allocated and reserved memory usage between all the different benchmarks discussed above.
|
||||
|
||||
|
||||
|
BIN
benchmarks/fsdp2/imgs/allocated_memory.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 124 KiB |
BIN
benchmarks/fsdp2/imgs/reserved_memory.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 56 KiB |
122
benchmarks/fsdp2/main.py
Normal file
@ -0,0 +1,122 @@
|
||||
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import functools
|
||||
from typing import Callable
|
||||
|
||||
import torch
|
||||
|
||||
from accelerate import Accelerator
|
||||
from utils import parse_args, prepare_accelerate, prepare_torch
|
||||
|
||||
|
||||
MODEL_NAME = "Qwen/Qwen2.5-1.5B-Instruct"
|
||||
LEARNING_RATE = 3e-5
|
||||
|
||||
CONFIG = {
|
||||
"model_name": MODEL_NAME,
|
||||
"learning_rate": LEARNING_RATE,
|
||||
}
|
||||
|
||||
|
||||
def train(
|
||||
model: torch.nn.Module,
|
||||
optimizer: torch.optim.Optimizer,
|
||||
train_dataloader: torch.utils.data.DataLoader,
|
||||
accelerator: Accelerator,
|
||||
) -> torch.Tensor:
|
||||
losses = []
|
||||
for batch in train_dataloader:
|
||||
optimizer.zero_grad()
|
||||
outputs = model(**batch, use_cache=False)
|
||||
|
||||
loss = outputs.loss
|
||||
losses.append(loss.item())
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
|
||||
return torch.tensor(losses)
|
||||
|
||||
|
||||
def evaluate(args, config: dict, init_fn: Callable, run_name: str) -> torch.Tensor:
|
||||
model, optimizer, dataloader, accelerator, memory_tracker = init_fn(args, config)
|
||||
|
||||
loss = train(model, optimizer, dataloader, accelerator)
|
||||
|
||||
memory_tracker.stop()
|
||||
msg = f"""Results for {run_name} (rank 0):
|
||||
Loss: {loss[-1].item()}
|
||||
Peak Allocated Memory: {float(memory_tracker.peak_allocated_memory):.2f} MB
|
||||
Peak Reserved Memory: {float(memory_tracker.peak_reserved_memory):.2f} MB
|
||||
{"-" * 34}"""
|
||||
accelerator.print(msg)
|
||||
return loss
|
||||
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
evaluations = [
|
||||
functools.partial(
|
||||
evaluate,
|
||||
init_fn=functools.partial(prepare_torch, post_shard_optimizer=False, apply_optimizer_fix=True),
|
||||
run_name="Optimizer Before FSDP (w/ fix)",
|
||||
),
|
||||
functools.partial(
|
||||
evaluate,
|
||||
init_fn=functools.partial(prepare_torch, post_shard_optimizer=False, apply_optimizer_fix=False),
|
||||
run_name="Optimizer Before FSDP (w/o fix)",
|
||||
),
|
||||
functools.partial(
|
||||
evaluate,
|
||||
init_fn=functools.partial(prepare_torch, post_shard_optimizer=True),
|
||||
run_name="Optimizer After FSDP",
|
||||
),
|
||||
functools.partial(evaluate, init_fn=prepare_accelerate, run_name="Accelerate"),
|
||||
]
|
||||
labels = [
|
||||
"Optimizer Before FSDP (w/ fix)",
|
||||
"Optimizer Before FSDP (w/o fix)",
|
||||
"Optimizer After FSDP",
|
||||
"Accelerate",
|
||||
]
|
||||
|
||||
results = {}
|
||||
torch.use_deterministic_algorithms(True)
|
||||
|
||||
for evaluation, label in zip(evaluations, labels):
|
||||
results[label] = evaluation(args, CONFIG)
|
||||
|
||||
torch.testing.assert_close(
|
||||
results["Optimizer After FSDP"],
|
||||
results["Optimizer Before FSDP (w/ fix)"],
|
||||
msg="Optimizer After FSDP and Optimizer Before FSDP (w/ fix) should be the same",
|
||||
)
|
||||
|
||||
torch.testing.assert_close(
|
||||
results["Optimizer After FSDP"],
|
||||
results["Accelerate"],
|
||||
msg="Optimizer After FSDP and Accelerate should be the same",
|
||||
)
|
||||
|
||||
torch.testing.assert_close(
|
||||
results["Accelerate"],
|
||||
results["Optimizer Before FSDP (w/ fix)"],
|
||||
msg="Accelerate and Optimizer Before FSDP (w/ fix) should be the same",
|
||||
)
|
||||
|
||||
torch.distributed.destroy_process_group()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
130
benchmarks/fsdp2/measure_utils.py
Normal file
@ -0,0 +1,130 @@
|
||||
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import gc
|
||||
import json
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
|
||||
import psutil
|
||||
import torch
|
||||
|
||||
from accelerate import PartialState
|
||||
|
||||
|
||||
class MemoryTracker:
|
||||
def __init__(
|
||||
self,
|
||||
device: torch.device,
|
||||
output_directory: str,
|
||||
run_name: str,
|
||||
save_memory_snapshot: bool,
|
||||
log_interval: float = 0.01,
|
||||
):
|
||||
"""Class for tracking gpu and cpu memory usage of the process.
|
||||
|
||||
Args:
|
||||
device (`torch.device`):
|
||||
PyTorch device to monitor.
|
||||
output_directory (`str`):
|
||||
Directory to save the memory usage data to, will be created if it doesn't exist.
|
||||
run_name (`str`):
|
||||
Name of the run, will be used to name the output files.
|
||||
save_memory_snapshot (`bool`):
|
||||
Whether to also save `torch.cuda.memory._dump_snapshot` to the output directory.
|
||||
log_interval (`float`, *optional*):
|
||||
Interval in seconds between memory measurements. Defaults to 0.01.
|
||||
"""
|
||||
self.log_interval = log_interval
|
||||
self.save_memory_snapshot = save_memory_snapshot
|
||||
self.output_directory = output_directory
|
||||
self.run_name = run_name
|
||||
|
||||
self.timestamps = []
|
||||
self.allocated_memory = []
|
||||
self.reserved_memory = []
|
||||
self.virtual_memory = []
|
||||
|
||||
self.start_time = None
|
||||
self.running = False
|
||||
|
||||
self._thread = None
|
||||
self._state = PartialState()
|
||||
self._process = psutil.Process()
|
||||
self._device = device
|
||||
self.torch_accelerator_module = getattr(torch, device.type, torch.cuda)
|
||||
|
||||
def _monitor(self):
|
||||
self.start_time = time.time()
|
||||
|
||||
while self.running:
|
||||
allocated = self.torch_accelerator_module.memory_allocated(self._device) / (1024 * 1024)
|
||||
reserved = self.torch_accelerator_module.memory_reserved(self._device) / (1024 * 1024)
|
||||
virtual_memory = self._process.memory_info().rss / (1024 * 1024)
|
||||
|
||||
self.allocated_memory.append(allocated)
|
||||
self.reserved_memory.append(reserved)
|
||||
self.virtual_memory.append(virtual_memory)
|
||||
self.timestamps.append(time.time() - self.start_time)
|
||||
|
||||
time.sleep(self.log_interval)
|
||||
|
||||
def start(self):
|
||||
gc.collect()
|
||||
self.torch_accelerator_module.empty_cache()
|
||||
|
||||
if self.output_directory:
|
||||
os.makedirs(self.output_directory, exist_ok=True)
|
||||
|
||||
if self.save_memory_snapshot:
|
||||
self.torch_accelerator_module.memory._record_memory_history()
|
||||
|
||||
self.running = True
|
||||
self._thread = threading.Thread(target=self._monitor)
|
||||
self._thread.daemon = True
|
||||
self._thread.start()
|
||||
|
||||
def stop(self):
|
||||
self.running = False
|
||||
if self._thread:
|
||||
self._thread.join()
|
||||
|
||||
if self.save_memory_snapshot and self._state.is_main_process and self.output_directory:
|
||||
output_file = os.path.join(self.output_directory, f"{self.run_name}_memory_snapshot.pkl")
|
||||
self.torch_accelerator_module.memory._dump_snapshot(output_file)
|
||||
|
||||
if self._state.is_main_process and self.output_directory:
|
||||
path = os.path.join(self.output_directory, f"{self.run_name}_memory_usage.json")
|
||||
with open(path, "w") as f:
|
||||
json.dump(
|
||||
{
|
||||
"timestamps": self.timestamps,
|
||||
"allocated_memory": self.allocated_memory,
|
||||
"reserved_memory": self.reserved_memory,
|
||||
"virtual_memory": self.virtual_memory,
|
||||
},
|
||||
f,
|
||||
)
|
||||
if self.save_memory_snapshot:
|
||||
self.torch_accelerator_module.memory._record_memory_history(False)
|
||||
self.torch_accelerator_module.empty_cache()
|
||||
|
||||
@property
|
||||
def peak_allocated_memory(self):
|
||||
return max(self.allocated_memory)
|
||||
|
||||
@property
|
||||
def peak_reserved_memory(self):
|
||||
return max(self.reserved_memory)
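# Example usage (a sketch; the device, output directory and run name below are placeholders):
#
#   tracker = MemoryTracker(torch.device("cuda:0"), "results", "example_run", save_memory_snapshot=False)
#   tracker.start()
#   ...  # run the training / inference being measured
#   tracker.stop()
#   print(tracker.peak_allocated_memory, tracker.peak_reserved_memory)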
|
290
benchmarks/fsdp2/utils.py
Normal file
@ -0,0 +1,290 @@
|
||||
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
from types import MethodType
|
||||
from typing import Union
|
||||
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from measure_utils import MemoryTracker
|
||||
from torch.distributed.fsdp import MixedPrecisionPolicy, fully_shard
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, DataCollatorForLanguageModeling
|
||||
from transformers.models.qwen2.modeling_qwen2 import Qwen2DecoderLayer
|
||||
|
||||
from accelerate import Accelerator, FullyShardedDataParallelPlugin
|
||||
from accelerate.state import AcceleratorState, is_initialized
|
||||
from accelerate.utils import convert_outputs_to_fp32, set_seed
|
||||
|
||||
|
||||
SEED = 421
|
||||
|
||||
|
||||
def get_named_parameters(model: torch.nn.Module, drop_refs: bool = False) -> dict[str, Union[torch.Tensor, int]]:
|
||||
"""
|
||||
This function returns a dictionary mapping the parameter names to their data pointers or
|
||||
the original parameters if `drop_refs` is `False`.
|
||||
It is used to get the original parameter names before `fully_shard` is applied.
|
||||
|
||||
We only return the data pointers, so we drop the references to the original parameters
|
||||
and `fully_shard` will then trigger a new allocation for the sharded ones.
|
||||
|
||||
Args:
|
||||
model (`torch.nn.Module`): Model instance to get the named parameters from
|
||||
drop_refs (`bool`, *optional*, defaults to `False`): Whether to drop the references to the original parameters
|
||||
|
||||
Returns:
|
||||
`dict[str, Union[torch.Tensor, int]]`: Dictionary mapping the parameter names to their data pointers or the original parameters if `drop_refs` is `False`
|
||||
"""
|
||||
named_parameters = {}
|
||||
for n, p in model.named_parameters():
|
||||
# We only preserve the data pointers to have the unique 1:1 mapping between the original and the sharded parameters
|
||||
named_parameters[n] = p.data_ptr() if drop_refs else p
|
||||
return named_parameters
|
||||
|
||||
|
||||
def replace_optimizer_params(optimizer: torch.optim.Optimizer):
|
||||
"""
|
||||
This function is called before using `fully_shard` on the model. It replaces the parameters of the optimizer with
|
||||
empty tensors, so `fully_shard` can trigger a new allocation for the sharded ones. After this, we swap the parameters
|
||||
`data_ptr` to the original one, so we can reuse that later to map the sharded parameters to the original ones.
|
||||
This function modifies the optimizer in-place.
|
||||
|
||||
Args:
|
||||
optimizer (torch.optim.Optimizer): Optimizer instance which contains the original model parameters
|
||||
"""
|
||||
|
||||
for param_group in optimizer.param_groups:
|
||||
for i, p in enumerate(param_group["params"]):
|
||||
# We drop a reference to the original param here, so that _move_states_to_device triggers a reallocation
|
||||
# This is required or else the `fully_shard` -> `_move_states_to_device` uses the original memory address
|
||||
# for the sharded parameters, and we get a weird/undefined behavior.
|
||||
param_group["params"][i] = torch.empty_like(p)
|
||||
|
||||
# We save the original data_ptr, so we can swap back the parameters later
|
||||
param_group["params"][i].data_ptr = p.data_ptr()
|
||||
|
||||
|
||||
def swap_back_optimizer_params(
|
||||
model: torch.nn.Module, optimizer: torch.optim.Optimizer, old_named_parameter_pointers: dict[str, int]
|
||||
):
|
||||
"""
|
||||
This function is the counterpart of `replace_optimizer_params`. It is called after `fully_shard` being applied to
|
||||
the model. It swaps the parameters of the optimizer to their sharded counterparts.
|
||||
It is done using the `data_ptr` mapping prepared in `replace_optimizer_params` and `get_named_parameters`.
|
||||
|
||||
Args:
|
||||
model (`torch.nn.Module`): Model instance to get the new named parameters from
|
||||
optimizer (`torch.optim.Optimizer`): Optimizer instance to swap the parameters of
|
||||
old_named_parameter_pointers (`dict[str, int]`): Dictionary mapping the original parameter names: data_ptrs to the new ones
|
||||
"""
|
||||
# We get the new named parameters after `fully_shard` being applied
|
||||
# We don't drop the references as we need the sharded parameters now
|
||||
new_named_parameters = get_named_parameters(model, drop_refs=False)
|
||||
|
||||
# We create a mapping from the original data_ptr to the new sharded param corresponding to it
|
||||
mapping = {p: new_named_parameters[n] for n, p in old_named_parameter_pointers.items()}
|
||||
|
||||
for param_group in optimizer.param_groups:
|
||||
# We swap the parameters of the optimizer to the new sharded ones
|
||||
param_group["params"] = [mapping[p.data_ptr] for p in param_group["params"]]
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--output_dir",
|
||||
type=str,
|
||||
help="Directory to save the benchmarking results.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--save_memory_snapshot",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="If True, `torch.cuda.memory._dump_snapshot` will be used to additionaly save the memory trace.",
|
||||
)
|
||||
######################
|
||||
# Training arguments #
|
||||
######################
|
||||
parser.add_argument(
|
||||
"--batch_size",
|
||||
type=int,
|
||||
default=2,
|
||||
help="Batch size for the training loop.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--block_size",
|
||||
type=int,
|
||||
default=128,
|
||||
help="The maximum sequence length to use with the model.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--dataset_fraction",
|
||||
type=float,
|
||||
default=1.0,
|
||||
help="Fraction of the dataset to use.",
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def prepare_dataloader(tokenizer, args, accelerator: Accelerator) -> DataLoader:
|
||||
dataset = load_dataset("tiny_shakespeare", split="train", trust_remote_code=True)
|
||||
|
||||
def tokenize_function(example):
|
||||
return tokenizer(
|
||||
example["text"],
|
||||
)
|
||||
|
||||
dataset = dataset.map(
|
||||
tokenize_function,
|
||||
batched=True,
|
||||
remove_columns=["text"],
|
||||
)
|
||||
|
||||
block_size = min(tokenizer.model_max_length, args.block_size)
|
||||
|
||||
def group_texts(examples):
|
||||
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
|
||||
total_length = len(concatenated_examples[list(examples.keys())[0]])
|
||||
|
||||
total_length = (total_length // block_size) * block_size
|
||||
|
||||
result = {
|
||||
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
|
||||
for k, t in concatenated_examples.items()
|
||||
}
|
||||
|
||||
result["labels"] = result["input_ids"].copy()
|
||||
return result
|
||||
|
||||
dataset = dataset.map(group_texts, batched=True)
|
||||
dataset = dataset.select(range(int(len(dataset) * args.dataset_fraction)))
|
||||
|
||||
def collate_fn(examples):
|
||||
return DataCollatorForLanguageModeling(
|
||||
tokenizer=tokenizer,
|
||||
mlm=False,
|
||||
)(examples)
|
||||
|
||||
dataloader = DataLoader(
|
||||
dataset,
|
||||
batch_size=args.batch_size,
|
||||
collate_fn=collate_fn,
|
||||
)
|
||||
dataloader = accelerator.prepare(dataloader)
|
||||
return dataloader
|
||||
|
||||
|
||||
def get_model(model_name: str):
|
||||
# We require the model to be loaded in fp32, otherwise benchmarks don't match, as accelerate upcasts parameters to fp32
|
||||
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True, torch_dtype=torch.float32)
|
||||
model = AutoModelForCausalLM.from_config(config)
|
||||
return model
|
||||
|
||||
|
||||
def get_tokenizer(model_name: str):
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
return tokenizer
|
||||
|
||||
|
||||
def prepare_torch(
|
||||
args, config: dict, post_shard_optimizer: bool = False, apply_optimizer_fix: bool = False
|
||||
) -> tuple[torch.nn.Module, torch.optim.Optimizer, torch.utils.data.DataLoader, Accelerator]:
|
||||
mp_policy = MixedPrecisionPolicy(
|
||||
param_dtype=torch.bfloat16,
|
||||
reduce_dtype=torch.bfloat16,
|
||||
output_dtype=torch.bfloat16,
|
||||
)
|
||||
|
||||
accelerator = Accelerator(mixed_precision="bf16")
|
||||
set_seed(SEED)
|
||||
is_fixed = "fixed" if apply_optimizer_fix else "not_fixed"
|
||||
is_post_shard = "optimizer_after_fsdp" if post_shard_optimizer else "optimizer_before_fsdp"
|
||||
run_name = f"torch_{is_post_shard}" if post_shard_optimizer else f"torch_{is_post_shard}_{is_fixed}"
|
||||
|
||||
tokenizer = get_tokenizer(config["model_name"])
|
||||
train_dataloader = prepare_dataloader(tokenizer, args, accelerator)
|
||||
|
||||
memory_tracker = MemoryTracker(accelerator.device, args.output_dir, run_name, args.save_memory_snapshot)
|
||||
memory_tracker.start()
|
||||
|
||||
model = get_model(config["model_name"])
|
||||
optimizer = None
|
||||
|
||||
if not post_shard_optimizer:
|
||||
optimizer = AdamW(model.parameters(), lr=config["learning_rate"])
|
||||
|
||||
if apply_optimizer_fix:
|
||||
# We drop the references to the original parameters, so that `fully_shard` can trigger a new allocation
|
||||
# Then we get the `module_name: data_ptr` mapping, so we can swap back the parameters later
|
||||
old_named_parameters = get_named_parameters(model, drop_refs=True)
|
||||
|
||||
# We replace the parameters of the optimizer with empty tensors, so that `fully_shard` can trigger a new allocation
|
||||
# We also change the `data_ptr` of the parameters to the original ones, so we can swap back the parameters later
|
||||
replace_optimizer_params(optimizer)
|
||||
|
||||
for module in model.modules():
|
||||
if isinstance(module, Qwen2DecoderLayer):
|
||||
fully_shard(module, mp_policy=mp_policy)
|
||||
fully_shard(model, mp_policy=mp_policy)
|
||||
|
||||
# We do this to imitate how accelerate forces outputs to be in fp32 via `convert_outputs_to_fp32`
|
||||
autocast_context = torch.autocast(device_type=accelerator.state.device.type, dtype=torch.bfloat16)
|
||||
model_forward_func = model.forward.__func__
|
||||
new_forward = autocast_context(model_forward_func)
|
||||
model.forward = MethodType(new_forward, model)
|
||||
model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model)
|
||||
|
||||
if post_shard_optimizer:
|
||||
optimizer = AdamW(model.parameters(), lr=config["learning_rate"])
|
||||
|
||||
if not post_shard_optimizer and apply_optimizer_fix:
|
||||
# We swap back the parameters of the optimizer to the original ones
|
||||
swap_back_optimizer_params(model, optimizer, old_named_parameters)
|
||||
|
||||
return model, optimizer, train_dataloader, accelerator, memory_tracker
|
||||
|
||||
|
||||
def prepare_accelerate(
|
||||
args, config: dict
|
||||
) -> tuple[torch.nn.Module, torch.optim.Optimizer, torch.utils.data.DataLoader, Accelerator]:
|
||||
if is_initialized():
|
||||
AcceleratorState()._reset_state(True)
|
||||
|
||||
fsdp_plugin = FullyShardedDataParallelPlugin(
|
||||
fsdp_version=2,
|
||||
auto_wrap_policy="transformer_based_wrap",
|
||||
transformer_cls_names_to_wrap=["Qwen2DecoderLayer"],
|
||||
)
|
||||
accelerator = Accelerator(
|
||||
fsdp_plugin=fsdp_plugin,
|
||||
mixed_precision="bf16",
|
||||
)
|
||||
set_seed(SEED)
|
||||
|
||||
tokenizer = get_tokenizer(config["model_name"])
|
||||
train_dataloader = prepare_dataloader(tokenizer, args, accelerator)
|
||||
|
||||
memory_tracker = MemoryTracker(accelerator.device, args.output_dir, "accelerate", args.save_memory_snapshot)
|
||||
memory_tracker.start()
|
||||
|
||||
model = get_model(config["model_name"])
|
||||
optimizer = AdamW(model.parameters(), lr=config["learning_rate"])
|
||||
|
||||
model, optimizer = accelerator.prepare(model, optimizer)
|
||||
|
||||
return model, optimizer, train_dataloader, accelerator, memory_tracker
|
114
benchmarks/fsdp2/visualize.py
Normal file
@ -0,0 +1,114 @@
|
||||
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import json
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--dir", type=str, help="Directory containing the memory usage data")
|
||||
parser.add_argument(
|
||||
"--memory_threshold",
|
||||
type=int,
|
||||
default=0,
|
||||
help="Memory threshold to filter data that is below this value (only filters 1st `--filter_partition` of the points which should roughtly correspond to the model loading)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--filter_partition",
|
||||
type=float,
|
||||
default=1 / 3,
|
||||
help="Partition to drop data from that are below the memory threshold",
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def filter_data(data, memory_threshold, filter_partition, key):
|
||||
timestamps = data["timestamps"]
|
||||
memory = data[key]
|
||||
|
||||
mid_point = int(len(timestamps) * filter_partition)
|
||||
filtered_times = []
|
||||
filtered_memory = []
|
||||
for i, (t, m) in enumerate(zip(timestamps, memory)):
|
||||
if i < mid_point and m < memory_threshold:
|
||||
continue
|
||||
filtered_times.append(t)
|
||||
filtered_memory.append(m)
|
||||
return filtered_times, filtered_memory
|
||||
|
||||
|
||||
def compare_memory_usage(data, labels, memory_threshold, filter_partition):
|
||||
plt.style.use("seaborn-v0_8")
|
||||
colors = ["#2ecc71", "#e74c3c", "#3498db", "#f1c40f"]
|
||||
|
||||
fig1, ax1 = plt.subplots(figsize=(15, 5))
|
||||
for data_item, label, color in zip(data, labels, colors):
|
||||
timestamps, allocated = filter_data(data_item, memory_threshold, filter_partition, "allocated_memory")
|
||||
ax1.plot(timestamps, allocated, label=label, color=color, linewidth=2)
|
||||
|
||||
ax1.set_xlabel("Time (s)", fontsize=12)
|
||||
ax1.set_ylabel("Allocated Memory (GB)", fontsize=12)
|
||||
ax1.set_title("Allocated Memory Usage Over Time", fontsize=14, pad=15)
|
||||
ax1.grid(True, linestyle="--", alpha=0.7)
|
||||
ax1.legend(frameon=True, fancybox=True, shadow=True, fontsize=10)
|
||||
ax1.spines["top"].set_visible(False)
|
||||
ax1.spines["right"].set_visible(False)
|
||||
plt.tight_layout()
|
||||
|
||||
fig2, ax2 = plt.subplots(figsize=(15, 5))
|
||||
for data_item, label, color in zip(data, labels, colors):
|
||||
timestamps, reserved = filter_data(data_item, memory_threshold, filter_partition, "reserved_memory")
|
||||
ax2.plot(timestamps, reserved, label=label, color=color, linewidth=2)
|
||||
|
||||
ax2.set_xlabel("Time (s)", fontsize=12)
|
||||
ax2.set_ylabel("Reserved Memory (GB)", fontsize=12)
|
||||
ax2.set_title("Reserved Memory Usage Over Time", fontsize=14, pad=15)
|
||||
ax2.grid(True, linestyle="--", alpha=0.7)
|
||||
ax2.legend(frameon=True, fancybox=True, shadow=True, fontsize=10)
|
||||
ax2.spines["top"].set_visible(False)
|
||||
ax2.spines["right"].set_visible(False)
|
||||
plt.tight_layout()
|
||||
|
||||
return fig1, fig2
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = parse_args()
|
||||
DIR = args.dir
|
||||
with open(f"{DIR}/torch_optimizer_before_fsdp_not_fixed_memory_usage.json") as f:
|
||||
optimizer_before_fsdp_not_fixed = json.load(f)
|
||||
|
||||
with open(f"{DIR}/torch_optimizer_after_fsdp_memory_usage.json") as f:
|
||||
optimizer_after_fsdp = json.load(f)
|
||||
|
||||
with open(f"{DIR}/torch_optimizer_before_fsdp_fixed_memory_usage.json") as f:
|
||||
optimizer_before_fsdp_fixed = json.load(f)
|
||||
|
||||
with open(f"{DIR}/accelerate_memory_usage.json") as f:
|
||||
accelerate = json.load(f)
|
||||
|
||||
data = [optimizer_before_fsdp_not_fixed, optimizer_before_fsdp_fixed, optimizer_after_fsdp, accelerate]
|
||||
labels = [
|
||||
"Optimizer Before FSDP (w/o fix)",
|
||||
"Optimizer Before FSDP (w/ fix)",
|
||||
"Optimizer After FSDP",
|
||||
"Accelerate",
|
||||
]
|
||||
|
||||
fig1, fig2 = compare_memory_usage(data, labels, args.memory_threshold, args.filter_partition)
|
||||
fig1.savefig(f"{DIR}/allocated_memory.png")
|
||||
fig2.savefig(f"{DIR}/reserved_memory.png")
|
111
benchmarks/torch.compile/README.md
Normal file
@ -0,0 +1,111 @@
|
||||
# Regional Compilation Benchmark
|
||||
|
||||
This benchmark compares different compilation strategies using PyTorch's `torch.compile` and Accelerate's `compile_regions` utility, which is based on the recipe in [PyTorch documentation](https://pytorch.org/tutorials/recipes/regional_compilation.html).
|
||||
|
||||
## Overview
|
||||
|
||||
The benchmark evaluates three approaches:
|
||||
|
||||
- **Baseline**: No compilation, standard PyTorch eager execution.
|
||||
- **Full compilation**: Using PyTorch's `torch.compile()` on the entire model.
|
||||
- **Regional compilation**: Using `accelerate.utils.compile_regions()` which targets specific blocks of the model to optimize compilation time.
|
||||
|
||||
Each approach is tested with different batch sizes (1 and 4) and sequence lengths (128) on various LLaMA-based models ranging from 1B to 13B parameters. We purposefully run the forward pass outside of the `torch.no_grad()` context to simulate performance in a training environment, where gradients are needed.
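For reference, the three variants are constructed roughly like this (a sketch; `model` stands in for any of the benchmarked causal LMs):

```python
import torch
from accelerate.utils import compile_regions

baseline_model = model                   # eager execution, no compilation
full_model = torch.compile(model)        # compile the whole model as one graph
regional_model = compile_regions(model)  # compile the repeated blocks (e.g. decoder layers) individually
```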
|
||||
|
||||
## Usage
|
||||
|
||||
To run this benchmark:
|
||||
|
||||
```bash
|
||||
python regional_compilation.py
|
||||
```
|
||||
|
||||
The script will automatically download the model configurations, create models, and benchmark both compilation and inference times across different scenarios.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Suitable GPU memory for the models being tested.
|
||||
- PyTorch with CUDA support.
|
||||
- Transformers library.
|
||||
- Accelerate library.
|
||||
|
||||
## Results
|
||||
|
||||
The benchmark results are summarized in the following figures:
|
||||
|
||||
- Compilation time is how long it takes to run the first forward pass.
|
||||
- Speedup factor is the ratio of non-compiled baseline inference time to the fully/regionally compiled inference time.
|
||||
|
||||
<p align="center">
|
||||
<img src="imgs/compilation_time.png" width="80%" alt="Compilation Time">
|
||||
</p>
|
||||
<p align="center">
|
||||
<img src="imgs/speedup_factor.png" width="80%" alt="Speedup Factor">
|
||||
</p>
|
||||
|
||||
Full results are available in the tables below:
|
||||
|
||||
```markdown
|
||||
[-------------------------------------------------- NousResearch/Llama-3.2-1B ---------------------------------------------------]
|
||||
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
|
||||
1 threads: -----------------------------------------------------------------------------------------------------------------------
|
||||
Baseline | 18.3 | 18.4 | |
|
||||
Full compilation | 6.3 | 10.0 | 10696.4 | 10248.0
|
||||
Regional compilation | 9.7 | 10.0 | 1952.7 | 2903.9
|
||||
|
||||
Times are in milliseconds (ms).
|
||||
|
||||
[---------------------------------------------- NousResearch/Hermes-3-Llama-3.2-3B ----------------------------------------------]
|
||||
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
|
||||
1 threads: -----------------------------------------------------------------------------------------------------------------------
|
||||
Baseline | 33.4 | 33.6 | |
|
||||
Full compilation | 11.2 | 23.9 | 17857.5 | 17736.5
|
||||
Regional compilation | 17.3 | 23.7 | 2993.2 | 2478.8
|
||||
|
||||
Times are in milliseconds (ms).
|
||||
|
||||
[---------------------------------------------- NousResearch/Hermes-3-Llama-3.1-8B ----------------------------------------------]
|
||||
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
|
||||
1 threads: -----------------------------------------------------------------------------------------------------------------------
|
||||
Baseline | 40.3 | 59.5 | |
|
||||
Full compilation | 18.9 | 54.4 | 20437.8 | 20152.3
|
||||
Regional compilation | 19.7 | 54.0 | 2903.1 | 2438.0
|
||||
|
||||
Times are in milliseconds (ms).
|
||||
|
||||
[--------------------------------------------- NousResearch/Nous-Hermes-Llama2-13b ----------------------------------------------]
|
||||
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
|
||||
1 threads: -----------------------------------------------------------------------------------------------------------------------
|
||||
Baseline | 45.5 | 100.4 | |
|
||||
Full compilation | 29.4 | 89.7 | 23099.4 | 22885.9
|
||||
Regional compilation | 29.4 | 87.5 | 2945.5 | 2526.2
|
||||
|
||||
Times are in milliseconds (ms).
|
||||
```
|
||||
|
||||
## Results Summary
|
||||
|
||||
### Compilation Time
|
||||
|
||||
Regional compilation provides significantly faster compilation times compared to full model compilation:
|
||||
|
||||
- **Full compilation**: Takes ~10-23 seconds depending on model size.
|
||||
- **Regional compilation**: Takes only ~2-3 seconds across all model sizes.
|
||||
- **Speed improvement**: Regional compilation is **5-9x faster** to compile.
|
||||
|
||||
### Inference Time
|
||||
|
||||
Regional compilation delivers inference performance close to full compilation:
|
||||
|
||||
- For batch size 1:
|
||||
- For smaller models (1B-3B): Full compilation has a slight edge over regional compilation.
|
||||
- For larger models (8B-13B): Regional compilation performs similarly to full compilation.
|
||||
- For batch size 4: Regional compilation performs similarly to full compilation across all models.
|
||||
|
||||
## Key Takeaways
|
||||
|
||||
1. **Comparable Performance**: Regional compilation delivers performance speedups similar to full compilation, especially for larger models.
|
||||
2. **Faster Compilation**: Regional compilation significantly reduces the time taken to compile models, making it a more efficient choice for deployment.
|
||||
3. **Batch Size Impact**: At batch size 4, full compilation and regional compilation perform nearly identically.
|
||||
4. **Model Size Impact**: Even with a small batch size, full compilation and regional compilation perform similarly for larger models (8B-13B).
|
||||
5. **Practical Application**: For real-world applications, regional compilation is a practical choice for optimizing training cold start times, especially when working with large models.
|
BIN
benchmarks/torch.compile/imgs/compilation_time.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 242 KiB |
BIN
benchmarks/torch.compile/imgs/speedup_factor.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 218 KiB |
77
benchmarks/torch.compile/regional_compilation.py
Normal file
@ -0,0 +1,77 @@
|
||||
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import torch
|
||||
from torch.utils.benchmark import Compare, Timer
|
||||
from transformers import AutoConfig, AutoModelForCausalLM
|
||||
|
||||
from accelerate.test_utils.testing import get_backend
|
||||
from accelerate.utils import compile_regions
|
||||
|
||||
|
||||
torch.set_float32_matmul_precision("high")
|
||||
|
||||
COMPILE_ITERS = 2
|
||||
INFERENCE_ITERS = 100
|
||||
|
||||
BASELINE = "Baseline"
|
||||
COMPILE_TIME = "Compile time"
|
||||
INFRENCE_TIME = "Inference time"
|
||||
FULL_COMPILATION = "Full compilation"
|
||||
REGIONAL_COMPILATION = "Regional compilation"
|
||||
|
||||
INFRENCE_STMT = "model(input_ids, use_cache=False)"
|
||||
COMPILE_STMT = f"torch._dynamo.reset(); torch._inductor.utils.clear_inductor_caches(); {INFRENCE_STMT}"
|
||||
|
||||
torch_device_type, _, _ = get_backend()
|
||||
|
||||
results = []
|
||||
for model_id in [
|
||||
# non-gated llama models
|
||||
"NousResearch/Llama-3.2-1B",
|
||||
"NousResearch/Hermes-3-Llama-3.2-3B",
|
||||
"NousResearch/Hermes-3-Llama-3.1-8B",
|
||||
"NousResearch/Nous-Hermes-Llama2-13b",
|
||||
]:
|
||||
with torch.device(torch_device_type):
|
||||
config = AutoConfig.from_pretrained(model_id)
|
||||
model = AutoModelForCausalLM.from_config(config).to(dtype=torch.float16).eval()
|
||||
|
||||
full_compilation_model = torch.compile(model)
|
||||
regional_compilation_model = compile_regions(model)
|
||||
|
||||
for model, sub_label, description, stmt, iters in [
|
||||
(model, BASELINE, INFRENCE_TIME, INFRENCE_STMT, INFERENCE_ITERS),
|
||||
(full_compilation_model, FULL_COMPILATION, COMPILE_TIME, COMPILE_STMT, COMPILE_ITERS),
|
||||
(full_compilation_model, FULL_COMPILATION, INFRENCE_TIME, INFRENCE_STMT, INFERENCE_ITERS),
|
||||
(regional_compilation_model, REGIONAL_COMPILATION, COMPILE_TIME, COMPILE_STMT, COMPILE_ITERS),
|
||||
(regional_compilation_model, REGIONAL_COMPILATION, INFRENCE_TIME, INFRENCE_STMT, INFERENCE_ITERS),
|
||||
]:
|
||||
for batch_size, sequence_length in [(1, 128), (4, 128)]:
|
||||
input_ids = torch.randint(
|
||||
0, 1000, size=(batch_size, sequence_length), dtype=torch.int64, device=torch_device_type
|
||||
)
|
||||
results.append(
|
||||
Timer(
|
||||
label=model_id,
|
||||
sub_label=sub_label,
|
||||
description=f"{description} ({batch_size}x{sequence_length})",
|
||||
globals={"model": model, "input_ids": input_ids},
|
||||
stmt=stmt,
|
||||
).timeit(number=iters)
|
||||
)
|
||||
|
||||
compare = Compare(results)
|
||||
compare.colorize()
|
||||
compare.print()
|
@ -25,12 +25,12 @@ RUN source activate accelerate && conda install -c conda-forge mpi4py
|
||||
RUN source activate accelerate && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers,deepspeed] \
|
||||
--extra-index-url https://download.pytorch.org/whl/cu117
|
||||
--extra-index-url https://download.pytorch.org/whl/cu126
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir bitsandbytes
|
||||
|
||||
# Stage 2
|
||||
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 AS build-image
|
||||
FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu22.04 AS build-image
|
||||
COPY --from=compile-image /opt/conda /opt/conda
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
|
||||
|
@ -24,12 +24,12 @@ RUN source activate accelerate && conda install -c conda-forge mpi4py
|
||||
RUN source activate accelerate && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \
|
||||
--extra-index-url https://download.pytorch.org/whl/cu117
|
||||
--extra-index-url https://download.pytorch.org/whl/cu126
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir bitsandbytes
|
||||
|
||||
# Stage 2
|
||||
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 AS build-image
|
||||
FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu22.04 AS build-image
|
||||
COPY --from=compile-image /opt/conda /opt/conda
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
|
||||
|
@ -64,6 +64,10 @@
|
||||
title: Apple M1 GPUs
|
||||
- local: usage_guides/ipex
|
||||
title: IPEX training with CPU
|
||||
- local: usage_guides/gaudi
|
||||
title: Intel Gaudi
|
||||
- local: usage_guides/compilation
|
||||
title: Compilation
|
||||
title: Training
|
||||
- isExpanded: true
|
||||
sections:
|
||||
@ -78,6 +82,8 @@
|
||||
title: Accelerate's internal mechanism
|
||||
- local: concept_guides/big_model_inference
|
||||
title: Loading big models into memory
|
||||
- local: concept_guides/context_parallel
|
||||
title: Context parallelism
|
||||
- local: concept_guides/performance
|
||||
title: Comparing performance across distributed setups
|
||||
- local: concept_guides/deferring_execution
|
||||
@ -86,12 +92,14 @@
|
||||
title: Gradient synchronization
|
||||
- local: concept_guides/fsdp_and_deepspeed
|
||||
title: FSDP vs DeepSpeed
|
||||
- local: concept_guides/fsdp1_vs_fsdp2
|
||||
title: FSDP1 vs FSDP2
|
||||
- local: concept_guides/low_precision_training
|
||||
title: Low precision training methods
|
||||
- local: concept_guides/training_tpu
|
||||
title: Training on TPUs
|
||||
title: Concepts and fundamentals
|
||||
- sections:
|
||||
- sections:
|
||||
- local: package_reference/accelerator
|
||||
title: Accelerator
|
||||
- local: package_reference/state
|
||||
|
@ -26,7 +26,7 @@ You will also learn how to setup a few requirements needed for ensuring your env
|
||||
|
||||
## Configuring the Environment
|
||||
|
||||
Before any training can be performed, a Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts:
|
||||
Before any training can be performed, an Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
@ -52,7 +52,7 @@ os._exit(00) # Restart the notebook
|
||||
|
||||
## Preparing the Dataset and Model
|
||||
|
||||
Next you should prepare your dataset. As mentioned at earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU.
|
||||
Next you should prepare your dataset. As mentioned earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU.
|
||||
|
||||
If you do, it is recommended to put that specific code into a function and call that from within the notebook launcher interface, which will be shown later.
|
||||
|
||||
|
@ -153,7 +153,7 @@ To use [`find_executable_batch_size`], restructure your training function to inc
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
The inner function **must** take batch size as the first parameter, but we do not pass one to it when called. The wrapper will handles this for you. Any object (models, optimizers) that consumes device memory and is passed to the [`Accelerator`] also **must** be declared inside the inner function.
|
||||
The inner function **must** take batch size as the first parameter, but we do not pass one to it when called. The wrapper will handle this for you. Any object (models, optimizers) that consumes device memory and is passed to the [`Accelerator`] also **must** be declared inside the inner function.
|
||||
|
||||
</Tip>
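A minimal sketch of the resulting structure (the helper name and starting batch size are placeholders):

```python
from accelerate import Accelerator
from accelerate.utils import find_executable_batch_size

def training_function():
    accelerator = Accelerator()

    @find_executable_batch_size(starting_batch_size=128)
    def inner_training_loop(batch_size):
        nonlocal accelerator
        accelerator.free_memory()  # release references before retrying with a smaller batch size
        model, optimizer, dataloader = build_everything(batch_size)  # hypothetical helper
        model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
        ...  # the usual training loop goes here

    inner_training_loop()
```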
|
||||
|
||||
|
156
docs/source/concept_guides/context_parallel.md
Normal file
@ -0,0 +1,156 @@
|
||||
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Context Parallel in 🤗`accelerate`
|
||||
|
||||
This guide covers the basics of using context parallelism in 🤗`accelerate`; for the more curious readers, we also cover some technicalities in the later sections.
|
||||
|
||||
## Why context parallelism?
|
||||
|
||||
With the advent of large language models, and recently reasoning models, sequence lengths have been growing rapidly. This, combined with the quadratic memory complexity of attention, has led to a need for more efficient ways to train models with long sequences.
With a sequence length of 128k, the memory requirement of the attention matrix is `128k * 128k * 2 bytes * num_heads = ~32 GB * num_heads` for `bf16` precision, given a vanilla attention implementation. Granted, with `flash attention` or `SDPA`, which do not materialize these attention weights, this decreases drastically, but the growth in memory requirements is still considerable.
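As a quick sanity check of that figure, per attention head:

```python
# Attention matrix memory per head at a 128k sequence length in bf16 (2 bytes per element)
seq_len = 128 * 1024
per_head_bytes = seq_len * seq_len * 2
print(per_head_bytes / 1024**3)  # ~32 GiB per head, before multiplying by num_heads
```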
|
||||
|
||||
Context parallelism allows us to shard the inputs to the attention computation along the sequence dimension and compute the attention in parallel on multiple GPUs. With this, we can train models with long sequences, scaling potentially to 1M+ sequence length.
|
||||
|
||||
|
||||
## How to use context parallelism?
|
||||
|
||||
As with any other feature in 🤗`accelerate`, enabling context parallelism is as simple as passing the corresponding flags to `accelerate launch`.
|
||||
In this case, it's no different:
|
||||
|
||||
```bash
|
||||
accelerate launch --context-parallel-size 8 --context-parallel-shard-rotation [allgather|alltoall] ...
|
||||
```
|
||||
|
||||
Context parallelism is (for now) tightly coupled with `FSDP2`, which you can learn more about in the [FSDP2 introduction](fsdp1_vs_fsdp2.md). This means context parallelism is applied only if `FSDP2` is enabled.
You can also enable context parallelism programmatically, by passing it to the `FullyShardedDataParallelPlugin` constructor:
|
||||
|
||||
```diff
|
||||
from accelerate.utils import FullyShardedDataParallelPlugin
|
||||
|
||||
plugin = FullyShardedDataParallelPlugin(
|
||||
...
|
||||
fsdp_version=2,
|
||||
+ cp_size=8,
|
||||
+ cp_comm_strategy="allgather",
|
||||
)
|
||||
accelerator = Accelerator(fsdp_plugin=plugin)
|
||||
```
|
||||
|
||||
After enabling context parallelism with the methods mentioned above, you can apply it to your training loop. We provide a thin wrapper around [`torch.distributed.tensor.experimental.context_parallel`](https://docs.pytorch.org/docs/stable/distributed.tensor.html#torch.distributed.tensor.experimental.context_parallel) that you can use in your training loop and that abstracts away some of the complexity of using it (more on this later).
|
||||
You can use it as follows:
|
||||
|
||||
```python
|
||||
for batch in dataloader:
|
||||
with accelerator.context_parallel(
|
||||
buffers=[batch["input_ids"], batch["attention_mask"]],
|
||||
buffer_seq_dims=[1, 1],
|
||||
no_restore_buffers={batch["input_ids"]},
|
||||
):
|
||||
outputs = model(batch)
|
||||
...
|
||||
```
|
||||
|
||||
> [!Warning]
|
||||
> This context manager has to be recreated with each training step, as shown in the example above. It's crucial to do so.
|
||||
|
||||
This can potentially scale your context size to 1M+ sequence length. Below, we showcase the speed and memory usage of context parallelism for up to 256k context size. We can see that when we double the context size and the number of GPUs, memory usage stays consistent, potentially enabling endless context length scaling.
|
||||
|
||||
<p align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/cp_perf.png" alt="context parallelism memory usage" />
|
||||
<br>
|
||||
<em>Figure 1: Memory usage and speed of context parallelism for up to 256k context size.</em>
|
||||
</p>
|
||||
|
||||
> [!Tip]
|
||||
> These examples were created with a script you can find [in the examples folder](https://github.com/huggingface/accelerate/blob/main/examples/fsdp2/fsdp2_context_parallel.py). For instructions on how to run it, see the [README](https://github.com/huggingface/accelerate/blob/main/examples/fsdp2/README.md) in the same folder.
|
||||
|
||||
|
||||
## Accelerate's interface
|
||||
|
||||
The context manager takes a few arguments that are used to configure context parallelism.
|
||||
|
||||
- `buffers`: a list of tensors to be sharded along the sequence dimension. These tensors are usually the input ids, labels and attention mask.
- `buffer_seq_dims`: a list of integers specifying the sequence dimension of each buffer, in the order of the `buffers` list.
- `no_restore_buffers`: the implementation of context parallelism modifies the buffers in place, converting them to `torch.distributed.tensor.DTensor`s. After the context manager exits, a communication kernel would need to be launched to restore the buffers to their original state (usually an all-gather). This takes some time, so it is recommended to pass the same tensors as in the `buffers` argument to avoid unnecessary communication, unless you are sure you need to use those buffers after the context manager exits. A typical call is sketched below.
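Putting the three arguments together, a typical call for a causal-LM batch might look like the sketch below, which continues the training-loop snippet from above (so `accelerator`, `model` and `batch` are assumed to be defined as shown there):

```python
buffers = [batch["input_ids"], batch["labels"], batch["attention_mask"]]

with accelerator.context_parallel(
    buffers=buffers,
    buffer_seq_dims=[1, 1, 1],        # all three tensors are [batch, seq]
    no_restore_buffers=set(buffers),  # we don't need the unsharded tensors after the step
):
    loss = model(**batch).loss
    accelerator.backward(loss)
```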
|
||||
|
||||
## Configurable options
|
||||
Accelerate provides only a few options to configure context parallelism, which are:
|
||||
|
||||
- `cp_size`: the number of ranks across which the inputs to the attention computation are sharded along the sequence dimension.
- `cp_comm_strategy`: the rotation method to use for the shards. We strongly recommend keeping this as `"allgather"`, as it's very likely to outperform `"alltoall"` in most cases.
|
||||
|
||||
Context parallel size is rather self-explanatory: it's the number of ranks across which the inputs are to be sharded.
|
||||
Context parallel shard rotation defines how the shards of the inputs are rotated across ranks. We'll cover the 2 options in more detail in the next section.
|
||||
|
||||
You can see an end-to-end example in the [FSDP2 context parallel example](https://github.com/huggingface/accelerate/blob/main/examples/fsdp2/fsdp2_context_parallel.py) file, where you can train an 8B model with 128k sequence length on 8x H100 SXM GPUs. Using multi-node training, you can scale this to 1M+ sequence length on 64x H100 SXM GPUs.
|
||||
|
||||
## Technical details
|
||||
|
||||
> [!Tip]
|
||||
> This section is fairly technical, so if you don't need to learn the internals of context parallelism, you can skip it and start building 🚀
|
||||
|
||||
We're going to be using the word `shard` extensively in the following sections, so let's define it first. If we say a tensor is `sharded` along its `D`-th dimension across `N` ranks, we mean that the tensor is split into `N` parts along that dimension, where each part has shape `[..., D//N, ...]` (with `D` here denoting the size of that dimension).
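For example, this is what sharding a `[batch, seq, hidden]` tensor along its sequence dimension across 4 ranks looks like (simulated on a single process with `torch.chunk`):

```python
import torch

N = 4                        # number of ranks
x = torch.randn(2, 128, 64)  # [batch, seq, hidden]

# "sharding" x along dim=1 across N ranks: each rank keeps exactly one of these chunks
shards = torch.chunk(x, N, dim=1)
print(shards[0].shape)  # torch.Size([2, 32, 64]) -> the sequence dimension is divided by N
```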
|
||||
|
||||
|
||||
## So how does it work?
|
||||
|
||||
Context parallelism works by sharding the `Q`, `K` and `V` matrices along the sequence dimension. Each rank has its assigned shard of `Q`, let's call it `Q_i`. This matrix stays only on this rank during the whole computation. Similarly, each rank has its own shard of `K` and `V`, let's call them `K_i` and `V_i`. Each rank then calculates attention with its own shards `Q_i`, `K_i` and `V_i`, let's call it `attn_i`. During this computation, a communication kernel is launched to gather the `K`s and `V`s from all other ranks. Which communication primitive is used depends on the `context_parallel_shard_rotation` option.
This way, each rank gets to calculate local attention, first with `Q_i`, `K_i` and `V_i`, then with `K_j` and `V_j` from all other ranks. As each rank holds `Q`, `K` and `V` matrices that are sharded along the sequence dimension, the resulting matrices are smaller and can fit on a single GPU.
|
||||
|
||||
We can formalize this in the following pseudocode:
```python
comm_kernel = {"allgather": allgather, "alltoall": alltoall}[context_parallel_shard_rotation]

# each rank i keeps its own shards and attends to them first
Qi, Ki, Vi = shard(Q, K, V, seq_dim)
attn[i] = attn(Qi, Ki, Vi)

# then it attends to the K/V shards gathered (or rotated in) from every other rank j
for j in other_ranks():
    Kj, Vj = comm_kernel()
    attn[j] = attn(Qi, Kj, Vj)  # [batch, num_heads, seq_len // context_parallel_size, head_dim]

final_attn = combine(attn)
```
|
||||
|
||||
## all-to-all vs all-gather
|
||||
|
||||
### all-gather
|
||||
So what's the difference between all-to-all and all-gather? With all-gather, the communication is very simple. Around the time we compute the local attention `attn_i` (the all-gather is usually launched beforehand, as it takes longer), we launch an all-gather to collect all the other `K`s and `V`s from the other ranks. Once this communication is done, each rank has all the `K`s and `V`s and can compute attention with them sequentially.
In an ideal scenario, the all-gather finishes at the exact moment the calculation of `attn_i` is done. However, this never happens in practice, so the best realistic overlap is achieved when the full computation of `attn_i` is overlapped with a part of the communication; then, to start the computation with `K_j` and `V_j`, we wait for the all-gather to finish.
|
||||
|
||||
### all-to-all
|
||||
All-to-all, sometimes called `ring-rotation`, utilizes a ring-like communication pattern. After concluding the `attn_i` computation, an all-to-all is launched to send `K_i` and `V_i` to the neighbouring ranks. We then repeat this `context_parallel_size - 1` times, so that each rank sees all the shards of `K` and `V` from all other ranks exactly once. In an ideal scenario, we prefetch shards `K_i+1` and `V_i+1` from the neighbouring rank and this communication is exactly overlapped with the computation of our current `attn_i`. Realistically, this perfect overlap never happens either, and given the nature of this approach, the penalty for an imperfect overlap is much larger than with all-gather. A rough sketch of this schedule is shown below.
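To make the rotation schedule concrete, here is a toy, single-process illustration of which `K`/`V` shard each rank attends to at every step; this only models the schedule, not the actual communication:

```python
# ring-rotation schedule: at step s, rank r attends to the K/V shard originally owned by (r - s) % cp_size
cp_size = 4

for step in range(cp_size):
    held = {rank: (rank - step) % cp_size for rank in range(cp_size)}
    print(f"step {step}: rank -> K/V shard being attended to: {held}")

# after cp_size steps, every rank has attended to each of the cp_size shards exactly once
```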
|
||||
|
||||
## How to choose the right rotation method?
|
||||
In theory, all-to-all should be the better choice, though in practice it rarely is. Therefore, we default to all-gather, as it's more likely to achieve better performance. Extensive [benchmarks](https://discuss.pytorch.org/t/distributed-w-torchtitan-breaking-barriers-training-long-context-llms-with-1m-sequence-length-in-pytorch-using-context-parallel/215082) from the `torchtitan` team also show that all-to-all rarely outperforms all-gather. Still, we provide both options, as you might find one to be better for your use case.
|
||||
|
||||
You can directly see this issue in the profiler output in the image below:
|
||||
<p align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/cp_all_to_all.png" alt="all-to-all profiler output" />
|
||||
<br>
|
||||
<em>Figure 1: In red you can see the idle time, while we wait for the all-to-all kernel to finish. Highlighted in the first blue bar, you can see that it takes ~250us to finish, which is repeated N-1 times for each attention call, where N is the context parallel size.</em>
|
||||
</p>
|
||||
|
||||
|
||||
## Why only FSDP2?
|
||||
|
||||
We only support context parallelism with `FSDP2` for now, as we create a joint mesh of `context_parallel_size` and `dp_shard_size` to utilize its full potential. In the profiler output in the image below, you can see why this is the case.
|
||||
|
||||
<p align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/cp_why_fsdp2.png" alt="why FSDP2+CP" />
|
||||
<br>
|
||||
<em>Figure 2: In blue rectangles (Stream 23), you can see that the pre-fetch of `FSDP` shard is fully overlapped with the computation of attention (Stream 7), while in red rectangles (Stream 24), you can see that the all-gather kernel results in a bubble of idle time, in which our compute stream (7) is idle.</em>
|
||||
</p>
|
||||
|
||||
In the figure above, you can also note the difference between all-to-all and all-gather. While in all-to-all (Figure 1), we launch a communication kernel N-1 times for each attention call, in all-gather (Figure 2), we launch a communication kernel only once. This results in a bigger bubble, but it only happens once per attention call, while in all-to-all, it happens N-1 times.
|
105
docs/source/concept_guides/fsdp1_vs_fsdp2.md
Normal file
@ -0,0 +1,105 @@
|
||||
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# FSDP1 vs FSDP2
|
||||
|
||||
This guide explains the key differences between `FSDP1` and `FSDP2` and helps you migrate your existing code to use `FSDP2` with minimal changes.
|
||||
|
||||
## How is FSDP2 better than FSDP1?
|
||||
|
||||
First, let's look at how `FSDP1` and `FSDP2` work internally to understand the differences between them. This also helps us understand the limitations of `FSDP1` and how `FSDP2` solves them.
|
||||
|
||||
We'll be discussing a scenario where we have a single `Layer` that contains 3 `Linear` layers and is wrapped using `FSDP` to be sharded across 2 GPUs.
|
||||
|
||||
<div align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/layer.png" alt="Layer">
|
||||
</div>
|
||||
|
||||
### FSDP1
|
||||
First, we have to understand the original `FSDP1` and the limitations it brings. It represents each `FSDP` module as a single `FlatParameter`, a 1D tensor that contains all of the module's parameters, which then gets sharded across ranks. I.e., if you wrap the `Layer` with `FSDP1`, you'd end up with something like this:
|
||||
|
||||
<div align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/fsdp1.png" alt="FSDP1">
|
||||
</div>
|
||||
|
||||
You might notice a problem. The whole `Layer` gets flattened into a single `FlatParameter`, which then gets sharded across ranks. But if it's a single `FlatParameter` object, how do we store metadata? That is one of the limitations. Properly storing per-parameter metadata such as `dtype`, `requires_grad`, etc. is not possible without some ugly hacks.
|
||||
|
||||
### FSDP2
|
||||
This is why `FSDP2` was introduced. It doesn't use `FlatParameter`; instead it uses `DTensor`, short for "Distributed Tensor". Each `DTensor` basically represents a vanilla `torch.Tensor` that has been sharded across ranks. It contains metadata about the original `torch.Tensor`, how it's sharded, what the [placement type](https://pytorch.org/docs/stable/distributed.tensor.html#module-torch.distributed.tensor.placement_types) is, and so on. This is why this approach is called `per-parameter sharding`. The following figure shows the difference:
|
||||
|
||||
<div align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/fsdp2.png" alt="FSDP2">
|
||||
</div>
|
||||
|
||||
Each parameter of the original `Layer` is sharded across the 0th dimension and split between 2 GPUs. Now each `Linear` layer is a separate `DTensor`, and storing per-parameter metadata is possible and straightforward. A minimal sketch of this `DTensor` representation is shown after the tip below.
|
||||
|
||||
|
||||
> [!TIP]
|
||||
> In the image above, the tensors were sharded across the 1st dimension only for the sake of fitting the image on the screen; in reality, they are sharded across the 0th dimension, as stated above.
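To make the `DTensor` idea more tangible, below is a minimal sketch using PyTorch's public `torch.distributed.tensor` API (available in recent PyTorch releases). This only illustrates per-parameter sharding; it is not what `FSDP2` executes internally:

```python
# run with: torchrun --nproc-per-node 2 dtensor_sketch.py
import torch
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor import Shard, distribute_tensor

mesh = init_device_mesh("cpu", (2,))  # or "cuda" with 2 GPUs

torch.manual_seed(0)
weight = torch.randn(16, 8)  # a single Linear weight

# shard along dim 0: each rank keeps an [8, 8] local piece plus sharding metadata
dweight = distribute_tensor(weight, mesh, placements=[Shard(0)])

print(dweight.placements)        # (Shard(dim=0),)
print(dweight.to_local().shape)  # torch.Size([8, 8]) on each rank
```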
|
||||
|
||||
## What does FSDP2 offer?
|
||||
|
||||
`FSDP2` is a new and improved version of PyTorch's fully-sharded data parallel training API. Its main advantage is using `DTensor` to represent sharded parameters. Compared to `FSDP1`, it offers:
|
||||
- Simpler internal implementation, where each `Parameter` is a separate `DTensor`
|
||||
- Enables simple partial parameter freezing because of the above, which makes methods such as [`LoRA`](https://arxiv.org/abs/2106.09685) work out of the box
|
||||
- With `DTensor`, `FSDP2` supports mixing `fp8` and other parameter types in the same model out of the box
|
||||
- Faster and simpler checkpointing without extra communication across ranks using `SHARDED_STATE_DICT` and [`torch.distributed.checkpoint`](https://pytorch.org/docs/stable/distributed.checkpoint.html), this way, each rank only saves its own shard and corresponding metadata
|
||||
- For loading, it uses a `state_dict` of the sharded model to directly load the sharded parameters
|
||||
- Support for asynchronous checkpointing, where parameters are first copied to CPU memory; after this, the main thread continues training while another thread stores the parameters on disk
|
||||
- Memory efficiency and deterministic memory usage, `FSDP2` doesn't use `recordStream` anymore and uses stream-to-stream synchronization (for more technical details see [this forum post](https://dev-discuss.pytorch.org/t/fsdp-cudacachingallocator-an-outsider-newb-perspective/1486) and [this issue](https://github.com/pytorch/pytorch/issues/114299))
|
||||
- In the future, optimizations of the communication patterns via `torch.compile` are planned, further improving the performance and memory efficiency
|
||||
|
||||
|
||||
## API Differences
|
||||
|
||||
We have already discussed the internal differences; now let's discuss the differences that you, as a user, will need to know.
|
||||
|
||||
Here are the main changes in configuration options when using `FSDP2` through the `accelerate` CLI:
|
||||
|
||||
Previous (`FSDP1`) | New (`FSDP2`) | What Changed
|
||||
-- | -- | --
|
||||
`--fsdp_sharding_strategy` | `--fsdp_reshard_after_forward` | replaces `--fsdp_sharding_strategy`, changed to `true` (previously `FULL_SHARD`) or `false` (previously `SHARD_GRAD_OP`)
|
||||
`--fsdp_backward_prefetch` | \*\***REMOVED**\*\* | `FSDP2` uses previous `BACKWARD_PRE` option by default, as only this allows communication and computation overlap
|
||||
`--fsdp_forward_prefetch` | \*\***NOT YET IMPLEMENTED**\*\* | How to implement this is under active discussion, for now it is not supported in `FSDP2`
|
||||
`--fsdp_sync_module_states` | \*\***REMOVED**\*\* | with `FSDP2`, this parameter becomes redundant
|
||||
`--fsdp_cpu_ram_efficient_loading` | `--fsdp_cpu_ram_efficient_loading` | if `true`, `FSDP2` will similarly load the model only on rank 0, and then parameters get synced to other ranks, this is the same behavior as `FSDP1`, however, setting `--fsdp_sync_module_states` isn't required anymore
|
||||
`--fsdp_state_dict_type` | `--fsdp_state_dict_type` | `LOCAL_STATE_DICT` becomes obsolete and with `FSDP2` `SHARDED_STATE_DICT` is the default option, which results in no extra communication and each rank saving its own shard, other possible option is `FULL_STATE_DICT` which results in extra communication and spike in memory usage but saves the full model from rank 0.
|
||||
`--fsdp_use_orig_params` | \*\***REMOVED**\*\* | `FSDP2` uses the `DTensor` class in the background, which means it *always* uses the original parameters by default
|
||||
\*\***NEW**\*\* | `--fsdp_version` | `1` is the default option, to not break existing code, set to `2` to use `FSDP2`
|
||||
|
||||
For all other options that remain unchanged, see the [`FSDP` documentation](../usage_guides/fsdp.md).
|
||||
|
||||
## How to Switch to FSDP2
|
||||
|
||||
### If using Python code:
|
||||
Simply set `fsdp_version=2` when creating your plugin and replace options according to the table above.
|
||||
|
||||
```python
|
||||
from accelerate import FullyShardedDataParallelPlugin, Accelerator
|
||||
|
||||
fsdp_plugin = FullyShardedDataParallelPlugin(
|
||||
fsdp_version=2
|
||||
# other options...
|
||||
)
|
||||
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
|
||||
```
|
||||
|
||||
### If using YAML config:
|
||||
Use our conversion tool:
|
||||
```bash
|
||||
accelerate to-fsdp2 --config_file config.yaml --output_file new_config.yaml
|
||||
```
|
||||
|
||||
This will automatically convert all FSDP1 settings to their FSDP2 equivalents. Use `--overwrite` to update the existing file instead of creating a new one.
|
@ -109,7 +109,7 @@ While FSDP require an explicit `--fsdp_cpu_ram_efficient_loading true` to activa
|
||||
<Tip>
|
||||
|
||||
For FSDP, whenever setting `--fsdp_cpu_ram_efficient_loading true`, `accelerate` will automatically set `sync_module_states` to true.
|
||||
For RAM efficient loading the weights will be loaded only in a singe rank, and thus requires `sync_module_states` to broadcast weights to other ranks.
|
||||
For RAM efficient loading the weights will be loaded only in a single rank, and thus requires `sync_module_states` to broadcast weights to other ranks.
|
||||
|
||||
</Tip>
|
||||
|
||||
@ -125,7 +125,7 @@ FSDP requires an explicit `--fsdp_auto_wrap_policy` for the algorithm to decide
|
||||
|
||||
### Parameters Summoning
|
||||
|
||||
FSDP requires an explicit `--fsdp_use_orig_params` flag if using `torch.compile`, see [the pytorch documenation](https://pytorch.org/docs/stable/fsdp.html#module-torch.distributed.fsdp). For DeepSpeed this is transparent to the user.
|
||||
FSDP requires an explicit `--fsdp_use_orig_params` flag if using `torch.compile`, see [the pytorch documentation](https://pytorch.org/docs/stable/fsdp.html#module-torch.distributed.fsdp). For DeepSpeed this is transparent to the user.
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -147,7 +147,7 @@ Deepspeed requires explicit `--gradient_accumulation_steps` and `--gradient_clip
|
||||
|
||||
## On Differences in Data Precision Handling
|
||||
|
||||
To discuss the how data precision is handled in both FSDP and Deepspeed, it is instructive to first give an overview of how model parameters are handled in these frameworks. Before the model / optimizer parameters are distributed across GPUs, parameter preparation is involved to first "flatten" them to one-dimensional [`torch.Tensor`](https://pytorch.org/docs/stable/tensors.html#torch-tensor). The implementation of FSDP / DeepSpeed varies in the respect of the `dtype` in which these "flattened" parameters are stored, and there are ramifications with regards to how [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) allocate their `dtype`s. The table below outlines the processes for both frameworks; the "Local" column indicates the process occurring at a per-gpu level, therefore any memory overheads by upcasting should be understood to be amortized by the number of gpus used.
|
||||
To discuss how data precision is handled in both FSDP and Deepspeed, it is instructive to first give an overview of how model parameters are handled in these frameworks. Before the model / optimizer parameters are distributed across GPUs, parameter preparation is involved to first "flatten" them to one-dimensional [`torch.Tensor`](https://pytorch.org/docs/stable/tensors.html#torch-tensor). The implementation of FSDP / DeepSpeed varies in the respect of the `dtype` in which these "flattened" parameters are stored, and there are ramifications with regards to how [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) allocate their `dtype`s. The table below outlines the processes for both frameworks; the "Local" column indicates the process occurring at a per-gpu level, therefore any memory overheads by upcasting should be understood to be amortized by the number of gpus used.
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -166,7 +166,7 @@ Optimizer (Actual Step) | ✅ | FSDP<br>DeepSpeed | occurs in `torch_dtype` <br
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Therefore when using DeepSpeed a small number of GPUs, be aware of potentially significant memory overheads due to the upcasting during preperation.
|
||||
Therefore when using DeepSpeed a small number of GPUs, be aware of potentially significant memory overheads due to the upcasting during preparation.
|
||||
|
||||
</Tip>
|
||||
|
||||
|
@ -71,4 +71,4 @@ setting the same seed in the main random number generator in all processes.
|
||||
|
||||
If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, and you have passed `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`], these classes will directly inherit from `StatefulDataLoader` instead, and maintain a `state_dict`.
|
||||
|
||||
For more details about the internals, see the [Internals page](package_reference/torch_wrappers).
|
||||
For more details about the internals, see the [Internals page](../package_reference/torch_wrappers).
|
||||
|
@ -63,6 +63,10 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
[[autodoc]] hooks.SequentialHook
|
||||
|
||||
### LayerwiseCastingHook
|
||||
|
||||
[[autodoc]] hooks.LayerwiseCastingHook
|
||||
|
||||
## Adding Hooks
|
||||
|
||||
### add_hook_to_module
|
||||
@ -81,6 +85,10 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
[[autodoc]] hooks.attach_align_device_hook_on_blocks
|
||||
|
||||
### attach_layerwise_casting_hooks
|
||||
|
||||
[[autodoc]] big_modeling.attach_layerwise_casting_hooks
|
||||
|
||||
## Removing Hooks
|
||||
|
||||
### remove_hook_from_module
|
||||
@ -99,4 +107,4 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
### align_module_device
|
||||
|
||||
[[autodoc]] utils.align_module_device
|
||||
[[autodoc]] utils.align_module_device
|
||||
|
@ -158,13 +158,13 @@ The following arguments are useful for selecting which training paradigm to use.
|
||||
* `--use_deepspeed` (`bool`) -- Whether or not to use DeepSpeed for training.
|
||||
* `--use_fsdp` (`bool`) -- Whether or not to use FullyShardedDataParallel for training.
|
||||
* `--use_megatron_lm` (`bool`) -- Whether or not to use Megatron-LM for training.
|
||||
* `--use_xpu` (`bool`) -- Whether to use IPEX plugin to speed up training on XPU specifically.
|
||||
* `--use_xpu` (`bool`) -- Whether to use IPEX plugin to speed up training on XPU specifically. **This argument is deprecated and ignored, will be removed in Accelerate v1.20**
|
||||
|
||||
**Distributed GPU Arguments**:
|
||||
|
||||
The following arguments are only useful when `multi_gpu` is passed or multi-gpu training is configured through `accelerate config`:
|
||||
|
||||
* `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-seperated list
|
||||
* `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-separated list
|
||||
* `--same_network` (`bool`) -- Whether all machines used for multinode training exist on the same local network.
|
||||
* `--machine_rank` (`int`) -- The rank of the machine on which this script is launched.
|
||||
* `--main_process_ip` (`str`) -- The IP address of the machine of rank 0.
|
||||
|
@ -30,3 +30,17 @@ rendered properly in your Markdown viewer.
|
||||
## FullyShardedDataParallelPlugin
|
||||
|
||||
[[autodoc]] utils.FullyShardedDataParallelPlugin
|
||||
|
||||
## fsdp2_load_full_state_dict
|
||||
|
||||
[[autodoc]] utils.fsdp2_load_full_state_dict
|
||||
|
||||
## fsdp2_switch_optimizer_parameters
|
||||
|
||||
[[autodoc]] utils.fsdp2_switch_optimizer_parameters
|
||||
|
||||
## fsdp2_prepare_model
|
||||
|
||||
[[autodoc]] utils.fsdp2_prepare_model
|
||||
|
||||
## fsdp2_prepare_auto_wrap_policy
|
||||
|
@ -208,6 +208,7 @@ These utilities relate to interacting with PyTorch models
|
||||
|
||||
[[autodoc]] utils.set_module_tensor_to_device
|
||||
|
||||
[[autodoc]] utils.get_module_children_bottom_up
|
||||
|
||||
## Parallel
|
||||
|
||||
|
76
docs/source/usage_guides/compilation.md
Normal file
@ -0,0 +1,76 @@
|
||||
# Compilation
|
||||
|
||||
## Overview
|
||||
|
||||
PyTorch 2.0 introduced `torch.compile`, a powerful feature that makes PyTorch code run faster by JIT-compiling it into optimized kernels. Key features of `torch.compile` include:
|
||||
|
||||
- **Performance Improvement**: Significantly speeds up model execution by optimizing the computation graph.
|
||||
- **Ease of Use**: Requires minimal code changes to implement, making it highly accessible.
|
||||
- **Compatibility**: Works seamlessly with existing PyTorch code and models.
|
||||
|
||||
When used with Accelerate, `torch.compile` integrates smoothly into distributed training workflows, allowing you to benefit from both distributed execution and compilation optimizations simultaneously.
|
||||
|
||||
The first execution of compiled code typically takes longer as it includes the compilation time, but subsequent runs are significantly faster. For optimal performance in different scenarios, `torch.compile` offers various modes like `"default"`, `"reduce-overhead"` (which uses CUDA graphs to further reduce overhead), and `"max-autotune"` (which performs extensive autotuning to find the best kernels for your model).
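For reference, this is what plain `torch.compile` usage looks like; the same `backend` and `mode` options map directly onto the plugin configuration shown in the next section:

```python
import torch

model = torch.nn.Sequential(
    torch.nn.Linear(128, 256),
    torch.nn.ReLU(),
    torch.nn.Linear(256, 10),
)

# "reduce-overhead" additionally uses CUDA graphs; "max-autotune" searches for the best kernels
compiled_model = torch.compile(model, backend="inductor", mode="reduce-overhead")

output = compiled_model(torch.randn(8, 128))  # the first call compiles, subsequent calls are fast
```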
|
||||
|
||||
## Using `torch.compile` with Accelerate
|
||||
|
||||
Accelerate provides `TorchDynamoPlugin` for easy and seamless integration of `torch.compile` into your training scripts.
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import TorchDynamoPlugin
|
||||
|
||||
# Configure the compilation backend
|
||||
dynamo_plugin = TorchDynamoPlugin(
|
||||
backend="inductor", # Options: "inductor", "aot_eager", "aot_nvfuser", etc.
|
||||
mode="default", # Options: "default", "reduce-overhead", "max-autotune"
|
||||
fullgraph=True,
|
||||
dynamic=False
|
||||
)
|
||||
|
||||
# Initialize accelerator with the plugin
|
||||
accelerator = Accelerator(dynamo_plugin=dynamo_plugin)
|
||||
# This will apply torch.compile to your model
|
||||
model = accelerator.prepare(model)
|
||||
```
|
||||
|
||||
It is compatible with all other features and plugins of Accelerate, including mixed precision, distributed training (DDP, FSDP, Deepspeed), etc.
|
||||
|
||||
## Regional Compilation
|
||||
|
||||
Instead of trying to compile the whole model, which usually has a big problem space for optimization, regional compilation targets repeated blocks of the same class and compiles them sequentially to hit the compiler's cache. For example, in `GPT2LMHeadModel`, the repeated block/class is `GPT2Block`, which can be accessed as `model.transformer.h[0]`. The rest of the model (e.g. `model.lm_head`) is compiled separately.
|
||||
|
||||
This allows us to reduce the compilation overhead / cold start of models like LLMs and Transformers in general.
|
||||
See <https://pytorch.org/tutorials/recipes/regional_compilation.html> for more details.
|
||||
|
||||
### How to Use Regional Compilation
|
||||
|
||||
It can be enabled by setting `use_regional_compilation=True` in the `TorchDynamoPlugin` configuration:
|
||||
|
||||
```python
|
||||
# Configure the compilation backend
|
||||
dynamo_plugin = TorchDynamoPlugin(
|
||||
use_regional_compilation=True,
|
||||
... # other parameters
|
||||
)
|
||||
# Initialize accelerator with the plugin
|
||||
accelerator = Accelerator(dynamo_plugin=dynamo_plugin)
|
||||
# This will apply compile_regions to your model
|
||||
model = accelerator.prepare(model)
|
||||
```
|
||||
|
||||
You can also use the `accelerate.utils.compile_regions` utility directly, in the same way you would use `torch.compile`.
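For instance, a minimal sketch of the direct usage (here on a `transformers` model, but any `torch.nn.Module` with repeated blocks works the same way):

```python
from transformers import AutoModelForCausalLM

from accelerate.utils import compile_regions

model = AutoModelForCausalLM.from_pretrained("gpt2")

# each repeated GPT2Block is compiled once (hitting the compiler cache afterwards),
# and the remaining modules such as lm_head are compiled separately
model = compile_regions(model)
```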
|
||||
|
||||
### Benefits of Regional Compilation
|
||||
|
||||
We have conducted extensive benchmarks comparing full compilation and regional compilation using the `torch.compile` feature in PyTorch. The full results are available in the [accelerate repository](https://github.com/huggingface/accelerate/tree/main/benchmarks/torch.compile/regional_compilation). The key findings from our benchmarks are:
|
||||
|
||||
1. **Comparable Performance**: Regional compilation delivers performance speedups similar to full compilation, especially for larger models.
|
||||
2. **Faster Compilation**: Regional compilation significantly reduces the time taken to compile models, making it a more efficient choice for deployment.
|
||||
3. **Batch Size Impact**: The performance difference between compilation strategies diminishes with larger batch sizes, indicating that the overhead of compilation is less impactful in those scenarios.
|
||||
4. **Model Size Consideration**: The benefits of regional compilation are more pronounced in larger models, where the compilation time savings can be substantial.
|
||||
5. **Practical Application**: For real-world applications, regional compilation is a practical choice for optimizing training cold start times, especially when working with large models.
|
||||
|
||||
## Conclusion
|
||||
|
||||
Both full and regional compilation can significantly speed up your models. Regional compilation offers a practical balance between compilation time and runtime performance, especially for training large models with substantial batch sizes.
|
@ -34,6 +34,10 @@ In this tutorial, you will see how to quickly set up DDP communication hooks and
|
||||
import torch
|
||||
from torch.nn.parallel import DistributedDataParallel as DDP
|
||||
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks
|
||||
from accelerate.test_utils.testing import get_backend
|
||||
|
||||
device_type, _, _ = get_backend()
|
||||
device_id = getattr(torch, device_type, torch.cuda).current_device()
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
@ -44,7 +48,7 @@ class MyModel(torch.nn.Module):
|
||||
return self.layer(x)
|
||||
|
||||
model = MyModel()
|
||||
model = DDP(model, device_ids=[torch.cuda.current_device()])
|
||||
model = DDP(model, device_ids=[device_id])
|
||||
model.register_comm_hook(state=None, hook=default_hooks.fp16_compress_hook)
|
||||
|
||||
# Training loop
|
||||
@ -108,6 +112,10 @@ BF16 Compression Hook API is experimental, and it requires NCCL version later th
|
||||
import torch
|
||||
from torch.nn.parallel import DistributedDataParallel as DDP
|
||||
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks
|
||||
from accelerate.test_utils.testing import get_backend
|
||||
|
||||
device_type, _, _ = get_backend()
|
||||
device_id = getattr(torch, device_type, torch.cuda).current_device()
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
@ -118,7 +126,7 @@ class MyModel(torch.nn.Module):
|
||||
return self.layer(x)
|
||||
|
||||
model = MyModel()
|
||||
model = DDP(model, device_ids=[torch.cuda.current_device()])
|
||||
model = DDP(model, device_ids=[device_id])
|
||||
model.register_comm_hook(state=None, hook=default_hooks.bf16_compress_hook)
|
||||
|
||||
# Training loop
|
||||
@ -182,6 +190,10 @@ PowerSGD typically requires extra memory of the same size as the model’s gradi
|
||||
import torch
|
||||
from torch.nn.parallel import DistributedDataParallel as DDP
|
||||
from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook
|
||||
from accelerate.test_utils.testing import get_backend
|
||||
|
||||
device_type, _, _ = get_backend()
|
||||
device_id = getattr(torch, device_type, torch.cuda).current_device()
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
@ -192,7 +204,7 @@ class MyModel(torch.nn.Module):
|
||||
return self.layer(x)
|
||||
|
||||
model = MyModel()
|
||||
model = DDP(model, device_ids=[torch.cuda.current_device()])
|
||||
model = DDP(model, device_ids=[device_id])
|
||||
state = powerSGD_hook.PowerSGDState(process_group=None)
|
||||
model.register_comm_hook(state=state, hook=powerSGD_hook.powerSGD_hook)
|
||||
|
||||
|
@ -167,7 +167,7 @@ Currently, `Accelerate` supports following config through the CLI:
|
||||
`deepspeed_hostfile`: DeepSpeed hostfile for configuring multi-node compute resources.
|
||||
`deepspeed_exclusion_filter`: DeepSpeed exclusion filter string when using mutli-node setup.
|
||||
`deepspeed_inclusion_filter`: DeepSpeed inclusion filter string when using mutli-node setup.
|
||||
`deepspeed_multinode_launcher`: DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.
|
||||
`deepspeed_multinode_launcher`: DeepSpeed multi-node launcher to use, e.g. `pdsh`, `standard`, `openmpi`, `mvapich`, `mpich`, `slurm`, `nossh` (requires DeepSpeed >= 0.14.5). If unspecified, will default to `pdsh`.
|
||||
`deepspeed_config_file`: path to the DeepSpeed config file in `json` format. See the next section for more details on this.
|
||||
```
|
||||
To be able to tweak more options, you will need to use a DeepSpeed config file.
|
||||
@ -194,7 +194,7 @@ For instance, here is how you would run the NLP example `examples/by_feature/dee
|
||||
```bash
|
||||
compute_environment: LOCAL_MACHINE
|
||||
deepspeed_config:
|
||||
deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage2_config.json
|
||||
deepspeed_config_file: /home/ubuntu/accelerate/examples/deepspeed_config_templates/zero_stage2_config.json
|
||||
zero3_init_flag: true
|
||||
distributed_type: DEEPSPEED
|
||||
fsdp_config: {}
|
||||
@ -275,7 +275,7 @@ accelerate launch examples/by_feature/deepspeed_with_config_support.py \
|
||||
```bash
|
||||
compute_environment: LOCAL_MACHINE
|
||||
deepspeed_config:
|
||||
deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage3_offload_config.json
|
||||
deepspeed_config_file: /home/ubuntu/accelerate/examples/deepspeed_config_templates/zero_stage3_offload_config.json
|
||||
zero3_init_flag: true
|
||||
distributed_type: DEEPSPEED
|
||||
fsdp_config: {}
|
||||
@ -710,6 +710,13 @@ model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
|
||||
2. Current integration doesn’t support `mpu`, limiting the tensor parallelism which is supported in Megatron-LM.
|
||||
3. Current integration doesn’t support multiple models.
|
||||
|
||||
## Multi-node DeepSpeed
|
||||
DeepSpeed supports multi-node inference and training over a variety of different launchers. You can specify a different launcher by setting the `deepspeed_multinode_launcher` config in the CLI or in the DeepSpeed config file.
|
||||
|
||||
Currently, accelerate supports passing configuration for the following DeepSpeed multi-node launchers: `pdsh` (default), `standard`, `openmpi`, `mvapich`, `mpich`, `slurm`, `nossh` (requires DeepSpeed >= 0.14.5).
|
||||
|
||||
Please read the [DeepSpeed documentation](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node) for more information on the different launchers. By default, DeepSpeed will attempt to use passwordless SSH from the main machine node to the other nodes to perform the launcher command. In this configuration, the accelerate launch command only needs to be run on the main node. If using the `nossh` launcher, you will need to run the accelerate launch command on every node using copied configuration.
|
||||
|
||||
## DeepSpeed Resources
|
||||
|
||||
The documentation for the internals related to deepspeed can be found [here](../package_reference/deepspeed).
|
||||
|
38
docs/source/usage_guides/gaudi.md
Normal file
@ -0,0 +1,38 @@
|
||||
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Intel Gaudi
|
||||
|
||||
Users can take advantage of Intel Gaudi AI accelerators for significantly faster and more cost-effective model training and inference.
|
||||
The Intel Gaudi AI accelerator family currently includes three product generations: [Intel Gaudi 1](https://habana.ai/products/gaudi/), [Intel Gaudi 2](https://habana.ai/products/gaudi2/), and [Intel Gaudi 3](https://habana.ai/products/gaudi3/). Each server is equipped with 8 devices, known as Habana Processing Units (HPUs), providing 128GB of memory on Gaudi 3, 96GB on Gaudi 2, and 32GB on the first-gen Gaudi. For more details on the underlying hardware architecture, check out the [Gaudi Architecture Overview](https://docs.habana.ai/en/latest/Gaudi_Overview/Gaudi_Architecture.html).
|
||||
|
||||
## How it works out of the box
|
||||
|
||||
Gaudi support is enabled by default if an Intel Gaudi device is detected.
|
||||
To disable it, pass the `--cpu` flag to the `accelerate launch` command or answer the corresponding question in the `accelerate config` questionnaire.
|
||||
|
||||
You can directly run the following script to test it out on Intel Gaudi:
|
||||
|
||||
```bash
|
||||
accelerate launch /examples/cv_example.py --data_dir images
|
||||
```
|
||||
|
||||
## Limitations
|
||||
|
||||
The following features are not part of the Accelerate library and require [Optimum for Intel Gaudi](https://huggingface.co/docs/optimum/main/en/habana/index):
|
||||
|
||||
- `fast_ddp` which implements DDP by applying an all-reduce on gradients instead of the Torch DDP wrapper.
|
||||
- `minimize_memory` which is used for fp8 training and enables keeping fp8 weights in memory between the forward and backward passes, leading to a smaller memory footprint at the cost of additional fp8 casts.
|
||||
- `context_parallel_size` which is used for Context/Sequence Parallelism (CP/SP) and partitions the network inputs and activations along sequence dimension to reduce memory footprint and increase throughput.
|
@ -94,6 +94,9 @@ use_cpu: true
|
||||
accelerate launch examples/nlp_example.py
|
||||
```
|
||||
|
||||
> [!CAUTION]
|
||||
> `accelerator.prepare` can currently only handle simultaneously preparing multiple models (and no optimizer) OR a single model-optimizer pair for training. Other attempts (e.g., two model-optimizer pairs) will raise a verbose error. To work around this limitation, consider separately using `accelerator.prepare` for each model-optimizer pair.
|
||||
|
||||
**Scenario 2**: Acceleration of distributed CPU training
|
||||
we use Intel oneCCL for communication, combined with Intel® MPI library to deliver flexible, efficient, scalable cluster messaging on Intel® architecture. you could refer the [here](https://huggingface.co/docs/transformers/perf_train_cpu_many) for the installation guide
|
||||
|
||||
|
@ -39,7 +39,7 @@ from accelerate import Accelerator
|
||||
accelerator = Accelerator(mixed_precision="fp8")
|
||||
```
|
||||
|
||||
By default, if `MS-AMP` is available in your environment, Accelerate will automatically utilize it as a backend. To specify it yourself (and customize other parts of the FP8 mixed precision setup), you can utilize one of the `RecipeKwargs` dataclasses such as [`utils.AORecipeKwargs`], [`utils.TERecipeKwargs`], or [`utils.MSAMPRecipeKwargs`]; you can also nclarify it in your config `yaml`/during `accelerate launch`:
|
||||
By default, if `MS-AMP` is available in your environment, Accelerate will automatically utilize it as a backend. To specify it yourself (and customize other parts of the FP8 mixed precision setup), you can utilize one of the `RecipeKwargs` dataclasses such as [`utils.AORecipeKwargs`], [`utils.TERecipeKwargs`], or [`utils.MSAMPRecipeKwargs`]; you can also clarify it in your config `yaml`/during `accelerate launch`:
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
|
@ -19,7 +19,7 @@ rendered properly in your Markdown viewer.
|
||||
[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) enables training large transformer language models at scale.
|
||||
It provides efficient tensor, pipeline and sequence based model parallelism for pre-training transformer based
|
||||
Language Models such as [GPT](https://arxiv.org/abs/2005.14165) (Decoder Only), [BERT](https://arxiv.org/pdf/1810.04805.pdf) (Encoder Only) and [T5](https://arxiv.org/abs/1910.10683) (Encoder-Decoder).
|
||||
For detailed information and how things work behind the scene please refer the github [repo](https://github.com/NVIDIA/Megatron-LM).
|
||||
For detailed information and how things work behind the scene please refer to the github [repo](https://github.com/NVIDIA/Megatron-LM).
|
||||
|
||||
## What is integrated?
|
||||
|
||||
@ -30,7 +30,7 @@ a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional
|
||||
Each tensor is split into multiple chunks with each shard residing on separate GPU. At each step, the same mini-batch of data is processed
|
||||
independently and in parallel by each shard followed by syncing across all GPUs (`all-reduce` operation).
|
||||
In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path.
|
||||
For more details, please refer research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using
|
||||
For more details, please refer to the research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using
|
||||
Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and
|
||||
this section of blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism).
|
||||
|
||||
@ -45,7 +45,7 @@ this section of blogpost [The Technology Behind BLOOM Training](https://huggingf
|
||||
|
||||
c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.
|
||||
It reduces activation memory required as it prevents the same copies to be on the tensor parallel ranks
|
||||
post `all-reduce` by replacing then with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`.
|
||||
post `all-reduce` by replacing them with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`.
|
||||
As `all-reduce = reduce-scatter + all-gather`, this saves a ton of activation memory at no added communication cost.
|
||||
To put it simply, it shards the outputs of each transformer layer along sequence dimension, e.g.,
|
||||
if the sequence length is `1024` and the TP size is `4`, each GPU will have `256` tokens (1024/4) for each sample.
|
||||
@ -56,7 +56,7 @@ d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces the memory footp
|
||||
(versus the traditional method of replicating the optimizer state across data parallel ranks).
|
||||
For example, when using Adam optimizer with mixed-precision training, each parameter accounts for 12 bytes of memory.
|
||||
This gets distributed equally across the GPUs, i.e., each parameter would account for 3 bytes (12/4) if we have 4 GPUs.
|
||||
For more details, please refer the research paper [ZeRO: Memory Optimizations Toward Training Trillion
|
||||
For more details, please refer to the research paper [ZeRO: Memory Optimizations Toward Training Trillion
|
||||
Parameter Models](https://arxiv.org/pdf/1910.02054.pdf) and following section of blog
|
||||
[The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#zero-data-parallelism).
|
||||
|
||||
@ -66,7 +66,7 @@ For example, for GPT-3, this leads to 70% reduction in required memory for activ
|
||||
only 2.7% FLOPs overhead for recomputation of activations. For more details, please refer to the research paper
|
||||
[Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf).
|
||||
|
||||
f. **Fused Kernels**: Fused Softmax, Mixed Precision Fused Layer Norm and Fused gradient accumulation to weight gradient computation of linear layer.
|
||||
PyTorch JIT compiled Fused GeLU and Fused Bias+Dropout+Residual addition.
|
||||
|
||||
g. **Support for Indexed datasets**: Efficient binary format of datasets for large scale training. Support for the `mmap`, `cached` index file and the `lazy` loader format.
|
||||
@ -445,7 +445,7 @@ python checkpoint_utils/megatgron_gpt2/checkpoint_reshaping_and_interoperability
|
||||
## Megatron-LM GPT models support returning logits and `megatron_generate` function for text generation
|
||||
|
||||
1. Returning logits require setting `require_logits=True` in MegatronLMPlugin as shown below.
|
||||
These would be available on the in the last stage of pipeline.
|
||||
These would be available in the last stage of pipeline.
|
||||
```python
|
||||
megatron_lm_plugin = MegatronLMPlugin(return_logits=True)
|
||||
```
|
||||
@ -569,7 +569,7 @@ setting is synonymous with gradient accumulation.
|
||||
|
||||
7. When using Megatron-LM, use `accelerator.save_state` and `accelerator.load_state` for saving and loading checkpoints.
|
||||
|
||||
8. Below are the mapping from Megatron-LM model architectures to the the equivalent transformers model architectures.
|
||||
8. Below are the mapping from Megatron-LM model architectures to the equivalent transformers model architectures.
|
||||
Only these transformers model architectures are supported.
|
||||
|
||||
a. Megatron-LM [BertModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/bert_model.py) :
|
||||
|
@ -225,7 +225,7 @@ In [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we must specify the
|
||||
|
||||
In [/slurm/submit_multicpu.sh](./slurm/submit_multicpu.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many CPU processes we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip` which will be the address the master node and the `--main_process_port`. `mpirun_hostfile` specifies to run the job using MPIRun.
|
||||
|
||||
In both scripts, we run `activateEnviroment.sh` at the beginning. This script should contain the necessary instructions to initialize the environment for execution. Below, we show an example that loads the necessary libraries ([Environment modules](https://github.com/cea-hpc/modules)), activates the Python environment, and sets up various environment variables, most of them to run the scripts in offline mode in case we don't have internet connection from the cluster.
|
||||
In both scripts, we run `activateEnvironment.sh` at the beginning. This script should contain the necessary instructions to initialize the environment for execution. Below, we show an example that loads the necessary libraries ([Environment modules](https://github.com/cea-hpc/modules)), activates the Python environment, and sets up various environment variables, most of them to run the scripts in offline mode in case we don't have internet connection from the cluster.
|
||||
|
||||
```bash
|
||||
# activateEnvironment.sh
|
||||
|
@ -12,7 +12,6 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
from typing import List
|
||||
|
||||
import evaluate
|
||||
import numpy as np
|
||||
@ -61,7 +60,7 @@ EVAL_BATCH_SIZE = 32
|
||||
|
||||
|
||||
def get_fold_dataloaders(
|
||||
accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
|
||||
accelerator: Accelerator, dataset: DatasetDict, train_idxs: list[int], valid_idxs: list[int], batch_size: int = 16
|
||||
):
|
||||
"""
|
||||
Gets a set of train, valid, and test dataloaders for a particular fold
|
||||
|
@ -611,7 +611,7 @@ def main():
|
||||
|
||||
if isinstance(checkpointing_steps, int):
|
||||
if completed_steps % checkpointing_steps == 0:
|
||||
output_dir = f"step_{completed_steps }"
|
||||
output_dir = f"step_{completed_steps}"
|
||||
if args.output_dir is not None:
|
||||
output_dir = os.path.join(args.output_dir, output_dir)
|
||||
accelerator.save_state(output_dir)
|
||||
|
58
examples/fsdp2/README.md
Normal file
@ -0,0 +1,58 @@
|
||||
# FSDP2 Examples
|
||||
|
||||
This folder contains examples of using FSDP2 with Accelerate, utilizing extra methods to improve training speed, performance or accuracy.
|
||||
|
||||
## FSDP2 + ao Float8Linear (`fsdp2_fp8.py`)
|
||||
|
||||
In the file `fsdp2_fp8.py`, we use `Float8Linear` from `ao` to train a model partially in FP8 precision. We utilize `AORecipeKwargs` to pass the `Float8LinearConfig` to the accelerator,
which replaces the default `torch.nn.Linear` with `Float8Linear`. We also utilize `TorchDynamoPlugin` together with regional compilation to compile the model,
gaining even more speed and memory savings; `ao` doesn't ship with any kernels by default, so the performance has to come from compiling the model.
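A minimal sketch of how these pieces fit together is shown below; the exact keyword names (e.g. the `config` argument of `AORecipeKwargs`) are based on recent Accelerate versions and may differ slightly:

```python
import torch
from torchao.float8 import Float8LinearConfig

from accelerate import Accelerator
from accelerate.utils import AORecipeKwargs, TorchDynamoPlugin

accelerator = Accelerator(
    mixed_precision="fp8",
    kwargs_handlers=[AORecipeKwargs(config=Float8LinearConfig())],
    dynamo_plugin=TorchDynamoPlugin(backend="inductor", use_regional_compilation=True),
)

model = torch.nn.Sequential(torch.nn.Linear(4096, 4096), torch.nn.ReLU(), torch.nn.Linear(4096, 4096))
model = accelerator.prepare(model)  # eligible torch.nn.Linear layers are swapped for Float8Linear
```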
|
||||
|
||||
Replacing linear layers with `Float8Linear` can greatly improve performance, if used correctly and on hardware that supports FP8 tensor cores. This highly depends on the model dimensions and sequence length used for training.
|
||||
You can view the performance of `Float8Linear` as a function of matrix dimensions in [this document](https://github.com/pytorch/ao/blob/main/torchao/float8/README.md#performance).
|
||||
|
||||
In our example, we use an 8B Llama3.1 model, which has a hidden dimension of 4096, and we train on a sequence length of 8192. In the images below, we can see that this improves performance by ~25% compared to `bf16`, reaching ~10000 tokens per second per device on 8x H100 GPUs, compared to ~8000 tokens per second using `bf16`, while the loss curve stays roughly the same. We can also see that the FLOPS increase when using FP8.
|
||||
|
||||
<div style="display: flex; gap: 25px;">
|
||||
<div style="text-align: center; width: 49%;">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/fp8_tps.png" alt="tps" style="width: 100%;">
|
||||
<p style="text-align: center; margin-top: 8px;">TPs per device, bf16 vs fp8</p>
|
||||
</div>
|
||||
<div style="text-align: center; width: 49%;">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/fp8_tflops.png" alt="tflops" style="width: 100%;">
|
||||
<p style="text-align: center; margin-top: 8px;">TFLOPS per device, bf16 vs fp8. We cannot really compare MFU as fp8 tensor cores are used as well.</p>
|
||||
</div>
|
||||
|
||||
<div style="text-align: center; width: 49%;">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/fp8_loss.png" alt="loss" style="width: 100%; max-width: 900px;">
|
||||
<p style="text-align: center; margin-top: 8px;">Loss curve, bf16 vs fp8, it's hard to see the difference as the curves mostly overlap</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
The figures above were generated on 8x H100 SXM GPUs, with 8192 sequence length and 1000 steps. To run the example, you can use the following command, where you can specify the precision to train in:
|
||||
|
||||
```bash
|
||||
accelerate launch fsdp2_fp8.py --sequence_length 8192 --num_steps 1000 --log_with wandb --precision [fp8 | bf16]
|
||||
```
|
||||
|
||||
## FSDP2 + context parallelism (`fsdp2_context_parallel.py`)
|
||||
|
||||
In this file, we showcase the integration of context parallelism with FSDP2. Context parallelism is a technique that allows us to scale training to sequence lengths of up to a million tokens. With the `accelerator.context_parallel` context manager, we replace the attention implementation with a context parallel version, which enables us to train on a sequence length of up to 128k tokens on 8x H100 GPUs, with the possibility of further scaling if we have enough GPUs.
|
||||
|
||||
For a detailed explanation and more details, please refer to [this guide](https://huggingface.co/docs/accelerate/concept_guides/context_parallel). You can run the example with the following command:
|
||||
|
||||
```bash
|
||||
accelerate launch fsdp2_context_parallel.py --sequence-length 128000 --num-steps 1000 --log-with wandb --cp-size 8 --cp-comm-strategy allgather
|
||||
```
|
||||
|
||||
More details about the context parallelism can be found in the [concept guide](https://huggingface.co/docs/accelerate/concept_guides/context_parallel). You can see some results below:
|
||||
|
||||
<p align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/cp_perf.png" alt="context parallelism memory usage" />
|
||||
<br>
|
||||
<em>Figure 1: Memory usage and speed of context parallelism for up to 256k context size.</em>
|
||||
</p>
|
||||
|
||||
|
||||
|
||||
|
179
examples/fsdp2/fsdp2_context_parallel.py
Normal file
@ -0,0 +1,179 @@
|
||||
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Example of training with Context Parallel using FSDP2 via Accelerate.
|
||||
This example demonstrates how to use Accelerate's context_parallel feature for efficient long sequence training.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
|
||||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForCausalLM
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import FullyShardedDataParallelPlugin, set_seed
|
||||
from utils import PerformanceTracker, create_collate_fn, get_dataset, setup_tokenizer
|
||||
|
||||
|
||||
MODEL_ID = "NousResearch/Hermes-3-Llama-3.1-8B"
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--sequence-length", type=int, default=128_000, help="Sequence length for the dataset")
|
||||
parser.add_argument("--num-steps", type=int, default=100, help="Number of training steps")
|
||||
parser.add_argument("--log-with", type=str, default="wandb", help="Logging service to use")
|
||||
parser.add_argument("--cp-size", type=int, default=8, help="Context parallel size")
|
||||
parser.add_argument("--cp-comm-strategy", type=str, default="allgather", help="Context parallel shard rotation")
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def training_step(batch, model, optimizer, accelerator: Accelerator):
|
||||
"""
|
||||
Perform a single training step with context parallel.
|
||||
|
||||
Args:
|
||||
batch: Input batch containing input_ids and labels
|
||||
model: The model to train
|
||||
optimizer: Optimizer
|
||||
accelerator: Accelerator instance
|
||||
|
||||
Returns:
|
||||
loss: Training loss
|
||||
"""
|
||||
|
||||
# Use context parallel for efficient long sequence processing
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.AVG)
|
||||
|
||||
return loss
|
||||
|
||||
|
||||
def main():
|
||||
set_seed(42)
|
||||
args = parse_args()
|
||||
|
||||
# Configure FSDP2 plugin
|
||||
fsdp_plugin = FullyShardedDataParallelPlugin(
|
||||
auto_wrap_policy="transformer_based_wrap",
|
||||
transformer_cls_names_to_wrap=["LlamaDecoderLayer"],
|
||||
cpu_ram_efficient_loading=True,
|
||||
activation_checkpointing=True,
|
||||
fsdp_version=2,
|
||||
cp_size=args.cp_size,
|
||||
cp_comm_strategy=args.cp_comm_strategy,
|
||||
)
|
||||
|
||||
# Initialize accelerator
|
||||
accelerator = Accelerator(
|
||||
log_with=args.log_with,
|
||||
fsdp_plugin=fsdp_plugin,
|
||||
mixed_precision="bf16",
|
||||
)
|
||||
|
||||
accelerator.init_trackers(
|
||||
project_name="FSDP2_context_parallel",
|
||||
config={
|
||||
"sequence_length": args.sequence_length,
|
||||
"num_steps": args.num_steps,
|
||||
"cp_size": args.cp_size,
|
||||
"cp_comm_strategy": args.cp_comm_strategy,
|
||||
},
|
||||
)
|
||||
|
||||
# Prepare model and optimizer
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
MODEL_ID,
|
||||
torch_dtype=torch.bfloat16,
|
||||
use_cache=False,
|
||||
)
|
||||
|
||||
tokenizer = setup_tokenizer(MODEL_ID)
|
||||
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)
|
||||
|
||||
model, optimizer = accelerator.prepare(model, optimizer)
|
||||
|
||||
accelerator.print("Preparing dataset... this might take a while")
|
||||
dataset = get_dataset(
|
||||
accelerator,
|
||||
tokenizer,
|
||||
args.sequence_length,
|
||||
processing_batch_size=args.sequence_length
|
||||
// 20, # we need to override the default processing batch size to avoid empty packed sequences
|
||||
)
|
||||
dataloader = DataLoader(dataset, batch_size=1, collate_fn=create_collate_fn())
|
||||
dataloader = accelerator.prepare(dataloader)
|
||||
|
||||
model.train()
|
||||
|
||||
total_num_steps = min(args.num_steps, len(dataloader))
|
||||
performance_tracker = PerformanceTracker(warmup_steps=10)
|
||||
|
||||
accelerator.print(f"Starting training with context parallel for {total_num_steps} steps...")
|
||||
accelerator.print(f"Sequence length: {args.sequence_length}")
|
||||
accelerator.print("Warming up for 10 steps...")
|
||||
|
||||
accelerator.print(
|
||||
"Each step takes ~10 seconds with default settings on 8x H100 SXM GPUs, seeing logs takes a while"
|
||||
)
|
||||
for step, batch in enumerate(dataloader):
|
||||
print(f"Step {step}")
|
||||
if step >= total_num_steps:
|
||||
break
|
||||
|
||||
# get number of tokens before context_parallel shards the batch
|
||||
batch_tokens = batch["input_ids"].shape[0] * batch["input_ids"].shape[1]
|
||||
|
||||
loss = training_step(batch, model, optimizer, accelerator)
|
||||
|
||||
# each process receives the same data, so we divide by the number of processes to get the number of tokens per process
|
||||
metrics = performance_tracker.step(batch_tokens // accelerator.num_processes)
|
||||
|
||||
log_metrics = {"loss": loss.item()}
|
||||
|
||||
if "warmup_completed" in metrics:
|
||||
accelerator.print("Warmup completed! Starting performance tracking...")
|
||||
elif metrics:
|
||||
log_metrics.update(
|
||||
{
|
||||
"tokens_per_second": int(metrics["tokens_per_second"]),
|
||||
"steps_per_second": metrics["steps_per_second"],
|
||||
}
|
||||
)
|
||||
|
||||
if (step % 10 == 0 or step == total_num_steps - 1) and metrics:
|
||||
accelerator.print(
|
||||
f"Step {step}/{total_num_steps} | "
|
||||
f"Loss: {loss.item():.4f} | "
|
||||
f"Tokens/s: {int(metrics['tokens_per_second'])} | "
|
||||
f"Steps/s: {metrics['steps_per_second']:.2f} | "
|
||||
)
|
||||
|
||||
accelerator.log(log_metrics)
|
||||
|
||||
accelerator.wait_for_everyone()
|
||||
accelerator.end_training()
|
||||
accelerator.print("Training completed!")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
157 examples/fsdp2/fsdp2_fp8.py Normal file
@ -0,0 +1,157 @@
|
||||
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Minimal example of training with FP8 precision using FSDP2 via Accelerate.
|
||||
This example demonstrates how to use torchao's Float8LinearConfig with Accelerate's AORecipeKwargs.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
|
||||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
from torchao.float8 import Float8LinearConfig
|
||||
from transformers import AutoConfig, AutoModelForCausalLM
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import AORecipeKwargs, FullyShardedDataParallelPlugin, TorchDynamoPlugin, set_seed
|
||||
from utils import PerformanceTracker, create_collate_fn, get_dataset, get_model_flops_per_token, setup_tokenizer
|
||||
|
||||
|
||||
MODEL_ID = "NousResearch/Hermes-3-Llama-3.1-8B"
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
parser.add_argument("--sequence-length", type=int, default=8192, help="Sequence length for the dataset")
|
||||
parser.add_argument("--num-steps", type=int, default=1000, help="Number of steps to train for")
|
||||
parser.add_argument("--precision", type=str, default="fp8", choices=["fp8", "bf16"], help="Precision to train in")
|
||||
parser.add_argument("--log-with", type=str, default="wandb", help="Log with wandb or tensorboard")
|
||||
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Main function to train the model.
|
||||
"""
|
||||
set_seed(42)
|
||||
|
||||
args = parse_args()
|
||||
|
||||
fsdp2_plugin = FullyShardedDataParallelPlugin(
|
||||
fsdp_version=2,
|
||||
cpu_ram_efficient_loading=False, # CPU RAM efficient loading CANNOT work with fp8 torchao
|
||||
auto_wrap_policy="transformer_based_wrap",
|
||||
transformer_cls_names_to_wrap=["LlamaDecoderLayer"],
|
||||
)
|
||||
fsdp2_plugin.set_mixed_precision(args.precision)
|
||||
|
||||
dynamo_plugin = TorchDynamoPlugin(
|
||||
backend="inductor",
|
||||
use_regional_compilation=True, # We use regional compilation to compile the model way faster
|
||||
)
|
||||
|
||||
fp8_config = Float8LinearConfig(
|
||||
enable_fsdp_float8_all_gather=True,  # extra savings by gathering parameters in fp8 and upcasting afterwards
|
||||
force_recompute_fp8_weight_in_bwd=True,
|
||||
)
|
||||
|
||||
kwargs = []
|
||||
if args.precision == "fp8":
|
||||
kwargs = [AORecipeKwargs(config=fp8_config)]
|
||||
|
||||
accelerator = Accelerator(
|
||||
fsdp_plugin=fsdp2_plugin,
|
||||
dynamo_plugin=dynamo_plugin,
|
||||
kwargs_handlers=kwargs,
|
||||
log_with=args.log_with,
|
||||
)
|
||||
accelerator.init_trackers(
|
||||
project_name="FSDP2_torchao_fp8",
|
||||
config={"sequence_length": args.sequence_length, "num_steps": args.num_steps},
|
||||
)
|
||||
|
||||
model = AutoModelForCausalLM.from_config(
|
||||
AutoConfig.from_pretrained(MODEL_ID, use_cache=False),
|
||||
torch_dtype=torch.bfloat16,
|
||||
)
|
||||
|
||||
tokenizer = setup_tokenizer(MODEL_ID)
|
||||
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)
|
||||
|
||||
model, optimizer = accelerator.prepare(model, optimizer)
|
||||
|
||||
dataset = get_dataset(accelerator, tokenizer, args.sequence_length)
|
||||
dataloader = DataLoader(dataset, batch_size=1, collate_fn=create_collate_fn())
|
||||
dataloader = accelerator.prepare(dataloader)
|
||||
|
||||
model.train()
|
||||
|
||||
total_num_steps = min(args.num_steps, len(dataloader))
|
||||
model_flops_per_token = get_model_flops_per_token(model, args.sequence_length)
|
||||
performance_tracker = PerformanceTracker(warmup_steps=10)
|
||||
|
||||
accelerator.print(f"Starting training with {args.precision} precision for {total_num_steps} steps...")
|
||||
accelerator.print(f"Sequence length: {args.sequence_length}")
|
||||
accelerator.print("Warming up for 10 steps...")
|
||||
|
||||
for step, batch in enumerate(dataloader):
|
||||
if step >= total_num_steps:
|
||||
break
|
||||
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
batch_tokens = batch["input_ids"].shape[1]
|
||||
metrics = performance_tracker.step(batch_tokens)
|
||||
|
||||
print_msg = f"Step {step}/{total_num_steps}, Loss: {loss.item():.4f}"
|
||||
log_metrics = {"loss": loss.item()}
|
||||
|
||||
if "warmup_completed" in metrics:
|
||||
accelerator.print("Warm up completed! Starting performance tracking...")
|
||||
elif metrics:
|
||||
tps = metrics["tokens_per_second"]
|
||||
tflops = metrics["total_tokens"] * model_flops_per_token / (metrics["total_time"] * 1e12)
|
||||
|
||||
# It's rather hard to get a good estimate of MFU when training in FP8, as both FP8 and BF16 tensor cores are used, therefore we just report TFLOPS (tera floating-point operations per second)
# On H100 SXM, the theoretical peak flops are ~990 TFLOPS for bf16 and ~1980 TFLOPS for fp8 [https://resources.nvidia.com/en-us-gpu-resources/h100-datasheet-24306]
# These peaks are WITH sparsity, so divide by 2 to get the figures without sparsity
|
||||
print_msg += f" | Average steps/s: {metrics['steps_per_second']:.2f} | TPS per device: {tps:.2f} | TFLOPS per device: {tflops:.2f}"
|
||||
log_metrics.update(
|
||||
{
|
||||
"steps_per_second": metrics["steps_per_second"],
|
||||
"tps_per_device": tps,
|
||||
"tflops_per_device": tflops,
|
||||
}
|
||||
)
|
||||
|
||||
if step % 10 == 0 or step == total_num_steps - 1:
|
||||
accelerator.print(print_msg)
|
||||
|
||||
accelerator.log(log_metrics)
|
||||
|
||||
accelerator.wait_for_everyone()
|
||||
accelerator.end_training()
|
||||
accelerator.print("Training completed!")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
181 examples/fsdp2/utils.py Normal file
@ -0,0 +1,181 @@
|
||||
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Common utilities for FSDP2 examples.
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
import torch
|
||||
from datasets import Dataset, load_dataset
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
from accelerate import Accelerator
|
||||
|
||||
|
||||
def get_dataset(
|
||||
accelerator: Accelerator,
|
||||
tokenizer: AutoTokenizer,
|
||||
seq_len: int,
|
||||
processing_batch_size: int = 1000,
|
||||
) -> Dataset:
|
||||
"""
|
||||
Load and prepare TinyStories dataset.
|
||||
|
||||
Args:
|
||||
accelerator (Accelerator): Accelerate accelerator instance
|
||||
tokenizer (AutoTokenizer): Hugging Face tokenizer
|
||||
seq_len (int): Sequence length for the dataset
|
||||
processing_batch_size (int): Batch size for processing the dataset
|
||||
|
||||
Returns:
|
||||
Dataset: Packed dataset
|
||||
"""
|
||||
raw_dataset = load_dataset("roneneldan/TinyStories", split="train[:50%]")
|
||||
|
||||
def tokenize_function(examples):
|
||||
tokenized_batch = tokenizer(
|
||||
examples["text"],
|
||||
padding=False,
|
||||
truncation=True,
|
||||
max_length=seq_len,
|
||||
return_tensors=None,
|
||||
)
|
||||
tokenized_batch["labels"] = tokenized_batch["input_ids"].copy()
|
||||
return tokenized_batch
|
||||
|
||||
with accelerator.main_process_first():
|
||||
tokenized_dataset = raw_dataset.map(
|
||||
tokenize_function, batched=True, remove_columns=["text"], batch_size=processing_batch_size
|
||||
)
|
||||
|
||||
def create_packed_sequences(examples):
|
||||
all_tokens = []
|
||||
for input_ids in examples["input_ids"]:
|
||||
all_tokens.extend(input_ids)
|
||||
|
||||
num_sequences = len(all_tokens) // (seq_len + 1)
|
||||
packed_input_ids = []
|
||||
packed_labels = []
|
||||
|
||||
for i in range(num_sequences):
|
||||
start_idx = i * (seq_len + 1)
|
||||
end_idx = start_idx + (seq_len + 1)
|
||||
full_sequence = all_tokens[start_idx:end_idx]
|
||||
packed_input_ids.append(full_sequence[:-1])
|
||||
packed_labels.append(full_sequence[1:])
|
||||
|
||||
return {"input_ids": packed_input_ids, "labels": packed_labels}
|
||||
|
||||
with accelerator.main_process_first():
|
||||
packed_dataset = tokenized_dataset.map(
|
||||
create_packed_sequences,
|
||||
batched=True,
|
||||
remove_columns=tokenized_dataset.column_names,
|
||||
batch_size=processing_batch_size,
|
||||
)
|
||||
|
||||
return packed_dataset.shuffle(seed=42)
|
||||
|
||||
|
||||
def get_model_flops_per_token(model: AutoModelForCausalLM, seq_len: int) -> float:
|
||||
"""
|
||||
Get the number of flops per token for the model.
|
||||
|
||||
Args:
|
||||
model (AutoModelForCausalLM): Model to get the flops for
|
||||
seq_len (int): Sequence length
|
||||
"""
|
||||
cfg = model.config
|
||||
head_dim = cfg.hidden_size // cfg.num_attention_heads
|
||||
|
||||
# MLP: 3 matmuls
|
||||
mlp_flops = 18 * cfg.hidden_size * cfg.intermediate_size
|
||||
|
||||
# Attn (w/o dotproduct)
|
||||
attn_flops = 12 * head_dim * (cfg.num_attention_heads + cfg.num_key_value_heads)
|
||||
|
||||
# attn (dotproduct) - this scales quadratically with sequence length
|
||||
attn_dotproduct_flops = 12 * cfg.num_attention_heads * head_dim * seq_len
|
||||
|
||||
# we also ignore embeddings and layernorms, etc
|
||||
return (mlp_flops + attn_flops + attn_dotproduct_flops) * cfg.num_hidden_layers
|
||||
|
||||
|
||||
def create_collate_fn():
|
||||
"""Create a collate function for batching."""
|
||||
|
||||
def collate_fn(batch):
|
||||
input_ids = torch.tensor([item["input_ids"] for item in batch], dtype=torch.long)
|
||||
labels = torch.tensor([item["labels"] for item in batch], dtype=torch.long)
|
||||
return {"input_ids": input_ids, "labels": labels}
|
||||
|
||||
return collate_fn
|
||||
|
||||
|
||||
class PerformanceTracker:
|
||||
"""Track training performance metrics."""
|
||||
|
||||
def __init__(self, warmup_steps: int = 10):
|
||||
self.warmup_steps = warmup_steps
|
||||
self.reset()
|
||||
|
||||
def reset(self):
|
||||
"""Reset all tracking variables."""
|
||||
self.start_time = None
|
||||
self.num_tokens = 0
|
||||
self.is_in_warmup = True
|
||||
self.step_count = 0
|
||||
|
||||
def step(self, batch_tokens: int) -> dict:
|
||||
"""
|
||||
Update performance tracking with a new step.
|
||||
|
||||
Args:
|
||||
batch_tokens (int): Number of tokens in current batch
|
||||
|
||||
Returns:
|
||||
dict: Performance metrics if past warmup, empty dict otherwise
|
||||
"""
|
||||
self.step_count += 1
|
||||
|
||||
if self.step_count == self.warmup_steps:
|
||||
self.start_time = time.perf_counter()
|
||||
self.num_tokens = 0
|
||||
self.is_in_warmup = False
|
||||
return {"warmup_completed": True}
|
||||
|
||||
if not self.is_in_warmup and self.start_time is not None:
|
||||
self.num_tokens += batch_tokens
|
||||
total_time = time.perf_counter() - self.start_time
|
||||
steps_from_warmup = self.step_count - self.warmup_steps
|
||||
|
||||
if total_time > 0 and steps_from_warmup > 0:
|
||||
return {
|
||||
"tokens_per_second": self.num_tokens / total_time,
|
||||
"steps_per_second": steps_from_warmup / total_time,
|
||||
"total_tokens": self.num_tokens,
|
||||
"total_time": total_time,
|
||||
}
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
def setup_tokenizer(model_id: str) -> AutoTokenizer:
|
||||
"""Setup tokenizer with proper padding token."""
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
||||
if tokenizer.pad_token is None:
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
return tokenizer
|
@ -21,10 +21,7 @@ from accelerate.test_utils import torch_device
|
||||
from accelerate.utils import set_seed
|
||||
|
||||
|
||||
if torch_device == "hpu":
|
||||
synchronize_func = torch.hpu.synchronize
|
||||
else:
|
||||
synchronize_func = torch.cuda.synchronize
|
||||
synchronize_func = getattr(torch, torch_device, torch.cuda).synchronize
|
||||
|
||||
# Set the random seed to have reproducible outputs
|
||||
set_seed(42)
|
||||
|
@ -21,11 +21,7 @@ from accelerate.test_utils import torch_device
|
||||
from accelerate.utils import set_seed
|
||||
|
||||
|
||||
if torch_device == "hpu":
|
||||
synchronize_func = torch.hpu.synchronize
|
||||
else:
|
||||
synchronize_func = torch.cuda.synchronize
|
||||
|
||||
synchronize_func = getattr(torch, torch_device, torch.cuda).synchronize
|
||||
|
||||
# Set the random seed to have reproducible outputs
|
||||
set_seed(42)
|
||||
|
@ -8,7 +8,7 @@
|
||||
#SBATCH --error=E-%x.%j
|
||||
|
||||
######################
|
||||
### Set enviroment ###
|
||||
### Set environment ###
|
||||
######################
|
||||
source activateEnvironment.sh
|
||||
|
||||
|
@ -11,7 +11,7 @@
|
||||
#SBATCH --time=01:59:00 # maximum execution time (HH:MM:SS)
|
||||
|
||||
######################
|
||||
### Set enviroment ###
|
||||
### Set environment ###
|
||||
######################
|
||||
source activateEnvironment.sh
|
||||
export GPUS_PER_NODE=4
|
||||
|
@ -11,7 +11,7 @@
|
||||
#SBATCH --time=01:59:00 # maximum execution time (HH:MM:SS)
|
||||
|
||||
######################
|
||||
### Set enviroment ###
|
||||
### Set environment ###
|
||||
######################
|
||||
source activateEnvironment.sh
|
||||
export GPUS_PER_NODE=4
|
||||
|
@ -11,7 +11,7 @@
|
||||
#SBATCH --time=01:59:00 # maximum execution time (HH:MM:SS)
|
||||
|
||||
######################
|
||||
### Set enviroment ###
|
||||
### Set environment ###
|
||||
######################
|
||||
source activateEnvironment.sh
|
||||
export GPUS_PER_NODE=4
|
||||
@ -25,7 +25,7 @@ head_node_ip=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
|
||||
export ACCELERATE_DIR="${ACCELERATE_DIR:-/accelerate}"
|
||||
|
||||
export LAUNCHER="accelerate launch \
|
||||
--config ${ACCELERATE_DIR}/examples/slurm/fsdp_config.yaml \
|
||||
--config_file ${ACCELERATE_DIR}/examples/slurm/fsdp_config.yaml \
|
||||
--num_processes $((SLURM_NNODES * GPUS_PER_NODE)) \
|
||||
--num_machines $SLURM_NNODES \
|
||||
--rdzv_backend c10d \
|
||||
|
@ -1,6 +1,6 @@
|
||||
[tool.ruff]
|
||||
line-length = 119
|
||||
target-version = "py38"
|
||||
target-version = "py39"
|
||||
|
||||
[tool.ruff.lint]
|
||||
preview = true
|
||||
|
9 setup.py
@ -19,7 +19,7 @@ extras = {}
|
||||
extras["quality"] = [
|
||||
"black ~= 23.1", # hf-doc-builder has a hidden dependency on `black`
|
||||
"hf-doc-builder >= 0.3.0",
|
||||
"ruff ~= 0.6.4",
|
||||
"ruff ~= 0.11.2",
|
||||
]
|
||||
extras["docs"] = []
|
||||
extras["test_prod"] = ["pytest>=7.2.0,<=8.0.0", "pytest-xdist", "pytest-subtests", "parameterized", "pytest-order"]
|
||||
@ -40,7 +40,8 @@ extras["testing"] = extras["test_prod"] + extras["test_dev"]
|
||||
extras["deepspeed"] = ["deepspeed"]
|
||||
extras["rich"] = ["rich"]
|
||||
|
||||
extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard", "dvclive"]
|
||||
extras["test_fp8"] = ["torchao"] # note: TE for now needs to be done via pulling down the docker image directly
|
||||
extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard", "dvclive", "mlflow", "matplotlib"]
|
||||
extras["dev"] = extras["quality"] + extras["testing"] + extras["rich"]
|
||||
|
||||
extras["sagemaker"] = [
|
||||
@ -49,7 +50,7 @@ extras["sagemaker"] = [
|
||||
|
||||
setup(
|
||||
name="accelerate",
|
||||
version="1.4.0.dev0",
|
||||
version="1.8.0.dev0",
|
||||
description="Accelerate",
|
||||
long_description=open("README.md", encoding="utf-8").read(),
|
||||
long_description_content_type="text/markdown",
|
||||
@ -88,7 +89,7 @@ setup(
|
||||
"License :: OSI Approved :: Apache Software License",
|
||||
"Operating System :: OS Independent",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
||||
],
|
||||
)
|
||||
|
@ -11,7 +11,7 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
__version__ = "1.4.0.dev0"
|
||||
__version__ = "1.8.0.dev0"
|
||||
|
||||
from .accelerator import Accelerator
|
||||
from .big_modeling import (
|
||||
|
@ -30,11 +30,10 @@ from types import MethodType
|
||||
from typing import Any, Callable, Union
|
||||
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
import torch.utils.hooks as hooks
|
||||
from huggingface_hub import split_torch_state_dict_into_shards
|
||||
|
||||
from accelerate.utils.imports import is_torchao_available
|
||||
|
||||
from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state
|
||||
from .data_loader import DataLoaderDispatcher, prepare_data_loader, skip_first_batches
|
||||
from .logging import get_logger
|
||||
@ -82,8 +81,13 @@ from .utils import (
|
||||
convert_outputs_to_fp32,
|
||||
ensure_weights_retied,
|
||||
extract_model_from_parallel,
|
||||
fsdp2_apply_ac,
|
||||
fsdp2_canonicalize_names,
|
||||
fsdp2_prepare_model,
|
||||
fsdp2_switch_optimizer_parameters,
|
||||
gather,
|
||||
gather_object,
|
||||
get_fsdp2_grad_scaler,
|
||||
get_grad_scaler,
|
||||
get_mixed_precision_context_manager,
|
||||
get_pretty_name,
|
||||
@ -100,6 +104,7 @@ from .utils import (
|
||||
is_npu_available,
|
||||
is_torch_version,
|
||||
is_torch_xla_available,
|
||||
is_torchao_available,
|
||||
is_transformer_engine_available,
|
||||
is_xpu_available,
|
||||
load_fsdp_model,
|
||||
@ -117,11 +122,12 @@ from .utils import (
|
||||
from .utils.constants import (
|
||||
BETA_TP_AVAILABLE_PYTORCH_VERSION,
|
||||
BETA_TP_AVAILABLE_TRANSFORMERS_VERSION,
|
||||
FSDP2_PYTORCH_VERSION,
|
||||
FSDP_PYTORCH_VERSION,
|
||||
PROFILE_PATTERN_NAME,
|
||||
)
|
||||
from .utils.modeling import get_state_dict_offloaded_model
|
||||
from .utils.other import is_compiled_module
|
||||
from .utils.other import compile_regions, compile_regions_deepspeed, is_compiled_module
|
||||
|
||||
|
||||
if is_deepspeed_available():
|
||||
@ -172,6 +178,52 @@ _even_batches = object()
|
||||
_use_seedable_sampler = object()
|
||||
|
||||
|
||||
class ContextParallelWrapper(torch.nn.Module):
|
||||
def __init__(self, model, mesh, accelerator):
|
||||
super().__init__()
|
||||
self._cp_wrapped_model = model # Store as a submodule
|
||||
self.mesh = mesh
|
||||
self.accelerator = accelerator
|
||||
|
||||
def forward(self, input_ids, attention_mask=None, labels=None, **kwargs):
|
||||
from torch.distributed.tensor.experimental._attention import context_parallel
|
||||
|
||||
buffers = [input_ids]
|
||||
buffer_seq_dims = [1]
|
||||
|
||||
if attention_mask is not None:
|
||||
buffers.append(attention_mask)
|
||||
buffer_seq_dims.append(1)
|
||||
|
||||
if labels is not None:
|
||||
buffers.append(labels)
|
||||
buffer_seq_dims.append(1)
|
||||
from functools import partial
|
||||
|
||||
cp_context = partial(
|
||||
context_parallel,
|
||||
mesh=self.mesh,
|
||||
buffers=buffers,
|
||||
buffer_seq_dims=buffer_seq_dims,
|
||||
no_restore_buffers=set(buffers),
|
||||
)
|
||||
self.accelerator.cp_context = cp_context()
|
||||
|
||||
self.accelerator.cp_context.__enter__()
|
||||
|
||||
return self._cp_wrapped_model(input_ids=input_ids, attention_mask=attention_mask, labels=labels, **kwargs)
|
||||
|
||||
def __getattr__(self, name):
|
||||
try:
|
||||
return super().__getattr__(name)
|
||||
except AttributeError:
|
||||
return getattr(self._cp_wrapped_model, name)
|
||||
|
||||
|
||||
def create_context_parallel_model(model, mesh, accelerator):
|
||||
return ContextParallelWrapper(model, mesh, accelerator)
|
||||
|
||||
|
||||
class Accelerator:
|
||||
"""
|
||||
Creates an instance of an accelerator for distributed training or mixed precision training.
|
||||
@ -342,26 +394,14 @@ class Accelerator:
|
||||
if not is_deepspeed_available():
|
||||
raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.")
|
||||
if is_mlu_available():
|
||||
if compare_versions("deepspeed-mlu", "<", "0.10.1"):
|
||||
raise ImportError("DeepSpeed MLU version must be >= 0.10.1. Please update DeepSpeed MLU.")
|
||||
if compare_versions("deepspeed", "<", "0.15.2"):
|
||||
raise ImportError("DeepSpeed MLU version must be >= 0.15.2. Please update DeepSpeed.")
|
||||
elif is_musa_available():
|
||||
if compare_versions("deepspeed", "<", "0.14.3"):
|
||||
raise ImportError("DeepSpeed MUSA version must be >= 0.14.3. Please update DeepSpeed.")
|
||||
elif compare_versions("deepspeed", "<", "0.9.3"):
|
||||
raise ImportError("DeepSpeed version must be >= 0.9.3. Please update DeepSpeed.")
|
||||
|
||||
mixed_precision = (
|
||||
os.environ.get("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision
|
||||
)
|
||||
if not isinstance(deepspeed_plugins, dict):
|
||||
deepspeed_plugins.set_mixed_precision(mixed_precision)
|
||||
deepspeed_plugins.select(_from_accelerator_state=True)
|
||||
else:
|
||||
for plugin in deepspeed_plugins.values():
|
||||
plugin.set_mixed_precision(mixed_precision)
|
||||
# The first plugin passed in is always the active one
|
||||
first_plugin = next(iter(deepspeed_plugins.values()))
|
||||
first_plugin.select(_from_accelerator_state=True)
|
||||
self.deepspeed_engine_wrapped = None
|
||||
|
||||
if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" or isinstance(
|
||||
@ -370,9 +410,7 @@ class Accelerator:
|
||||
if not is_torch_version(">=", FSDP_PYTORCH_VERSION):
|
||||
raise ValueError(f"FSDP requires PyTorch >= {FSDP_PYTORCH_VERSION}")
|
||||
|
||||
if os.environ.get("ACCELERATE_USE_TP", "false") == "true" or isinstance(
|
||||
torch_tp_plugin, TorchTensorParallelPlugin
|
||||
):
|
||||
if isinstance(torch_tp_plugin, TorchTensorParallelPlugin):
|
||||
if not is_torch_version(">=", BETA_TP_AVAILABLE_PYTORCH_VERSION):
|
||||
raise ValueError(f"TP requires PyTorch >= {BETA_TP_AVAILABLE_PYTORCH_VERSION}")
|
||||
|
||||
@ -388,14 +426,12 @@ class Accelerator:
|
||||
raise TypeError("`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.")
|
||||
os.environ["ACCELERATE_USE_FSDP"] = "true" # use FSDP if plugin is provided
|
||||
|
||||
if torch_tp_plugin is None:
|
||||
torch_tp_plugin = (
|
||||
TorchTensorParallelPlugin() if os.environ.get("ACCELERATE_USE_TP", "false") == "true" else None
|
||||
)
|
||||
else:
|
||||
if not isinstance(torch_tp_plugin, TorchTensorParallelPlugin):
|
||||
raise TypeError("`torch_tp_plugin` must be a TorchTensorParallelPlugin object.")
|
||||
os.environ["ACCELERATE_USE_TP"] = "true"
|
||||
if fsdp_plugin is not None and fsdp_plugin.fsdp_version == 2:
|
||||
if not is_torch_version(">=", FSDP2_PYTORCH_VERSION):
|
||||
raise ImportError(f"FSDP2 requires PyTorch >= {FSDP2_PYTORCH_VERSION}")
|
||||
|
||||
if torch_tp_plugin is not None and not isinstance(torch_tp_plugin, TorchTensorParallelPlugin):
|
||||
raise TypeError("`torch_tp_plugin` must be a TorchTensorParallelPlugin object.")
|
||||
|
||||
if megatron_lm_plugin is None: # init from env variables
|
||||
megatron_lm_plugin = (
|
||||
@ -437,9 +473,9 @@ class Accelerator:
|
||||
self.has_fp8_handler = False
|
||||
if kwargs_handlers is not None:
|
||||
for handler in kwargs_handlers:
|
||||
assert isinstance(
|
||||
handler, KwargsHandler
|
||||
), f"Unsupported kwargs handler passed: {handler}, must be one that inherits `accelerate.utils.KwargsHandler`."
|
||||
assert isinstance(handler, KwargsHandler), (
|
||||
f"Unsupported kwargs handler passed: {handler}, must be one that inherits `accelerate.utils.KwargsHandler`."
|
||||
)
|
||||
# Add the handler class to the set of found handlers
|
||||
if handler.__class__ in found_handlers:
|
||||
raise ValueError(f"You can only pass one {handler.__class__} in `kwargs_handlers`.")
|
||||
@ -462,10 +498,11 @@ class Accelerator:
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
self._mixed_precision = mixed_precision
|
||||
self.fp8_enabled = self.state.mixed_precision == "fp8" or mixed_precision == "fp8"
|
||||
|
||||
# Check for automatic FP8 recipe creation
|
||||
if self._mixed_precision == "fp8" and not self.has_fp8_handler:
|
||||
# Prioritize TE -> AO -> MSAMP
|
||||
if self.fp8_enabled and not self.has_fp8_handler:
|
||||
# Prioritize AO -> TE -> MSAMP
|
||||
if is_torchao_available():
|
||||
logger.info("Found `torchao` installed, using it for FP8 training.")
|
||||
self.ao_recipe_handler = AORecipeKwargs()
|
||||
@ -480,6 +517,7 @@ class Accelerator:
|
||||
"Tried to train with `fp8` and auto-detect backend, but no FP8-compatible backend was installed. "
|
||||
"Valid backends are: `torchao`, `transformer-engine`, and `msamp`."
|
||||
)
|
||||
self.has_fp8_handler = True
|
||||
|
||||
self.delayed_fp8_autocast = False
|
||||
if self.has_fp8_handler:
|
||||
@ -493,6 +531,12 @@ class Accelerator:
|
||||
DistributedType.FSDP,
|
||||
)
|
||||
|
||||
# TODO: S1ro - this is probably gonna be a problem with other fp8 backends too
|
||||
if self.fp8_backend == "AO" and self.state.fsdp_plugin.cpu_ram_efficient_loading:
|
||||
raise ValueError(
|
||||
"torchao with FSDP2 and cpu_ram_efficient_loading is not supported, setting `cpu_ram_efficient_loading` to False will fix the issue and work as intended."
|
||||
)
|
||||
|
||||
trackers = filter_trackers(log_with, self.logging_dir)
|
||||
if len(trackers) < 1 and log_with is not None:
|
||||
warnings.warn(f"`log_with={log_with}` was passed but no supported trackers are currently installed.")
|
||||
@ -546,7 +590,12 @@ class Accelerator:
|
||||
) or is_torch_xla_available(check_is_tpu=True):
|
||||
raise ValueError(f"fp16 mixed precision requires a GPU (not {self.device.type!r}).")
|
||||
kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}
|
||||
self.scaler = get_grad_scaler(self.distributed_type, **kwargs)
|
||||
|
||||
# FSDP2 doesn't use ShardedGradScaler; rather than modifying `get_grad_scaler`, we create a simple utility
|
||||
if self.is_fsdp2:
|
||||
self.scaler = get_fsdp2_grad_scaler(**kwargs)
|
||||
else:
|
||||
self.scaler = get_grad_scaler(self.distributed_type, **kwargs)
|
||||
|
||||
elif self.state.mixed_precision == "bf16" and self.distributed_type not in (
|
||||
DistributedType.DEEPSPEED,
|
||||
@ -562,7 +611,7 @@ class Accelerator:
|
||||
# for DeepSpeed, self.state.mixed_precision is always "bf16",
|
||||
# see https://github.com/huggingface/accelerate/blob/main/src/accelerate/state.py#L968 and
|
||||
# https://github.com/huggingface/accelerate/blob/main/src/accelerate/utils/dataclasses.py#L1263.
|
||||
elif mixed_precision == "fp8" or self.state.mixed_precision == "fp8":
|
||||
elif self.fp8_enabled:
|
||||
# We always enable `native_amp` for FP8
|
||||
self.native_amp = True
|
||||
if self.fp8_backend == "MSAMP":
|
||||
@ -698,6 +747,10 @@ class Accelerator:
|
||||
def mixed_precision(self):
|
||||
return self.state.mixed_precision
|
||||
|
||||
@property
|
||||
def is_fsdp2(self):
|
||||
return self.state.is_fsdp2
|
||||
|
||||
@contextmanager
|
||||
def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
|
||||
"""
|
||||
@ -1248,6 +1301,66 @@ class Accelerator:
|
||||
with contextlib.nullcontext(joinables):
|
||||
yield
|
||||
|
||||
@contextmanager
|
||||
def context_parallel(
|
||||
self,
|
||||
buffers: list[torch.Tensor] | None = None,
|
||||
buffer_seq_dims: list[int] | None = None,
|
||||
no_restore_buffers: set[torch.Tensor] | None = None,
|
||||
):
|
||||
"""
|
||||
A context manager that enables context parallel training.
|
||||
|
||||
Args:
|
||||
buffers (`list[torch.Tensor]`, `optional`):
|
||||
Buffers, which are going to be sharded along the sequence dimension. Common examples are inputs, labels
|
||||
or positional embedding buffers. This context manager will modify these buffers in-place, and after
|
||||
exiting the context, the buffers will be restored to their original state. To avoid unnecessary
|
||||
restores, you can use `no_restore_buffers` to specify which buffers don't need to be restored.
|
||||
buffer_seq_dims (`list[int]`, `optional`):
|
||||
Sequence dimensions of `buffers`.
|
||||
no_restore_buffers (`set[torch.Tensor]`, `optional`):
|
||||
This set must be a subset of `buffers`. Specifies which buffers from `buffers` argument won't be
|
||||
restored after the context exits. These buffers will then be kept in a sharded state.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
`context_parallel` is currently only supported together with FSDP2, and requires `cp_size` to be set. If either
|
||||
of these conditions is not met, this context manager will have no effect.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
This context manager has to be recreated with each training step, as shown in the example below.
|
||||
|
||||
</Tip>
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> for batch in dataloader:
|
||||
... with accelerator.context_parallel(
|
||||
... buffers=[batch["input_ids"], batch["attention_mask"]],
|
||||
... buffer_seq_dims=[1, 1],
|
||||
... no_restore_buffers={batch["input_ids"]},
|
||||
... ):
|
||||
... outputs = model(batch)
|
||||
... ...
|
||||
```
|
||||
"""
|
||||
|
||||
if (
|
||||
getattr(self.state, "fsdp_plugin", None) is None
|
||||
or self.state.fsdp_plugin.cp_size == 1
|
||||
or (cp_context := getattr(self, "_cp_context", None)) is None
|
||||
):
|
||||
logger.warning("Context parallel + FSDP2 is not configured, this context manager will have no effect.")
|
||||
yield
|
||||
else:
|
||||
with cp_context(buffers=buffers, buffer_seq_dims=buffer_seq_dims, no_restore_buffers=no_restore_buffers):
|
||||
yield
|
||||
|
||||
def print(self, *args, **kwargs):
|
||||
"""
|
||||
Drop in replacement of `print()` to only print once per server.
|
||||
@ -1372,18 +1485,34 @@ class Accelerator:
|
||||
"part for you."
|
||||
)
|
||||
|
||||
if self.is_fsdp2:
|
||||
model_count = 0
|
||||
optimizer_count = 0
|
||||
for i, obj in enumerate(args):
|
||||
if isinstance(obj, torch.nn.Module):
|
||||
model_count += 1
|
||||
elif isinstance(obj, torch.optim.Optimizer):
|
||||
optimizer_count += 1
|
||||
|
||||
# Written this way so that passing objects other than models/optimizers doesn't raise an error
|
||||
if (model_count < 1 and optimizer_count > 0) or (model_count > 0 and optimizer_count < 1):
|
||||
raise ValueError(
|
||||
"When using FSDP2, a model and optimizer must be passed together to `Accelerator.prepare()`"
|
||||
" as the optimizer needs to have its parameters modified after the model is converted."
|
||||
)
|
||||
if model_count > 1:
|
||||
raise ValueError("Only one model is supported when using FSDP2")
|
||||
|
||||
# If we're dealing with device placement, this deals with that by...
|
||||
tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.XLA
|
||||
|
||||
if tpu_should_fix_optimizer:
|
||||
# 1. grabbing old model parameters
|
||||
old_named_params = self._get_named_parameters(*args)
|
||||
old_named_params = self._get_named_parameters(*args, drop_refs=False)
|
||||
|
||||
if self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
|
||||
if self.device.type == "cpu" and self.state.use_ipex:
|
||||
args = self._prepare_ipex_or_xpu(*args)
|
||||
elif self.device.type == "xpu" and is_xpu_available():
|
||||
args = self._prepare_ipex_or_xpu(*args)
|
||||
if (self.device.type == "cpu" or self.device.type == "xpu") and self.state.use_ipex:
|
||||
args = self._prepare_ipex(*args)
|
||||
if self.fp8_backend == "TE":
|
||||
args = self._prepare_te(*args)
|
||||
elif self.fp8_backend == "AO":
|
||||
@ -1392,6 +1521,8 @@ class Accelerator:
|
||||
result = self._prepare_deepspeed(*args)
|
||||
elif self.distributed_type == DistributedType.MEGATRON_LM:
|
||||
result = self._prepare_megatron_lm(*args)
|
||||
elif self.is_fsdp2:
|
||||
result = self._prepare_fsdp2(*args)
|
||||
else:
|
||||
if self.fp8_backend == "MSAMP":
|
||||
args, device_placement = self._prepare_msamp(*args, device_placement=device_placement)
|
||||
@ -1418,6 +1549,102 @@ class Accelerator:
|
||||
|
||||
return result if len(result) > 1 else result[0]
|
||||
|
||||
def _prepare_fsdp2(self, *args):
|
||||
# First pass: prepare everything except schedulers (and model, which is prepared separately below)
|
||||
result = [
|
||||
self._prepare_one(obj, first_pass=True) if not isinstance(obj, torch.nn.Module) else obj for obj in args
|
||||
]
|
||||
|
||||
# Second pass: prepare schedulers
|
||||
result = [self._prepare_one(obj) if not isinstance(obj, torch.nn.Module) else obj for obj in result]
|
||||
|
||||
# Prepare the model
|
||||
model_index, model = None, None
|
||||
for i, obj in enumerate(result):
|
||||
if isinstance(obj, torch.nn.Module):
|
||||
model_index, model = i, obj
|
||||
|
||||
# Invariant: if we have a model, we also have an optimizer (checked in `prepare`)
|
||||
if model_index is None:
|
||||
return tuple(result)
|
||||
|
||||
# Needs to be done first, to make sure AC + fully_shard will work as expected
|
||||
self.state.fsdp_plugin.set_auto_wrap_policy(model)
|
||||
|
||||
# Apply AC if needed
|
||||
if self.state.fsdp_plugin.activation_checkpointing:
|
||||
model = fsdp2_apply_ac(self, model)
|
||||
|
||||
if (context_parallel_size := self.state.fsdp_plugin.cp_size) > 1:
|
||||
if context_parallel_size > self.state.num_processes:
|
||||
raise ValueError(
|
||||
f"`cp_size` set to {context_parallel_size}, which is greater than the number of processes {self.state.num_processes}. Please set to 1 to disable context parallel or use a smaller value."
|
||||
)
|
||||
|
||||
from torch.distributed.device_mesh import init_device_mesh
|
||||
from torch.distributed.tensor.experimental._attention import set_rotate_method
|
||||
|
||||
cp_comm_strategy = self.state.fsdp_plugin.cp_comm_strategy
|
||||
set_rotate_method(cp_comm_strategy)
|
||||
|
||||
world_size = self.state.num_processes
|
||||
|
||||
fsdp_size = world_size // context_parallel_size
|
||||
|
||||
device_mesh = init_device_mesh(
|
||||
device_type=self.device.type,
|
||||
mesh_shape=(fsdp_size, context_parallel_size),
|
||||
mesh_dim_names=("fsdp", "cp"),
|
||||
)
|
||||
self.state.torch_device_mesh = device_mesh
|
||||
device_mesh["fsdp", "cp"]._flatten("fsdp_cp")
|
||||
|
||||
model = create_context_parallel_model(model, mesh=device_mesh["cp"], accelerator=self)
|
||||
|
||||
# Apply compile if needed, has to be *after* applying AC
|
||||
# Copied from: `accelerator.prepare_model` ~ L1804
|
||||
if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model):
|
||||
if self.state.dynamo_plugin.use_regional_compilation:
|
||||
model = compile_regions(model, **self.state.dynamo_plugin.to_kwargs())
|
||||
else:
|
||||
model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs())
|
||||
|
||||
# Get old params and canonicalize - we canonicalize to make the mapping straightforward
|
||||
old_named_params = fsdp2_canonicalize_names(self._get_named_parameters(*tuple(result), drop_refs=True))
|
||||
|
||||
# Swap the optimizer parameters with empty, so `fully_shard` after will not allocate too much memory
|
||||
for obj in result:
|
||||
if isinstance(obj, torch.optim.Optimizer):
|
||||
for param_group in obj.param_groups:
|
||||
for i, p in enumerate(param_group["params"]):
|
||||
# We drop a reference to the original param here, so that _move_states_to_device triggers a reallocation
|
||||
# We reassign the data_ptr to the original param, so that we preserve the mapping to the new ones
|
||||
param_group["params"][i] = torch.empty_like(p)
|
||||
param_group["params"][i].data_ptr = p.data_ptr()
|
||||
|
||||
self._models.append(model)
|
||||
|
||||
# Prepare everything FSDP2 related for the model (except AC)
|
||||
model = fsdp2_prepare_model(self, model)
|
||||
|
||||
# Remove the old model from the list
|
||||
if len(self._models) > 1 and (self._models[-2] is self._models[-1]):
|
||||
del self._models[-2]
|
||||
|
||||
# Replace the old model with the new one (shouldn't be needed as everything should be in place)
|
||||
result[model_index] = model
|
||||
|
||||
# Get new params and canonicalize
|
||||
new_named_params = fsdp2_canonicalize_names(self._get_named_parameters(*result))
|
||||
# Build a map from old to new params
|
||||
mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
|
||||
# Update the optimizer parameters
|
||||
for obj in result:
|
||||
if isinstance(obj, torch.optim.Optimizer):
|
||||
fsdp2_switch_optimizer_parameters(obj, mapping)
|
||||
|
||||
return result
|
||||
|
||||
def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, evaluation_mode: bool = False):
|
||||
"""
|
||||
Prepares a PyTorch model for training in any distributed setup. It is recommended to use
|
||||
@ -1543,15 +1770,22 @@ class Accelerator:
|
||||
if self.ddp_handler is not None:
|
||||
self.ddp_handler.register_comm_hook(model)
|
||||
elif self.distributed_type == DistributedType.TP:
|
||||
if hasattr(model, "supports_tp_plan") and not model.supports_tp_plan:
|
||||
if not compare_versions("transformers", ">=", BETA_TP_AVAILABLE_TRANSFORMERS_VERSION):
|
||||
raise ValueError(f"TP requires transformers >= {BETA_TP_AVAILABLE_TRANSFORMERS_VERSION}")
|
||||
if not compare_versions("transformers", ">=", BETA_TP_AVAILABLE_TRANSFORMERS_VERSION):
|
||||
raise ValueError(f"TP requires transformers >= {BETA_TP_AVAILABLE_TRANSFORMERS_VERSION}")
|
||||
if not hasattr(model, "tp_size"):
|
||||
raise NotImplementedError(
|
||||
"Provided model does not support tensor parallelism. \
|
||||
Tensor parallelism plan can be added as base_model_tp_plan to model config class \
|
||||
and _tp_plan attribute to model class."
|
||||
"Model should undergo tensor parallel before passing it to accelerate."
|
||||
"You can use .from_pretrained(..., tp_plan='auto') if the model supports"
|
||||
)
|
||||
model.tensor_parallel(self.state.torch_tp_plugin.torch_device_mesh["tp"])
|
||||
if model.tp_size != self.state.torch_tp_plugin.tp_size:
|
||||
raise ValueError(
|
||||
f"tp_size in the plugin {self.state.torch_tp_plugin.tp_size} should be same as model's tp size {model.tp_size}"
|
||||
)
|
||||
elif self.is_fsdp2:
|
||||
raise ValueError(
|
||||
"FSDP2 preparation should be done via `accelerate.prepare()`, as it requires a model and an optimizer."
|
||||
)
|
||||
|
||||
elif self.distributed_type == DistributedType.FSDP:
|
||||
# We need to fix the optimizer *before* sharding the model
|
||||
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
|
||||
@ -1577,7 +1811,10 @@ class Accelerator:
|
||||
)
|
||||
|
||||
kwargs = {
|
||||
"sharding_strategy": fsdp_plugin.sharding_strategy,
|
||||
# We fallback to reshard_after_forward if sharding_strategy is not set.
|
||||
# We prefer sharding_strategy so as not to break the behavior of the existing code.
|
||||
# Deprecation warning has already been issued in `utils.dataclasses.py`
|
||||
"sharding_strategy": fsdp_plugin.sharding_strategy or fsdp_plugin.reshard_after_forward,
|
||||
"cpu_offload": fsdp_plugin.cpu_offload,
|
||||
"auto_wrap_policy": fsdp_plugin.auto_wrap_policy,
|
||||
"mixed_precision": fsdp_plugin.mixed_precision_policy,
|
||||
@ -1616,24 +1853,24 @@ class Accelerator:
|
||||
# * this attribute will always set by init_utils.init_core_state so its always not None.
|
||||
# * mixed_precision.param_dtype only regards _fwd_bwd_param_dtype
|
||||
# * if model is loaded in 16bit, and even if mixed_precision.param_dtype is None,
|
||||
# we sill want to upcast the flat_param.
|
||||
# we still want to upcast the flat_param.
|
||||
if self.mixed_precision != "no": # if mixed precision is set
|
||||
upcasted_log = []
|
||||
for module in FSDP.fsdp_modules(model):
|
||||
# Referencing DeepSpeed Zero3
|
||||
# - in Init, params are converted to 16bit while partitioning.
|
||||
# - in accelerator.prepare, deepspeed.initalize is called to:
|
||||
# * creates the DeepSpeeedEngine.
|
||||
# - in accelerator.prepare, deepspeed.initialize is called to:
|
||||
# * creates the DeepSpeedEngine.
|
||||
# * since zero_optimization() is True , calls engine._configure_zero_optimizer.
|
||||
#
|
||||
# Inside the DeepSpeed Zero3 optimizer configuration, which initalizes
|
||||
# Inside the DeepSpeed Zero3 optimizer configuration, which initializes
|
||||
# DeepSpeedZeroOptimizer_Stage3, during which:
|
||||
# * trainable_param_groups are obtained from the attached optimizer
|
||||
# (already partitioned in 16bit).
|
||||
# * then _setup_for_real_optimizer -> _create_fp32_partitions
|
||||
# which performs the fp32 upcasting.
|
||||
|
||||
# To mimick DeepSeepds's casting in FSDP, we look at the (single) FlatParameter held
|
||||
# To mimic DeepSpeed's casting in FSDP, we look at the (single) FlatParameter held
|
||||
# within an FSDP wrapper. This FlatParameter will be seen by the optimizer.
|
||||
# - even though there is a torch.device('meta') guard below, we
|
||||
# expect _init_utils._init_param_handle_from_module to already
|
||||
@ -1685,11 +1922,14 @@ class Accelerator:
|
||||
elif self.distributed_type == DistributedType.XLA and self.state.fork_launched:
|
||||
model = xmp.MpModelWrapper(model).to(self.device)
|
||||
# Now we can apply the FP8 autocast
|
||||
if self.delayed_fp8_autocast:
|
||||
if self.fp8_backend == "TE" and self.delayed_fp8_autocast:
|
||||
model = apply_fp8_autowrap(model, self.te_recipe_handler or self.fp8_recipe_handler)
|
||||
# torch.compile should be called last and only if the model isn't already compiled.
|
||||
# torch.compile should be called last and only if the model isn't already compiled
|
||||
if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model):
|
||||
model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs())
|
||||
if self.state.dynamo_plugin.use_regional_compilation:
|
||||
model = compile_regions(model, **self.state.dynamo_plugin.to_kwargs())
|
||||
else:
|
||||
model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs())
|
||||
return model
|
||||
|
||||
def _prepare_ao(self, *args):
|
||||
@ -1697,6 +1937,10 @@ class Accelerator:
|
||||
raise ImportError(
|
||||
"`torchao` was not found on your system or is too old of a version. Please ensure that `torchao >= 0.6.1` is installed"
|
||||
)
|
||||
|
||||
if self.is_fsdp2:
|
||||
models = [x for x in args if isinstance(x, torch.nn.Module)]
|
||||
optimizers = [x for x in args if isinstance(x, torch.optim.Optimizer)]
|
||||
for arg in args:
|
||||
if isinstance(arg, torch.nn.Module):
|
||||
convert_model_to_fp8_ao(
|
||||
@ -1704,6 +1948,16 @@ class Accelerator:
|
||||
config=self.ao_recipe_handler.config,
|
||||
module_filter_func=self.ao_recipe_handler.module_filter_func,
|
||||
)
|
||||
|
||||
# Invariant: with FSDP2, optimizer is always passed to `prepare()` together with model
|
||||
# We only precompute scales if float8 all gather is enabled, possibly can add a flag for this later
|
||||
if self.is_fsdp2 and len(optimizers) > 0 and self.ao_recipe_handler.config.enable_fsdp_float8_all_gather:
|
||||
from torchao.float8 import precompute_float8_dynamic_scale_for_fsdp
|
||||
|
||||
optimizers[0].register_step_post_hook(
|
||||
lambda *args, **kwargs: precompute_float8_dynamic_scale_for_fsdp(models[0])
|
||||
)
|
||||
|
||||
return args
|
||||
|
||||
def _prepare_te(self, *args):
|
||||
@ -1755,6 +2009,21 @@ class Accelerator:
|
||||
deepspeed_plugin = self.deepspeed_plugin
|
||||
|
||||
is_dataloader_present = any(isinstance(obj, torch.utils.data.DataLoader) for obj in args)
|
||||
tp_size = deepspeed_plugin.deepspeed_config.get("tensor_parallel", {}).get("autotp_size", 0)
|
||||
if tp_size > 1:
|
||||
if not compare_versions("deepspeed", ">=", "0.16.4"):
|
||||
raise ImportError(
|
||||
"Deepspeed TP requires deepspeed >= 0.16.4, Please update DeepSpeed via `pip install deepspeed -U`."
|
||||
)
|
||||
if not is_torch_version(">=", "2.2.0"):
|
||||
raise ImportError(
|
||||
"Tried to use TP, but `torch.distributed.device_mesh` requires PyTorch >= 2.2.0. Please upgrade your PyTorch version"
|
||||
)
|
||||
from torch.distributed.device_mesh import init_device_mesh
|
||||
|
||||
mesh_dim_name = "tp"
|
||||
self.state.ds_device_mesh = init_device_mesh(self.device.type, (tp_size,), mesh_dim_names=(mesh_dim_name,))
|
||||
|
||||
result = [
|
||||
self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj
|
||||
for obj in args
|
||||
@ -1954,7 +2223,10 @@ class Accelerator:
|
||||
|
||||
if compare_versions("deepspeed", ">=", "0.14.4") and self.state.dynamo_plugin.backend != DynamoBackend.NO:
|
||||
compile_kwargs = self.state.dynamo_plugin.to_kwargs()
|
||||
engine.compile(backend=compile_kwargs.pop("backend"), compile_kwargs=compile_kwargs)
|
||||
if self.state.dynamo_plugin.use_regional_compilation:
|
||||
compile_regions_deepspeed(engine.module, **compile_kwargs)
|
||||
else:
|
||||
engine.compile(backend=compile_kwargs.pop("backend"), compile_kwargs=compile_kwargs)
|
||||
if optimizer is not None:
|
||||
optimizer = DeepSpeedOptimizerWrapper(optimizer)
|
||||
if scheduler is not None:
|
||||
@ -2096,44 +2368,79 @@ class Accelerator:
|
||||
|
||||
return tuple(result)
|
||||
|
||||
def _prepare_ipex_or_xpu(self, *args):
|
||||
def _prepare_ipex(self, *args):
|
||||
"""
|
||||
Prepares model and optimizer for training with IPEX or XPU acceleration. This covers 3 cases, IPEX compiled
|
||||
with CPU only support, IPEX compiled with XPU support and training with XPU pytorch backend available in stock
|
||||
pytorch starting from version 2.4.
|
||||
Prepares model and optimizer for training with IPEX on CPU/XPU. This covers three cases: IPEX compiled with
CPU-only support, IPEX compiled with XPU support, and training with the XPU PyTorch backend available in stock
PyTorch starting from version 2.4.
|
||||
"""
|
||||
if self.state.use_ipex:
|
||||
if not is_ipex_available():
|
||||
raise ImportError(
|
||||
"IPEX is not installed or IPEX's version does not match current PyTorch version. Please refer"
|
||||
" to https://github.com/intel/intel-extension-for-pytorch."
|
||||
)
|
||||
|
||||
model = None
|
||||
optimizer = None
|
||||
# ipex.optimize() is available only for IPEX, both IPEX-CPU and IPEX-XPU
|
||||
if is_ipex_available():
|
||||
import intel_extension_for_pytorch as ipex
|
||||
else:
|
||||
raise ImportError(
|
||||
"IPEX is not installed or IPEX's version does not match current PyTorch version. Please refer"
|
||||
" to https://github.com/intel/intel-extension-for-pytorch."
|
||||
)
|
||||
|
||||
models = []
|
||||
optimizers = []
|
||||
result = [obj for obj in args]
|
||||
for obj in result:
|
||||
for i, obj in enumerate(result):
|
||||
if isinstance(obj, torch.nn.Module):
|
||||
model = obj
|
||||
model.train()
|
||||
models.append((i, model))
|
||||
elif isinstance(obj, (torch.optim.Optimizer)):
|
||||
optimizer = obj
|
||||
if optimizer is not None and model is not None:
|
||||
dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else None
|
||||
optimizers.append((i, obj))
|
||||
|
||||
# Impossible to determine what to do if multiple models and/or optimizers are provided
|
||||
if len(optimizers) > 1 or (len(models) > 1 and len(optimizers) == 1):
|
||||
raise ValueError(
|
||||
"Prepare with IPEX expects either 1+ models and no optimizer OR a single model-optimizer pair."
|
||||
)
|
||||
|
||||
# Nothing to do
|
||||
if len(models) == 0 and len(optimizers) == 0:
|
||||
return result
|
||||
|
||||
dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else None
|
||||
# Multiple models and no optimizer (inference) are provided
|
||||
if len(models) > 0 and len(optimizers) == 0:
|
||||
for i, model in models:
|
||||
if self.device.type == "xpu" and next(model.parameters()).device.type == "cpu":
|
||||
model = model.to(self.device)
|
||||
model, _ = ipex.optimize(model, optimizer=None, dtype=dtype, inplace=True, level="O1")
|
||||
# Replace in result
|
||||
result[i] = model
|
||||
|
||||
# A single model-optimizer pair (training) is provided
|
||||
if len(models) == 1 and len(optimizers) == 1:
|
||||
i_model, model = models[0]
|
||||
i_optimizer, optimizer = optimizers[0]
|
||||
if self.device.type == "xpu" and next(model.parameters()).device.type == "cpu":
|
||||
model = model.to(self.device)
|
||||
# ipex.optimize() is available only for IPEX, both IPEX-CPU and IPEX-XPU
|
||||
if is_ipex_available():
|
||||
import intel_extension_for_pytorch as ipex
|
||||
model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1")
|
||||
# Replace in result
|
||||
result[i_model] = model
|
||||
result[i_optimizer] = optimizer
|
||||
|
||||
model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1")
|
||||
for i in range(len(result)):
|
||||
if isinstance(result[i], torch.nn.Module):
|
||||
result[i] = model
|
||||
elif isinstance(result[i], (torch.optim.Optimizer)):
|
||||
result[i] = optimizer
|
||||
return tuple(result)
|
||||
|
||||
def _prepare_device_mesh(self):
|
||||
"""
|
||||
Prepare the device mesh for distributed training. The dataloader will determine how to load data based on the
|
||||
device mesh.
|
||||
"""
|
||||
if self.state.torch_tp_plugin:
|
||||
return self.state.torch_tp_plugin.torch_device_mesh
|
||||
elif self.distributed_type == DistributedType.DEEPSPEED and hasattr(self.state, "ds_device_mesh"):
|
||||
return self.state.ds_device_mesh
|
||||
elif self.is_fsdp2 and hasattr(self.state, "torch_device_mesh"):
|
||||
return self.state.torch_device_mesh
|
||||
return None
|
||||
|
||||
def _prepare_msamp(self, *args, device_placement):
|
||||
if not is_msamp_available():
|
||||
raise ImportError(
|
||||
@@ -2219,6 +2526,9 @@ class Accelerator:
            return data_loader
        if device_placement is None:
            device_placement = self.device_placement if self.distributed_type != DistributedType.XLA else False

        device_mesh = self._prepare_device_mesh()

        prepared_data_loader = prepare_data_loader(
            data_loader,
            self.device,
@@ -2234,7 +2544,7 @@ class Accelerator:
            data_seed=self.dataloader_config.data_seed,
            non_blocking=self.non_blocking,
            use_stateful_dataloader=self.use_stateful_dataloader,
            torch_device_mesh=self.state.torch_tp_plugin.torch_device_mesh if self.state.torch_tp_plugin else None,
            torch_device_mesh=device_mesh,
        )
        self._dataloaders.append(prepared_data_loader)
        return prepared_data_loader
@@ -2358,6 +2668,9 @@ class Accelerator:
        else:
            loss.backward(**kwargs)

        if hasattr(self, "cp_context"):
            self.cp_context.__exit__(None, None, None)

    def set_trigger(self):
        """
        Sets the internal trigger tensor to 1 on the current process. A latter check should follow using this which
@@ -2482,7 +2795,12 @@ class Accelerator:
            parameters = [p for p in parameters]
            for model in self._models:
                if parameters == [p for p in model.parameters()]:
                    return model.clip_grad_norm_(max_norm, norm_type)
                    if not self.is_fsdp2:
                        return model.clip_grad_norm_(max_norm, norm_type)
                    else:
                        return torch.nn.utils.clip_grad_norm_(
                            parameters, max_norm, norm_type=norm_type
                        )  # viz: https://github.com/pytorch/torchtitan/blob/main/docs/fsdp.md
        elif self.distributed_type == DistributedType.DEEPSPEED:
            # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
            # We cannot return the gradient norm because DeepSpeed does it.
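The FSDP2 branch above falls back to the stock PyTorch utility. As a standalone reference, a minimal sketch of that call on a toy model (the model and dummy loss are illustrative only):

```python
import torch

# Toy model and a dummy backward pass purely for illustration
model = torch.nn.Linear(16, 4)
loss = model(torch.randn(8, 16)).sum()
loss.backward()

# Same utility the FSDP2 branch dispatches to: clips in place and
# returns the total gradient norm measured before clipping
total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0, norm_type=2.0)
print(float(total_norm))
```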
@@ -2813,6 +3131,10 @@ class Accelerator:
                )
            else:
                self.trackers.append(tracker_init(project_name, **init_kwargs.get(str(tracker), {})))

        for tracker in self.trackers:
            tracker.start()

        if config is not None:
            for tracker in self.trackers:
                tracker.store_init_configuration(config)
@@ -3079,7 +3401,7 @@ class Accelerator:

        If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled
        then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater
        than `total_limit` then the oldest save is deleted. Each checkpoint is saved in seperate folders named
        than `total_limit` then the oldest save is deleted. Each checkpoint is saved in separate folders named
        `checkpoint_<iteration>`.

        Otherwise they are just saved to `output_dir`.
@@ -3239,7 +3561,7 @@ class Accelerator:
        self._load_model_state_pre_hook[handle.id] = hook
        return handle

    def load_state(self, input_dir: str = None, **load_model_func_kwargs):
    def load_state(self, input_dir: str = None, load_kwargs: dict | None = None, **load_model_func_kwargs):
        """
        Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.

@@ -3254,6 +3576,9 @@ class Accelerator:
            input_dir (`str` or `os.PathLike`):
                The name of the folder all relevant weights and states were saved in. Can be `None` if
                `automatic_checkpoint_naming` is used, and will pick up from the latest checkpoint.
            load_kwargs (`dict`, *optional*):
                Additional keyword arguments for the underlying `load` function, such as optional arguments for
                state_dict and optimizer on.
            load_model_func_kwargs (`dict`, *optional*):
                Additional keyword arguments for loading model which can be passed to the underlying load function,
                such as optional arguments for DeepSpeed's `load_checkpoint` function or a `map_location` to load the
@@ -3358,6 +3683,7 @@ class Accelerator:
            self.state.process_index,
            self.scaler,
            map_location,
            load_kwargs,
            **load_model_func_kwargs,
        )
        if "step" in override_attributes:
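A short sketch of how the new `load_kwargs` argument might be used from user code; the checkpoint folder name and the `weights_only` flag (forwarded to the underlying `load`/`torch.load` call) are assumptions for illustration, not part of the diff:

```python
from accelerate import Accelerator

accelerator = Accelerator()
# ... model/optimizer/dataloaders prepared and `accelerator.save_state("my_checkpoint")` called earlier ...

# Extra kwargs are passed through to the `load` calls used for optimizer/scheduler/RNG states
accelerator.load_state("my_checkpoint", load_kwargs={"weights_only": True})
```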
@@ -3426,12 +3752,31 @@ class Accelerator:
        """
        return self.free_memory(*objects)

    def _get_named_parameters(self, *args):
    def _get_named_parameters(self, *args, drop_refs=False):
        named_parameters = {}
        accessor_mapping = {}
        for obj in args:
            if isinstance(obj, torch.nn.Module):
                obj = extract_model_from_parallel(obj)
                named_parameters.update({n: p for n, p in obj.named_parameters()})
                if not drop_refs:
                    named_parameters.update({n: p for n, p in obj.named_parameters()})
                    continue

                # we need this bit as `WeightWithDynamic...` returns 0 when `data_ptr()` is called,
                # the underlying pointer is actually hidden in `_tensor` attribute
                if self.fp8_backend == "AO":
                    from torchao.float8.fsdp_utils import WeightWithDynamicFloat8CastTensor

                    accessor_mapping[WeightWithDynamicFloat8CastTensor] = "_tensor"

                named_parameters.update(
                    {
                        n: getattr(p, accessor_mapping[type(p)]).data_ptr()
                        if type(p) in accessor_mapping
                        else p.data_ptr()
                        for n, p in obj.named_parameters()
                    }
                )
        return named_parameters

    def _get_devices(self, *args):
@@ -3479,9 +3824,19 @@ class Accelerator:
        """

        if self.distributed_type == DistributedType.DEEPSPEED:
            if self.deepspeed_config["zero_optimization"]["stage"] == 3:
            zero3_sharding = self.deepspeed_config["zero_optimization"]["stage"] == 3
            tp_sharding = self.deepspeed_config.get("tensor_parallel", {}).get("autotp_size", 0) > 1
            if zero3_sharding or tp_sharding:
                if model.zero_gather_16bit_weights_on_model_save():
                    state_dict = model._zero3_consolidated_16bit_state_dict()
                    if tp_sharding and not compare_versions("deepspeed", ">=", "0.16.4"):
                        raise ImportError(
                            "Deepspeed TP requires deepspeed >= 0.16.4, Please update DeepSpeed via `pip install deepspeed -U`."
                        )
                    state_dict = (
                        model._consolidated_16bit_state_dict()
                        if tp_sharding
                        else model._zero3_consolidated_16bit_state_dict()
                    )
                else:
                    raise ValueError(
                        "Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. "
@@ -3493,6 +3848,12 @@ class Accelerator:
            from deepspeed.checkpoint.utils import clone_tensors_for_torch_save

            state_dict = clone_tensors_for_torch_save(self.unwrap_model(model).state_dict())
        elif self.is_fsdp2:
            from torch.distributed.checkpoint.state_dict import StateDictOptions, get_model_state_dict

            # This hangs if `cpu_offload` is also True
            options = StateDictOptions(full_state_dict=True, broadcast_from_rank0=True)
            state_dict = get_model_state_dict(model, options=options)
        elif self.distributed_type == DistributedType.FSDP:
            from torch.distributed.fsdp import FullStateDictConfig, StateDictType
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
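The FSDP2 branch relies on the `torch.distributed.checkpoint.state_dict` API. A minimal sketch of the same call under `torchrun`, with a plain `nn.Linear` standing in for the sharded model and `gloo` chosen so the sketch stays CPU-only (both are assumptions, and `broadcast_from_rank0` is omitted here for simplicity):

```python
# Launch with e.g.: torchrun --nproc_per_node=2 state_dict_sketch.py
import torch
import torch.distributed as dist
from torch.distributed.checkpoint.state_dict import StateDictOptions, get_model_state_dict

dist.init_process_group(backend="gloo")
model = torch.nn.Linear(16, 4)  # stand-in for the FSDP2-wrapped model

# Ask for a full (unsharded) state dict, as the branch above does
options = StateDictOptions(full_state_dict=True)
state_dict = get_model_state_dict(model, options=options)

if dist.get_rank() == 0:
    print(sorted(state_dict.keys()))
dist.destroy_process_group()
```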
@ -14,9 +14,10 @@
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
from contextlib import contextmanager
|
||||
from functools import wraps
|
||||
from typing import Dict, List, Optional, Union
|
||||
from typing import Optional, Union
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
@ -24,6 +25,7 @@ import torch.nn as nn
|
||||
from .hooks import (
|
||||
AlignDevicesHook,
|
||||
CpuOffload,
|
||||
LayerwiseCastingHook,
|
||||
UserCpuOffloadHook,
|
||||
add_hook_to_module,
|
||||
attach_align_device_hook,
|
||||
@ -48,6 +50,7 @@ from .utils import (
|
||||
parse_flag_from_env,
|
||||
retie_parameters,
|
||||
)
|
||||
from .utils.constants import SUPPORTED_PYTORCH_LAYERS_FOR_UPCASTING
|
||||
from .utils.other import recursive_getattr
|
||||
|
||||
|
||||
@ -171,8 +174,8 @@ def cpu_offload(
|
||||
model: nn.Module,
|
||||
execution_device: Optional[torch.device] = None,
|
||||
offload_buffers: bool = False,
|
||||
state_dict: Optional[Dict[str, torch.Tensor]] = None,
|
||||
preload_module_classes: Optional[List[str]] = None,
|
||||
state_dict: Optional[dict[str, torch.Tensor]] = None,
|
||||
preload_module_classes: Optional[list[str]] = None,
|
||||
):
|
||||
"""
|
||||
Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one
|
||||
@ -262,7 +265,7 @@ def disk_offload(
|
||||
offload_dir: Union[str, os.PathLike],
|
||||
execution_device: Optional[torch.device] = None,
|
||||
offload_buffers: bool = False,
|
||||
preload_module_classes: Optional[List[str]] = None,
|
||||
preload_module_classes: Optional[list[str]] = None,
|
||||
):
|
||||
"""
|
||||
Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as
|
||||
@ -305,14 +308,14 @@ def disk_offload(
|
||||
|
||||
def dispatch_model(
|
||||
model: nn.Module,
|
||||
device_map: Dict[str, Union[str, int, torch.device]],
|
||||
device_map: dict[str, Union[str, int, torch.device]],
|
||||
main_device: Optional[torch.device] = None,
|
||||
state_dict: Optional[Dict[str, torch.Tensor]] = None,
|
||||
state_dict: Optional[dict[str, torch.Tensor]] = None,
|
||||
offload_dir: Optional[Union[str, os.PathLike]] = None,
|
||||
offload_index: Optional[Dict[str, str]] = None,
|
||||
offload_index: Optional[dict[str, str]] = None,
|
||||
offload_buffers: bool = False,
|
||||
skip_keys: Optional[Union[str, List[str]]] = None,
|
||||
preload_module_classes: Optional[List[str]] = None,
|
||||
skip_keys: Optional[Union[str, list[str]]] = None,
|
||||
preload_module_classes: Optional[list[str]] = None,
|
||||
force_hooks: bool = False,
|
||||
):
|
||||
"""
|
||||
@ -495,8 +498,6 @@ def dispatch_model(
|
||||
device = f"sdaa:{device}"
|
||||
elif is_musa_available() and isinstance(device, int):
|
||||
device = f"musa:{device}"
|
||||
elif is_xpu_available() and isinstance(device, int):
|
||||
device = f"xpu:{device}"
|
||||
if device != "disk":
|
||||
model.to(device)
|
||||
else:
|
||||
@ -511,17 +512,19 @@ def dispatch_model(
|
||||
def load_checkpoint_and_dispatch(
|
||||
model: nn.Module,
|
||||
checkpoint: Union[str, os.PathLike],
|
||||
device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None,
|
||||
max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
|
||||
no_split_module_classes: Optional[List[str]] = None,
|
||||
device_map: Optional[Union[str, dict[str, Union[int, str, torch.device]]]] = None,
|
||||
max_memory: Optional[dict[Union[int, str], Union[int, str]]] = None,
|
||||
no_split_module_classes: Optional[list[str]] = None,
|
||||
offload_folder: Optional[Union[str, os.PathLike]] = None,
|
||||
offload_buffers: bool = False,
|
||||
dtype: Optional[Union[str, torch.dtype]] = None,
|
||||
offload_state_dict: Optional[bool] = None,
|
||||
skip_keys: Optional[Union[str, List[str]]] = None,
|
||||
preload_module_classes: Optional[List[str]] = None,
|
||||
skip_keys: Optional[Union[str, list[str]]] = None,
|
||||
preload_module_classes: Optional[list[str]] = None,
|
||||
force_hooks: bool = False,
|
||||
strict: bool = False,
|
||||
full_state_dict: bool = True,
|
||||
broadcast_from_rank0: bool = False,
|
||||
):
|
||||
"""
|
||||
Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
|
||||
@ -571,6 +574,12 @@ def load_checkpoint_and_dispatch(
|
||||
strict (`bool`, *optional*, defaults to `False`):
|
||||
Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's
|
||||
state_dict.
|
||||
full_state_dict (`bool`, *optional*, defaults to `True`): if this is set to `True`, all the tensors in the
|
||||
loaded state_dict will be gathered. No ShardedTensor and DTensor will be in the loaded state_dict.
|
||||
broadcast_from_rank0 (`False`, *optional*, defaults to `False`): when the option is `True`, a distributed
|
||||
`ProcessGroup` must be initialized. rank0 should receive a full state_dict and will broadcast the tensors
|
||||
in the state_dict one by one to other ranks. Other ranks will receive the tensors and shard (if applicable)
|
||||
according to the local shards in the model.
|
||||
|
||||
Example:
|
||||
|
||||
@ -596,8 +605,7 @@ def load_checkpoint_and_dispatch(
|
||||
"""
|
||||
if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
|
||||
raise ValueError(
|
||||
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
|
||||
"'sequential'."
|
||||
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or 'sequential'."
|
||||
)
|
||||
if isinstance(device_map, str):
|
||||
if device_map != "sequential":
|
||||
@ -626,6 +634,8 @@ def load_checkpoint_and_dispatch(
|
||||
offload_state_dict=offload_state_dict,
|
||||
offload_buffers=offload_buffers,
|
||||
strict=strict,
|
||||
full_state_dict=full_state_dict,
|
||||
broadcast_from_rank0=broadcast_from_rank0,
|
||||
)
|
||||
if device_map is None:
|
||||
return model
|
||||
@ -638,3 +648,102 @@ def load_checkpoint_and_dispatch(
|
||||
preload_module_classes=preload_module_classes,
|
||||
force_hooks=force_hooks,
|
||||
)
|
||||
|
||||
|
||||
def attach_layerwise_casting_hooks(
|
||||
module: torch.nn.Module,
|
||||
storage_dtype: torch.dtype,
|
||||
compute_dtype: torch.dtype,
|
||||
skip_modules_pattern: Union[str, tuple[str, ...]] = None,
|
||||
skip_modules_classes: Optional[tuple[type[torch.nn.Module], ...]] = None,
|
||||
non_blocking: bool = False,
|
||||
) -> None:
|
||||
r"""
|
||||
Applies layerwise casting to a given module. The module expected here is a PyTorch `nn.Module`. This is helpful for
|
||||
reducing memory requirements when one doesn't want to fully quantize a model. Model params can be kept in say,
|
||||
`torch.float8_e4m3fn` and upcasted to a higher precision like `torch.bfloat16` during forward pass and downcasted
|
||||
back to `torch.float8_e4m3fn` to realize memory savings.
|
||||
|
||||
Args:
|
||||
module (`torch.nn.Module`):
|
||||
The module whose leaf modules will be cast to a high precision dtype for computation, and to a low
|
||||
precision dtype for storage.
|
||||
storage_dtype (`torch.dtype`):
|
||||
The dtype to cast the module to before/after the forward pass for storage.
|
||||
compute_dtype (`torch.dtype`):
|
||||
The dtype to cast the module to during the forward pass for computation.
|
||||
skip_modules_pattern (`tuple[str, ...]`, defaults to `None`):
|
||||
A list of patterns to match the names of the modules to skip during the layerwise casting process. If set
|
||||
to `None` alongside `skip_modules_classes` being `None`, the layerwise casting is applied directly to the
|
||||
module instead of its internal submodules.
|
||||
skip_modules_classes (`tuple[type[torch.nn.Module], ...]`, defaults to `None`):
|
||||
A list of module classes to skip during the layerwise casting process.
|
||||
non_blocking (`bool`, defaults to `False`):
|
||||
If `True`, the weight casting operations are non-blocking.
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> from accelerate.hooks import attach_layerwise_casting_hooks
|
||||
>>> from transformers import AutoModelForCausalLM
|
||||
>>> import torch
|
||||
|
||||
>>> # Model
|
||||
>>> checkpoint = "EleutherAI/gpt-j-6B"
|
||||
>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
|
||||
|
||||
>>> # Attach hooks and perform inference
|
||||
>>> attach_layerwise_casting_hooks(model, storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16)
|
||||
>>> with torch.no_grad():
|
||||
... model(...)
|
||||
```
|
||||
|
||||
Users can also pass modules they want to avoid from getting downcasted.
|
||||
|
||||
```py
|
||||
>>> attach_layerwise_casting_hooks(
|
||||
... model, storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16, skip_modules_pattern=["norm"]
|
||||
... )
|
||||
```
|
||||
"""
|
||||
_attach_layerwise_casting_hooks(
|
||||
module, storage_dtype, compute_dtype, skip_modules_pattern, skip_modules_classes, non_blocking
|
||||
)
|
||||
|
||||
|
||||
def _attach_layerwise_casting_hooks(
|
||||
module: torch.nn.Module,
|
||||
storage_dtype: torch.dtype,
|
||||
compute_dtype: torch.dtype,
|
||||
skip_modules_pattern: Union[str, tuple[str, ...]] = None,
|
||||
skip_modules_classes: Optional[tuple[type[torch.nn.Module], ...]] = None,
|
||||
non_blocking: bool = False,
|
||||
_prefix: str = "",
|
||||
):
|
||||
should_skip = (skip_modules_classes is not None and isinstance(module, skip_modules_classes)) or (
|
||||
skip_modules_pattern is not None and any(re.search(pattern, _prefix) for pattern in skip_modules_pattern)
|
||||
)
|
||||
if should_skip:
|
||||
logger.debug(f'Skipping layerwise casting for layer "{_prefix}"')
|
||||
return
|
||||
|
||||
if isinstance(module, SUPPORTED_PYTORCH_LAYERS_FOR_UPCASTING):
|
||||
logger.debug(f'Applying layerwise casting to layer "{_prefix}"')
|
||||
add_hook_to_module(
|
||||
module,
|
||||
LayerwiseCastingHook(storage_dtype=storage_dtype, compute_dtype=compute_dtype, non_blocking=non_blocking),
|
||||
append=True,
|
||||
)
|
||||
return
|
||||
|
||||
for name, submodule in module.named_children():
|
||||
layer_name = f"{_prefix}.{name}" if _prefix else name
|
||||
_attach_layerwise_casting_hooks(
|
||||
submodule,
|
||||
storage_dtype,
|
||||
compute_dtype,
|
||||
skip_modules_pattern,
|
||||
skip_modules_classes,
|
||||
non_blocking,
|
||||
_prefix=layer_name,
|
||||
)
|
||||
|
@ -14,12 +14,10 @@
|
||||
|
||||
import random
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from safetensors.torch import load_model
|
||||
from torch.cuda.amp import GradScaler
|
||||
|
||||
from .utils import (
|
||||
MODEL_NAME,
|
||||
@ -37,6 +35,7 @@ from .utils import (
|
||||
is_mlu_available,
|
||||
is_musa_available,
|
||||
is_sdaa_available,
|
||||
is_torch_version,
|
||||
is_torch_xla_available,
|
||||
is_xpu_available,
|
||||
load,
|
||||
@ -44,6 +43,11 @@ from .utils import (
|
||||
)
|
||||
|
||||
|
||||
if is_torch_version(">=", "2.4.0"):
|
||||
from torch.amp import GradScaler
|
||||
else:
|
||||
from torch.cuda.amp import GradScaler
|
||||
|
||||
if is_torch_xla_available():
|
||||
import torch_xla.core.xla_model as xm
|
||||
|
||||
@ -56,7 +60,7 @@ logger = get_logger(__name__)
|
||||
|
||||
def save_accelerator_state(
|
||||
output_dir: str,
|
||||
model_states: List[dict],
|
||||
model_states: list[dict],
|
||||
optimizers: list,
|
||||
schedulers: list,
|
||||
dataloaders: list,
|
||||
@ -181,6 +185,7 @@ def load_accelerator_state(
|
||||
process_index,
|
||||
scaler=None,
|
||||
map_location=None,
|
||||
load_kwargs=None,
|
||||
**load_model_func_kwargs,
|
||||
):
|
||||
"""
|
||||
@ -201,6 +206,8 @@ def load_accelerator_state(
|
||||
An optional *GradScaler* instance to load
|
||||
map_location (`str`, *optional*):
|
||||
What device to load the optimizer state onto. Should be one of either "cpu" or "on_device".
|
||||
load_kwargs (`dict`, *optional*):
|
||||
Additional arguments that can be passed to the `load` function.
|
||||
load_model_func_kwargs (`dict`, *optional*):
|
||||
Additional arguments that can be passed to the model's `load_state_dict` method.
|
||||
|
||||
@ -218,6 +225,9 @@ def load_accelerator_state(
|
||||
elif map_location == "on_device":
|
||||
map_location = PartialState().device
|
||||
|
||||
if load_kwargs is None:
|
||||
load_kwargs = {}
|
||||
|
||||
input_dir = Path(input_dir)
|
||||
# Model states
|
||||
for i, model in enumerate(models):
|
||||
@ -236,7 +246,7 @@ def load_accelerator_state(
|
||||
for i, opt in enumerate(optimizers):
|
||||
optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
|
||||
input_optimizer_file = input_dir.joinpath(optimizer_name)
|
||||
optimizer_state = load(input_optimizer_file, map_location=map_location)
|
||||
optimizer_state = load(input_optimizer_file, map_location=map_location, **load_kwargs)
|
||||
optimizers[i].load_state_dict(optimizer_state)
|
||||
logger.info("All optimizer states loaded successfully")
|
||||
|
||||
@ -244,7 +254,7 @@ def load_accelerator_state(
|
||||
for i, scheduler in enumerate(schedulers):
|
||||
scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
|
||||
input_scheduler_file = input_dir.joinpath(scheduler_name)
|
||||
scheduler_state = load(input_scheduler_file)
|
||||
scheduler_state = load(input_scheduler_file, **load_kwargs)
|
||||
scheduler.load_state_dict(scheduler_state)
|
||||
logger.info("All scheduler states loaded successfully")
|
||||
|
||||
@ -262,7 +272,7 @@ def load_accelerator_state(
|
||||
dataloader_state_dict_name = "dl_state_dict.bin" if i == 0 else f"dl_state_dict_{i}.bin"
|
||||
input_dataloader_state_dict_file = input_dir.joinpath(dataloader_state_dict_name)
|
||||
if input_dataloader_state_dict_file.exists():
|
||||
state_dict = load(input_dataloader_state_dict_file)
|
||||
state_dict = load(input_dataloader_state_dict_file, **load_kwargs)
|
||||
dataloader.load_state_dict(state_dict)
|
||||
logger.info("All dataloader sampler states loaded successfully")
|
||||
|
||||
|
@ -20,6 +20,7 @@ from accelerate.commands.estimate import estimate_command_parser
|
||||
from accelerate.commands.launch import launch_command_parser
|
||||
from accelerate.commands.merge import merge_command_parser
|
||||
from accelerate.commands.test import test_command_parser
|
||||
from accelerate.commands.to_fsdp2 import to_fsdp2_command_parser
|
||||
from accelerate.commands.tpu import tpu_command_parser
|
||||
from accelerate.commands.utils import CustomArgumentParser
|
||||
|
||||
@ -36,6 +37,7 @@ def main():
|
||||
merge_command_parser(subparsers=subparsers)
|
||||
tpu_command_parser(subparsers=subparsers)
|
||||
test_command_parser(subparsers=subparsers)
|
||||
to_fsdp2_command_parser(subparsers=subparsers)
|
||||
|
||||
# Let's go
|
||||
args = parser.parse_args()
|
||||
|
@ -21,6 +21,7 @@ from ...utils import (
|
||||
DistributedType,
|
||||
is_deepspeed_available,
|
||||
is_fp8_available,
|
||||
is_hpu_available,
|
||||
is_mlu_available,
|
||||
is_mps_available,
|
||||
is_msamp_available,
|
||||
@ -33,6 +34,7 @@ from ...utils import (
|
||||
)
|
||||
from ...utils.constants import (
|
||||
DEEPSPEED_MULTINODE_LAUNCHERS,
|
||||
FSDP2_STATE_DICT_TYPE,
|
||||
FSDP_AUTO_WRAP_POLICY,
|
||||
FSDP_BACKWARD_PREFETCH,
|
||||
FSDP_SHARDING_STRATEGY,
|
||||
@ -59,6 +61,7 @@ def get_cluster_input():
|
||||
"No distributed training",
|
||||
"multi-CPU",
|
||||
"multi-XPU",
|
||||
"multi-HPU",
|
||||
"multi-GPU",
|
||||
"multi-NPU",
|
||||
"multi-MLU",
|
||||
@ -87,6 +90,7 @@ def get_cluster_input():
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.MULTI_CPU,
|
||||
DistributedType.MULTI_HPU,
|
||||
]:
|
||||
num_machines = _ask_field(
|
||||
"How many different machines will you use (use more than 1 for multi-node training)? [1]: ",
|
||||
@ -137,13 +141,15 @@ def get_cluster_input():
|
||||
|
||||
ipex_config = {}
|
||||
mpirun_config = {}
|
||||
if use_cpu:
|
||||
if use_cpu or is_xpu_available():
|
||||
ipex_config["ipex"] = _ask_field(
|
||||
"Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:",
|
||||
"Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU/XPU? [yes/NO]:",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
if use_cpu:
|
||||
if distributed_type == DistributedType.MULTI_CPU:
|
||||
use_mpirun = _ask_field(
|
||||
"Do you want accelerate to launch mpirun? [yes/NO]: ",
|
||||
@ -159,25 +165,6 @@ def get_cluster_input():
|
||||
)
|
||||
mpirun_config["mpirun_hostfile"] = os.path.expanduser(mpirun_hostfile.strip())
|
||||
mpirun_config["mpirun_ccl"] = _ask_field("Enter the number of oneCCL worker threads [1]: ", default=1)
|
||||
if (
|
||||
not use_cpu
|
||||
and is_xpu_available()
|
||||
and distributed_type
|
||||
not in [
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_SDAA,
|
||||
DistributedType.XLA,
|
||||
DistributedType.MULTI_MUSA,
|
||||
]
|
||||
):
|
||||
ipex_config["use_xpu"] = _ask_field(
|
||||
"Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
dynamo_config = {}
|
||||
use_dynamo = _ask_field(
|
||||
@ -220,6 +207,12 @@ def get_cluster_input():
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
dynamo_config[prefix + "use_regional_compilation"] = _ask_field(
|
||||
"Do you want to enable regional compilation? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
use_mps = not use_cpu and is_mps_available()
|
||||
deepspeed_config = {}
|
||||
@ -228,6 +221,7 @@ def get_cluster_input():
|
||||
in [
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.MULTI_HPU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_SDAA,
|
||||
@ -244,9 +238,9 @@ def get_cluster_input():
|
||||
)
|
||||
if use_deepspeed:
|
||||
distributed_type = DistributedType.DEEPSPEED
|
||||
assert (
|
||||
is_deepspeed_available()
|
||||
), "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source"
|
||||
assert is_deepspeed_available(), (
|
||||
"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source"
|
||||
)
|
||||
|
||||
if distributed_type == DistributedType.DEEPSPEED:
|
||||
use_deepspeed_config = _ask_field(
|
||||
@ -381,7 +375,7 @@ def get_cluster_input():
|
||||
)
|
||||
|
||||
fsdp_config = {}
|
||||
tp_config = {}
|
||||
|
||||
if distributed_type in [
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_NPU,
|
||||
@ -389,6 +383,7 @@ def get_cluster_input():
|
||||
DistributedType.MULTI_SDAA,
|
||||
DistributedType.MULTI_MUSA,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.MULTI_HPU,
|
||||
]:
|
||||
use_fsdp = _ask_field(
|
||||
"Do you want to use FullyShardedDataParallel? [yes/NO]: ",
|
||||
@ -399,18 +394,36 @@ def get_cluster_input():
|
||||
if use_fsdp:
|
||||
distributed_type = DistributedType.FSDP
|
||||
if distributed_type == DistributedType.FSDP:
|
||||
sharding_strategy_query = "What should be your sharding strategy?"
|
||||
fsdp_config["fsdp_sharding_strategy"] = _ask_options(
|
||||
sharding_strategy_query,
|
||||
FSDP_SHARDING_STRATEGY,
|
||||
lambda x: FSDP_SHARDING_STRATEGY[int(x)],
|
||||
fsdp_config["fsdp_version"] = _ask_options(
|
||||
"What should be your FSDP version? [2]: ",
|
||||
[1, 2],
|
||||
lambda x: int(x) + 1,
|
||||
default=1,
|
||||
)
|
||||
fsdp_version = fsdp_config["fsdp_version"] # extract to a variable to simplify usage later
|
||||
|
||||
if fsdp_version == 1:
|
||||
sharding_strategy_query = "What should be your sharding strategy?"
|
||||
fsdp_config["fsdp_reshard_after_forward"] = _ask_options(
|
||||
sharding_strategy_query,
|
||||
FSDP_SHARDING_STRATEGY,
|
||||
lambda x: FSDP_SHARDING_STRATEGY[int(x)],
|
||||
)
|
||||
else:
|
||||
fsdp_config["fsdp_reshard_after_forward"] = _ask_field(
|
||||
"Do you want to enable resharding after forward? [YES/no]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=True,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
fsdp_config["fsdp_offload_params"] = _ask_field(
|
||||
"Do you want to offload parameters and gradients to CPU? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
fsdp_wrap_query = "What should be your auto wrap policy?"
|
||||
fsdp_config["fsdp_auto_wrap_policy"] = _ask_options(
|
||||
fsdp_wrap_query,
|
||||
@ -436,67 +449,78 @@ def get_cluster_input():
|
||||
int,
|
||||
default=100000000,
|
||||
)
|
||||
fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?"
|
||||
fsdp_config["fsdp_backward_prefetch"] = _ask_options(
|
||||
fsdp_backward_prefetch_query,
|
||||
FSDP_BACKWARD_PREFETCH,
|
||||
lambda x: FSDP_BACKWARD_PREFETCH[int(x)],
|
||||
)
|
||||
# Removed in FSDP2, ask for user input for FSDP1
|
||||
if fsdp_version == 1:
|
||||
fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?"
|
||||
fsdp_config["fsdp_backward_prefetch"] = _ask_options(
|
||||
fsdp_backward_prefetch_query,
|
||||
FSDP_BACKWARD_PREFETCH,
|
||||
lambda x: FSDP_BACKWARD_PREFETCH[int(x)],
|
||||
)
|
||||
|
||||
fsdp_state_dict_type_query = "What should be your FSDP's state dict type?"
|
||||
fsdp_config["fsdp_state_dict_type"] = _ask_options(
|
||||
fsdp_state_dict_type_query,
|
||||
FSDP_STATE_DICT_TYPE,
|
||||
lambda x: FSDP_STATE_DICT_TYPE[int(x)],
|
||||
default=2,
|
||||
)
|
||||
fsdp_config["fsdp_forward_prefetch"] = _ask_field(
|
||||
"Do you want to enable FSDP's forward prefetch policy? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
fsdp_config["fsdp_use_orig_params"] = _ask_field(
|
||||
"Do you want to enable FSDP's `use_orig_params` feature? [YES/no]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=True,
|
||||
error_message="Please enter yes or no.",
|
||||
FSDP_STATE_DICT_TYPE if fsdp_version == 1 else FSDP2_STATE_DICT_TYPE,
|
||||
lambda x: FSDP_STATE_DICT_TYPE[int(x)] if fsdp_version == 1 else FSDP2_STATE_DICT_TYPE[int(x)],
|
||||
default=0,
|
||||
)
|
||||
# Not implemented in FSDP2, ask for user input for FSDP1
|
||||
if fsdp_version == 1:
|
||||
fsdp_config["fsdp_forward_prefetch"] = _ask_field(
|
||||
"Do you want to enable FSDP's forward prefetch policy? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
# Obsolete in FSDP2, ask for user input for FSDP1
|
||||
if fsdp_version == 1:
|
||||
fsdp_config["fsdp_use_orig_params"] = _ask_field(
|
||||
"Do you want to enable FSDP's `use_orig_params` feature? [YES/no]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=True,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
fsdp_config["fsdp_cpu_ram_efficient_loading"] = _ask_field(
|
||||
"Do you want to enable CPU RAM efficient model loading? Only applicable for 🤗 Transformers models. [YES/no]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=True,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if fsdp_config["fsdp_cpu_ram_efficient_loading"]:
|
||||
fsdp_config["fsdp_sync_module_states"] = True
|
||||
else:
|
||||
fsdp_config["fsdp_sync_module_states"] = _ask_field(
|
||||
"Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=True,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
# Obsolete in FSDP2, ask for user input for FSDP1
|
||||
if fsdp_version == 1:
|
||||
if fsdp_config["fsdp_cpu_ram_efficient_loading"]:
|
||||
fsdp_config["fsdp_sync_module_states"] = True
|
||||
else:
|
||||
fsdp_config["fsdp_sync_module_states"] = _ask_field(
|
||||
"Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=True,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
fsdp_config["fsdp_activation_checkpointing"] = _ask_field(
|
||||
"Do you want to enable FSDP activation checkpointing? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if not use_fsdp:
|
||||
use_tp = _ask_field(
|
||||
"Do you want to use TensorParallel? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if use_tp:
|
||||
distributed_type = DistributedType.TP
|
||||
if distributed_type == DistributedType.TP:
|
||||
tp_config["tp_size"] = _ask_field(
|
||||
"What should be your Tensor Parallel degree? [1]: ",
|
||||
|
||||
if fsdp_version == 2:
|
||||
fsdp_config["fsdp_cp_size"] = _ask_field(
|
||||
"What should be your FSDP's context parallel size? (Input 1 or leave blank for no context parallel) [1]: ",
|
||||
int,
|
||||
default=1,
|
||||
error_message="Please enter an integer.",
|
||||
)
|
||||
|
||||
if fsdp_version == 2 and fsdp_config.get("fsdp_cp_size", 1) != 1:
|
||||
fsdp_config["fsdp_cp_comm_strategy"] = _ask_options(
|
||||
"What should be your FSDP's context parallel communication strategy? [allgather]: ",
|
||||
["allgather", "alltoall"],
|
||||
lambda x: ["allgather", "alltoall"][int(x)],
|
||||
default=0,
|
||||
)
|
||||
|
||||
megatron_lm_config = {}
|
||||
if distributed_type in [DistributedType.MULTI_GPU]:
|
||||
use_megatron_lm = _ask_field(
|
||||
@ -571,6 +595,7 @@ def get_cluster_input():
|
||||
if distributed_type in [
|
||||
DistributedType.MULTI_CPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.MULTI_HPU,
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_SDAA,
|
||||
@ -615,6 +640,7 @@ def get_cluster_input():
|
||||
DistributedType.MULTI_MUSA,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.MULTI_HPU,
|
||||
DistributedType.NO,
|
||||
]
|
||||
and not use_cpu
|
||||
@ -630,10 +656,12 @@ def get_cluster_input():
|
||||
machine_type = "MUSA(s)"
|
||||
elif is_xpu_available():
|
||||
machine_type = "XPU(s)"
|
||||
elif is_hpu_available():
|
||||
machine_type = "HPU(s)"
|
||||
else:
|
||||
machine_type = "GPU(s)"
|
||||
gpu_ids = _ask_field(
|
||||
f"What {machine_type} (by id) should be used for training on this machine as a comma-seperated list? [all]:",
|
||||
f"What {machine_type} (by id) should be used for training on this machine as a comma-separated list? [all]:",
|
||||
default="all",
|
||||
)
|
||||
|
||||
@ -697,7 +725,7 @@ def get_cluster_input():
|
||||
)
|
||||
tpu_command_file = os.path.abspath(tpu_command_file)
|
||||
else:
|
||||
print("Please enter each command seperately you wish to run on startup in each pod.")
|
||||
print("Please enter each command separately you wish to run on startup in each pod.")
|
||||
tpu_commands = []
|
||||
another_command = True
|
||||
while another_command:
|
||||
@ -715,11 +743,11 @@ def get_cluster_input():
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
tpu_vm = _ask_field(
|
||||
"If not using an instance group, what are the names of the Compute VM instances to be used, seperated by a comma: ",
|
||||
"If not using an instance group, what are the names of the Compute VM instances to be used, separated by a comma: ",
|
||||
default="",
|
||||
).split(",")
|
||||
tpu_env = _ask_field(
|
||||
"What environment variables do you wish to set in each pod, seperated by a comma: ",
|
||||
"What environment variables do you wish to set in each pod, separated by a comma: ",
|
||||
default="",
|
||||
).split(",")
|
||||
|
||||
@ -799,6 +827,8 @@ def get_cluster_input():
|
||||
default=False,
|
||||
)
|
||||
fp8_config["override_linear_precision"] = (fprop, dgrad, wgrad)
|
||||
else:
|
||||
fp8_config["override_linear_precision"] = (False, False, False)
|
||||
|
||||
elif fp8_config["backend"] == "MSAMP":
|
||||
if not is_msamp_available():
|
||||
@ -835,7 +865,6 @@ def get_cluster_input():
|
||||
fp8_config=fp8_config,
|
||||
deepspeed_config=deepspeed_config,
|
||||
fsdp_config=fsdp_config,
|
||||
tp_config=tp_config,
|
||||
megatron_lm_config=megatron_lm_config,
|
||||
ipex_config=ipex_config,
|
||||
mpirun_config=mpirun_config,
|
||||
|
@ -18,7 +18,7 @@ import json
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from typing import List, Optional, Union
|
||||
from typing import Optional, Union
|
||||
|
||||
import yaml
|
||||
|
||||
@ -194,8 +194,6 @@ class ClusterConfig(BaseConfig):
|
||||
deepspeed_config: dict = None
|
||||
# args for fsdp
|
||||
fsdp_config: dict = None
|
||||
# args for tp
|
||||
tp_config: dict = None
|
||||
# args for megatron_lm
|
||||
megatron_lm_config: dict = None
|
||||
# args for ipex
|
||||
@ -211,9 +209,9 @@ class ClusterConfig(BaseConfig):
|
||||
tpu_use_cluster: bool = False
|
||||
tpu_use_sudo: bool = False
|
||||
command_file: str = None
|
||||
commands: List[str] = None
|
||||
tpu_vm: List[str] = None
|
||||
tpu_env: List[str] = None
|
||||
commands: list[str] = None
|
||||
tpu_vm: list[str] = None
|
||||
tpu_env: list[str] = None
|
||||
|
||||
# args for dynamo
|
||||
dynamo_config: dict = None
|
||||
@ -223,8 +221,6 @@ class ClusterConfig(BaseConfig):
|
||||
self.deepspeed_config = {}
|
||||
if self.fsdp_config is None:
|
||||
self.fsdp_config = {}
|
||||
if self.tp_config is None:
|
||||
self.tp_config = {}
|
||||
if self.megatron_lm_config is None:
|
||||
self.megatron_lm_config = {}
|
||||
if self.ipex_config is None:
|
||||
|
@ -72,9 +72,18 @@ def _convert_compute_environment(value):
|
||||
def _convert_distributed_mode(value):
|
||||
value = int(value)
|
||||
return DistributedType(
|
||||
["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "MULTI_MLU", "MULTI_SDAA", "MULTI_MUSA", "XLA"][
|
||||
value
|
||||
]
|
||||
[
|
||||
"NO",
|
||||
"MULTI_CPU",
|
||||
"MULTI_XPU",
|
||||
"MULTI_HPU",
|
||||
"MULTI_GPU",
|
||||
"MULTI_NPU",
|
||||
"MULTI_MLU",
|
||||
"MULTI_SDAA",
|
||||
"MULTI_MUSA",
|
||||
"XLA",
|
||||
][value]
|
||||
)
|
||||
|
||||
|
||||
|
@ -33,7 +33,7 @@ from .config_utils import SubcommandHelpFormatter
|
||||
description = "Create a default config file for Accelerate with only a few flags set."
|
||||
|
||||
|
||||
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
|
||||
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file):
|
||||
"""
|
||||
Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also
|
||||
set CPU if it is a CPU-only machine.
|
||||
@ -43,10 +43,8 @@ def write_basic_config(mixed_precision="no", save_location: str = default_json_c
|
||||
Mixed Precision to use. Should be one of "no", "fp16", or "bf16"
|
||||
save_location (`str`, *optional*, defaults to `default_json_config_file`):
|
||||
Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default
|
||||
location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overriden by setting
|
||||
location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overridden by setting
|
||||
the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`.
|
||||
use_xpu (`bool`, *optional*, defaults to `False`):
|
||||
Whether to use XPU if available.
|
||||
"""
|
||||
path = Path(save_location)
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
@ -104,7 +102,7 @@ def write_basic_config(mixed_precision="no", save_location: str = default_json_c
|
||||
config["distributed_type"] = "MULTI_GPU"
|
||||
else:
|
||||
config["distributed_type"] = "NO"
|
||||
elif is_xpu_available() and use_xpu:
|
||||
elif is_xpu_available():
|
||||
num_xpus = torch.xpu.device_count()
|
||||
config["num_processes"] = num_xpus
|
||||
config["use_cpu"] = False
|
||||
|
@ -212,6 +212,13 @@ def get_sagemaker_input():
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
dynamo_config[prefix + "use_regional_compilation"] = _ask_field(
|
||||
"Do you want to enable regional compilation? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
ec2_instance_query = "Which EC2 instance type you want to use for your training?"
|
||||
if distributed_type != SageMakerDistributedType.NO:
|
||||
ec2_instance_type = _ask_options(
|
||||
|
@ -53,6 +53,20 @@ def env_command(args):
|
||||
pt_musa_available = is_musa_available()
|
||||
pt_npu_available = is_npu_available()
|
||||
|
||||
accelerator = "N/A"
|
||||
if pt_cuda_available:
|
||||
accelerator = "CUDA"
|
||||
elif pt_xpu_available:
|
||||
accelerator = "XPU"
|
||||
elif pt_mlu_available:
|
||||
accelerator = "MLU"
|
||||
elif pt_sdaa_available:
|
||||
accelerator = "SDAA"
|
||||
elif pt_musa_available:
|
||||
accelerator = "MUSA"
|
||||
elif pt_npu_available:
|
||||
accelerator = "NPU"
|
||||
|
||||
accelerate_config = "Not found"
|
||||
# Get the default from the config file.
|
||||
if args.config_file is not None or os.path.isfile(default_config_file):
|
||||
@ -73,23 +87,21 @@ def env_command(args):
|
||||
"`accelerate` bash location": bash_location,
|
||||
"Python version": platform.python_version(),
|
||||
"Numpy version": np.__version__,
|
||||
"PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
|
||||
"PyTorch XPU available": str(pt_xpu_available),
|
||||
"PyTorch NPU available": str(pt_npu_available),
|
||||
"PyTorch MLU available": str(pt_mlu_available),
|
||||
"PyTorch SDAA available": str(pt_sdaa_available),
|
||||
"PyTorch MUSA available": str(pt_musa_available),
|
||||
"System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
|
||||
"PyTorch version": f"{pt_version}",
|
||||
"PyTorch accelerator": accelerator,
|
||||
"System RAM": f"{psutil.virtual_memory().total / 1024**3:.2f} GB",
|
||||
}
|
||||
if pt_cuda_available:
|
||||
info["GPU type"] = torch.cuda.get_device_name()
|
||||
if pt_mlu_available:
|
||||
elif pt_xpu_available:
|
||||
info["XPU type"] = torch.xpu.get_device_name()
|
||||
elif pt_mlu_available:
|
||||
info["MLU type"] = torch.mlu.get_device_name()
|
||||
if pt_sdaa_available:
|
||||
elif pt_sdaa_available:
|
||||
info["SDAA type"] = torch.sdaa.get_device_name()
|
||||
if pt_musa_available:
|
||||
elif pt_musa_available:
|
||||
info["MUSA type"] = torch.musa.get_device_name()
|
||||
if pt_npu_available:
|
||||
elif pt_npu_available:
|
||||
info["CANN version"] = torch.version.cann
|
||||
|
||||
print("\nCopy-and-paste the text below in your GitHub issue\n")
|
||||
|
@ -175,7 +175,7 @@ def create_ascii_table(headers: list, rows: list, title: str):
|
||||
for i, line in enumerate(rows):
|
||||
centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)]
|
||||
table += f"{pattern % tuple(centered_line)}\n"
|
||||
table += f'└{"┴".join([in_between * n for n in column_widths])}┘'
|
||||
table += f"└{'┴'.join([in_between * n for n in column_widths])}┘"
|
||||
|
||||
return table
|
||||
|
||||
|
@ -75,7 +75,6 @@ options_to_group = {
|
||||
"tpu": "TPU",
|
||||
"use_deepspeed": "DeepSpeed Arguments",
|
||||
"use_fsdp": "FSDP Arguments",
|
||||
"use_tp": "PyTorch TP Arguments",
|
||||
"use_megatron_lm": "Megatron-LM Arguments",
|
||||
"fp8_backend": "FP8 Arguments",
|
||||
}
|
||||
@ -247,6 +246,12 @@ def launch_command_parser(subparsers=None):
|
||||
action="store_true",
|
||||
help="Whether to enable dynamic shape tracing.",
|
||||
)
|
||||
resource_args.add_argument(
|
||||
"--dynamo_use_regional_compilation",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="Whether to enable regional compilation.",
|
||||
)
|
||||
|
||||
# Training Paradigm arguments
|
||||
paradigm_args = parser.add_argument_group(
|
||||
@ -264,23 +269,18 @@ def launch_command_parser(subparsers=None):
|
||||
action="store_true",
|
||||
help="Whether to use fsdp.",
|
||||
)
|
||||
paradigm_args.add_argument(
|
||||
"--use_tp",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="Whether to use PyTorch TP.",
|
||||
)
|
||||
paradigm_args.add_argument(
|
||||
"--use_megatron_lm",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="Whether to use Megatron-LM.",
|
||||
)
|
||||
|
||||
paradigm_args.add_argument(
|
||||
"--use_xpu",
|
||||
default=False,
|
||||
default=None,
|
||||
action="store_true",
|
||||
help="Whether to use IPEX plugin to speed up training on XPU specifically.",
|
||||
help="Whether to use IPEX plugin to speed up training on XPU specifically. This argument is deprecated and ignored, will be removed in Accelerate v1.20.",
|
||||
)
|
||||
|
||||
# distributed GPU training arguments
|
||||
@ -288,7 +288,7 @@ def launch_command_parser(subparsers=None):
|
||||
distributed_args.add_argument(
|
||||
"--gpu_ids",
|
||||
default=None,
|
||||
help="What GPUs (by id) should be used for training on this machine as a comma-seperated list",
|
||||
help="What GPUs (by id) should be used for training on this machine as a comma-separated list",
|
||||
)
|
||||
distributed_args.add_argument(
|
||||
"--same_network",
|
||||
@ -506,7 +506,7 @@ def launch_command_parser(subparsers=None):
|
||||
"--deepspeed_multinode_launcher",
|
||||
default=None,
|
||||
type=str,
|
||||
help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.",
|
||||
help="DeepSpeed multi-node launcher to use, e.g. `pdsh`, `standard`, `openmpi`, `mvapich`, `mpich`, `slurm`, `nossh` (requires DeepSpeed >= 0.14.5). If unspecified, will default to `pdsh`.",
|
||||
)
|
||||
deepspeed_args.add_argument(
|
||||
"--deepspeed_moe_layer_cls_names",
|
||||
@ -518,6 +518,13 @@ def launch_command_parser(subparsers=None):
|
||||
|
||||
# fsdp arguments
|
||||
fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Shared Data Parallelism.")
|
||||
fsdp_args.add_argument(
|
||||
"--fsdp_version",
|
||||
type=str,
|
||||
default="1",
|
||||
choices=["1", "2"],
|
||||
help="FSDP version to use. (useful only when `use_fsdp` flag is passed).",
|
||||
)
|
||||
fsdp_args.add_argument(
|
||||
"--fsdp_offload_params",
|
||||
default="false",
|
||||
@ -530,11 +537,18 @@ def launch_command_parser(subparsers=None):
|
||||
default=1e8,
|
||||
help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).",
|
||||
)
|
||||
# We enable this for backwards compatibility, throw a warning if this is set in `FullyShardedDataParallelPlugin`
|
||||
fsdp_args.add_argument(
|
||||
"--fsdp_sharding_strategy",
|
||||
type=str,
|
||||
default="FULL_SHARD",
|
||||
help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).",
|
||||
help="FSDP's sharding strategy. (useful only when `use_fsdp` flag is passed and `fsdp_version=1`).",
|
||||
)
|
||||
fsdp_args.add_argument(
|
||||
"--fsdp_reshard_after_forward",
|
||||
type=str,
|
||||
default="true",
|
||||
help="FSDP's Reshard After Forward Strategy. (useful only when `use_fsdp` flag is passed). Supports either boolean (FSDP2) or `FULL_SHARD | SHARD_GRAD_OP | NO_RESHARD` (FSDP1).",
|
||||
)
|
||||
fsdp_args.add_argument(
|
||||
"--fsdp_auto_wrap_policy",
|
||||
@ -596,14 +610,17 @@ def launch_command_parser(subparsers=None):
|
||||
type=str,
|
||||
help="Decides Whether (true|false) intermediate activations are freed during the forward pass, and a checkpoint is left as a placeholder. (useful only when `use_fsdp` flag is passed).",
|
||||
)
|
||||
|
||||
# tp args
|
||||
tp_args = parser.add_argument_group("TP Arguments", "Arguments related to Tensor Parallelism using PyToch.")
|
||||
tp_args.add_argument(
|
||||
"--tp_size",
|
||||
default=1,
|
||||
fsdp_args.add_argument(
|
||||
"--fsdp_cp_size",
|
||||
type=int,
|
||||
help="PyTorch Tensor Parallelism (TP) degree. Set a value greater than 1 to activate. (useful only when `use_tp` flag is passed)",
|
||||
default=1,
|
||||
help="FSDP's context parallel size. (useful only when `use_fsdp` flag is passed and `fsdp_version` is 2). Defaults to 1 (CP not applied).",
|
||||
)
|
||||
fsdp_args.add_argument(
|
||||
"--fsdp_cp_comm_strategy",
|
||||
type=str,
|
||||
default="allgather",
|
||||
help="FSDP's context parallel communication strategy. (useful only when `use_fsdp` flag is passed and `fsdp_version` is 2). Defaults to `allgather`.",
|
||||
)
|
||||
|
||||
# megatron_lm args
|
||||
@ -708,7 +725,7 @@ def launch_command_parser(subparsers=None):
|
||||
"--fp8_override_linear_precision",
|
||||
type=lambda x: tuple(map(str_to_bool, x.split(","))),
|
||||
default=(False, False, False),
|
||||
help="Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision. Should be passed in a comma-seperated string of booleans (useful only when `--fp8_backend=te` is passed).",
|
||||
help="Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision. Should be passed in a comma-separated string of booleans (useful only when `--fp8_backend=te` is passed).",
|
||||
)
|
||||
fp8_args.add_argument(
|
||||
"--fp8_opt_level",
|
||||
@ -872,7 +889,6 @@ def deepspeed_launcher(args):
|
||||
|
||||
def tpu_launcher(args):
|
||||
import torch_xla.distributed.xla_multiprocessing as xmp
|
||||
from torch_xla import device_count
|
||||
|
||||
if args.no_python:
|
||||
raise ValueError("--no_python cannot be used with TPU launcher")
|
||||
@ -893,10 +909,6 @@ def tpu_launcher(args):
|
||||
f"Your training script should have a function named {args.main_training_function}, or you should pass a "
|
||||
"different value to `--main_training_function`."
|
||||
)
|
||||
if args.num_processes and args.num_processes != device_count():
|
||||
raise ValueError(
|
||||
f"Number of processes ({args.num_processes}) must match the number of TPU devices ({device_count()})"
|
||||
)
|
||||
|
||||
# Patch sys.argv
|
||||
sys.argv = [mod.__file__] + args.training_script_args
|
||||
@ -987,9 +999,9 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
|
||||
|
||||
def _validate_launch_command(args):
|
||||
# Sanity checks
|
||||
if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp, args.use_tp]) > 1:
|
||||
if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:
|
||||
raise ValueError(
|
||||
"You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp`, `--use_tp` at a time."
|
||||
"You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time."
|
||||
)
|
||||
if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):
|
||||
raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.")
|
||||
@ -1006,7 +1018,6 @@ def _validate_launch_command(args):
|
||||
and not args.tpu_use_cluster
|
||||
and not args.use_deepspeed
|
||||
and not args.use_fsdp
|
||||
and not args.use_tp
|
||||
and not args.use_megatron_lm
|
||||
):
|
||||
args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED
|
||||
@ -1026,7 +1037,6 @@ def _validate_launch_command(args):
|
||||
)
|
||||
args.tpu = defaults.distributed_type == DistributedType.XLA
|
||||
args.use_fsdp = defaults.distributed_type == DistributedType.FSDP
|
||||
args.use_tp = defaults.distributed_type == DistributedType.TP
|
||||
args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM
|
||||
args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False
|
||||
if args.gpu_ids is None:
|
||||
@ -1072,10 +1082,7 @@ def _validate_launch_command(args):
|
||||
args.mixed_precision = defaults.mixed_precision
|
||||
mp_from_config_flag = True
|
||||
else:
|
||||
if args.use_cpu or (args.use_xpu and torch.xpu.is_available()):
|
||||
native_amp = True
|
||||
else:
|
||||
native_amp = is_bf16_available(True)
|
||||
native_amp = is_bf16_available(True)
|
||||
if (
|
||||
args.mixed_precision == "bf16"
|
||||
and not native_amp
|
||||
@ -1090,7 +1097,7 @@ def _validate_launch_command(args):
|
||||
raise ValueError("You need to manually pass in `--num_processes` using this config yaml.")
|
||||
else:
|
||||
if args.num_processes is None:
|
||||
if args.use_xpu and is_xpu_available():
|
||||
if is_xpu_available():
|
||||
args.num_processes = torch.xpu.device_count()
|
||||
elif is_mlu_available():
|
||||
args.num_processes = torch.mlu.device_count()
|
||||
@ -1156,6 +1163,12 @@ def _validate_launch_command(args):
|
||||
f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs"
|
||||
)
|
||||
|
||||
if args.use_xpu is not None:
|
||||
logger.warning(
|
||||
"use_xpu is deprecated and ignored, will be removed in Accelerate v1.20. "
|
||||
"XPU is a PyTorch native citizen now, we don't need extra argument to enable it any more."
|
||||
)
|
||||
|
||||
if any(warned):
|
||||
message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n"
|
||||
message += "\n".join(warned)
|
||||
@ -1177,8 +1190,6 @@ def launch_command(args):
|
||||
deepspeed_launcher(args)
|
||||
elif args.use_fsdp and not args.cpu:
|
||||
multi_gpu_launcher(args)
|
||||
elif args.use_tp and not args.cpu:
|
||||
multi_gpu_launcher(args)
|
||||
elif args.use_megatron_lm and not args.cpu:
|
||||
multi_gpu_launcher(args)
|
||||
elif args.multi_gpu and not args.cpu:
|
||||
|
@ -17,8 +17,6 @@ This file contains utilities for handling input from the user and registering sp
|
||||
based on https://github.com/bchao1/bullet
|
||||
"""
|
||||
|
||||
from typing import List
|
||||
|
||||
from .keymap import KEYMAP, get_character
|
||||
|
||||
|
||||
@ -36,7 +34,7 @@ def mark(key: str):
|
||||
return decorator
|
||||
|
||||
|
||||
def mark_multiple(*keys: List[str]):
|
||||
def mark_multiple(*keys: list[str]):
|
||||
"""
|
||||
Mark the function with the key codes so it can be handled in the register
|
||||
"""
|
||||
|
@@ -43,7 +43,7 @@ def merge_command_parser(subparsers=None):
    )
    parser.add_argument(
        "--unsafe_serialization",
        action="store_false",
        action="store_true",
        default=False,
        help="Whether to save the merged weights as `.bin` rather than `.safetensors` (not recommended).",
    )
src/accelerate/commands/to_fsdp2.py (new file, 172 lines)
@ -0,0 +1,172 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import enum
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
import yaml
|
||||
|
||||
from accelerate.commands.utils import CustomArgumentParser
|
||||
|
||||
|
||||
class ConversionStatus(enum.Enum):
|
||||
NOT_YET_IMPLEMENTED = 0
|
||||
REMOVED = -1
|
||||
|
||||
|
||||
ARGUMENT_KEY_MAPPING = {
|
||||
# New keys in FSDP2
|
||||
"fsdp_version": "fsdp_version",
|
||||
"fsdp_reshard_after_forward": "fsdp_reshard_after_forward",
|
||||
# https://github.com/pytorch/torchtitan/blob/main/docs/fsdp.md
|
||||
# https://huggingface.co/docs/accelerate/en/usage_guides/fsdp
|
||||
"fsdp_auto_wrap_policy": "fsdp_auto_wrap_policy",
|
||||
"fsdp_backward_prefetch": ConversionStatus.REMOVED,
|
||||
"fsdp_forward_prefetch": ConversionStatus.NOT_YET_IMPLEMENTED,
|
||||
"fsdp_cpu_ram_efficient_loading": "fsdp_cpu_ram_efficient_loading",
|
||||
"fsdp_offload_params": "fsdp_offload_params",
|
||||
"fsdp_sharding_strategy": "fsdp_reshard_after_forward",
|
||||
"fsdp_state_dict_type": "fsdp_state_dict_type",
|
||||
"fsdp_sync_module_states": ConversionStatus.REMOVED,
|
||||
"fsdp_transformer_layer_cls_to_wrap": "fsdp_transformer_layer_cls_to_wrap",
|
||||
"fsdp_min_num_params": "fsdp_min_num_params",
|
||||
"fsdp_use_orig_params": ConversionStatus.REMOVED,
|
||||
"fsdp_activation_checkpointing": "fsdp_activation_checkpointing",
|
||||
}
|
||||
|
||||
ARGUMENT_VALUE_MAPPING = {
|
||||
"fsdp_sharding_strategy": {
|
||||
"FULL_SHARD": True,
|
||||
"SHARD_GRAD_OP": False,
|
||||
"HYBRID_SHARD": True,
|
||||
"HYBRID_SHARD_ZERO2": False,
|
||||
"NO_SHARD": False,
|
||||
},
|
||||
"fsdp_reshard_after_forward": { # Needed to convert newly created configs using FSDP1 to FSDP2
|
||||
"FULL_SHARD": True,
|
||||
"SHARD_GRAD_OP": False,
|
||||
"HYBRID_SHARD": True,
|
||||
"HYBRID_SHARD_ZERO2": False,
|
||||
"NO_SHARD": False,
|
||||
},
|
||||
}
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||


def _validate_to_fsdp2_args(args):
    if not Path(args.config_file).exists():
        raise FileNotFoundError(f"Config file {args.config_file} not found")

    if not args.overwrite and args.output_file is None:
        raise ValueError("If --overwrite is not set, --output_file must be provided")

    if not args.overwrite and Path(args.output_file).exists():
        raise FileExistsError(f"Output file {args.output_file} already exists and --overwrite is not set")


def convert_config_to_fsdp2(config: dict) -> dict:
    fsdp_config = config.get("fsdp_config", {})

    if not fsdp_config:
        logger.info("No FSDP config found in the config file, skipping conversion...")
        return config

    new_fsdp_config = {}

    if fsdp_config.get("fsdp_version", 1) == 2:
        logger.warning("Config already specifies FSDP2, skipping conversion...")
        logger.warning(
            "If the config doesn't use new argument names, change `fsdp_version` to `1` and rerun the command."
        )
        return config

    for key, value in fsdp_config.items():
        conversion_status = ARGUMENT_KEY_MAPPING.get(key, None)
        if isinstance(conversion_status, ConversionStatus) or conversion_status is None:
            conversion_status = key
            new_fsdp_config[conversion_status] = value
            continue

        if conversion_status == ConversionStatus.REMOVED:
            logger.warning(f"Argument {key} has been removed in FSDP2, skipping this key...")
            continue

        if conversion_status == ConversionStatus.NOT_YET_IMPLEMENTED:
            logger.warning(f"Argument {key} is not yet implemented in FSDP2, skipping this key...")
            continue

        if conversion_status is None:
            logger.warning(f"Argument {key} is not being converted, skipping this key...")
            new_fsdp_config[key] = value
        else:
            if key in ARGUMENT_VALUE_MAPPING:
                value = ARGUMENT_VALUE_MAPPING[key].get(value, value)
            new_fsdp_config[ARGUMENT_KEY_MAPPING[key]] = value

    new_fsdp_config["fsdp_version"] = 2
    config["fsdp_config"] = new_fsdp_config
    return config


def to_fsdp2_command_parser(subparsers=None):
    description = "Convert an Accelerate config from FSDP1 to FSDP2"

    if subparsers is not None:
        parser = subparsers.add_parser("to-fsdp2", description=description)
    else:
        parser = CustomArgumentParser(description=description)

    parser.add_argument("--config_file", type=str, help="The config file to convert to FSDP2", required=True)
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="Overwrite the config file if it exists",
        default=False,
    )
    parser.add_argument(
        "--output_file",
        type=str,
        help="The path to the output file to write the converted config to. If not provided, the input file will be overwritten (if --overwrite is set)",
        default=None,
    )
    if subparsers is not None:
        parser.set_defaults(func=to_fsdp2_command)

    return parser


def load_config(config_file: str) -> dict:
    with open(config_file) as f:
        config = yaml.safe_load(f)
    if not config:
        raise ValueError("Config file is empty")

    return config


def to_fsdp2_command(args):
    _validate_to_fsdp2_args(args)
    config = load_config(args.config_file)

    if args.overwrite and args.output_file is None:
        args.output_file = args.config_file

    new_config = convert_config_to_fsdp2(config)

    with open(args.output_file, "w") as f:
        yaml.dump(new_config, f)
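As a quick illustration of what the converter above does (not part of the diff; the sample values are made up, only the key names come from ARGUMENT_KEY_MAPPING, and this assumes an accelerate build that ships the new module):

import yaml

from accelerate.commands.to_fsdp2 import convert_config_to_fsdp2

# Illustrative FSDP1-style config dict, as it would be loaded from a config YAML.
fsdp1_config = {
    "distributed_type": "FSDP",
    "fsdp_config": {
        "fsdp_sharding_strategy": "FULL_SHARD",        # remapped via ARGUMENT_VALUE_MAPPING to fsdp_reshard_after_forward
        "fsdp_state_dict_type": "SHARDED_STATE_DICT",  # key name kept as-is
        "fsdp_use_orig_params": True,                  # marked ConversionStatus.REMOVED in the mapping above
    },
}

converted = convert_config_to_fsdp2(fsdp1_config)
print(yaml.dump(converted))  # the converted fsdp_config now carries fsdp_version: 2

The `accelerate to-fsdp2 --config_file old.yaml --output_file new.yaml` command registered by `to_fsdp2_command_parser` performs the same conversion on disk.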
src/accelerate/data_loader.py
@@ -15,7 +15,7 @@
import importlib
import math
from contextlib import suppress
from typing import Callable, List, Optional, Union
from typing import Callable, Optional, Union

import torch
from packaging import version
@@ -89,7 +89,9 @@ class SeedableRandomSampler(RandomSampler):

    def __iter__(self):
        if self.generator is None:
            self.generator = torch.Generator(device=torch.get_default_device())
            self.generator = torch.Generator(
                device=torch.get_default_device() if hasattr(torch, "get_default_device") else "cpu"
            )
        self.generator.manual_seed(self.initial_seed)

        # Allow `self.epoch` to modify the seed of the generator
@@ -604,6 +606,12 @@ class DataLoaderShard(DataLoaderAdapter, DataLoaderStateMixin):
            self.batch_sampler.set_epoch(epoch)
        if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"):
            self.batch_sampler.sampler.set_epoch(epoch)
        if (
            hasattr(self.batch_sampler, "batch_sampler")
            and hasattr(self.batch_sampler.batch_sampler, "sampler")
            and hasattr(self.batch_sampler.batch_sampler.sampler, "set_epoch")
        ):
            self.batch_sampler.batch_sampler.sampler.set_epoch(epoch)
        # We support if a custom `Dataset` implementation has `set_epoch`
        # or in general HF datasets `Datasets`
        elif hasattr(self.dataset, "set_epoch"):
@@ -990,7 +998,7 @@ def prepare_data_loader(
    process_index: Optional[int] = None,
    split_batches: bool = False,
    put_on_device: bool = False,
    rng_types: Optional[List[Union[str, RNGType]]] = None,
    rng_types: Optional[list[Union[str, RNGType]]] = None,
    dispatch_batches: Optional[bool] = None,
    even_batches: bool = True,
    slice_fn_for_dispatch: Optional[Callable] = None,
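The hunks above repeatedly guard `torch.get_default_device`, which only exists in newer PyTorch releases. A standalone sketch of the same fallback (illustrative, not taken from the diff):

import torch


def make_seeded_generator(seed: int = 42) -> torch.Generator:
    # Older PyTorch builds have no torch.get_default_device, so fall back to a CPU generator.
    device = torch.get_default_device() if hasattr(torch, "get_default_device") else "cpu"
    generator = torch.Generator(device=device)
    generator.manual_seed(seed)
    return generator


print(make_seeded_generator().initial_seed())  # 42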
@@ -1096,27 +1104,41 @@ def prepare_data_loader(
    if process_index is None:
        process_index = state.process_index

    # when device mesh is used, specifically with TP
    # then there is need to update process_index and num_processes
    # to bring in the effect of generating same batch across TP ranks
    # and different batch across FSDP and DP ranks.
    # Example:
    # if device mesh is (dp,fsdp,tp) = (2, 2, 3)
    # ranks would range from 0...11
    # from data angle ranks should look like 0 0 0 1 1 1 2 2 2 3 3 3
    # processes with same ranks/ids would receive the same batch
    if torch_device_mesh:
        submesh_fsdp_size = 1
        submesh_dp_size = 1
        submesh_tp_size = 1
        if "tp" in torch_device_mesh.mesh_dim_names:
            submesh_tp_size = torch_device_mesh["tp"].size()
        if "dp" in torch_device_mesh.mesh_dim_names:
            submesh_dp_size = torch_device_mesh["dp"].size()
        if "fsdp" in torch_device_mesh.mesh_dim_names:
            submesh_fsdp_size = torch_device_mesh["fsdp"].size()
        process_index = process_index // submesh_tp_size
        num_processes = submesh_fsdp_size * submesh_dp_size
        if state.distributed_type == DistributedType.DEEPSPEED:
            # In DeepSpeed, the optimizer sharing level in DP is determined by the config file.
            # Only considers "dp" and "tp".
            # Given a device mesh (dp, tp) = (2, 3):
            # - From the data parallel perspective, ranks should be structured as: 0 0 0 1 1 1
            # - Processes with the same DP rank will receive the same batch.
            if "tp" in torch_device_mesh.mesh_dim_names:
                submesh_tp_size = torch_device_mesh["tp"].size()
            process_index = process_index // submesh_tp_size
            num_processes = num_processes // submesh_tp_size
        else:
            # when device mesh is used, specifically with TP or CP
            # then there is need to update process_index and num_processes
            # to bring in the effect of generating same batch across TP/CP ranks
            # and different batch across FSDP and DP ranks.
            # Example:
            # if device mesh is (dp,fsdp,tp,cp) = (2, 2, 2, 3)
            # ranks would range from 0...23
            # from data angle ranks should look like 0 0 0 0 0 0 1 1 1 1 1 1 ...
            # processes with same ranks/ids would receive the same batch
            submesh_fsdp_size = 1
            submesh_dp_size = 1
            submesh_tp_size = 1
            submesh_cp_size = 1
            if "tp" in torch_device_mesh.mesh_dim_names:
                submesh_tp_size = torch_device_mesh["tp"].size()
            if "dp" in torch_device_mesh.mesh_dim_names:
                submesh_dp_size = torch_device_mesh["dp"].size()
            if "fsdp" in torch_device_mesh.mesh_dim_names:
                submesh_fsdp_size = torch_device_mesh["fsdp"].size()
            if "cp" in torch_device_mesh.mesh_dim_names:
                submesh_cp_size = torch_device_mesh["cp"].size()
            process_index = process_index // (submesh_tp_size * submesh_cp_size)
            num_processes = submesh_fsdp_size * submesh_dp_size

    # Sanity check
    if split_batches:
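To make the rank arithmetic in the comments above concrete, here is a small self-contained sketch (plain Python; the mesh sizes are the ones used in the example comment):

# (dp, fsdp, tp, cp) = (2, 2, 2, 3): 24 global ranks collapse onto 4 data-loading
# ranks, each repeated tp * cp = 6 times, matching "0 0 0 0 0 0 1 1 1 1 1 1 ..." above.
dp, fsdp, tp, cp = 2, 2, 2, 3
world_size = dp * fsdp * tp * cp  # 24

effective_process_index = [rank // (tp * cp) for rank in range(world_size)]
effective_num_processes = fsdp * dp  # 4

print(effective_process_index)
assert sorted(set(effective_process_index)) == list(range(effective_num_processes))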
@@ -1156,13 +1178,21 @@ def prepare_data_loader(
            data_source=sampler.data_source,
            replacement=sampler.replacement,
            num_samples=sampler._num_samples,
            generator=getattr(sampler, "generator", torch.Generator(device=torch.get_default_device())),
            generator=getattr(
                sampler,
                "generator",
                torch.Generator(device=torch.get_default_device() if hasattr(torch, "get_default_device") else "cpu"),
            ),
            data_seed=data_seed,
        )

    if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA:
        # isinstance(dataloader.sampler, RandomSampler) indicates the original dataloader has `shuffle` enabled.
        generator = torch.Generator(device=torch.get_default_device()).manual_seed(42)
        generator = torch.Generator(
            device=torch.get_default_device() if hasattr(torch, "get_default_device") else "cpu"
        )
        seed = int(torch.empty((), dtype=torch.int64).random_().item())
        generator.manual_seed(seed)
        dataloader.generator = generator
        dataloader.sampler.generator = generator
    # No change if no multiprocess
@@ -1181,7 +1211,11 @@ def prepare_data_loader(
        else:
            if not use_seedable_sampler and hasattr(sampler, "generator"):
                if sampler.generator is None:
                    sampler.generator = torch.Generator(device=torch.get_default_device())
                    sampler.generator = torch.Generator(
                        device=torch.get_default_device() if hasattr(torch, "get_default_device") else "cpu"
                    )
                    seed = int(torch.empty((), dtype=torch.int64).random_().item())
                    sampler.generator.manual_seed(seed)
                synchronized_generator = sampler.generator
            batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
            new_batch_sampler = BatchSamplerShard(
src/accelerate/hooks.py
@@ -13,7 +13,8 @@
# limitations under the License.

import functools
from typing import Dict, List, Mapping, Optional, Union
from collections.abc import Mapping
from typing import Optional, Union

import torch
import torch.nn as nn
@@ -30,7 +31,6 @@ from .utils.imports import (
    is_mlu_available,
    is_musa_available,
    is_npu_available,
    is_xpu_available,
)
from .utils.memory import clear_device_cache
from .utils.modeling import get_non_persistent_buffers
@@ -151,7 +151,6 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False)
        `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can
        be discarded).
    """

    if append and (getattr(module, "_hf_hook", None) is not None):
        old_hook = module._hf_hook
        remove_hook_from_module(module)
@@ -251,8 +250,8 @@ class AlignDevicesHook(ModelHook):
        weights_map: Optional[Mapping] = None,
        offload_buffers: bool = False,
        place_submodules: bool = False,
        skip_keys: Optional[Union[str, List[str]]] = None,
        tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
        skip_keys: Optional[Union[str, list[str]]] = None,
        tied_params_map: Optional[dict[int, dict[torch.device, torch.Tensor]]] = None,
    ):
        self.execution_device = execution_device
        self.offload = offload
@@ -394,9 +393,8 @@ class AlignDevicesHook(ModelHook):
                        device = f"mlu:{device}"
                    elif is_musa_available():
                        device = f"musa:{device}"
                    elif is_xpu_available():
                        device = f"xpu:{device}"
                del self.tied_params_map[value_pointer][device]
                if device in self.tied_params_map[value_pointer]:
                    del self.tied_params_map[value_pointer][device]
            self.tied_pointers_to_remove = set()
        if self.io_same_device and self.input_device is not None:
            output = send_to_device(output, self.input_device, skip_keys=self.skip_keys)
@@ -414,9 +412,9 @@ class AlignDevicesHook(ModelHook):
def attach_execution_device_hook(
    module: torch.nn.Module,
    execution_device: Union[int, str, torch.device],
    skip_keys: Optional[Union[str, List[str]]] = None,
    preload_module_classes: Optional[List[str]] = None,
    tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
    skip_keys: Optional[Union[str, list[str]]] = None,
    preload_module_classes: Optional[list[str]] = None,
    tied_params_map: Optional[dict[int, dict[torch.device, torch.Tensor]]] = None,
):
    """
    Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right
@@ -466,9 +464,9 @@ def attach_align_device_hook(
    weights_map: Optional[Mapping] = None,
    offload_buffers: bool = False,
    module_name: str = "",
    skip_keys: Optional[Union[str, List[str]]] = None,
    preload_module_classes: Optional[List[str]] = None,
    tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
    skip_keys: Optional[Union[str, list[str]]] = None,
    preload_module_classes: Optional[list[str]] = None,
    tied_params_map: Optional[dict[int, dict[torch.device, torch.Tensor]]] = None,
):
    """
    Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or
@@ -556,14 +554,14 @@ def remove_hook_from_submodules(module: nn.Module):

def attach_align_device_hook_on_blocks(
    module: nn.Module,
    execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None,
    offload: Union[bool, Dict[str, bool]] = False,
    execution_device: Optional[Union[torch.device, dict[str, torch.device]]] = None,
    offload: Union[bool, dict[str, bool]] = False,
    weights_map: Mapping = None,
    offload_buffers: bool = False,
    module_name: str = "",
    skip_keys: Optional[Union[str, List[str]]] = None,
    preload_module_classes: Optional[List[str]] = None,
    tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
    skip_keys: Optional[Union[str, list[str]]] = None,
    preload_module_classes: Optional[list[str]] = None,
    tied_params_map: Optional[dict[int, dict[torch.device, torch.Tensor]]] = None,
):
    """
    Attaches `AlignDevicesHook` to all blocks of a given model as needed.
@@ -738,3 +736,30 @@ class UserCpuOffloadHook:

    def remove(self):
        remove_hook_from_module(self.model)


class LayerwiseCastingHook(ModelHook):
    r"""
    A hook that casts the weights of a module to a high precision dtype for computation, and to a low precision dtype
    for storage. This process may lead to quality loss in the output, but can significantly reduce the memory
    footprint.
    """

    _is_stateful = False

    def __init__(self, storage_dtype: torch.dtype, compute_dtype: torch.dtype, non_blocking: bool) -> None:
        self.storage_dtype = storage_dtype
        self.compute_dtype = compute_dtype
        self.non_blocking = non_blocking

    def init_hook(self, module: torch.nn.Module):
        module.to(dtype=self.storage_dtype, non_blocking=self.non_blocking)
        return module

    def pre_forward(self, module: torch.nn.Module, *args, **kwargs):
        module.to(dtype=self.compute_dtype, non_blocking=self.non_blocking)
        return args, kwargs

    def post_forward(self, module: torch.nn.Module, output):
        module.to(dtype=self.storage_dtype, non_blocking=self.non_blocking)
        return output
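A hedged sketch of how the new `LayerwiseCastingHook` can be attached with `add_hook_to_module` (both defined in this file); the dtypes and the toy layer are illustrative, and this assumes an accelerate build that includes the hook:

import torch
import torch.nn as nn

from accelerate.hooks import LayerwiseCastingHook, add_hook_to_module

layer = nn.Linear(16, 16)
hook = LayerwiseCastingHook(storage_dtype=torch.bfloat16, compute_dtype=torch.float32, non_blocking=False)
add_hook_to_module(layer, hook)

print(layer.weight.dtype)        # torch.bfloat16 while idle (storage dtype)
out = layer(torch.randn(2, 16))  # weights are upcast to float32 around the forward
print(layer.weight.dtype)        # back to torch.bfloat16 after post_forward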
src/accelerate/inference.py
@@ -13,7 +13,7 @@
# limitations under the License.
import math
from types import MethodType
from typing import Any, Dict, List, Optional, Tuple, Union
from typing import Any, Optional, Union

from .state import PartialState
from .utils import (
@@ -123,10 +123,10 @@ def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs):

def prepare_pippy(
    model,
    split_points: Optional[Union[str, List[str]]] = "auto",
    no_split_module_classes: Optional[List[str]] = None,
    example_args: Optional[Tuple[Any]] = (),
    example_kwargs: Optional[Dict[str, Any]] = None,
    split_points: Optional[Union[str, list[str]]] = "auto",
    no_split_module_classes: Optional[list[str]] = None,
    example_args: Optional[tuple[Any]] = (),
    example_kwargs: Optional[dict[str, Any]] = None,
    num_chunks: Optional[int] = None,
    gather_output: Optional[bool] = False,
):
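For context, a call shape matching the updated `prepare_pippy` signature (illustrative toy model; this only runs under a multi-process distributed launch with a PyTorch build that provides pipeline support):

import torch
import torch.nn as nn

from accelerate.inference import prepare_pippy

model = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 8))
example_args = (torch.randn(4, 32),)

# Splits the model into pipeline stages across the visible devices; each rank then
# feeds its chunk of the batch through its stage.
model = prepare_pippy(model, split_points="auto", example_args=example_args, gather_output=True)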
src/accelerate/launchers.py
@@ -132,10 +132,11 @@ def notebook_launcher(
            f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
    if (in_colab or in_kaggle) and (
        (os.environ.get("TPU_NAME", None) is not None) or (os.environ.get("PJRT_DEVICE", "") == "TPU")
    ):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp
        from torch_xla import device_count

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
@@ -145,7 +146,7 @@ def notebook_launcher(
            )

        launcher = PrepareForLaunch(function, distributed_type="XLA")
        print(f"Launching a training on {device_count()} TPU cores.")
        print("Launching a training on TPU cores.")
        xmp.spawn(launcher, args=args, start_method="fork")
    elif in_colab and get_gpu_info()[1] < 2:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
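Typical notebook usage of the launcher whose TPU detection is extended above (sketch; the training function body is a placeholder):

from accelerate import Accelerator, notebook_launcher


def training_function():
    accelerator = Accelerator()
    accelerator.print(f"Hello from process {accelerator.process_index}")


# On Colab/Kaggle the TPU branch now triggers when either TPU_NAME is set or
# PJRT_DEVICE=TPU; otherwise this falls back to GPU/CPU process spawning.
notebook_launcher(training_function, args=(), num_processes=2)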
src/accelerate/optimizer.py
@@ -22,6 +22,7 @@ from .utils import DistributedType, honor_type, is_lomo_available, is_torch_xla_

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.runtime as xr


def move_to_device(state, device):
@@ -47,7 +48,7 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
        device_placement (`bool`, *optional*, defaults to `True`):
            Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of
            `optimizer` on the right device.
        scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):
        scaler (`torch.amp.GradScaler` or `torch.cuda.amp.GradScaler`, *optional*):
            The scaler to use in the step function if training with mixed precision.
    """

@@ -150,7 +151,7 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
                and self.accelerator_state.distributed_type == DistributedType.XLA
            ):
                gradients = xm._fetch_gradients(self.optimizer)
                xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size())
                xm.all_reduce("sum", gradients, scale=1.0 / xr.world_size())
                self.gradient_state.is_xla_gradients_synced = True

        if is_lomo_available():
||||
|
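A version-tolerant sketch of the world-size lookup the change above moves to `torch_xla.runtime` (assumes torch_xla is installed; older builds only expose the deprecated `xm.xrt_world_size`):

def xla_world_size() -> int:
    import torch_xla.core.xla_model as xm

    try:
        import torch_xla.runtime as xr

        return xr.world_size()
    except (ImportError, AttributeError):
        return xm.xrt_world_size()  # deprecated spelling on older torch_xla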
src/accelerate/state.py
@@ -21,7 +21,7 @@ import warnings
import weakref
from contextlib import contextmanager
from functools import partial
from typing import Any, Callable, Optional
from typing import Any, Callable

import torch

@@ -32,7 +32,6 @@ from .utils import (
    check_cuda_fp8_capability,
    check_cuda_p2p_ib_support,
    deepspeed_required,
    get_ccl_version,
    get_cpu_distributed_information,
    get_int_from_env,
    is_ccl_available,
@@ -48,6 +47,7 @@ from .utils import (
    is_npu_available,
    is_sdaa_available,
    is_torch_xla_available,
    is_xccl_available,
    is_xpu_available,
    parse_choice_from_env,
    parse_flag_from_env,
@@ -58,6 +58,7 @@ from .utils.dataclasses import SageMakerDistributedType

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.runtime as xr

if is_mlu_available(check_device=False):
    import torch_mlu  # noqa: F401
@@ -212,6 +213,12 @@ class PartialState:
                if self.backend == "tccl":
                    local_rank = os.environ.get("LOCAL_RANK", -1)
                    torch.sdaa.set_device(f"sdaa:{local_rank}")
                if (
                    self.backend == "nccl"
                    and os.environ.get("ACCELERATE_USE_FSDP", "false") == "true"
                    and os.environ.get("FSDP_OFFLOAD_PARAMS", "false") == "true"
                ):
                    self.backend = "cuda:nccl,cpu:gloo"
                dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
                # We need to flag to `use_deepspeed` to be True to override `distributed_type` later
                use_deepspeed = True
@@ -276,8 +283,8 @@ class PartialState:
            # XLA needs device setting first for `set_replication`
            self.set_device()
            xm.set_replication(self.device, xm.get_xla_supported_devices())
            self.num_processes = xm.xrt_world_size()
            self.process_index = xm.get_ordinal()
            self.num_processes = xr.world_size()
            self.process_index = xr.global_ordinal()
            if is_torch_xla_available(check_is_tpu=True):
                self.local_process_index = xm.get_local_ordinal()
            else:
@@ -469,7 +476,7 @@ class PartialState:
                tensorized_result = send_to_device(result, self.device)
                result = pad_across_processes(tensorized_result, pad_index=inputs[-1])
            else:
                result += [result[-1]] * (num_samples_per_process + 1 - len(result))
                result += [result[-1]] * (num_samples_per_process + (1 if num_extras > 0 else 0) - len(result))
            return result
        elif isinstance(inputs, dict):
            for key in inputs.keys():
@@ -486,7 +493,9 @@ class PartialState:
            end_index = len(inputs)
            result_idcs = list(range(start_index, end_index))
            if apply_padding:
                result_idcs += [end_index - 1] * (num_samples_per_process + 1 - len(result_idcs))
                result_idcs += [end_index - 1] * (
                    num_samples_per_process + (1 if num_extras > 0 else 0) - len(result_idcs)
                )
            return inputs.select(result_idcs)
        return inputs
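A sketch of the padding behaviour the fix above targets: with `apply_padding=True`, shorter ranks only repeat their last element when some rank actually received an extra item (run under `accelerate launch`, e.g. with 2 processes):

from accelerate import PartialState

state = PartialState()
with state.split_between_processes(["a", "b", "c", "d", "e"], apply_padding=True) as shard:
    # With 2 processes: rank 0 -> ['a', 'b', 'c'], rank 1 -> ['d', 'e', 'e'] (padded once).
    print(state.process_index, shard)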
@ -768,6 +777,10 @@ class PartialState:
|
||||
if backend is None:
|
||||
backend = "nccl"
|
||||
distributed_type = DistributedType.MULTI_GPU
|
||||
elif is_xpu_available() and is_xccl_available():
|
||||
if backend is None:
|
||||
backend = "xccl"
|
||||
distributed_type = DistributedType.MULTI_XPU
|
||||
|
||||
if distributed_type is None and (
|
||||
int(os.environ.get("LOCAL_RANK", -1)) != -1
|
||||
@ -783,10 +796,7 @@ class PartialState:
|
||||
and is_ccl_available()
|
||||
and (get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or distributed_type == DistributedType.MULTI_XPU)
|
||||
):
|
||||
if get_ccl_version() >= "1.12":
|
||||
import oneccl_bindings_for_pytorch # noqa: F401
|
||||
else:
|
||||
import torch_ccl # noqa: F401
|
||||
import oneccl_bindings_for_pytorch # noqa: F401
|
||||
|
||||
backend = "ccl"
|
||||
elif backend in (None, "mpi") and torch.distributed.is_mpi_available():
|
||||
@ -944,8 +954,17 @@ class AcceleratorState:
|
||||
os.environ["XLA_DOWNCAST_BF16"] = str(0)
|
||||
self.downcast_bfloat = False
|
||||
elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu:
|
||||
self.deepspeed_plugins = deepspeed_plugin
|
||||
self.distributed_type = DistributedType.DEEPSPEED
|
||||
if not isinstance(deepspeed_plugin, dict):
|
||||
deepspeed_plugin.set_mixed_precision(mixed_precision)
|
||||
deepspeed_plugin.select(_from_accelerator_state=True)
|
||||
else:
|
||||
for plugin in deepspeed_plugin.values():
|
||||
plugin.set_mixed_precision(mixed_precision)
|
||||
# The first plugin passed in is always the active one
|
||||
first_plugin = next(iter(deepspeed_plugin.values()))
|
||||
first_plugin.select(_from_accelerator_state=True)
|
||||
self.deepspeed_plugins = deepspeed_plugin
|
||||
elif self.distributed_type in [
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
@ -966,7 +985,7 @@ class AcceleratorState:
|
||||
self.distributed_type = DistributedType.MEGATRON_LM
|
||||
megatron_lm_plugin.set_mixed_precision(self._mixed_precision)
|
||||
self.megatron_lm_plugin = megatron_lm_plugin
|
||||
if os.environ.get("ACCELERATE_USE_TP", "false") == "true" or self.torch_tp_plugin is not None:
|
||||
if self.torch_tp_plugin is not None:
|
||||
self.distributed_type = DistributedType.TP
|
||||
elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
|
||||
if is_ipex_available():
|
||||
@ -1051,6 +1070,10 @@ class AcceleratorState:
|
||||
"""
|
||||
return PartialState().use_distributed
|
||||
|
||||
@property
|
||||
def is_fsdp2(self) -> bool:
|
||||
return self.distributed_type == DistributedType.FSDP and self.fsdp_plugin.fsdp_version == 2
|
||||
|
||||
@property
|
||||
def is_last_process(self) -> bool:
|
||||
"Returns whether the current process is the last one"
|
||||
@ -1203,7 +1226,7 @@ class GradientState:
|
||||
|
||||
_shared_state = SharedDict()
|
||||
|
||||
def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None):
|
||||
def __init__(self, gradient_accumulation_plugin: GradientAccumulationPlugin | None = None):
|
||||
self.__dict__ = self._shared_state
|
||||
if not self.initialized:
|
||||
self.sync_gradients = True
|
||||
|
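A minimal check of the new `is_fsdp2` property added above (assumes an accelerate build that includes it and an environment already configured via `accelerate config`):

from accelerate import Accelerator

accelerator = Accelerator()
# False unless the run uses FSDP with fsdp_version set to 2 in the plugin/config.
print(accelerator.state.is_fsdp2)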
src/accelerate/test_utils/__init__.py
@@ -19,8 +19,10 @@ from .testing import (
    device_count,
    execute_subprocess_async,
    get_launch_command,
    get_torch_dist_unique_port,
    memory_allocated_func,
    path_in_accelerate_package,
    pytest_xdist_worker_id,
    require_bnb,
    require_cpu,
    require_cuda,
@@ -33,6 +35,7 @@ from .testing import (
    require_mps,
    require_multi_device,
    require_multi_gpu,
    require_multi_gpu_or_xpu,
    require_multi_xpu,
    require_musa,
    require_non_cpu,
src/accelerate/test_utils/examples.py
@@ -20,16 +20,15 @@ others are used to either get the code that matters, or to preprocess them (such
"""

import os
from typing import List


def get_function_contents_by_name(lines: List[str], name: str):
def get_function_contents_by_name(lines: list[str], name: str):
    """
    Extracts a function from `lines` of segmented source code with the name `name`.

    Args:
        lines (`List[str]`):
            Source code of a script seperated by line.
            Source code of a script separated by line.
        name (`str`):
            The name of the function to extract. Should be either `training_function` or `main`
    """
@@ -49,13 +48,13 @@ def get_function_contents_by_name(lines: List[str], name: str):
            good_lines.append(line)


def clean_lines(lines: List[str]):
def clean_lines(lines: list[str]):
    """
    Filters `lines` and removes any entries that start with a comment ('#') or is just a newline ('\n')

    Args:
        lines (`List[str]`):
            Source code of a script seperated by line.
            Source code of a script separated by line.
    """
    return [line for line in lines if not line.lstrip().startswith("#") and line != "\n"]
@@ -184,12 +184,12 @@ def training_function(config, args):
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json")) as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["lr"] == lr_scheduler.get_lr()[0], (
            "Scheduler learning rate mismatch, loading from checkpoint failed"
        )
        assert resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"], (
            "Optimizer learning rate mismatch, loading from checkpoint failed"
        )
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
@@ -0,0 +1,46 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
from torch.utils.data import DataLoader

from accelerate import Accelerator


def main():
    accelerator = Accelerator()
    B, S, D = 2, 3, 4
    rank_data = torch.ones((B, S, D), device="cuda") * (accelerator.process_index + 1)
    all_rank_data = [torch.empty_like(rank_data) for _ in range(accelerator.num_processes)]
    torch.distributed.all_gather(all_rank_data, rank_data)

    dataloader = DataLoader(all_rank_data, batch_size=B, shuffle=False)
    dataloader = accelerator.prepare(dataloader)
    for batch in dataloader:
        all_rank_batch = [torch.empty_like(batch) for _ in range(accelerator.num_processes)]
        torch.distributed.all_gather(all_rank_batch, batch)

        if accelerator.is_main_process:
            for rank_idx in range(accelerator.num_processes):
                torch.testing.assert_close(
                    all_rank_batch[0],
                    all_rank_batch[rank_idx],
                    msg=f"Rank {rank_idx} batch {all_rank_batch[rank_idx]} differs from rank 0 batch {all_rank_batch[0]}",
                )

    accelerator.end_training()


if __name__ == "__main__":
    main()
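A sketch of how a per-rank script like the new test above is typically driven from accelerate's own test helpers (`get_launch_command` and `execute_subprocess_async` are imported in the `test_utils` hunk earlier in this diff; the script path is a placeholder):

from accelerate.test_utils import execute_subprocess_async, get_launch_command

cmd = get_launch_command(num_processes=2) + ["path/to/the_new_dataloader_script.py"]
execute_subprocess_async(cmd)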
@@ -115,9 +115,9 @@ def test_torch_metrics(
):
    _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}"
    assert len(logits) == num_samples, (
        f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}"
    )


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
@@ -148,9 +148,9 @@ def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
        assert math.isclose(baseline[key], distributed[key]), (
            f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
        )


def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset():
@@ -235,9 +235,9 @@ def test_gather_for_metrics_drop_last():

    # Should return a full set of complete batches from each GPU
    num_expected_items = per_device_batch_size * accelerator.num_processes
    assert gathered_items.size(0) == (
        num_expected_items
    ), f"Expected number of items: {num_expected_items}, Actual: {gathered_items.size(0)}"
    assert gathered_items.size(0) == (num_expected_items), (
        f"Expected number of items: {num_expected_items}, Actual: {gathered_items.size(0)}"
    )


def main():
Some files were not shown because too many files have changed in this diff.