mirror of https://github.com/huggingface/accelerate.git
synced 2025-11-19 09:04:28 +08:00
Compare commits
6 Commits
context-pa ... v1.0.0
| Author | SHA1 | Date |
|---|---|---|
| | 5d71646380 | |
| | 97d413ef3e | |
| | 0c46240894 | |
| | 873b623a12 | |
| | 1aa1dce638 | |
| | f57136a0d4 | |
12  .github/PULL_REQUEST_TEMPLATE.md (vendored)

@@ -37,11 +37,11 @@ members/contributors who may be interested in your PR.
If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.

- Big modeling: @SunMarc
- Fully-Sharded Data Parallism: @SunMarc @zach-huggingface
- DeepSpeed: @SunMarc @zach-huggingface
- Command Line Interface: @SunMarc @zach-huggingface
- Documentation: @SunMarc @zach-huggingface
- Core parts of the library: @BenjaminBossan @SunMarc @zach-huggingface
- Maintained examples: @SunMarc or @zach-huggingface
- Fully-Sharded Data Parallism: @muellerzr
- DeepSpeed: @muellerzr
- Command Line Interface: @muellerzr
- Documentation: @muellerzr
- Core parts of the library: @muellerzr @BenjaminBossan @SunMarc
- Maintained examples: @muellerzr or @SunMarc

-->
@@ -15,7 +15,7 @@ jobs:
  outputs:
    version: ${{ steps.step1.outputs.version }}
  steps:
    - uses: actions/checkout@v4
    - uses: actions/checkout@v3.1.0
    - id: step1
      run: echo "version=$(python setup.py --version)" >> $GITHUB_OUTPUT
6  .github/workflows/build_and_run_tests.yml (vendored)

@@ -16,13 +16,13 @@ jobs:
  outputs:
    changed: ${{ steps.was_changed.outputs.changed }}
  steps:
    - uses: actions/checkout@v4
    - uses: actions/checkout@v3.1.0
      with:
        fetch-depth: "2"

    - name: Get changed files
      id: changed-files
      uses: tj-actions/changed-files@3f54ebb830831fc121d3263c1857cfbdc310cdb9 #v42
      uses: tj-actions/changed-files@v41

    - name: Was setup changed
      id: was_changed
@@ -47,4 +47,4 @@ jobs:
  run-integration-tests:
    needs: build-docker-containers
    if: always()
    uses: ./.github/workflows/self_hosted_integration_tests.yml
    uses: ./.github/workflows/self_hosted_integration_tests.yml
10  .github/workflows/build_docker_images.yml (vendored)

@@ -102,15 +102,9 @@ jobs:
      id: date
      run: |
        echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
        # Get the previous month
        echo "base_year=$(date -d 'last month' '+%y')" >> $GITHUB_ENV
        echo "base_month=$(date -d 'last month' '+%m')" >> $GITHUB_ENV
    - name: Build and Push GPU
      uses: docker/build-push-action@v4
      with:
        file: benchmarks/fp8/transformer_engine/Dockerfile
        file: benchmarks/fp8/Dockerfile
        push: true
        tags: huggingface/accelerate:gpu-fp8-transformerengine-nightly-${{ env.date }}
        build-args: |
          BASE_YEAR=${{ env.base_year }}
          BASE_MONTH=${{ env.base_month }}
        tags: huggingface/accelerate:gpu-fp8-transformerengine-nightly-${{ env.date }}
37  .github/workflows/fp8_runner.yml (vendored)

@@ -1,37 +0,0 @@
name: Test FP8 Runner

on:
  workflow_dispatch:

env:
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
jobs:
  set-prev-day:
    runs-on: ubuntu-latest
    outputs:
      prev-day: ${{ steps.set-prev-day.outputs.prev-day }}
    steps:
      - name: Set PREV_DAY
        id: set-prev-day
        run: |
          PREV_DAY=$(date -d "yesterday" '+%Y-%m-%d')
          echo "prev-day=$PREV_DAY" >> $GITHUB_OUTPUT
  run-fp8-tests:
    needs: set-prev-day
    runs-on:
      group: aws-g6e-12xlarge
    container:
      image: huggingface/accelerate:gpu-fp8-transformerengine-nightly-${{ needs.set-prev-day.outputs.prev-day }}
      options: --gpus all --shm-size "16gb"
    steps:
      - uses: actions/checkout@v3
      - name: Install the library
        run: |
          pip install -e .[test_prod,test_fp8]
      - name: Show installed libraries
        run: |
          pip freeze
      - name: Run TE FP8 tests
        run: |
          python -m pytest -s -v ./tests/test_fp8.py
82  .github/workflows/gaudi3_scheduled.yml (vendored)

@@ -1,82 +0,0 @@
name: Gaudi3 tests (scheduled)

on:
  workflow_dispatch:
  schedule: # every day at 6 AM UTC
    - cron: "0 6 * * *"

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  run-gaudi3-tests:
    runs-on:
      group: itac-bm-emr-gaudi3-dell-2gaudi

    container:
      image: docker://vault.habana.ai/gaudi-docker/1.20.0/ubuntu22.04/habanalabs/pytorch-installer-2.6.0:latest
      options: --runtime=habana --shm-size=64G --cap-add=sys_nice --env HABANA_VISIBLE_DEVICES
      env:
        OMPI_MCA_btl_vader_single_copy_mechanism: none
        PT_ENABLE_INT64_SUPPORT: 1
        PT_HPU_LAZY_MODE: 0
        RUN_SLOW: 1

    steps:
      - name: HL-SMI (1)
        run: |
          hl-smi
          echo "HABANA_VISIBLE_DEVICES=${HABANA_VISIBLE_DEVICES}"
          echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}"

      - name: Extract HPU visible modules
        id: add-modules
        run: |
          export HABANA_VISIBLE_MODULES=$(hl-smi -Q module_id -f csv,noheader | tr '\n' ',' | sed 's/,$//')
          echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}" >> $GITHUB_ENV

      - name: HL-SMI (2)
        run: |
          hl-smi
          echo "HABANA_VISIBLE_DEVICES=${HABANA_VISIBLE_DEVICES}"
          echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}"

      - name: Checkout to Accelerate
        uses: actions/checkout@v4

      - name: Install Accelerate with Transformers & DeepSpeed
        run: |
          pip install -e .[testing] \
            git+https://github.com/HabanaAI/DeepSpeed.git@1.20.0 \
            git+https://github.com/huggingface/transformers.git

      - name: Run CLI tests
        if: ${{ !cancelled() && (success() || failure()) }}
        run: |
          make test_cli

      - name: Run Core tests
        if: ${{ !cancelled() && (success() || failure()) }}
        run: |
          make test_core

      - name: Run Big Modeling tests
        if: ${{ !cancelled() && (success() || failure()) }}
        run: |
          make test_big_modeling

      - name: Run FSDP integration tests
        if: ${{ !cancelled() && (success() || failure()) }}
        run: |
          make test_fsdp

      - name: Run DeepSpeed integration tests
        if: ${{ !cancelled() && (success() || failure()) }}
        run: |
          make test_deepspeed

      - name: Run Examples tests
        if: ${{ !cancelled() && (success() || failure()) }}
        run: |
          make test_examples
8  .github/workflows/integration_tests.yml (vendored)

@@ -26,11 +26,11 @@ jobs:
    strategy:
      fail-fast: false
    steps:
      - uses: actions/checkout@v4
      - name: Set up python 3.9
        uses: actions/setup-python@v5
      - uses: actions/checkout@v3.1.0
      - name: Set up python 3.8
        uses: actions/setup-python@v3
        with:
          python-version: 3.9
          python-version: 3.8
          cache: 'pip'
          cache-dependency-path: 'setup.py'
19  .github/workflows/pr_style_bot.yml (vendored)

@@ -1,19 +0,0 @@
# To run this bot, comment "@bot /style" on a PR
name: Style Bot

on:
  issue_comment:
    types: [created]

permissions:
  contents: write
  pull-requests: write

jobs:
  style:
    uses: huggingface/huggingface_hub/.github/workflows/style-bot-action.yml@main
    with:
      python_quality_dependencies: "[quality]"
      style_command_type: "default"
    secrets:
      bot_token: ${{ secrets.GITHUB_TOKEN }}
8  .github/workflows/quality.yml (vendored)

@@ -6,11 +6,11 @@ jobs:
  quality:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.9
        uses: actions/setup-python@v5
      - uses: actions/checkout@v3.1.0
      - name: Set up Python 3.8
        uses: actions/setup-python@v3
        with:
          python-version: 3.9
          python-version: 3.8
          cache: 'pip'
          cache-dependency-path: 'setup.py'
      - name: Install Python dependencies
6  .github/workflows/stale.yml (vendored)

@@ -16,12 +16,12 @@ jobs:
    env:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@v3.1.0

      - name: Setup Python
        uses: actions/setup-python@v5
        uses: actions/setup-python@v3
        with:
          python-version: 3.9
          python-version: 3.8
          cache: 'pip'
          cache-dependency-path: 'setup.py'
10  .github/workflows/test.yml (vendored)

@@ -38,11 +38,11 @@ jobs:
          test_rest
        ]
    steps:
      - uses: actions/checkout@v4
      - name: Set up python 3.9
        uses: actions/setup-python@v5
      - uses: actions/checkout@v3.1.0
      - name: Set up python 3.8
        uses: actions/setup-python@v3
        with:
          python-version: 3.9
          python-version: 3.8
          cache: 'pip'
          cache-dependency-path: 'setup.py'

@@ -52,7 +52,7 @@ jobs:
          if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi
          if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi
          if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torchvision==0.18.1 torch==2.3.1; fi
          pip install pytest-reportlog tabulate setuptools importlib_metadata
          pip install pytest-reportlog tabulate setuptools

      - name: Show installed libraries
        run: |
8  .github/workflows/test_imports.yml (vendored)

@@ -26,11 +26,11 @@ jobs:
          minimum,
        ]
    steps:
      - uses: actions/checkout@v4
      - name: Set up python 3.9
        uses: actions/setup-python@v5
      - uses: actions/checkout@v3.1.0
      - name: Set up python 3.8
        uses: actions/setup-python@v3
        with:
          python-version: 3.9
          python-version: 3.8
          cache: 'pip'
          cache-dependency-path: 'setup.py'
26  Makefile

@@ -28,7 +28,7 @@ test_big_modeling:

test_core:
    python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \
        --ignore=./tests/fsdp --ignore=./tests/tp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_core.log",)
        --ignore=./tests/fsdp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_core.log",)

test_cli:
    python -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_cli.log",)
@@ -39,9 +39,6 @@ test_deepspeed:

test_fsdp:
    python -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_fsdp.log",)

test_tp:
    python -m pytest -s -v ./tests/tp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_tp.log",)

# Since the new version of pytest will *change* how things are collected, we need `deepspeed` to
# run after test_core and test_cli
test:
@@ -50,14 +47,13 @@ test:
    $(MAKE) test_big_modeling
    $(MAKE) test_deepspeed
    $(MAKE) test_fsdp
    $(MAKE) test_tp

test_examples:
    python -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_examples.log",)

# Broken down example tests for the CI runners
test_integrations:
    python -m pytest -s -v ./tests/deepspeed ./tests/fsdp ./tests/tp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_integrations.log",)
    python -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_integrations.log",)

test_example_differences:
    python -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_example_diff.log",)
@@ -74,21 +70,3 @@ test_prod:

test_rest:
    python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "not by_step and not by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_rest.log",)

# For developers to prepare a release
prepare_release:
    rm -rf dist build
    python setup.py bdist_wheel sdist

# Make sure this is ran in a fresh venv of some form
install_test_release:
    pip uninstall accelerate -y
    pip install -i https://testpypi.python.org/pypi --extra-index-url https://pypi.org/simple accelerate$(if $(version),==$(version),)

# Run as `make target=testpypi upload_release`
upload_release:
    @if [ "$(target)" != "testpypi" ] && [ "$(target)" != "pypi" ]; then \
        echo "Error: target must be either 'testpypi' or 'pypi'"; \
        exit 1; \
    fi
    twine upload dist/* -r $(target)
@@ -13,7 +13,7 @@ pip install transformers
To reproduce or test a new setup, run

```py
python big_model_inference.py model_name
python inference_acc.py model_name
```

This script supports `gpt-j-6b`, `gpt-neox`, `opt` (30B version) and `T0pp` out of the box, but you can specify any valid checkpoint for `model_name`.
@@ -43,4 +43,4 @@ Note on the results:

You will also note that Accelerate does not use anymore GPU and CPU RAM than necessary:
- peak GPU memory is exactly the size of the model put on a given GPU
- peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded on CPU, whichever is bigger.
- peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded on CPU, whichever is bigger.
@@ -18,12 +18,6 @@ import time
import psutil
import torch

from accelerate.test_utils.testing import get_backend


torch_device_type, _, _ = get_backend()
torch_accelerator_module = getattr(torch, torch_device_type, torch.cuda)


class PeakCPUMemory:
    def __init__(self):
@@ -60,16 +54,16 @@ def start_measure():
    measures = {"time": time.time()}

    gc.collect()
    torch_accelerator_module.empty_cache()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch_accelerator_module.device_count()):
        measures[str(i)] = torch_accelerator_module.memory_allocated(i)
    torch_accelerator_module.reset_peak_memory_stats()
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures

@@ -79,16 +73,16 @@ def end_measure(start_measures):
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch_accelerator_module.empty_cache()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch_accelerator_module.device_count()):
        measures[str(i)] = (torch_accelerator_module.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch_accelerator_module.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures

@@ -96,9 +90,9 @@ def end_measure(start_measures):
def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch_accelerator_module.device_count()):
        print(f"- {torch_device_type} {i} allocated: {measures[str(i)]:.2f}MiB")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- {torch_device_type} {i} peak: {peak:.2f}MiB")
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
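The hunks above replace hard-coded `torch.cuda` calls with a torch module resolved once from `get_backend()`. A minimal sketch of that device-agnostic pattern, assuming only the calls visible in this diff (the `peak_memory_per_device` helper below is illustrative, not part of the benchmark):

```python
# Sketch of the pattern adopted above: resolve the active backend once, then
# route memory-tracking calls through torch.<backend> instead of torch.cuda.
import torch
from accelerate.test_utils.testing import get_backend

torch_device_type, _, _ = get_backend()  # e.g. "cuda" on an NVIDIA machine
# Fall back to torch.cuda when torch has no module named after the backend.
torch_accelerator_module = getattr(torch, torch_device_type, torch.cuda)


def peak_memory_per_device():
    # Same accessors the benchmark uses, keyed by device index.
    return {
        f"{torch_device_type}:{i}": torch_accelerator_module.max_memory_allocated(i)
        for i in range(torch_accelerator_module.device_count())
    }
```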
@@ -22,11 +22,12 @@ import evaluate
import msamp
import torch
from fp8_utils import evaluate_model, get_training_utilities
from packaging import version
from torch.nn.parallel import DistributedDataParallel as DDP

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, get_grad_scaler, set_seed
from accelerate.utils import FP8RecipeKwargs, set_seed


MODEL_NAME = "bert-base-cased"
@@ -35,7 +36,10 @@ METRIC = evaluate.load("glue", "mrpc")

def train_baseline(opt_level="O2"):
    set_seed(42)
    scaler = get_grad_scaler()
    if version.parse(torch.__version__) > version.parse("2.3"):
        scaler = torch.amp.GradScaler("cuda")
    else:
        scaler = torch.cuda.amp.GradScaler()
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
    accelerator = Accelerator()
    device = accelerator.device
@@ -62,12 +66,12 @@ def train_baseline(opt_level="O2"):

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )
    assert (
        trained_model_results["accuracy"] > base_model_results["accuracy"]
    ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
    assert (
        trained_model_results["f1"] > base_model_results["f1"]
    ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'

    return base_model_results, trained_model_results

@@ -95,12 +99,12 @@ def train_integration(opt_level="O2"):

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )
    assert (
        trained_model_results["accuracy"] > base_model_results["accuracy"]
    ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
    assert (
        trained_model_results["f1"] > base_model_results["f1"]
    ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'

    return base_model_results, trained_model_results

@@ -109,15 +113,15 @@ if __name__ == "__main__":
    for opt_level in ["O1", "O2"]:
        baseline_not_trained, baseline_trained = train_baseline(opt_level)
        accelerator_not_trained, accelerator_trained = train_integration(opt_level)
        assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
            f"Accuracy not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
        )
        assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
            f"F1 not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
        )
        assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
            f"Accuracy not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
        )
        assert baseline_trained["f1"] == accelerator_trained["f1"], (
            f"F1 not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained['f1']} == {accelerator_trained['f1']}"
        )
        assert (
            baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
        ), f'Accuracy not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
        assert (
            baseline_not_trained["f1"] == accelerator_not_trained["f1"]
        ), f'F1 not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
        assert (
            baseline_trained["accuracy"] == accelerator_trained["accuracy"]
        ), f'Accuracy not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
        assert (
            baseline_trained["f1"] == accelerator_trained["f1"]
        ), f'F1 not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
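In the `train_baseline` hunk above, the inline PyTorch version check is collapsed into a single `get_grad_scaler()` call from `accelerate.utils`. A hedged sketch of the equivalent branch it replaces, using only the two code paths visible in this diff:

```python
# Sketch of the version-gated scaler selection that get_grad_scaler() stands in
# for here; both branches appear verbatim in the hunk above.
import torch
from packaging import version


def pick_grad_scaler():
    if version.parse(torch.__version__) > version.parse("2.3"):
        # Newer PyTorch exposes a device-aware GradScaler under torch.amp.
        return torch.amp.GradScaler("cuda")
    # Older releases only ship the CUDA-specific scaler.
    return torch.cuda.amp.GradScaler()
```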
@@ -90,12 +90,12 @@ def train_baseline(zero_stage: int = 1, opt_level: str = "O1"):
    model.destroy()
    torch.cuda.empty_cache()
    AcceleratorState()._reset_state(True)
    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )
    assert (
        trained_model_results["accuracy"] > base_model_results["accuracy"]
    ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
    assert (
        trained_model_results["f1"] > base_model_results["f1"]
    ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'

    return base_model_results, trained_model_results

@@ -129,12 +129,12 @@ def train_integration(zero_stage: int = 1, opt_level: str = "O1"):
    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.destroy()
    torch.cuda.empty_cache()
    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )
    assert (
        trained_model_results["accuracy"] > base_model_results["accuracy"]
    ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
    assert (
        trained_model_results["f1"] > base_model_results["f1"]
    ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'

    AcceleratorState()._reset_state(True)
    return base_model_results, trained_model_results
@@ -145,17 +145,17 @@ if __name__ == "__main__":
    for opt_level in ["O1", "O2", "O3"]:
        baseline_not_trained, baseline_trained = train_baseline(zero_stage, opt_level)
        accelerator_not_trained, accelerator_trained = train_integration(zero_stage, opt_level)
        assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
            f"ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
        )
        assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
            f"ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
        )
        assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
            f"ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
        )
        assert baseline_trained["f1"] == accelerator_trained["f1"], (
            f"ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
        )
        assert (
            baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
        ), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
        assert (
            baseline_not_trained["f1"] == accelerator_not_trained["f1"]
        ), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
        assert (
            baseline_trained["accuracy"] == accelerator_trained["accuracy"]
        ), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
        assert (
            baseline_trained["f1"] == accelerator_trained["f1"]
        ), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'

    torch.distributed.destroy_process_group()
@@ -22,10 +22,11 @@ import evaluate
import msamp
import torch
from fp8_utils import evaluate_model, get_training_utilities
from packaging import version

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, get_grad_scaler, set_seed
from accelerate.utils import FP8RecipeKwargs, set_seed


MODEL_NAME = "bert-base-cased"
@@ -41,7 +42,10 @@ def train_baseline(opt_level="O2"):

    base_model_results = evaluate_model(model, eval_dataloader, METRIC)
    model.train()
    scaler = get_grad_scaler()
    if version.parse(torch.__version__) > version.parse("2.3"):
        scaler = torch.amp.GradScaler("cuda")
    else:
        scaler = torch.cuda.amp.GradScaler()

    for batch in train_dataloader:
        batch = batch.to("cuda")
@@ -56,12 +60,12 @@ def train_baseline(opt_level="O2"):

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )
    assert (
        trained_model_results["accuracy"] > base_model_results["accuracy"]
    ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
    assert (
        trained_model_results["f1"] > base_model_results["f1"]
    ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'

    return base_model_results, trained_model_results

@@ -89,12 +93,12 @@ def train_integration(opt_level="O2"):

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )
    assert (
        trained_model_results["accuracy"] > base_model_results["accuracy"]
    ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
    assert (
        trained_model_results["f1"] > base_model_results["f1"]
    ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'

    return base_model_results, trained_model_results

@@ -104,15 +108,15 @@ if __name__ == "__main__":

    baseline_not_trained, baseline_trained = train_baseline(opt_level)
    accelerator_not_trained, accelerator_trained = train_integration(opt_level)

    assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
    )
    assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
    )
    assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
    )
    assert baseline_trained["f1"] == accelerator_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
    )
    assert (
        baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
    ), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
    assert (
        baseline_not_trained["f1"] == accelerator_not_trained["f1"]
    ), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
    assert (
        baseline_trained["accuracy"] == accelerator_trained["accuracy"]
    ), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
    assert (
        baseline_trained["f1"] == accelerator_trained["f1"]
    ), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
@@ -1,12 +0,0 @@
FROM nvcr.io/nvidia/pytorch:24.07-py3

RUN pip install transformers evaluate datasets
RUN git clone https://github.com/huggingface/accelerate.git

RUN cd accelerate && \
    pip install -e . && \
    cd benchmarks/fp8

RUN /bin/bash
@@ -1,32 +0,0 @@
# FP8 Benchmarks

Comparing and running [torchao](https://github.com/pytorch/ao/tree/main/torchao/float8) FP8 with accelerate

## Overview

This repo provides scripts which compare native `torchao` model training against `accelerate`'s own integration. Each modeling type is segmented out via a script, supporting the following:

* Single GPU training (`non_distributed.py`)
* Multi-GPU training via DistributedDataParallelism (`ddp.py`)
* Fully Sharded Data Parallelism (`fsdp.py`)
* DeepSpeed ZeRO 1-3 (`deepspeed.py`)

To run them, it's recommended to use a docker image (see the attached `Dockerfile`) and not install `torchao` manually.

## Running:

There are official Docker images located at `huggingface/accelerate:gpu-fp8-torchao-nightly` which can be used.

You can run all scripts using the core `accelerate launch` command without any `accelerate config` being needed.

For single GPU, run it via `python`:

```bash
python non_distributed.py
```

For the rest, run it via `accelerate launch`:

```bash
accelerate launch ddp.py # or distrib_deepspeed.py, ddp.py
```
@@ -1,158 +0,0 @@
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script tests to ensure that `accelerate` performs at the same level as raw `torchao`.

This particular script verifies this for DDP training.
"""

from functools import partial

import evaluate
import torch
from fp8_utils import get_training_utilities
from torch.nn.parallel import DistributedDataParallel as DDP
from torchao.float8 import convert_to_float8_training

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import AORecipeKwargs, set_seed


MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")


def evaluate_model(model, dataloader, metric, accelerator=None):
    "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
    model.eval()
    for step, batch in enumerate(dataloader):
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        if accelerator is not None and accelerator.num_processes > 1:
            predictions, references = accelerator.gather_for_metrics((predictions, references))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()


def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None):
    if isinstance(module, torch.nn.Linear):
        if module.in_features % 16 != 0 or module.out_features % 16 != 0:
            return False
    # For stability reasons, we skip the first and last linear layers
    # Otherwise can lead to the model not training or converging properly
    if fqn in (first_layer_name, last_layer_name):
        return False
    return True


def train_baseline():
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
    first_linear = None
    last_linear = None
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear):
            if first_linear is None:
                first_linear = name
            last_linear = name
    func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear)
    accelerator = Accelerator()
    device = accelerator.device
    model.to(device)

    convert_to_float8_training(model, module_filter_fn=func)

    # Convert the model to DDP
    device_ids, output_device = [accelerator.local_process_index], accelerator.local_process_index
    model = DDP(model, device_ids=device_ids, output_device=output_device)

    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    for batch in train_dataloader:
        with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
            batch = batch.to(device)
            outputs = model(**batch)
            loss = outputs.loss
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


def train_integration():
    AcceleratorState()._reset_state(True)
    accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()])
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
        MODEL_NAME, accelerator=accelerator
    )

    model, optimizer = accelerator.prepare(model, optimizer)
    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    for batch in train_dataloader:
        outputs = model(**batch)
        loss = outputs.loss
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


if __name__ == "__main__":
    baseline_not_trained, baseline_trained = train_baseline()
    accelerator_not_trained, accelerator_trained = train_integration()

    assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
    )
    assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
    )
    assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
    )
    assert baseline_trained["f1"] == accelerator_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
    )

    torch.distributed.destroy_process_group()
@@ -1,213 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script tests to ensure that `accelerate` performs at the same level as raw `torchao`.

This particular script verifies this for deepspeed training.
"""

from functools import partial
from unittest.mock import patch

import deepspeed
import evaluate
import torch
from fp8_utils import evaluate_model, get_training_utilities
from torchao.float8 import convert_to_float8_training
from transformers.integrations import HfDeepSpeedConfig

from accelerate import Accelerator, DeepSpeedPlugin
from accelerate.state import AcceleratorState
from accelerate.utils import AORecipeKwargs, set_seed


MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")


def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None):
    if isinstance(module, torch.nn.Linear):
        if module.in_features % 16 != 0 or module.out_features % 16 != 0:
            return False
    # For stability reasons, we skip the first and last linear layers
    # Otherwise can lead to the model not training or converging properly
    if fqn in (first_layer_name, last_layer_name):
        return False
    return True


def train_baseline(zero_stage: int = 1):
    set_seed(42)
    # This forces transformers to think Zero-3 Init should be used
    with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock:
        mock.return_value = zero_stage == 3

        config = HfDeepSpeedConfig(
            {
                "train_micro_batch_size_per_gpu": 16,
                "gradient_accumulation_steps": 1,
                "zero_optimization": {"stage": zero_stage},
            }
        )
        plugin = DeepSpeedPlugin(hf_ds_config=config)
        accelerator = Accelerator(deepspeed_plugin=plugin)
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
            MODEL_NAME, accelerator=accelerator
        )
    first_linear = None
    last_linear = None
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear):
            if first_linear is None:
                first_linear = name
            last_linear = name
    func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear)

    convert_to_float8_training(model, module_filter_fn=func)

    import numpy as np

    config = {
        "train_batch_size": 32,
        "train_micro_batch_size_per_gpu": 16,
        "gradient_accumulation_steps": 1,
        "zero_optimization": {
            "stage": zero_stage,
            "offload_optimizer": {"device": "none", "nvme_path": None},
            "offload_param": {"device": "none", "nvme_path": None},
            "stage3_gather_16bit_weights_on_model_save": False,
        },
        "gradient_clipping": 1.0,
        "steps_per_print": np.inf,
        "bf16": {"enabled": True},
        "fp16": {"enabled": False},
        "zero_allow_untested_optimizer": True,
    }

    (
        model,
        optimizer,
        _,
        lr_scheduler,
    ) = deepspeed.initialize(
        model=model,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        config_params=config,
    )

    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    model_outputs = []
    data = []

    for batch in train_dataloader:
        outputs = model(**batch)
        data.append(batch.to("cpu"))
        model_outputs.append(outputs.logits.to("cpu"))
        loss = outputs.loss
        model.backward(loss)
        model.step()
        for _ in range(accelerator.num_processes):
            lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.destroy()
    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    del config
    return base_model_results, trained_model_results, model_outputs, data


def train_integration(zero_stage: int = 1):
    set_seed(42)
    AcceleratorState()._reset_state(True)
    config = HfDeepSpeedConfig(
        {
            "train_micro_batch_size_per_gpu": 16,
            "gradient_accumulation_steps": 1,
            "zero_optimization": {"stage": zero_stage},
        }
    )
    deepspeed_plugin = DeepSpeedPlugin(
        hf_ds_config=config,
    )
    # This forces transformers to think Zero-3 Init should be used
    with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock:
        mock.return_value = zero_stage == 3
        accelerator = Accelerator(
            mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()], deepspeed_plugin=deepspeed_plugin
        )

        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
            MODEL_NAME, accelerator=accelerator
        )

    model, optimizer, lr_scheduler, train_dataloader, eval_dataloader = accelerator.prepare(
        model, optimizer, lr_scheduler, train_dataloader, eval_dataloader
    )
    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()
    model_outputs = []
    data = []
    for batch in train_dataloader:
        outputs = model(**batch)
        data.append(batch.to("cpu"))
        model_outputs.append(outputs.logits.to("cpu"))
        loss = outputs.loss
        accelerator.backward(loss)
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.destroy()
    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    del config
    return base_model_results, trained_model_results, model_outputs, data


if __name__ == "__main__":
    for zero_stage in [1, 2, 3]:
        baseline_not_trained, baseline_trained, baseline_outputs, baseline_data = train_baseline(zero_stage)
        accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration(
            zero_stage
        )
        assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
            f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
        )
        assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
            f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
        )
        assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
            f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
        )
        assert baseline_trained["f1"] == accelerator_trained["f1"], (
            f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
        )
        AcceleratorState()._reset_state(True)
    torch.distributed.destroy_process_group()
@@ -1,116 +0,0 @@
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch


def get_dataloaders(model_name: str, batch_size: int = 16):
    from datasets import load_dataset
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["idx", "sentence1", "sentence2"],
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        return tokenizer.pad(
            examples,
            padding="longest",
            pad_to_multiple_of=16,  # Specific for FP8
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=16,
        drop_last=True,
    )

    return train_dataloader, eval_dataloader


def get_training_utilities(model_name: str, batch_size: int = 16, accelerator=None, prepare=True):
    """
    Returns a tuple of:
    - Model
    - Optimizer
    - Train dataloader (prepared)
    - Eval dataloader (prepared)
    - LR Scheduler
    Suitable for training on the MRPC dataset
    """
    from torch.optim import AdamW
    from transformers import AutoModelForSequenceClassification, get_linear_schedule_with_warmup

    from accelerate import Accelerator

    if accelerator is None:
        accelerator = Accelerator()
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    train_dataloader, eval_dataloader = get_dataloaders(model_name, batch_size)
    optimizer = AdamW(model.parameters(), lr=0.0001)
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=len(train_dataloader) * 2,
    )
    train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader)
    return model, optimizer, train_dataloader, eval_dataloader, lr_scheduler


def get_named_parameters(model):
    """
    Same thing as `Accelerator.get_named_parameters` Returns a list of the named parameters of the model (extracted
    from parallel)
    """
    from accelerate.utils import extract_model_from_parallel

    model = extract_model_from_parallel(model)
    return {n: p for n, p in model.named_parameters()}


def evaluate_model(model, dataloader, metric, accelerator=None):
    "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
    model.eval()
    for step, batch in enumerate(dataloader):
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        if accelerator is not None and accelerator.num_processes > 1:
            predictions, references = accelerator.gather_for_metrics((predictions, references))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()
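The helpers in this removed `fp8_utils` module are consumed by the benchmark scripts elsewhere in this compare. A condensed usage sketch mirroring that pattern (the training loop and the DDP/FSDP/DeepSpeed wrapping, which vary per script, are elided):

```python
# Sketch of how the benchmark scripts use the removed helpers: build the
# training objects, evaluate before and after a training pass, then compare.
import evaluate
from fp8_utils import evaluate_model, get_training_utilities

from accelerate import Accelerator
from accelerate.utils import set_seed

METRIC = evaluate.load("glue", "mrpc")

set_seed(42)
accelerator = Accelerator()
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
    "bert-base-cased", accelerator=accelerator
)
# Each script wraps the model differently (DDP, FSDP, DeepSpeed) before training.
base_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
# ... training loop as in ddp.py / non_distributed.py ...
trained_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
assert trained_results["accuracy"] > base_results["accuracy"]
```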
@ -1,173 +0,0 @@
|
||||
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
This script tests to ensure that `accelerate` performs at the same level as raw `torchao`.
|
||||
|
||||
This particular script verifies this for FSDP training.
|
||||
"""
|
||||
|
||||
from functools import partial
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from fp8_utils import get_training_utilities
|
||||
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
|
||||
from torch.distributed.fsdp import MixedPrecision
|
||||
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
|
||||
from torchao.float8 import convert_to_float8_training
|
||||
from transformers.models.bert import BertLayer
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate import FullyShardedDataParallelPlugin as FSDPPlugin
|
||||
from accelerate.state import AcceleratorState
|
||||
from accelerate.utils import AORecipeKwargs, set_seed
|
||||
|
||||
|
||||
MODEL_NAME = "bert-base-cased"
|
||||
METRIC = evaluate.load("glue", "mrpc")
|
||||
|
||||
FSDP_WRAP_POLICY = partial(transformer_auto_wrap_policy, transformer_layer_cls={BertLayer})
|
||||
|
||||
|
||||
def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None):
|
||||
if isinstance(module, torch.nn.Linear):
|
||||
if module.in_features % 16 != 0 or module.out_features % 16 != 0:
|
||||
return False
|
||||
# For stability reasons, we skip the first and last linear layers
|
||||
# Otherwise this can lead to the model not training or converging properly
|
||||
if fqn in (first_layer_name, last_layer_name):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def evaluate_model(model, dataloader, metric, accelerator=None):
|
||||
"Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
|
||||
model.eval()
|
||||
for step, batch in enumerate(dataloader):
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch)
|
||||
predictions = outputs.logits.argmax(dim=-1)
|
||||
references = batch["labels"]
|
||||
if accelerator is not None and accelerator.num_processes > 1:
|
||||
predictions, references = accelerator.gather_for_metrics((predictions, references))
|
||||
metric.add_batch(predictions=predictions, references=references)
|
||||
return metric.compute()
|
||||
|
||||
|
||||
def train_baseline():
|
||||
set_seed(42)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
|
||||
first_linear = None
|
||||
last_linear = None
|
||||
for name, module in model.named_modules():
|
||||
if isinstance(module, torch.nn.Linear):
|
||||
if first_linear is None:
|
||||
first_linear = name
|
||||
last_linear = name
|
||||
func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear)
|
||||
accelerator = Accelerator()
|
||||
device = accelerator.device
|
||||
model.to(device)
|
||||
|
||||
convert_to_float8_training(model, module_filter_fn=func)
|
||||
|
||||
# Convert the model to FSDP
|
||||
model = FSDP(
|
||||
model,
|
||||
use_orig_params=True,
|
||||
mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
|
||||
auto_wrap_policy=FSDP_WRAP_POLICY,
|
||||
)
|
||||
|
||||
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
model.train()
|
||||
|
||||
for batch in train_dataloader:
|
||||
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
|
||||
batch = batch.to(device)
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
lr_scheduler.step()
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
|
||||
def train_integration():
|
||||
AcceleratorState()._reset_state(True)
|
||||
fsdp_plugin = FSDPPlugin(
|
||||
auto_wrap_policy=FSDP_WRAP_POLICY,
|
||||
use_orig_params=True,
|
||||
mixed_precision_policy=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
|
||||
)
|
||||
accelerator = Accelerator(mixed_precision="fp8", fsdp_plugin=fsdp_plugin, kwargs_handlers=[AORecipeKwargs()])
|
||||
set_seed(42)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
|
||||
MODEL_NAME, accelerator=accelerator
|
||||
)
|
||||
|
||||
model, optimizer = accelerator.prepare(model, optimizer)
|
||||
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
model.train()
|
||||
|
||||
for batch in train_dataloader:
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
lr_scheduler.step()
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
baseline_not_trained, baseline_trained = train_baseline()
|
||||
accelerator_not_trained, accelerator_trained = train_integration()
|
||||
|
||||
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
|
||||
f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
|
||||
f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
|
||||
)
|
||||
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
|
||||
f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_trained["f1"] == accelerator_trained["f1"], (
|
||||
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
|
||||
)
|
||||
|
||||
torch.distributed.destroy_process_group()
|
||||
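These torchao FP8 + FSDP checks are written for multi-GPU runs (they tear down the process group at the end). A hypothetical way to launch them, assuming the script above is saved as `fsdp.py` (the file name is a placeholder, and the process count should match your GPU count):

```bash
# either launcher works; both spawn 2 processes here
accelerate launch --num_processes 2 fsdp.py
torchrun --nproc_per_node 2 fsdp.py
```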
@ -1,145 +0,0 @@
|
||||
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
This script tests to ensure that `accelerate` performs at the same level as raw `torchao`.
|
||||
|
||||
This particular script verifies this for single GPU training.
|
||||
"""
|
||||
|
||||
from functools import partial
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from fp8_utils import get_training_utilities
|
||||
from torchao.float8 import convert_to_float8_training
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate.state import AcceleratorState
|
||||
from accelerate.utils import AORecipeKwargs, set_seed
|
||||
|
||||
|
||||
MODEL_NAME = "bert-base-cased"
|
||||
METRIC = evaluate.load("glue", "mrpc")
|
||||
|
||||
|
||||
def evaluate_model(model, dataloader, metric, accelerator=None):
|
||||
"Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
|
||||
model.eval()
|
||||
for step, batch in enumerate(dataloader):
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch)
|
||||
predictions = outputs.logits.argmax(dim=-1)
|
||||
references = batch["labels"]
|
||||
if accelerator is not None and accelerator.num_processes > 1:
|
||||
predictions, references = accelerator.gather_for_metrics((predictions, references))
|
||||
metric.add_batch(predictions=predictions, references=references)
|
||||
return metric.compute()
|
||||
|
||||
|
||||
def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None):
|
||||
if isinstance(module, torch.nn.Linear):
|
||||
if module.in_features % 16 != 0 or module.out_features % 16 != 0:
|
||||
return False
|
||||
# For stability reasons, we skip the first and last linear layers
|
||||
# Otherwise this can lead to the model not training or converging properly
|
||||
if fqn in (first_layer_name, last_layer_name):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def train_baseline():
|
||||
set_seed(42)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
|
||||
first_linear = None
|
||||
last_linear = None
|
||||
for name, module in model.named_modules():
|
||||
if isinstance(module, torch.nn.Linear):
|
||||
if first_linear is None:
|
||||
first_linear = name
|
||||
last_linear = name
|
||||
|
||||
func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear)
|
||||
model.to("cuda")
|
||||
convert_to_float8_training(model, module_filter_fn=func)
|
||||
base_model_results = evaluate_model(model, eval_dataloader, METRIC)
|
||||
model.train()
|
||||
|
||||
for batch in train_dataloader:
|
||||
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
lr_scheduler.step()
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
|
||||
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
|
||||
def train_integration():
|
||||
set_seed(42)
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()])
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
|
||||
MODEL_NAME, accelerator=accelerator
|
||||
)
|
||||
model = accelerator.prepare(model)
|
||||
base_model_results = evaluate_model(model, eval_dataloader, METRIC)
|
||||
model.train()
|
||||
|
||||
for batch in train_dataloader:
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
lr_scheduler.step()
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
|
||||
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
baseline_not_trained, baseline_trained = train_baseline()
|
||||
AcceleratorState._reset_state(True)
|
||||
accelerator_not_trained, accelerator_trained = train_integration()
|
||||
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
|
||||
f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
|
||||
f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
|
||||
)
|
||||
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
|
||||
f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_trained["f1"] == accelerator_trained["f1"], (
|
||||
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
|
||||
)
|
||||
@ -1,7 +1,4 @@
|
||||
ARG BASE_YEAR=25
|
||||
ARG BASE_MONTH=03
|
||||
|
||||
FROM nvcr.io/nvidia/pytorch:${BASE_YEAR}.${BASE_MONTH}-py3
|
||||
FROM nvcr.io/nvidia/pytorch:24.07-py3
|
||||
|
||||
RUN pip install transformers evaluate datasets
|
||||
RUN git clone https://github.com/huggingface/accelerate.git
|
||||
|
||||
@ -79,12 +79,12 @@ def train_baseline():
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
@ -114,12 +114,12 @@ def train_integration():
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
@ -128,17 +128,17 @@ if __name__ == "__main__":
|
||||
baseline_not_trained, baseline_trained = train_baseline()
|
||||
accelerator_not_trained, accelerator_trained = train_integration()
|
||||
|
||||
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
|
||||
f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
|
||||
f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
|
||||
)
|
||||
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
|
||||
f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_trained["f1"] == accelerator_trained["f1"], (
|
||||
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
|
||||
)
|
||||
assert (
|
||||
baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_not_trained["f1"] == accelerator_not_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
|
||||
assert (
|
||||
baseline_trained["accuracy"] == accelerator_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_trained["f1"] == accelerator_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
|
||||
|
||||
torch.distributed.destroy_process_group()
|
||||
|
||||
@ -66,7 +66,7 @@ def train_baseline(zero_stage: int = 1):
|
||||
import numpy as np
|
||||
|
||||
config = {
|
||||
"train_batch_size": 16,
|
||||
"train_batch_size": 32,
|
||||
"train_micro_batch_size_per_gpu": 16,
|
||||
"gradient_accumulation_steps": 1,
|
||||
"zero_optimization": {
|
||||
@ -113,12 +113,12 @@ def train_baseline(zero_stage: int = 1):
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
model.destroy()
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results, model_outputs, data
|
||||
|
||||
@ -159,33 +159,32 @@ def train_integration(zero_stage: int = 1):
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
model.destroy()
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results, model_outputs, data
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
for zero_stage in [1, 2, 3]:
|
||||
baseline_not_trained, baseline_trained, baseline_outputs, baseline_data = train_baseline(zero_stage)
|
||||
accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration(
|
||||
zero_stage
|
||||
)
|
||||
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
|
||||
f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
|
||||
f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
|
||||
)
|
||||
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
|
||||
f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_trained["f1"] == accelerator_trained["f1"], (
|
||||
f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
|
||||
)
|
||||
# for zero_stage in [1, 2, 3]:
|
||||
zero_stage = 1
|
||||
baseline_not_trained, baseline_trained, baseline_outputs, baseline_data = train_baseline(zero_stage)
|
||||
accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration(zero_stage)
|
||||
assert (
|
||||
baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
|
||||
), f'ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_not_trained["f1"] == accelerator_not_trained["f1"]
|
||||
), f'ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
|
||||
assert (
|
||||
baseline_trained["accuracy"] == accelerator_trained["accuracy"]
|
||||
), f'ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_trained["f1"] == accelerator_trained["f1"]
|
||||
), f'ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
|
||||
|
||||
torch.distributed.destroy_process_group()
|
||||
torch.distributed.destroy_process_group()
|
||||
|
||||
@ -91,12 +91,12 @@ def train_baseline():
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
@ -131,12 +131,12 @@ def train_integration():
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
@ -145,17 +145,17 @@ if __name__ == "__main__":
|
||||
baseline_not_trained, baseline_trained = train_baseline()
|
||||
accelerator_not_trained, accelerator_trained = train_integration()
|
||||
|
||||
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
|
||||
f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
|
||||
f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
|
||||
)
|
||||
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
|
||||
f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_trained["f1"] == accelerator_trained["f1"], (
|
||||
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
|
||||
)
|
||||
assert (
|
||||
baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_not_trained["f1"] == accelerator_not_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
|
||||
assert (
|
||||
baseline_trained["accuracy"] == accelerator_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_trained["f1"] == accelerator_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
|
||||
|
||||
torch.distributed.destroy_process_group()
|
||||
|
||||
@ -70,12 +70,12 @@ def train_baseline():
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
|
||||
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
@ -104,12 +104,12 @@ def train_integration():
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
|
||||
|
||||
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
|
||||
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
|
||||
)
|
||||
assert trained_model_results["f1"] > base_model_results["f1"], (
|
||||
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
|
||||
)
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
@ -118,15 +118,15 @@ if __name__ == "__main__":
|
||||
baseline_not_trained, baseline_trained = train_baseline()
|
||||
accelerator_not_trained, accelerator_trained = train_integration()
|
||||
|
||||
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
|
||||
f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
|
||||
f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
|
||||
)
|
||||
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
|
||||
f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
|
||||
)
|
||||
assert baseline_trained["f1"] == accelerator_trained["f1"], (
|
||||
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
|
||||
)
|
||||
assert (
|
||||
baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_not_trained["f1"] == accelerator_not_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
|
||||
assert (
|
||||
baseline_trained["accuracy"] == accelerator_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_trained["f1"] == accelerator_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
|
||||
|
||||
@ -1,74 +0,0 @@
|
||||
# FSDP2 Benchmarks

This benchmark showcases `FSDP2` in 🤗 `accelerate` and compares it to a raw `torch` baseline.

## Overview

This benchmark consists of two parts:
- `main.py` is the main script that runs the benchmark
- `visualize.py` is the script that visualizes the results (if `--output_dir` was specified for the previous command)

## Motivation

We want to showcase that 🤗 `accelerate`'s integration of `FSDP2` is on par with raw PyTorch, and highlight a "broken" part of PyTorch: creating an optimizer before applying `FSDP2` **does not result in a working training loop** (more on this later).
This script showcases **matching memory usage and convergence between `accelerate` and `torch`'s baseline.**
To deal with this breaking change (and maintain backward compatibility with FSDP1 in terms of the API), `accelerate` had to come up with a workaround, since `accelerate` assumes that the user will nearly always create the model, optimizer, scheduler, etc. beforehand and bring them themselves. Without a workaround, this leads to a stark increase in memory as well as the model not training at all if the user creates an optimizer beforehand.
To work around this, we replace the parameters inside the optimizer with the newly created FSDP2 sharded ones, as sketched below. More about this can be found in this [blog post (TBD)](TODO)
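A minimal sketch of that workaround (the helper names here are simplified placeholders; the actual implementation lives in `utils.py`, shown further down in this comparison):

```python
import torch

def snapshot_param_pointers(model: torch.nn.Module) -> dict[str, int]:
    # Keep only each parameter's storage pointer (no reference to the parameter itself),
    # so the sharding step is free to reallocate the original storage.
    return {name: p.data_ptr() for name, p in model.named_parameters()}

def detach_optimizer_from_params(optimizer: torch.optim.Optimizer) -> None:
    # Swap the optimizer's params for empty placeholders so `fully_shard` triggers a
    # fresh allocation, while stashing the original pointer on each placeholder.
    for group in optimizer.param_groups:
        for i, p in enumerate(group["params"]):
            placeholder = torch.empty_like(p)
            placeholder.data_ptr = p.data_ptr()  # remember where the old param lived
            group["params"][i] = placeholder

def reattach_optimizer_to_sharded_params(model, optimizer, old_pointers: dict[str, int]) -> None:
    # After `fully_shard`, map each saved pointer to its new sharded parameter and
    # point the optimizer at it.
    new_params = dict(model.named_parameters())
    ptr_to_new = {ptr: new_params[name] for name, ptr in old_pointers.items()}
    for group in optimizer.param_groups:
        group["params"] = [ptr_to_new[p.data_ptr] for p in group["params"]]
```

The helpers are called in exactly that order around sharding: snapshot the pointers, detach the optimizer, apply `fully_shard` to the model, then reattach the optimizer to the sharded parameters.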
> [!WARNING]
> This script is intended to fit on 2x 24GB GPUs, though on so few GPUs it's not possible to see the memory difference (discrepancies in grad allocation result in lower memory usage in the non-fixed case), only the difference in convergence. Results from 8x H100 GPUs, where the difference is visible, are attached below.
> TLDR: more GPUs = bigger memory difference between the fixed and non-fixed cases.

## Results

Here are the results from running the benchmark on 8x H100 GPUs:

<p align="center">
  <img src="imgs/allocated_memory.png" width="80%" alt="Allocated Memory Usage">
</p>
<p align="center">
  <img src="imgs/reserved_memory.png" width="80%" alt="Reserved Memory Usage">
</p>

As you can see, the memory usage of `accelerate` and `torch_post_shard` (the **intended** way) is very similar, while `torch_pre_shard_not_fixed` uses significantly more memory. Our fix in `torch_pre_shard_fixed` brings the memory usage back in line with the **intended** approach.

> [!WARNING]
> Timing discrepancies are due to all the benchmarks being run in a single script.

## Running

To run the benchmark, you can either use `accelerate launch` or `torchrun`:
```bash
accelerate launch main.py
```
```bash
# For two GPUs
torchrun --nproc_per_node 2 main.py
```

This supports multiple configurable options; you can learn about them by running:
```bash
python3 main.py --help
```

This script will run 4 different benchmarks:
- `torch_optimizer_after_fsdp`: `torch` baseline where the optimizer is created after applying `FSDP2`; this is the **intended** way to do it
- `torch_optimizer_before_fsdp_not_fixed`: `torch` baseline where the optimizer is created before applying `FSDP2`, without fixing the optimizer parameters
- `torch_optimizer_before_fsdp_fixed`: `torch` baseline where the optimizer is created before applying `FSDP2`, with our fix to the optimizer applied
- `accelerate`: `accelerate`'s own integration of `FSDP2`, where the optimizer is created before applying `FSDP2` but we apply our fix to the optimizer

Memory results are saved in the folder specified by the `--output_dir` argument.
Optionally, you can specify `--save_memory_snapshot` to save the torch memory snapshot, which can then be viewed using [`torch memory viz`](https://pytorch.org/memory_viz)

## Visualizing results

To visualize the results, you can run:

```bash
python3 visualize.py --dir <path_to_output_dir>
```

This will then create two plots, showcasing the allocated and reserved memory usage across all the different benchmarks discussed above.
Binary file not shown. (Before: 124 KiB)
Binary file not shown. (Before: 56 KiB)
@ -1,122 +0,0 @@
|
||||
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import functools
|
||||
from typing import Callable
|
||||
|
||||
import torch
|
||||
|
||||
from accelerate import Accelerator
|
||||
from utils import parse_args, prepare_accelerate, prepare_torch
|
||||
|
||||
|
||||
MODEL_NAME = "Qwen/Qwen2.5-1.5B-Instruct"
|
||||
LEARNING_RATE = 3e-5
|
||||
|
||||
CONFIG = {
|
||||
"model_name": MODEL_NAME,
|
||||
"learning_rate": LEARNING_RATE,
|
||||
}
|
||||
|
||||
|
||||
def train(
|
||||
model: torch.nn.Module,
|
||||
optimizer: torch.optim.Optimizer,
|
||||
train_dataloader: torch.utils.data.DataLoader,
|
||||
accelerator: Accelerator,
|
||||
) -> torch.Tensor:
|
||||
losses = []
|
||||
for batch in train_dataloader:
|
||||
optimizer.zero_grad()
|
||||
outputs = model(**batch, use_cache=False)
|
||||
|
||||
loss = outputs.loss
|
||||
losses.append(loss.item())
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
|
||||
return torch.tensor(losses)
|
||||
|
||||
|
||||
def evaluate(args, config: dict, init_fn: Callable, run_name: str) -> torch.Tensor:
|
||||
model, optimizer, dataloader, accelerator, memory_tracker = init_fn(args, config)
|
||||
|
||||
loss = train(model, optimizer, dataloader, accelerator)
|
||||
|
||||
memory_tracker.stop()
|
||||
msg = f"""Results for {run_name} (rank 0):
|
||||
Loss: {loss[-1].item()}
|
||||
Peak Allocated Memory: {float(memory_tracker.peak_allocated_memory):.2f} MB
|
||||
Peak Reserved Memory: {float(memory_tracker.peak_reserved_memory):.2f} MB
|
||||
{"-" * 34}"""
|
||||
accelerator.print(msg)
|
||||
return loss
|
||||
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
evaluations = [
|
||||
functools.partial(
|
||||
evaluate,
|
||||
init_fn=functools.partial(prepare_torch, post_shard_optimizer=False, apply_optimizer_fix=True),
|
||||
run_name="Optimizer Before FSDP (w/ fix)",
|
||||
),
|
||||
functools.partial(
|
||||
evaluate,
|
||||
init_fn=functools.partial(prepare_torch, post_shard_optimizer=False, apply_optimizer_fix=False),
|
||||
run_name="Optimizer Before FSDP (w/o fix)",
|
||||
),
|
||||
functools.partial(
|
||||
evaluate,
|
||||
init_fn=functools.partial(prepare_torch, post_shard_optimizer=True),
|
||||
run_name="Optimizer After FSDP",
|
||||
),
|
||||
functools.partial(evaluate, init_fn=prepare_accelerate, run_name="Accelerate"),
|
||||
]
|
||||
labels = [
|
||||
"Optimizer Before FSDP (w/ fix)",
|
||||
"Optimizer Before FSDP (w/o fix)",
|
||||
"Optimizer After FSDP",
|
||||
"Accelerate",
|
||||
]
|
||||
|
||||
results = {}
|
||||
torch.use_deterministic_algorithms(True)
|
||||
|
||||
for evaluation, label in zip(evaluations, labels):
|
||||
results[label] = evaluation(args, CONFIG)
|
||||
|
||||
torch.testing.assert_close(
|
||||
results["Optimizer After FSDP"],
|
||||
results["Optimizer Before FSDP (w/ fix)"],
|
||||
msg="Optimizer After FSDP and Optimizer Before FSDP (w/ fix) should be the same",
|
||||
)
|
||||
|
||||
torch.testing.assert_close(
|
||||
results["Optimizer After FSDP"],
|
||||
results["Accelerate"],
|
||||
msg="Optimizer After FSDP and Accelerate should be the same",
|
||||
)
|
||||
|
||||
torch.testing.assert_close(
|
||||
results["Accelerate"],
|
||||
results["Optimizer Before FSDP (w/ fix)"],
|
||||
msg="Accelerate and Optimizer Before FSDP (w/ fix) should be the same",
|
||||
)
|
||||
|
||||
torch.distributed.destroy_process_group()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@ -1,130 +0,0 @@
|
||||
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import gc
|
||||
import json
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
|
||||
import psutil
|
||||
import torch
|
||||
|
||||
from accelerate import PartialState
|
||||
|
||||
|
||||
class MemoryTracker:
|
||||
def __init__(
|
||||
self,
|
||||
device: torch.device,
|
||||
output_directory: str,
|
||||
run_name: str,
|
||||
save_memory_snapshot: bool,
|
||||
log_interval: float = 0.01,
|
||||
):
|
||||
"""Class for tracking gpu and cpu memory usage of the process.
|
||||
|
||||
Args:
|
||||
device (`torch.device`):
|
||||
PyTorch device to monitor.
|
||||
output_directory (`str`):
|
||||
Directory to save the memory usage data to, will be created if it doesn't exist.
|
||||
run_name (`str`):
|
||||
Name of the run, will be used to name the output files.
|
||||
save_memory_snapshot (`bool`):
|
||||
Whether to also save `torch.cuda.memory._dump_snapshot` to the output directory.
|
||||
log_interval (`float`, *optional*):
|
||||
Interval in seconds between memory measurements. Defaults to 0.01.
|
||||
"""
|
||||
self.log_interval = log_interval
|
||||
self.save_memory_snapshot = save_memory_snapshot
|
||||
self.output_directory = output_directory
|
||||
self.run_name = run_name
|
||||
|
||||
self.timestamps = []
|
||||
self.allocated_memory = []
|
||||
self.reserved_memory = []
|
||||
self.virtual_memory = []
|
||||
|
||||
self.start_time = None
|
||||
self.running = False
|
||||
|
||||
self._thread = None
|
||||
self._state = PartialState()
|
||||
self._process = psutil.Process()
|
||||
self._device = device
|
||||
self.torch_accelerator_module = getattr(torch, device.type, torch.cuda)
|
||||
|
||||
def _monitor(self):
|
||||
self.start_time = time.time()
|
||||
|
||||
while self.running:
|
||||
allocated = self.torch_accelerator_module.memory_allocated(self._device) / (1024 * 1024)
|
||||
reserved = self.torch_accelerator_module.memory_reserved(self._device) / (1024 * 1024)
|
||||
virtual_memory = self._process.memory_info().rss / (1024 * 1024)
|
||||
|
||||
self.allocated_memory.append(allocated)
|
||||
self.reserved_memory.append(reserved)
|
||||
self.virtual_memory.append(virtual_memory)
|
||||
self.timestamps.append(time.time() - self.start_time)
|
||||
|
||||
time.sleep(self.log_interval)
|
||||
|
||||
def start(self):
|
||||
gc.collect()
|
||||
self.torch_accelerator_module.empty_cache()
|
||||
|
||||
if self.output_directory:
|
||||
os.makedirs(self.output_directory, exist_ok=True)
|
||||
|
||||
if self.save_memory_snapshot:
|
||||
self.torch_accelerator_module.memory._record_memory_history()
|
||||
|
||||
self.running = True
|
||||
self._thread = threading.Thread(target=self._monitor)
|
||||
self._thread.daemon = True
|
||||
self._thread.start()
|
||||
|
||||
def stop(self):
|
||||
self.running = False
|
||||
if self._thread:
|
||||
self._thread.join()
|
||||
|
||||
if self.save_memory_snapshot and self._state.is_main_process and self.output_directory:
|
||||
output_file = os.path.join(self.output_directory, f"{self.run_name}_memory_snapshot.pkl")
|
||||
self.torch_accelerator_module.memory._dump_snapshot(output_file)
|
||||
|
||||
if self._state.is_main_process and self.output_directory:
|
||||
path = os.path.join(self.output_directory, f"{self.run_name}_memory_usage.json")
|
||||
with open(path, "w") as f:
|
||||
json.dump(
|
||||
{
|
||||
"timestamps": self.timestamps,
|
||||
"allocated_memory": self.allocated_memory,
|
||||
"reserved_memory": self.reserved_memory,
|
||||
"virtual_memory": self.virtual_memory,
|
||||
},
|
||||
f,
|
||||
)
|
||||
if self.save_memory_snapshot:
|
||||
self.torch_accelerator_module.memory._record_memory_history(False)
|
||||
self.torch_accelerator_module.empty_cache()
|
||||
|
||||
@property
|
||||
def peak_allocated_memory(self):
|
||||
return max(self.allocated_memory)
|
||||
|
||||
@property
|
||||
def peak_reserved_memory(self):
|
||||
return max(self.reserved_memory)
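For reference, a minimal usage sketch of this tracker (mirroring how `utils.py` below drives it; the device index and directory name are placeholders):

```python
import torch
from measure_utils import MemoryTracker  # the class defined above

tracker = MemoryTracker(
    device=torch.device("cuda", 0),
    output_directory="benchmark_results",  # created if it doesn't exist
    run_name="accelerate",
    save_memory_snapshot=False,
)
tracker.start()   # spawns a daemon thread sampling memory every `log_interval` seconds
# ... build the model, apply FSDP2, run the training loop ...
tracker.stop()    # joins the thread; the main process writes `<run_name>_memory_usage.json`
print(f"Peak allocated: {tracker.peak_allocated_memory:.2f} MB")
print(f"Peak reserved: {tracker.peak_reserved_memory:.2f} MB")
```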
|
||||
@ -1,290 +0,0 @@
|
||||
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
from types import MethodType
|
||||
from typing import Union
|
||||
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from measure_utils import MemoryTracker
|
||||
from torch.distributed.fsdp import MixedPrecisionPolicy, fully_shard
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, DataCollatorForLanguageModeling
|
||||
from transformers.models.qwen2.modeling_qwen2 import Qwen2DecoderLayer
|
||||
|
||||
from accelerate import Accelerator, FullyShardedDataParallelPlugin
|
||||
from accelerate.state import AcceleratorState, is_initialized
|
||||
from accelerate.utils import convert_outputs_to_fp32, set_seed
|
||||
|
||||
|
||||
SEED = 421
|
||||
|
||||
|
||||
def get_named_parameters(model: torch.nn.Module, drop_refs: bool = False) -> dict[str, Union[torch.Tensor, int]]:
|
||||
"""
|
||||
This function returns a dictionary mapping the parameter names to their data pointers or
|
||||
the original parameters if `drop_refs` is `False`.
|
||||
It is used to get the original parameter names before `fully_shard` is applied.
|
||||
|
||||
We only return the data pointers, so we drop the references to the original parameters
|
||||
and `fully_shard` will then trigger a new allocation for the sharded ones.
|
||||
|
||||
Args:
|
||||
model (`torch.nn.Module`): Model instance to get the named parameters from
|
||||
drop_refs (`bool`, *optional*, defaults to `False`): Whether to drop the references to the original parameters
|
||||
|
||||
Returns:
|
||||
`dict[str, Union[torch.Tensor, int]]`: Dictionary mapping the parameter names to their data pointers or the original parameters if `drop_refs` is `False`
|
||||
"""
|
||||
named_parameters = {}
|
||||
for n, p in model.named_parameters():
|
||||
# We only preserve the data pointers to have the unique 1:1 mapping between the original and the sharded parameters
|
||||
named_parameters[n] = p.data_ptr() if drop_refs else p
|
||||
return named_parameters
|
||||
|
||||
|
||||
def replace_optimizer_params(optimizer: torch.optim.Optimizer):
|
||||
"""
|
||||
This function is called before using `fully_shard` on the model. It replaces the parameters of the optimizer with
|
||||
empty tensors, so `fully_shard` can trigger a new allocation for the sharded ones. After this, we swap the parameters
|
||||
`data_ptr` to the original one, so we can reuse that later to map the sharded parameters to the original ones.
|
||||
This function modifies the optimizer in-place.
|
||||
|
||||
Args:
|
||||
optimizer (torch.optim.Optimizer): Optimizer instance which contains the original model parameters
|
||||
"""
|
||||
|
||||
for param_group in optimizer.param_groups:
|
||||
for i, p in enumerate(param_group["params"]):
|
||||
# We drop a reference to the original param here, so that _move_states_to_device triggers a reallocation
|
||||
# This is required or else the `fully_shard` -> `_move_states_to_device` uses the original memory address
|
||||
# for the sharded parameters, and we get a weird/undefined behavior.
|
||||
param_group["params"][i] = torch.empty_like(p)
|
||||
|
||||
# We save the original data_ptr, so we can swap back the parameters later
|
||||
param_group["params"][i].data_ptr = p.data_ptr()
|
||||
|
||||
|
||||
def swap_back_optimizer_params(
|
||||
model: torch.nn.Module, optimizer: torch.optim.Optimizer, old_named_parameter_pointers: dict[str, int]
|
||||
):
|
||||
"""
|
||||
This function is the counterpart of `replace_optimizer_params`. It is called after `fully_shard` being applied to
|
||||
the model. It swaps the parameters of the optimizer to their sharded counterparts.
|
||||
It is done using the `data_ptr` mapping prepared in `replace_optimizer_params` and `get_named_parameters`.
|
||||
|
||||
Args:
|
||||
model (`torch.nn.Module`): Model instance to get the new named parameters from
|
||||
optimizer (`torch.optim.Optimizer`): Optimizer instance to swap the parameters of
|
||||
old_named_parameter_pointers (`dict[str, int]`): Dictionary mapping the original parameter names: data_ptrs to the new ones
|
||||
"""
|
||||
# We get the new named parameters after `fully_shard` being applied
|
||||
# We don't drop the references as we need the sharded parameters now
|
||||
new_named_parameters = get_named_parameters(model, drop_refs=False)
|
||||
|
||||
# We create a mapping from the original data_ptr to the new sharded param corresponding to it
|
||||
mapping = {p: new_named_parameters[n] for n, p in old_named_parameter_pointers.items()}
|
||||
|
||||
for param_group in optimizer.param_groups:
|
||||
# We swap the parameters of the optimizer to the new sharded ones
|
||||
param_group["params"] = [mapping[p.data_ptr] for p in param_group["params"]]
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--output_dir",
|
||||
type=str,
|
||||
help="Directory to save the benchmarking results.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--save_memory_snapshot",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="If True, `torch.cuda.memory._dump_snapshot` will be used to additionaly save the memory trace.",
|
||||
)
|
||||
######################
|
||||
# Training arguments #
|
||||
######################
|
||||
parser.add_argument(
|
||||
"--batch_size",
|
||||
type=int,
|
||||
default=2,
|
||||
help="Batch size for the training loop.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--block_size",
|
||||
type=int,
|
||||
default=128,
|
||||
help="The maximum sequence length to use with the model.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--dataset_fraction",
|
||||
type=float,
|
||||
default=1.0,
|
||||
help="Fraction of the dataset to use.",
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def prepare_dataloader(tokenizer, args, accelerator: Accelerator) -> DataLoader:
|
||||
dataset = load_dataset("tiny_shakespeare", split="train", trust_remote_code=True)
|
||||
|
||||
def tokenize_function(example):
|
||||
return tokenizer(
|
||||
example["text"],
|
||||
)
|
||||
|
||||
dataset = dataset.map(
|
||||
tokenize_function,
|
||||
batched=True,
|
||||
remove_columns=["text"],
|
||||
)
|
||||
|
||||
block_size = min(tokenizer.model_max_length, args.block_size)
|
||||
|
||||
def group_texts(examples):
|
||||
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
|
||||
total_length = len(concatenated_examples[list(examples.keys())[0]])
|
||||
|
||||
total_length = (total_length // block_size) * block_size
|
||||
|
||||
result = {
|
||||
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
|
||||
for k, t in concatenated_examples.items()
|
||||
}
|
||||
|
||||
result["labels"] = result["input_ids"].copy()
|
||||
return result
|
||||
|
||||
dataset = dataset.map(group_texts, batched=True)
|
||||
dataset = dataset.select(range(int(len(dataset) * args.dataset_fraction)))
|
||||
|
||||
def collate_fn(examples):
|
||||
return DataCollatorForLanguageModeling(
|
||||
tokenizer=tokenizer,
|
||||
mlm=False,
|
||||
)(examples)
|
||||
|
||||
dataloader = DataLoader(
|
||||
dataset,
|
||||
batch_size=args.batch_size,
|
||||
collate_fn=collate_fn,
|
||||
)
|
||||
dataloader = accelerator.prepare(dataloader)
|
||||
return dataloader
|
||||
|
||||
|
||||
def get_model(model_name: str):
|
||||
# We require the model to be loaded in fp32, otherwise the benchmarks don't match, as accelerate upcasts the parameters to fp32
|
||||
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True, torch_dtype=torch.float32)
|
||||
model = AutoModelForCausalLM.from_config(config)
|
||||
return model
|
||||
|
||||
|
||||
def get_tokenizer(model_name: str):
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
return tokenizer
|
||||
|
||||
|
||||
def prepare_torch(
|
||||
args, config: dict, post_shard_optimizer: bool = False, apply_optimizer_fix: bool = False
|
||||
) -> tuple[torch.nn.Module, torch.optim.Optimizer, torch.utils.data.DataLoader, Accelerator]:
|
||||
mp_policy = MixedPrecisionPolicy(
|
||||
param_dtype=torch.bfloat16,
|
||||
reduce_dtype=torch.bfloat16,
|
||||
output_dtype=torch.bfloat16,
|
||||
)
|
||||
|
||||
accelerator = Accelerator(mixed_precision="bf16")
|
||||
set_seed(SEED)
|
||||
is_fixed = "fixed" if apply_optimizer_fix else "not_fixed"
|
||||
is_post_shard = "optimizer_after_fsdp" if post_shard_optimizer else "optimizer_before_fsdp"
|
||||
run_name = f"torch_{is_post_shard}" if post_shard_optimizer else f"torch_{is_post_shard}_{is_fixed}"
|
||||
|
||||
tokenizer = get_tokenizer(config["model_name"])
|
||||
train_dataloader = prepare_dataloader(tokenizer, args, accelerator)
|
||||
|
||||
memory_tracker = MemoryTracker(accelerator.device, args.output_dir, run_name, args.save_memory_snapshot)
|
||||
memory_tracker.start()
|
||||
|
||||
model = get_model(config["model_name"])
|
||||
optimizer = None
|
||||
|
||||
if not post_shard_optimizer:
|
||||
optimizer = AdamW(model.parameters(), lr=config["learning_rate"])
|
||||
|
||||
if apply_optimizer_fix:
|
||||
# We drop the references to the original parameters, so that `fully_shard` can trigger a new allocation
|
||||
# Then we get the `module_name: data_ptr` mapping, so we can swap back the parameters later
|
||||
old_named_parameters = get_named_parameters(model, drop_refs=True)
|
||||
|
||||
# We replace the parameters of the optimizer with empty tensors, so that `fully_shard` can trigger a new allocation
|
||||
# We also change the `data_ptr` of the parameters to the original ones, so we can swap back the parameters later
|
||||
replace_optimizer_params(optimizer)
|
||||
|
||||
for module in model.modules():
|
||||
if isinstance(module, Qwen2DecoderLayer):
|
||||
fully_shard(module, mp_policy=mp_policy)
|
||||
fully_shard(model, mp_policy=mp_policy)
|
||||
|
||||
# We do this to imitate how accelerate forces outputs to be in fp32 via `convert_outputs_to_fp32`
|
||||
autocast_context = torch.autocast(device_type=accelerator.state.device.type, dtype=torch.bfloat16)
|
||||
model_forward_func = model.forward.__func__
|
||||
new_forward = autocast_context(model_forward_func)
|
||||
model.forward = MethodType(new_forward, model)
|
||||
model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model)
|
||||
|
||||
if post_shard_optimizer:
|
||||
optimizer = AdamW(model.parameters(), lr=config["learning_rate"])
|
||||
|
||||
if not post_shard_optimizer and apply_optimizer_fix:
|
||||
# We swap back the parameters of the optimizer to the original ones
|
||||
swap_back_optimizer_params(model, optimizer, old_named_parameters)
|
||||
|
||||
return model, optimizer, train_dataloader, accelerator, memory_tracker
|
||||
|
||||
|
||||
def prepare_accelerate(
|
||||
args, config: dict
|
||||
) -> tuple[torch.nn.Module, torch.optim.Optimizer, torch.utils.data.DataLoader, Accelerator]:
|
||||
if is_initialized():
|
||||
AcceleratorState()._reset_state(True)
|
||||
|
||||
fsdp_plugin = FullyShardedDataParallelPlugin(
|
||||
fsdp_version=2,
|
||||
auto_wrap_policy="transformer_based_wrap",
|
||||
transformer_cls_names_to_wrap=["Qwen2DecoderLayer"],
|
||||
)
|
||||
accelerator = Accelerator(
|
||||
fsdp_plugin=fsdp_plugin,
|
||||
mixed_precision="bf16",
|
||||
)
|
||||
set_seed(SEED)
|
||||
|
||||
tokenizer = get_tokenizer(config["model_name"])
|
||||
train_dataloader = prepare_dataloader(tokenizer, args, accelerator)
|
||||
|
||||
memory_tracker = MemoryTracker(accelerator.device, args.output_dir, "accelerate", args.save_memory_snapshot)
|
||||
memory_tracker.start()
|
||||
|
||||
model = get_model(config["model_name"])
|
||||
optimizer = AdamW(model.parameters(), lr=config["learning_rate"])
|
||||
|
||||
model, optimizer = accelerator.prepare(model, optimizer)
|
||||
|
||||
return model, optimizer, train_dataloader, accelerator, memory_tracker
|
||||
@ -1,114 +0,0 @@
|
||||
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import json
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--dir", type=str, help="Directory containing the memory usage data")
|
||||
parser.add_argument(
|
||||
"--memory_threshold",
|
||||
type=int,
|
||||
default=0,
|
||||
help="Memory threshold to filter data that is below this value (only filters 1st `--filter_partition` of the points which should roughtly correspond to the model loading)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--filter_partition",
|
||||
type=float,
|
||||
default=1 / 3,
|
||||
help="Partition to drop data from that are below the memory threshold",
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def filter_data(data, memory_threshold, filter_partition, key):
|
||||
timestamps = data["timestamps"]
|
||||
memory = data[key]
|
||||
|
||||
mid_point = int(len(timestamps) * filter_partition)
|
||||
filtered_times = []
|
||||
filtered_memory = []
|
||||
for i, (t, m) in enumerate(zip(timestamps, memory)):
|
||||
if i < mid_point and m < memory_threshold:
|
||||
continue
|
||||
filtered_times.append(t)
|
||||
filtered_memory.append(m)
|
||||
return filtered_times, filtered_memory
|
||||
|
||||
|
||||
def compare_memory_usage(data, labels, memory_threshold, filter_partition):
|
||||
plt.style.use("seaborn-v0_8")
|
||||
colors = ["#2ecc71", "#e74c3c", "#3498db", "#f1c40f"]
|
||||
|
||||
fig1, ax1 = plt.subplots(figsize=(15, 5))
|
||||
for data_item, label, color in zip(data, labels, colors):
|
||||
timestamps, allocated = filter_data(data_item, memory_threshold, filter_partition, "allocated_memory")
|
||||
ax1.plot(timestamps, allocated, label=label, color=color, linewidth=2)
|
||||
|
||||
ax1.set_xlabel("Time (s)", fontsize=12)
|
||||
ax1.set_ylabel("Allocated Memory (GB)", fontsize=12)
|
||||
ax1.set_title("Allocated Memory Usage Over Time", fontsize=14, pad=15)
|
||||
ax1.grid(True, linestyle="--", alpha=0.7)
|
||||
ax1.legend(frameon=True, fancybox=True, shadow=True, fontsize=10)
|
||||
ax1.spines["top"].set_visible(False)
|
||||
ax1.spines["right"].set_visible(False)
|
||||
plt.tight_layout()
|
||||
|
||||
fig2, ax2 = plt.subplots(figsize=(15, 5))
|
||||
for data_item, label, color in zip(data, labels, colors):
|
||||
timestamps, reserved = filter_data(data_item, memory_threshold, filter_partition, "reserved_memory")
|
||||
ax2.plot(timestamps, reserved, label=label, color=color, linewidth=2)
|
||||
|
||||
ax2.set_xlabel("Time (s)", fontsize=12)
|
||||
ax2.set_ylabel("Reserved Memory (GB)", fontsize=12)
|
||||
ax2.set_title("Reserved Memory Usage Over Time", fontsize=14, pad=15)
|
||||
ax2.grid(True, linestyle="--", alpha=0.7)
|
||||
ax2.legend(frameon=True, fancybox=True, shadow=True, fontsize=10)
|
||||
ax2.spines["top"].set_visible(False)
|
||||
ax2.spines["right"].set_visible(False)
|
||||
plt.tight_layout()
|
||||
|
||||
return fig1, fig2
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = parse_args()
|
||||
DIR = args.dir
|
||||
with open(f"{DIR}/torch_optimizer_before_fsdp_not_fixed_memory_usage.json") as f:
|
||||
optimizer_before_fsdp_not_fixed = json.load(f)
|
||||
|
||||
with open(f"{DIR}/torch_optimizer_after_fsdp_memory_usage.json") as f:
|
||||
optimizer_after_fsdp = json.load(f)
|
||||
|
||||
with open(f"{DIR}/torch_optimizer_before_fsdp_fixed_memory_usage.json") as f:
|
||||
optimizer_before_fsdp_fixed = json.load(f)
|
||||
|
||||
with open(f"{DIR}/accelerate_memory_usage.json") as f:
|
||||
accelerate = json.load(f)
|
||||
|
||||
data = [optimizer_before_fsdp_not_fixed, optimizer_before_fsdp_fixed, optimizer_after_fsdp, accelerate]
|
||||
labels = [
|
||||
"Optimizer Before FSDP (w/o fix)",
|
||||
"Optimizer Before FSDP (w/ fix)",
|
||||
"Optimizer After FSDP",
|
||||
"Accelerate",
|
||||
]
|
||||
|
||||
fig1, fig2 = compare_memory_usage(data, labels, args.memory_threshold, args.filter_partition)
|
||||
fig1.savefig(f"{DIR}/allocated_memory.png")
|
||||
fig2.savefig(f"{DIR}/reserved_memory.png")
|
||||
@ -1,111 +0,0 @@
|
||||
# Regional Compilation Benchmark
|
||||
|
||||
This benchmark compares different compilation strategies using PyTorch's `torch.compile` and Accelerate's `compile_regions` utility, which is based on the recipe in [PyTorch documentation](https://pytorch.org/tutorials/recipes/regional_compilation.html).
|
||||
|
||||
## Overview
|
||||
|
||||
The benchmark evaluates three approaches:
|
||||
|
||||
- **Baseline**: No compilation, standard PyTorch eager execution.
|
||||
- **Full compilation**: Using PyTorch's `torch.compile()` on the entire model.
|
||||
- **Regional compilation**: Using `accelerate.utils.compile_regions()` which targets specific blocks of the model to optimize compilation time.
|
||||
|
||||
Each approach is tested with different batch sizes (1 and 4) and sequence lengths (128) on various LLaMA-based models ranging from 1B to 13B parameters. We purposefully run the forward pass outside of the `torch.no_grad()` context to simulate performance in a training environment, where gradients are needed.
|
||||
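For reference, here is a minimal sketch of how the two compiled variants are constructed (`get_model` is a placeholder for however you build the model; the benchmark script below builds them from their configs):

```python
import torch
from accelerate.utils import compile_regions

model = get_model()  # placeholder: any nn.Module, e.g. a LLaMA-style decoder

full_compilation_model = torch.compile(model)         # compile the whole model as one graph
regional_compilation_model = compile_regions(model)   # compile the repeated blocks individually
```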
|
||||
## Usage
|
||||
|
||||
To run this benchmark:
|
||||
|
||||
```bash
|
||||
python regional_compilation.py
|
||||
```
|
||||
|
||||
The script will automatically download the model configurations, create models, and benchmark both compilation and inference times across different scenarios.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Suitable GPU memory for the models being tested.
|
||||
- PyTorch with CUDA support.
|
||||
- Transformers library.
|
||||
- Accelerate library.
|
||||
|
||||
## Results
|
||||
|
||||
The benchmark results are summarized in the following figures:
|
||||
|
||||
- Compilation time is how long it takes to run the first forward pass.
|
||||
- Speedup factor is the ratio of non-compiled baseline inference time to the fully/regionally compiled inference time.
|
||||
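For example, using the NousResearch/Llama-3.2-1B numbers from the tables below (1x128):

```python
baseline_ms = 18.3
full_compilation_ms = 6.3
regional_compilation_ms = 9.7

print(f"{baseline_ms / full_compilation_ms:.1f}x")      # ~2.9x speedup with full compilation
print(f"{baseline_ms / regional_compilation_ms:.1f}x")  # ~1.9x speedup with regional compilation
```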
|
||||
<p align="center">
|
||||
<img src="imgs/compilation_time.png" width="80%" alt="Compilation Time">
|
||||
</p>
|
||||
<p align="center">
|
||||
<img src="imgs/speedup_factor.png" width="80%" alt="Speedup Factor">
|
||||
</p>
|
||||
|
||||
Full results are available in the tables below:
|
||||
|
||||
```markdown
|
||||
[-------------------------------------------------- NousResearch/Llama-3.2-1B ---------------------------------------------------]
|
||||
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
|
||||
1 threads: -----------------------------------------------------------------------------------------------------------------------
|
||||
Baseline | 18.3 | 18.4 | |
|
||||
Full compilation | 6.3 | 10.0 | 10696.4 | 10248.0
|
||||
Regional compilation | 9.7 | 10.0 | 1952.7 | 2903.9
|
||||
|
||||
Times are in milliseconds (ms).
|
||||
|
||||
[---------------------------------------------- NousResearch/Hermes-3-Llama-3.2-3B ----------------------------------------------]
|
||||
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
|
||||
1 threads: -----------------------------------------------------------------------------------------------------------------------
|
||||
Baseline | 33.4 | 33.6 | |
|
||||
Full compilation | 11.2 | 23.9 | 17857.5 | 17736.5
|
||||
Regional compilation | 17.3 | 23.7 | 2993.2 | 2478.8
|
||||
|
||||
Times are in milliseconds (ms).
|
||||
|
||||
[---------------------------------------------- NousResearch/Hermes-3-Llama-3.1-8B ----------------------------------------------]
|
||||
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
|
||||
1 threads: -----------------------------------------------------------------------------------------------------------------------
|
||||
Baseline | 40.3 | 59.5 | |
|
||||
Full compilation | 18.9 | 54.4 | 20437.8 | 20152.3
|
||||
Regional compilation | 19.7 | 54.0 | 2903.1 | 2438.0
|
||||
|
||||
Times are in milliseconds (ms).
|
||||
|
||||
[--------------------------------------------- NousResearch/Nous-Hermes-Llama2-13b ----------------------------------------------]
|
||||
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
|
||||
1 threads: -----------------------------------------------------------------------------------------------------------------------
|
||||
Baseline | 45.5 | 100.4 | |
|
||||
Full compilation | 29.4 | 89.7 | 23099.4 | 22885.9
|
||||
Regional compilation | 29.4 | 87.5 | 2945.5 | 2526.2
|
||||
|
||||
Times are in milliseconds (ms).
|
||||
```
|
||||
|
||||
## Results Summary
|
||||
|
||||
### Compilation Time
|
||||
|
||||
Regional compilation provides significantly faster compilation times compared to full model compilation:
|
||||
|
||||
- **Full compilation**: Takes ~10-23 seconds depending on model size.
|
||||
- **Regional compilation**: Takes only ~2-3 seconds across all model sizes.
|
||||
- **Speed improvement**: Regional compilation is **5-9x faster** to compile.
|
||||
|
||||
### Inference Time
|
||||
|
||||
Regional compilation delivers inference performance close to full compilation:
|
||||
|
||||
- For batch size 1:
|
||||
- For smaller models (1B-3B): Full compilation has a slight edge over regional compilation.
|
||||
- For larger models (8B-13B): Regional compilation performs similarly to full compilation.
|
||||
- For batch size 4: Regional compilation performs similarly to full compilation across all models.
|
||||
|
||||
## Key Takeaways
|
||||
|
||||
1. **Comparable Performance**: Regional compilation delivers performance speedups similar to full compilation, especially for larger models.
|
||||
2. **Faster Compilation**: Regional compilation significantly reduces the time taken to compile models, making it a more efficient choice for deployment.
|
||||
3. **Batch Size Impact**: At batch size 4, full compilation and regional compilation perform nearly identically.
|
||||
4. **Model Size Impact**: Even with a small batch size, full compilation and regional compilation perform similarly for larger models (8B-13B).
|
||||
5. **Practical Application**: For real-world applications, regional compilation is a practical choice for optimizing training cold start times, especially when working with large models.
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 242 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 218 KiB |
@ -1,77 +0,0 @@
|
||||
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import torch
|
||||
from torch.utils.benchmark import Compare, Timer
|
||||
from transformers import AutoConfig, AutoModelForCausalLM
|
||||
|
||||
from accelerate.test_utils.testing import get_backend
|
||||
from accelerate.utils import compile_regions
|
||||
|
||||
|
||||
torch.set_float32_matmul_precision("high")
|
||||
|
||||
COMPILE_ITERS = 2
|
||||
INFERENCE_ITERS = 100
|
||||
|
||||
BASELINE = "Baseline"
|
||||
COMPILE_TIME = "Compile time"
|
||||
INFRENCE_TIME = "Inference time"
|
||||
FULL_COMPILATION = "Full compilation"
|
||||
REGIONAL_COMPILATION = "Regional compilation"
|
||||
|
||||
INFRENCE_STMT = "model(input_ids, use_cache=False)"
|
||||
COMPILE_STMT = f"torch._dynamo.reset(); torch._inductor.utils.clear_inductor_caches(); {INFRENCE_STMT}"
|
||||
|
||||
torch_device_type, _, _ = get_backend()
|
||||
|
||||
results = []
|
||||
for model_id in [
|
||||
# non-gated llama models
|
||||
"NousResearch/Llama-3.2-1B",
|
||||
"NousResearch/Hermes-3-Llama-3.2-3B",
|
||||
"NousResearch/Hermes-3-Llama-3.1-8B",
|
||||
"NousResearch/Nous-Hermes-Llama2-13b",
|
||||
]:
|
||||
with torch.device(torch_device_type):
|
||||
config = AutoConfig.from_pretrained(model_id)
|
||||
model = AutoModelForCausalLM.from_config(config).to(dtype=torch.float16).eval()
|
||||
|
||||
full_compilation_model = torch.compile(model)
|
||||
regional_compilation_model = compile_regions(model)
|
||||
|
||||
for model, sub_label, description, stmt, iters in [
|
||||
(model, BASELINE, INFRENCE_TIME, INFRENCE_STMT, INFERENCE_ITERS),
|
||||
(full_compilation_model, FULL_COMPILATION, COMPILE_TIME, COMPILE_STMT, COMPILE_ITERS),
|
||||
(full_compilation_model, FULL_COMPILATION, INFRENCE_TIME, INFRENCE_STMT, INFERENCE_ITERS),
|
||||
(regional_compilation_model, REGIONAL_COMPILATION, COMPILE_TIME, COMPILE_STMT, COMPILE_ITERS),
|
||||
(regional_compilation_model, REGIONAL_COMPILATION, INFRENCE_TIME, INFRENCE_STMT, INFERENCE_ITERS),
|
||||
]:
|
||||
for batch_size, sequence_length in [(1, 128), (4, 128)]:
|
||||
input_ids = torch.randint(
|
||||
0, 1000, size=(batch_size, sequence_length), dtype=torch.int64, device=torch_device_type
|
||||
)
|
||||
results.append(
|
||||
Timer(
|
||||
label=model_id,
|
||||
sub_label=sub_label,
|
||||
description=f"{description} ({batch_size}x{sequence_length})",
|
||||
globals={"model": model, "input_ids": input_ids},
|
||||
stmt=stmt,
|
||||
).timeit(number=iters)
|
||||
)
|
||||
|
||||
compare = Compare(results)
|
||||
compare.colorize()
|
||||
compare.print()
|
||||
@ -1,7 +1,7 @@
|
||||
# Builds CPU-only Docker image of PyTorch
|
||||
# Uses multi-staged approach to reduce size
|
||||
# Stage 1
|
||||
FROM python:3.9-slim as compile-image
|
||||
FROM python:3.8-slim as compile-image
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
@ -25,7 +25,7 @@ RUN python3 -m pip install --no-cache-dir \
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||
|
||||
# Stage 2
|
||||
FROM python:3.9-slim AS build-image
|
||||
FROM python:3.8-slim AS build-image
|
||||
COPY --from=compile-image /opt/venv /opt/venv
|
||||
RUN useradd -ms /bin/bash user
|
||||
USER user
|
||||
|
||||
@ -25,12 +25,12 @@ RUN source activate accelerate && conda install -c conda-forge mpi4py
|
||||
RUN source activate accelerate && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers,deepspeed] \
|
||||
--extra-index-url https://download.pytorch.org/whl/cu126
|
||||
--extra-index-url https://download.pytorch.org/whl/cu117
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir bitsandbytes
|
||||
|
||||
# Stage 2
|
||||
FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu22.04 AS build-image
|
||||
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 AS build-image
|
||||
COPY --from=compile-image /opt/conda /opt/conda
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
|
||||
|
||||
@ -24,12 +24,12 @@ RUN source activate accelerate && conda install -c conda-forge mpi4py
|
||||
RUN source activate accelerate && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \
|
||||
--extra-index-url https://download.pytorch.org/whl/cu126
|
||||
--extra-index-url https://download.pytorch.org/whl/cu117
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir bitsandbytes
|
||||
|
||||
# Stage 2
|
||||
FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu22.04 AS build-image
|
||||
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 AS build-image
|
||||
COPY --from=compile-image /opt/conda /opt/conda
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
|
||||
|
||||
@ -64,10 +64,6 @@
|
||||
title: Apple M1 GPUs
|
||||
- local: usage_guides/ipex
|
||||
title: IPEX training with CPU
|
||||
- local: usage_guides/gaudi
|
||||
title: Intel Gaudi
|
||||
- local: usage_guides/compilation
|
||||
title: Compilation
|
||||
title: Training
|
||||
- isExpanded: true
|
||||
sections:
|
||||
@ -82,8 +78,6 @@
|
||||
title: Accelerate's internal mechanism
|
||||
- local: concept_guides/big_model_inference
|
||||
title: Loading big models into memory
|
||||
- local: concept_guides/context_parallel
|
||||
title: Context parallelism
|
||||
- local: concept_guides/performance
|
||||
title: Comparing performance across distributed setups
|
||||
- local: concept_guides/deferring_execution
|
||||
@ -92,14 +86,12 @@
|
||||
title: Gradient synchronization
|
||||
- local: concept_guides/fsdp_and_deepspeed
|
||||
title: FSDP vs DeepSpeed
|
||||
- local: concept_guides/fsdp1_vs_fsdp2
|
||||
title: FSDP1 vs FSDP2
|
||||
- local: concept_guides/low_precision_training
|
||||
title: Low precision training methods
|
||||
- local: concept_guides/training_tpu
|
||||
title: Training on TPUs
|
||||
title: Concepts and fundamentals
|
||||
- sections:
|
||||
- sections:
|
||||
- local: package_reference/accelerator
|
||||
title: Accelerator
|
||||
- local: package_reference/state
|
||||
|
||||
@ -79,36 +79,23 @@ accelerate env
|
||||
|
||||
An example output is shown below, which describes two GPUs on a single machine with no mixed precision being used:
|
||||
|
||||
|
||||
```bash
|
||||
- `Accelerate` version: 1.2.0.dev0
|
||||
- Platform: Linux-6.8.0-47-generic-x86_64-with-glibc2.35
|
||||
- `accelerate` bash location: /home/zach/miniconda3/envs/accelerate/bin/accelerate
|
||||
- Python version: 3.10.13
|
||||
- Numpy version: 1.26.4
|
||||
- PyTorch version (GPU?): 2.5.1+cu124 (True)
|
||||
- PyTorch XPU available: False
|
||||
- PyTorch NPU available: False
|
||||
- PyTorch MLU available: False
|
||||
- PyTorch MUSA available: False
|
||||
- System RAM: 187.91 GB
|
||||
- GPU type: NVIDIA GeForce RTX 4090
|
||||
- `Accelerate` version: 0.11.0.dev0
|
||||
- Platform: Linux-5.10.0-15-cloud-amd64-x86_64-with-debian-11.3
|
||||
- Python version: 3.7.12
|
||||
- Numpy version: 1.19.5
|
||||
- PyTorch version (GPU?): 1.12.0+cu102 (True)
|
||||
- `Accelerate` default config:
|
||||
- compute_environment: LOCAL_MACHINE
|
||||
- distributed_type: MULTI_GPU
|
||||
- mixed_precision: no
|
||||
- use_cpu: False
|
||||
- debug: False
|
||||
- num_processes: 2
|
||||
- machine_rank: 0
|
||||
- num_machines: 1
|
||||
- gpu_ids: all
|
||||
- rdzv_backend: static
|
||||
- same_network: True
|
||||
- main_process_ip: None
|
||||
- main_process_port: None
|
||||
- main_training_function: main
|
||||
- enable_cpu_affinity: False
|
||||
- downcast_bf16: no
|
||||
- tpu_use_cluster: False
|
||||
- tpu_use_sudo: False
|
||||
- tpu_env: []
|
||||
- deepspeed_config: {}
|
||||
- fsdp_config: {}
|
||||
```
|
||||
|
||||
@ -97,10 +97,7 @@ Since this runs the various torch spawn methods, all of the expected environment
|
||||
For example, here is how to use `accelerate launch` with a single GPU:
|
||||
|
||||
```bash
|
||||
# for cuda device:
|
||||
CUDA_VISIBLE_DEVICES="0" accelerate launch {script_name.py} --arg1 --arg2 ...
|
||||
# for xpu device:
|
||||
ZE_AFFINITY_MASK="0" accelerate launch {script_name.py} --arg1 --arg2 ...
|
||||
```
|
||||
|
||||
You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.
|
||||
@ -139,7 +136,7 @@ accelerate launch -h
|
||||
For a visualization of this difference, that earlier `accelerate launch` on multi-gpu would look something like so with `torchrun`:
|
||||
|
||||
```bash
|
||||
MIXED_PRECISION="fp16" torchrun --nproc_per_node=2 --nnodes=1 {script_name.py} {--arg1} {--arg2} ...
|
||||
MIXED_PRECISION="fp16" torchrun --nproc_per_node=2 --num_machines=1 {script_name.py} {--arg1} {--arg2} ...
|
||||
```
|
||||
|
||||
You can also launch your script utilizing the launch CLI as a python module itself, enabling the ability to pass in other python-specific
|
||||
|
||||
@ -145,7 +145,7 @@ Set the mixed precision type to use in the [`Accelerator`], and then use the [`~
|
||||
```diff
|
||||
+ accelerator = Accelerator(mixed_precision="fp16")
|
||||
+ with accelerator.autocast():
|
||||
loss = complex_loss_function(outputs, target)
|
||||
loss = complex_loss_function(outputs, target):
|
||||
```
|
||||
|
||||
## Save and load
|
||||
|
||||
@ -26,7 +26,7 @@ You will also learn how to setup a few requirements needed for ensuring your env
|
||||
|
||||
## Configuring the Environment
|
||||
|
||||
Before any training can be performed, an Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts:
|
||||
Before any training can be performed, a Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
@ -52,7 +52,7 @@ os._exit(00) # Restart the notebook
|
||||
|
||||
## Preparing the Dataset and Model
|
||||
|
||||
Next you should prepare your dataset. As mentioned earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU.
|
||||
Next you should prepare your dataset. As mentioned at earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU.
|
||||
|
||||
If you do, it is recommended to put that specific code into a function and call that from within the notebook launcher interface, which will be shown later.
|
||||
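As a rough preview of that pattern (a sketch only; the full version is built out later in this tutorial):

```python
from accelerate import notebook_launcher

def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64):
    ...  # build the dataloaders, model and optimizer *inside* this function

# Spawns the training function on each process; nothing touches a GPU before this call
notebook_launcher(training_loop, args=("fp16", 42, 64), num_processes=2)
```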
|
||||
@ -327,7 +327,7 @@ def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64):
|
||||
# Build dataloaders
|
||||
train_dataloader, eval_dataloader = get_dataloaders(batch_size)
|
||||
|
||||
# Instantiate the model (you build the model here so that the seed also controls new weight initializations)
|
||||
# Instantiate the model (you build the model here so that the seed also controls new weight initaliziations)
|
||||
model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
|
||||
|
||||
# Freeze the base model
|
||||
|
||||
@ -111,17 +111,17 @@ Input shapes:
|
||||
|
||||
For early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss), it may not be synchronized across all processes. As a result, a break can happen on process 0 but not on process 1 which will cause your code to hang indefinitely until a timeout occurs.
|
||||
|
||||
If you have early stopping conditionals, use the `set_trigger` and `check_trigger` methods to make sure all the processes
|
||||
If you have early stopping conditionals, use the `set_breakpoint` and `check_breakpoint` methods to make sure all the processes
|
||||
are ended correctly.
|
||||
|
||||
```py
|
||||
# Assume `should_do_breakpoint` is a custom defined function that returns a conditional,
|
||||
# and that conditional might be true only on process 1
|
||||
if should_do_breakpoint(loss):
|
||||
accelerator.set_trigger()
|
||||
accelerator.set_breakpoint()
|
||||
|
||||
# Later in the training script when we need to check for the breakpoint
|
||||
if accelerator.check_trigger():
|
||||
if accelerator.check_breakpoint():
|
||||
break
|
||||
```
|
||||
|
||||
@ -142,9 +142,9 @@ hostnames for each of the nodes.
|
||||
mpirun -f hostfile -n {number of nodes} -ppn 1 hostname
|
||||
```
|
||||
|
||||
## Out-of-Memory
|
||||
## CUDA Out-of-Memory
|
||||
|
||||
One of the most frustrating errors when it comes to running training scripts is hitting "Out-of-Memory" on devices like CUDA, XPU or CPU. The entire script needs to be restarted and any progress is lost.
|
||||
One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory". The entire script needs to be restarted and any progress is lost.
|
||||
|
||||
To address this problem, Accelerate provides the [`find_executable_batch_size`] utility that is heavily based on [toma](https://github.com/BlackHC/toma).
|
||||
This utility retries code that fails due to OOM (out-of-memory) conditions and automatically lowers batch sizes. For each OOM condition, the algorithm decreases the batch size by half and retries the code until it succeeds.
|
||||
@ -153,7 +153,7 @@ To use [`find_executable_batch_size`], restructure your training function to inc
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
The inner function **must** take batch size as the first parameter, but we do not pass one to it when called. The wrapper will handle this for you. Any object (models, optimizers) that consumes device memory and is passed to the [`Accelerator`] also **must** be declared inside the inner function.
|
||||
The inner function **must** take batch size as the first parameter, but we do not pass one to it when called. The wrapper will handles this for you. Any object (models, optimizers) that consumes CUDA memory and is passed to the [`Accelerator`] also **must** be declared inside the inner function.
|
||||
|
||||
</Tip>
|
||||
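A minimal sketch of that structure (the `get_model`, `get_optimizer` and `get_dataloaders` helpers are placeholders for your own setup code):

```python
from accelerate import Accelerator
from accelerate.utils import find_executable_batch_size

def training_function():
    accelerator = Accelerator()

    @find_executable_batch_size(starting_batch_size=128)
    def inner_training_loop(batch_size):
        nonlocal accelerator          # reuse the same Accelerator across retries
        accelerator.free_memory()     # drop stale references before retrying with a smaller batch
        model = get_model()                              # placeholder
        optimizer = get_optimizer(model)                 # placeholder
        train_dataloader = get_dataloaders(batch_size)   # placeholder
        model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)
        ...  # the usual training loop goes here

    inner_training_loop()  # called without a batch size; the decorator supplies it
```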
|
||||
|
||||
@ -1,156 +0,0 @@
|
||||
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Context Parallel in 🤗`accelerate`
|
||||
|
||||
This guide will cover the basics of using context parallelism in 🤗`accelerate`; for the more curious readers, we will also cover some technicalities in later sections.
|
||||
|
||||
## Why context parallelism?
|
||||
|
||||
With the advent of large language models, and recently reasoning models, sequence lengths have been growing rapidly. This, combined with the quadratic memory complexity of attention, has led to a need for more efficient ways to train models with long sequences.
|
||||
With a sequence length of 128k, the memory requirement of the attention matrix is `128k * 128k * 2 bytes * num_heads = ~32 GB * num_heads` for `bf16` precision, given a vanilla attention implementation. Granted, with `flash attention` or `SDPA`, which do not materialize these attention weights, this decreases drastically, but the growth in memory requirements is still considerable.
|
||||
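As a quick sanity check of that number:

```python
seq_len = 128 * 1024                 # 128k tokens
attn_bytes = seq_len * seq_len * 2   # 2 bytes per bf16 element
print(attn_bytes / 2**30)            # 32.0 -> ~32 GiB per attention head (per batch element)
```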
|
||||
Context parallelism allows us to shard the inputs to the attention computation along the sequence dimension and compute the attention in parallel on multiple GPUs. With this, we can train models with long sequences, scaling potentially to 1M+ sequence length.
|
||||
|
||||
|
||||
## How to use context parallelism?
|
||||
|
||||
As with any other feature in 🤗`accelerate`, enabling context parallelism is as simple as passing the corresponding flags to `accelerate launch`.
|
||||
In this case, it's no different:
|
||||
|
||||
```bash
|
||||
accelerate launch --context-parallel-size 8 --context-parallel-shard-rotation [allgather|alltoall] ...
|
||||
```
|
||||
|
||||
Context parallelism is tightly coupled (for now) with `FSDP2`, which you can learn more about in the [FSDP2 introduction](fsdp1_vs_fsdp2.md). Meaning, context parallelism is applied only if `FSDP2` is enabled.
|
||||
You can also enable context parallelism programatically, by passing it in the `FullyShardedDataParallelPlugin` constructor:
|
||||
|
||||
```diff
|
||||
from accelerate.utils import FullyShardedDataParallelPlugin
|
||||
|
||||
plugin = FullyShardedDataParallelPlugin(
|
||||
...
|
||||
fsdp_version=2,
|
||||
+ cp_size=8,
|
||||
+ cp_comm_strategy="allgather",
|
||||
)
|
||||
accelerator = Accelerator(fsdp_plugin=plugin)
|
||||
```
|
||||
|
||||
After enabling context parallelism with the methods mentioned above, you can then apply it to your training loop. We provide a thin wrapper around [`torch.distributed.tensor.experimental.context_parallel`](https://docs.pytorch.org/docs/stable/distributed.tensor.html#torch.distributed.tensor.experimental.context_parallel) that you can use in your training loop, that abstracts some of the complexity of using it (more on this later).
|
||||
You can use it as follows:
|
||||
|
||||
```python
|
||||
for batch in dataloader:
|
||||
with accelerator.context_parallel(
|
||||
buffers=[batch["input_ids"], batch["attention_mask"]],
|
||||
buffer_seq_dims=[1, 1],
|
||||
no_restore_buffers={batch["input_ids"]},
|
||||
):
|
||||
outputs = model(batch)
|
||||
...
|
||||
```
|
||||
|
||||
> [!Warning]
|
||||
> This context manager has to be recreated with each training step, as shown in the example above. It's crucial to do so.
|
||||
|
||||
This can potentially scale your context size to 1M+ sequence length. Below, we showcase the speed and memory usage of context parallelism for up to a 256k context size. We can see that when we double the context size and the number of GPUs, we can achieve consistent memory usage, potentially enabling endless context length scaling.
|
||||
|
||||
<p align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/cp_perf.png" alt="context parallelism memory usage" />
|
||||
<br>
|
||||
<em>Figure 1: Memory usage and speed of context parallelism for up-to 256k context size.</em>
|
||||
</p>
|
||||
|
||||
> [!Tip]
|
||||
> These examples were created with a script you can find [in the examples folder](https://github.com/huggingface/accelerate/blob/main/examples/fsdp2/fsdp2_context_parallel.py). For instructions on how to run it, see the [README](https://github.com/huggingface/accelerate/blob/main/examples/fsdp2/README.md) in the same folder.
|
||||
|
||||
|
||||
## Accelerate's interface
|
||||
|
||||
The context manager takes a few arguments, that are used to configure the context parallelism.
|
||||
|
||||
- `buffers`: This is a list of tensors that are to be sharded across the sequence dimension. These tensors are usually input ids, labels and attention mask.
|
||||
- `buffer_seq_dims`: This is a list of integers, that specify the sequence dimension of the buffers, in the order of the `buffers` list.
|
||||
- `no_restore_buffers`: The implementation of context parallelism modifies the buffers in-place, converting them to `torch.distributed.tensor.DTensor`s. After the context manager is exited, a communication kernel would need to be launched to restore the buffers to their original state (usually all-gather). This takes some time, so it is recommended to pass the same tensors as in the `buffers` argument, to avoid unnecessary communication, unless you are sure that you need to use the buffers after the context manager is exited.
|
||||
|
||||
## Configurable options
|
||||
Accelerate provides only a few options to configure context parallelism, which are:
|
||||
|
||||
- `cp_size`: The number of ranks to shard the inputs to the attention computation across the sequence dimension.
|
||||
- `cp_comm_strategy`: The rotation method to use for the shards. We strongly recommend keeping this as `"allgather"`, as it's very likely it will outperform `"alltoall"` in most cases.
|
||||
|
||||
Context parallel size is rather self-explanatory: it's the number of ranks across which the inputs are sharded.
|
||||
Context parallel shard rotation defines how the shards of the inputs are rotated across ranks. We'll cover the 2 options in more detail in the next section.
|
||||
|
||||
You can see an end-to-end example in the [FSDP2 context parallel example](https://github.com/huggingface/accelerate/blob/main/examples/fsdp2/fsdp2_context_parallel.py) file, where you can train an 8B model with 128k sequence length on 8x H100 SXM GPUs. Using multi-node training, you can scale this to 1M+ sequence length on 64x H100 SXM GPUs.
|
||||
|
||||
## Technical details
|
||||
|
||||
> [!Tip]
|
||||
> This section is fairly technical, so if you don't need to learn the internals of context parallelism, you can skip it and start building 🚀
|
||||
|
||||
We're going to be using the word `shard` extensively in the following sections, so let's define it first. If we say a tensor is `sharded` across the `D`-th dimension over `N` ranks, we mean that the tensor is split into `N` parts, where each part has shape `[..., D//N, ...]`.
|
||||
|
||||
|
||||
## So how does it work?
|
||||
|
||||
Context parallelism works by sharding the `Q`, `K` and `V` matrices across the sequence dimension. Each rank has its assigned shard of `Q`, let's call it `Q_i`. This matrix stays only on this rank during the whole computation. Similarly, each rank has its own shard of `K` and `V`, let's call them `K_i` and `V_i`. Then, each rank calculates attention with its own shards `Q_i`, `K_i` and `V_i`, let's call it `attn_i`. During this computation, a communication kernel is launched to gather the `Ks` and `Vs` from all other ranks. Which communication primitive is used depends on the `context_parallel_shard_rotation` option.
|
||||
This way, each rank gets to calculate local attention, first with `Q_i`, `K_i` and `V_i`, then with `K_j` and `V_j` from all other ranks. As each rank holds `Q, K and V` matrices that are sharded across the sequence dimension, the resulting matrices are smaller and can fit on a single GPU.
|
||||
|
||||
We can formalize this in the following pseudocode:
|
||||
```python
|
||||
comm_kernel = {"allgather": allgather, "alltoall": alltoall}[context_parallel_shard_rotation]
|
||||
Qi, Ki, Vi = shard(Q, K, V, seq_dim)
|
||||
attn[i] = attn(Qi, Ki, Vi)
|
||||
for j in range(context_parallel_size):
|
||||
Kj, Vj = comm_kernel()
|
||||
attn[j] = attn(Qi, Kj, Vj) # [batch, num_heads, seq_len // context_parallel_size, head_dim]
|
||||
|
||||
final_attn = combine(attn)
|
||||
```
|
||||
|
||||
## all-to-all vs all-gather
|
||||
|
||||
### all-gather
|
||||
So what's the difference between all-to-all and all-gather? With all-gather, the communication is very simple. Before we finish computing the local attention `attn_i` (the communication usually takes longer than the computation), we launch an all-gather to collect the `Ks` and `Vs` from all other ranks. Once this communication is done, each rank has the `Ks` and `Vs` from every other rank and can compute the attention with them sequentially.
|
||||
In an ideal scenario, the all-gather finishes at the exact moment the calculation of `attn_i` is done. However, this never happens in practice, so the best realistic overlap is achieved when the full `attn_i` computation overlaps with part of the communication; then, to start the computation with `K_j` and `V_j`, we wait for the all-gather to finish.
|
||||
|
||||
### all-to-all
|
||||
All-to-all, sometimes called `ring-rotation`, utilizes a ring-like communication pattern. After concluding the `attn_i` computation, an all-to-all is launched to send `K_i` and `V_i` to the neighbouring ranks. We then repeat this `context_parallel_size-1` times, so that each rank sees all the shards of `K` and `V` from all other ranks once. In an ideal scenario, we prefetch shards `K_i+1` and `V_i+1` from the neighbouring rank, and this communication is exactly overlapped with the computation of our current `attn_i`. Again, realistically, this perfect overlap never happens. Given the nature of this approach, if we don't achieve perfect overlap, the penalty is much larger than with all-gather.
|
||||
|
||||
## How to choose the right rotation method?
|
||||
In theory, all-to-all should be the better choice, though in practice it rarely is. Therefore, we default to all-gather, as it's more likely to achieve better performance. Extensive [benchmarks](https://discuss.pytorch.org/t/distributed-w-torchtitan-breaking-barriers-training-long-context-llms-with-1m-sequence-length-in-pytorch-using-context-parallel/215082) from the `torchtitan` team also show that all-to-all rarely outperforms all-gather. We still provide both options, though, as you might find one to be better for your use case.
|
||||
|
||||
You can directly see this issue in the profiler output in the image below:
|
||||
<p align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/cp_all_to_all.png" alt="all-to-all profiler output" />
|
||||
<br>
|
||||
<em>Figure 1: In red you can see the idle time, while we wait for the all-to-all kernel to finish. Highlighted in the first blue bar, you can see that it takes ~250us to finish, which is repeated N-1 times for each attention call, where N is the context parallel size.</em>
|
||||
</p>
|
||||
|
||||
|
||||
## Why only FSDP2?
|
||||
|
||||
We only support context parallelism with `FSDP2` for now, as we create a joint mesh of `context_parallel_size` and `dp_shard_size` to
|
||||
utilize its full potential. In the profiler output in the image below, you can see why this is the case.
|
||||
|
||||
<p align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/cp_why_fsdp2.png" alt="why FSDP2+CP" />
|
||||
<br>
|
||||
<em>Figure 2: In blue rectangles (Stream 23), you can see that the pre-fetch of `FSDP` shard is fully overlapped with the computation of attention (Stream 7), while in red rectangles (Stream 24), you can see that the all-gather kernel results in a bubble of idle time, in which our compute stream (7) is idle.</em>
|
||||
</p>
|
||||
|
||||
In the figure above, you can also note the difference between all-to-all and all-gather. While in all-to-all (Figure 1), we launch a communication kernel N-1 times for each attention call, in all-gather (Figure 2), we launch a communication kernel only once. This results in a bigger bubble, but it only happens once per attention call, while in all-to-all, it happens N-1 times.
|
||||
@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Executing and deferring jobs
|
||||
# DExecuting and deferring jobs
|
||||
|
||||
When you run your usual script, instructions are executed in order. Using Accelerate to deploy your script on several
|
||||
GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be
|
||||
@ -127,4 +127,4 @@ for (x,y) in data_loader:
|
||||
# Later in the training script when we need to check for the breakpoint
|
||||
if accelerator.check_trigger():
|
||||
break
|
||||
```
|
||||
```
|
||||
@ -1,105 +0,0 @@
|
||||
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# FSDP1 vs FSDP2
|
||||
|
||||
This guide explains the key differences between `FSDP1` and `FSDP2` and helps you migrate your existing code to use `FSDP2` with minimal changes.
|
||||
|
||||
## How is FSDP2 better than FSDP1?
|
||||
|
||||
First, let's look at how `FSDP1` and `FSDP2` work internally so we can understand the differences between them. This also helps us understand the limitations of `FSDP1` and how `FSDP2` solves them.
|
||||
|
||||
We'll be discussing a scenario where we have a single `Layer` that contains 3 `Linear` layers and is wrapped using `FSDP` to be sharded across 2 GPUs.
|
||||
|
||||
<div align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/layer.png" alt="Layer">
|
||||
</div>
|
||||
|
||||
### FSDP1
|
||||
First, we have to understand the original `FSDP1` and the limitations it brings. It represents each `FSDP` module as a single `FlatParameter`: a single 1D tensor that contains all of the module parameters, which then gets sharded across ranks. I.e., if you wrap the `Layer` with `FSDP1`, you'd get something like this:
|
||||
|
||||
<div align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/fsdp1.png" alt="FSDP1">
|
||||
</div>
|
||||
|
||||
You might notice a problem. The whole `Layer` gets flattened into a single `FlatParameter`, which then gets sharded across ranks. But if it's a single `FlatParameter` object, how do we store metadata? That is one of the limitations. Properly storing per-parameter metadata such as `dtype`, `requires_grad`, etc. is not possible without some ugly hacks.
|
||||
|
||||
### FSDP2
|
||||
This is why `FSDP2` was introduced. It doesn't use `FlatParameter`, instead it uses `DTensor` which is short for "Distributed Tensor". Each `DTensor` basically represents a vanilla `torch.Tensor` that has been sharded across ranks. It contains metadata about the original `torch.Tensor` and how it's sharded, what is the [placement type](https://pytorch.org/docs/stable/distributed.tensor.html#module-torch.distributed.tensor.placement_types) and so on. This is why it's called `per-parameter sharding`. The following figure shows the difference:
|
||||
|
||||
<div align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/fsdp2.png" alt="FSDP2">
|
||||
</div>
|
||||
|
||||
Each Parameter of the original `Layer` is sharded across the 0th dimension, and split between 2 GPUs. Now, each `Linear` layer is a separate `DTensor` and storing metadata per-parameter is possible and straightforward.
|
||||
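As a rough illustration of what per-parameter sharding means in practice (a standalone `DTensor` sketch meant to be run under `torchrun --nproc_per_node=2`, not what `FSDP2` literally executes internally):

```python
import torch
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor import Shard, distribute_tensor

mesh = init_device_mesh("cuda", (2,))        # one mesh dimension spanning 2 GPUs
weight = torch.randn(1024, 1024)             # a single Linear weight

# The parameter becomes its own DTensor, sharded along dim 0:
# rank 0 holds rows 0..511, rank 1 holds rows 512..1023
sharded_weight = distribute_tensor(weight, mesh, placements=[Shard(0)])
print(sharded_weight.to_local().shape)       # torch.Size([512, 1024]) on each rank
```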
|
||||
|
||||
> [!TIP]
|
||||
> In the image above, the tensors were sharded across the 1st dimension for the sake of fitting the image on the screen, in reality, they are sharded across the 0th dimension as stated above
|
||||
|
||||
## What does FSDP2 offer?
|
||||
|
||||
`FSDP2` is a new and improved version of PyTorch's fully-sharded data parallel training API. Its main advantage is using `DTensor` to represent sharded parameters. Compared to `FSDP1`, it offers:
|
||||
- Simpler internal implementation, where each `Parameter` is a separate `DTensor`
|
||||
- Enables simple partial parameter freezing because of the above, which makes methods such as [`LoRA`](https://arxiv.org/abs/2106.09685) work out of the box
|
||||
- With `DTensor`, `FSDP2` supports mixing `fp8` and other parameter types in the same model out of the box
|
||||
- Faster and simpler checkpointing without extra communication across ranks using `SHARDED_STATE_DICT` and [`torch.distributed.checkpoint`](https://pytorch.org/docs/stable/distributed.checkpoint.html), this way, each rank only saves its own shard and corresponding metadata
|
||||
- For loading, it uses a `state_dict` of the sharded model to directly load the sharded parameters
|
||||
- Support for asynchronous checkpointing, where parameters are first copied to CPU memory; after this, the main thread continues training while another thread stores the parameters on disk
|
||||
- Memory efficiency and deterministic memory usage, `FSDP2` doesn't use `recordStream` anymore and uses stream-to-stream synchronization (for more technical details see [this forum post](https://dev-discuss.pytorch.org/t/fsdp-cudacachingallocator-an-outsider-newb-perspective/1486) and [this issue](https://github.com/pytorch/pytorch/issues/114299))
|
||||
- In the future, optimizations of the communication patterns via `torch.compile` are planned, further improving the performance and memory efficiency
|
||||
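As a sketch of how that checkpointing flow looks from the user side (a minimal example assuming an `FSDP2`-configured `Accelerator`; `model` and `optimizer` are defined elsewhere):

```python
from accelerate import Accelerator, FullyShardedDataParallelPlugin

accelerator = Accelerator(fsdp_plugin=FullyShardedDataParallelPlugin(fsdp_version=2))
model, optimizer = accelerator.prepare(model, optimizer)

# Each rank saves only its own shard plus metadata (SHARDED_STATE_DICT is the FSDP2 default)
accelerator.save_state("checkpoints/step_1000")
# ...and later loads its own shard back directly
accelerator.load_state("checkpoints/step_1000")
```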
|
||||
|
||||
## API Differences
|
||||
|
||||
We have already discussed the internal differences; now let's discuss the differences that you, as a user, need to know.
|
||||
|
||||
Here are the main changes in configuration options when using `FSDP2` through the `accelerate` CLI:
|
||||
|
||||
Previous (`FSDP1`) | New (`FSDP2`) | What Changed
|
||||
-- | -- | --
|
||||
`--fsdp_sharding_strategy` | `--fsdp_reshard_after_forward` | replaces `--fsdp_sharding_strategy`, changed to `true` (previously `FULL_SHARD`) or `false` (previously `SHARD_GRAD_OP`)
|
||||
`--fsdp_backward_prefetch` | \*\***REMOVED**\*\* | `FSDP2` uses previous `BACKWARD_PRE` option by default, as only this allows communication and computation overlap
|
||||
`--fsdp_forward_prefetch` | \*\***NOT YET IMPLEMENTED**\*\* | How to implement this is under active discussion, for now it is not supported in `FSDP2`
|
||||
`--fsdp_sync_module_states` | \*\***REMOVED**\*\* | with `FSDP2`, this parameter becomes redundant
|
||||
`--fsdp_cpu_ram_efficient_loading` | `--fsdp_cpu_ram_efficient_loading` | if `true`, `FSDP2` will similarly load the model only on rank 0, and then parameters get synced to other ranks, this is the same behavior as `FSDP1`, however, setting `--fsdp_sync_module_states` isn't required anymore
|
||||
`--fsdp_state_dict_type` | `--fsdp_state_dict_type` | `LOCAL_STATE_DICT` becomes obsolete and with `FSDP2` `SHARDED_STATE_DICT` is the default option, which results in no extra communication and each rank saving its own shard, other possible option is `FULL_STATE_DICT` which results in extra communication and spike in memory usage but saves the full model from rank 0.
|
||||
`--fsdp_use_orig_params` | \*\***REMOVED**\*\* | `FSDP2` uses a `DTensor` class in the background, which means it *always* uses the original parameters by default
|
||||
\*\***NEW**\*\* | `--fsdp_version` | `1` is the default option, to not break existing code, set to `2` to use `FSDP2`
|
||||
|
||||
For all other options that remain unchanged, see the [`FSDP` documentation](../usage_guides/fsdp.md).
|
||||
|
||||
## How to Switch to FSDP2
|
||||
|
||||
### If using Python code:
|
||||
Simply set `fsdp_version=2` when creating your plugin and replace options according to the table above.
|
||||
|
||||
```python
|
||||
from accelerate import FullyShardedDataParallelPlugin, Accelerator
|
||||
|
||||
fsdp_plugin = FullyShardedDataParallelPlugin(
|
||||
fsdp_version=2
|
||||
# other options...
|
||||
)
|
||||
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
|
||||
```
|
||||
|
||||
### If using YAML config:
|
||||
Use our conversion tool:
|
||||
```bash
|
||||
accelerate to-fsdp2 --config_file config.yaml --output_file new_config.yaml
|
||||
```
|
||||
|
||||
This will automatically convert all FSDP1 settings to their FSDP2 equivalents. Use `--overwrite` to update the existing file instead of creating a new one.
|
||||
@ -109,7 +109,7 @@ While FSDP require an explicit `--fsdp_cpu_ram_efficient_loading true` to activa
|
||||
<Tip>
|
||||
|
||||
For FSDP, whenever setting `--fsdp_cpu_ram_efficient_loading true`, `accelerate` will automatically set `sync_module_states` to true.
|
||||
For RAM efficient loading the weights will be loaded only in a single rank, and thus requires `sync_module_states` to broadcast weights to other ranks.
|
||||
For RAM efficient loading the weights will be loaded only in a singe rank, and thus requires `sync_module_states` to broadcast weights to other ranks.
|
||||
|
||||
</Tip>
|
||||
|
||||
@ -125,7 +125,7 @@ FSDP requires an explicit `--fsdp_auto_wrap_policy` for the algorithm to decide
|
||||
|
||||
### Parameters Summoning
|
||||
|
||||
FSDP requires an explicit `--fsdp_use_orig_params` flag if using `torch.compile`, see [the pytorch documentation](https://pytorch.org/docs/stable/fsdp.html#module-torch.distributed.fsdp). For DeepSpeed this is transparent to the user.
|
||||
FSDP requires an explicit `--fsdp_use_orig_params` flag if using `torch.compile`, see [the pytorch documenation](https://pytorch.org/docs/stable/fsdp.html#module-torch.distributed.fsdp). For DeepSpeed this is transparent to the user.
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -147,7 +147,7 @@ Deepspeed requires explicit `--gradient_accumulation_steps` and `--gradient_clip
|
||||
|
||||
## On Differences in Data Precision Handling
|
||||
|
||||
To discuss how data precision is handled in both FSDP and Deepspeed, it is instructive to first give an overview of how model parameters are handled in these frameworks. Before the model / optimizer parameters are distributed across GPUs, parameter preparation is involved to first "flatten" them to one-dimensional [`torch.Tensor`](https://pytorch.org/docs/stable/tensors.html#torch-tensor). The implementation of FSDP / DeepSpeed varies in the respect of the `dtype` in which these "flattened" parameters are stored, and there are ramifications with regards to how [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) allocate their `dtype`s. The table below outlines the processes for both frameworks; the "Local" column indicates the process occurring at a per-gpu level, therefore any memory overheads by upcasting should be understood to be amortized by the number of gpus used.
|
||||
To discuss the how data precision is handled in both FSDP and Deepspeed, it is instructive to first give an overview of how model parameters are handled in these frameworks. Before the model / optimizer parameters are distributed across GPUs, parameter preparation is involved to first "flatten" them to one-dimensional [`torch.Tensor`](https://pytorch.org/docs/stable/tensors.html#torch-tensor). The implementation of FSDP / DeepSpeed varies in the respect of the `dtype` in which these "flattened" parameters are stored, and there are ramifications with regards to how [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) allocate their `dtype`s. The table below outlines the processes for both frameworks; the "Local" column indicates the process occurring at a per-gpu level, therefore any memory overheads by upcasting should be understood to be amortized by the number of gpus used.
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -166,7 +166,7 @@ Optimizer (Actual Step) | ✅ | FSDP<br>DeepSpeed | occurs in `torch_dtype` <br
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Therefore when using DeepSpeed a small number of GPUs, be aware of potentially significant memory overheads due to the upcasting during preparation.
|
||||
Therefore when using DeepSpeed a small number of GPUs, be aware of potentially significant memory overheads due to the upcasting during preperation.
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
@ -71,4 +71,4 @@ setting the same seed in the main random number generator in all processes.
|
||||
|
||||
If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, and you have passed `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`], these classes will directly inherit from `StatefulDataLoader` instead, and maintain a `state_dict`.
|
||||
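For example, a minimal sketch of enabling this (assuming `torchdata>=0.8.0` is installed; `build_dataloader` is a placeholder for your own `DataLoader` construction):

```python
from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=True)
accelerator = Accelerator(dataloader_config=dataloader_config)

train_dataloader = accelerator.prepare(build_dataloader())  # placeholder
state = train_dataloader.state_dict()        # can be saved and restored mid-epoch
train_dataloader.load_state_dict(state)
```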
|
||||
For more details about the internals, see the [Internals page](../package_reference/torch_wrappers).
|
||||
For more details about the internals, see the [Internals page](package_reference/torch_wrappers).
|
||||
|
||||
@ -43,13 +43,13 @@ Why is this important? Under the hood this will set **5** different seed setting
|
||||
random.seed(seed)
|
||||
np.random.seed(seed)
|
||||
torch.manual_seed(seed)
|
||||
torch.cuda.manual_seed_all(seed) # or torch.xpu.manual_seed_all, etc
|
||||
torch.cuda.manual_seed_all(seed)
|
||||
# ^^ safe to call this function even if cuda is not available
|
||||
if is_torch_xla_available():
|
||||
xm.set_rng_state(seed)
|
||||
```
|
||||
|
||||
The random state, numpy's state, torch, torch's device state, and if TPUs are available torch_xla's cuda state.
|
||||
The random state, numpy's state, torch, torch's cuda state, and if TPUs are available torch_xla's cuda state.
|
||||
|
||||
## Observed Batch Sizes
|
||||
|
||||
|
||||
@ -63,10 +63,6 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
[[autodoc]] hooks.SequentialHook
|
||||
|
||||
### LayerwiseCastingHook
|
||||
|
||||
[[autodoc]] hooks.LayerwiseCastingHook
|
||||
|
||||
## Adding Hooks
|
||||
|
||||
### add_hook_to_module
|
||||
@ -85,10 +81,6 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
[[autodoc]] hooks.attach_align_device_hook_on_blocks
|
||||
|
||||
### attach_layerwise_casting_hooks
|
||||
|
||||
[[autodoc]] big_modeling.attach_layerwise_casting_hooks
|
||||
|
||||
## Removing Hooks
|
||||
|
||||
### remove_hook_from_module
|
||||
@ -97,14 +89,4 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
### remove_hook_from_submodules
|
||||
|
||||
[[autodoc]] hooks.remove_hook_from_submodules
|
||||
|
||||
## Utilities
|
||||
|
||||
### has_offloaded_params
|
||||
|
||||
[[autodoc]] utils.has_offloaded_params
|
||||
|
||||
### align_module_device
|
||||
|
||||
[[autodoc]] utils.align_module_device
|
||||
[[autodoc]] hooks.remove_hook_from_submodules
|
||||
@ -158,13 +158,13 @@ The following arguments are useful for selecting which training paradigm to use.
|
||||
* `--use_deepspeed` (`bool`) -- Whether or not to use DeepSpeed for training.
|
||||
* `--use_fsdp` (`bool`) -- Whether or not to use FullyShardedDataParallel for training.
|
||||
* `--use_megatron_lm` (`bool`) -- Whether or not to use Megatron-LM for training.
|
||||
* `--use_xpu` (`bool`) -- Whether to use IPEX plugin to speed up training on XPU specifically. **This argument is deprecated and ignored, will be removed in Accelerate v1.20**
|
||||
* `--use_xpu` (`bool`) -- Whether to use IPEX plugin to speed up training on XPU specifically.
|
||||
|
||||
**Distributed GPU Arguments**:
|
||||
|
||||
The following arguments are only useful when `multi_gpu` is passed or multi-gpu training is configured through `accelerate config`:
|
||||
|
||||
* `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-separated list
|
||||
* `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-seperated list
|
||||
* `--same_network` (`bool`) -- Whether all machines used for multinode training exist on the same local network.
|
||||
* `--machine_rank` (`int`) -- The rank of the machine on which this script is launched.
|
||||
* `--main_process_ip` (`str`) -- The IP address of the machine of rank 0.
|
||||
@ -202,8 +202,8 @@ The following arguments are only useful when `use_deepspeed` is passed or `deeps
|
||||
* `--zero3_init_flag` (`str`) -- Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with DeepSpeed ZeRO Stage-3.
|
||||
* `--zero3_save_16bit_model` (`str`) -- Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. Only applicable with DeepSpeed ZeRO Stage-3.
|
||||
* `--deepspeed_hostfile` (`str`) -- DeepSpeed hostfile for configuring multi-node compute resources.
|
||||
* `--deepspeed_exclusion_filter` (`str`) -- DeepSpeed exclusion filter string when using multi-node setup.
|
||||
* `--deepspeed_inclusion_filter` (`str`) -- DeepSpeed inclusion filter string when using multi-node setup.
|
||||
* `--deepspeed_exclusion_filter` (`str`) -- DeepSpeed exclusion filter string when using mutli-node setup.
|
||||
* `--deepspeed_inclusion_filter` (`str`) -- DeepSpeed inclusion filter string when using mutli-node setup.
|
||||
* `--deepspeed_multinode_launcher` (`str`) -- DeepSpeed multi-node launcher to use.
|
||||
* `--deepspeed_moe_layer_cls_names` (`str`) -- comma-separated list of transformer MoE layer class names (case-sensitive) to wrap, e.g, `MixtralSparseMoeBlock` `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock`
|
||||
|
||||
|
||||
@ -30,17 +30,3 @@ rendered properly in your Markdown viewer.
|
||||
## FullyShardedDataParallelPlugin
|
||||
|
||||
[[autodoc]] utils.FullyShardedDataParallelPlugin
|
||||
|
||||
## fsdp2_load_full_state_dict
|
||||
|
||||
[[autodoc]] utils.fsdp2_load_full_state_dict
|
||||
|
||||
## fsdp2_switch_optimizer_parameters
|
||||
|
||||
[[autodoc]] utils.fsdp2_switch_optimizer_parameters
|
||||
|
||||
## fsdp2_prepare_model
|
||||
|
||||
[[autodoc]] utils.fsdp2_prepare_model
|
||||
|
||||
## fsdp2_prepare_auto_wrap_policy
|
||||
|
||||
@ -126,10 +126,6 @@ These include data operations that mimic the same `torch` ops but can be used on
|
||||
|
||||
[[autodoc]] utils.gather_object
|
||||
|
||||
[[autodoc]] utils.get_grad_scaler
|
||||
|
||||
[[autodoc]] utils.get_mixed_precision_context_manager
|
||||
|
||||
[[autodoc]] utils.listify
|
||||
|
||||
[[autodoc]] utils.pad_across_processes
|
||||
@ -174,8 +170,6 @@ When setting up 🤗 Accelerate for the first time, rather than running `acceler
|
||||
|
||||
[[autodoc]] utils.environment.override_numa_affinity
|
||||
|
||||
[[autodoc]] utils.purge_accelerate_environment
|
||||
|
||||
## Memory
|
||||
|
||||
[[autodoc]] utils.find_executable_batch_size
|
||||
@ -208,7 +202,6 @@ These utilities relate to interacting with PyTorch models
|
||||
|
||||
[[autodoc]] utils.set_module_tensor_to_device
|
||||
|
||||
[[autodoc]] utils.get_module_children_bottom_up
|
||||
|
||||
## Parallel
|
||||
|
||||
@ -218,8 +211,6 @@ These include general utilities that should be used when working in parallel.
|
||||
|
||||
[[autodoc]] utils.save
|
||||
|
||||
[[autodoc]] utils.load
|
||||
|
||||
[[autodoc]] utils.wait_for_everyone
|
||||
|
||||
|
||||
|
||||
@ -168,14 +168,13 @@ with init_empty_weights():
|
||||
|
||||
The [`~accelerate.load_checkpoint_and_dispatch`] function loads full or sharded checkpoints into the empty model, and automatically distributes weights across all available devices.
|
||||
|
||||
The `device_map` parameter determines where to place each model layer, and specifying `"auto"` places them on the GPU first, then the CPU, and finally the hard drive as memory-mapped tensors if there's still not enough memory. Use the `no_split_module_classes` parameter to indicate which modules shouldn't be split across devices (typically those with a residual connection).
|
||||
The `device_map` parameter determines where to place each model layer, and specifiying `"auto"` places them on the GPU first, then the CPU, and finally the hard drive as memory-mapped tensors if there's still not enough memory. Use the `no_split_module_classes` parameter to indicate which modules shouldn't be split across devices (typically those with a residual connection).
|
||||
|
||||
```py
|
||||
from accelerate import load_checkpoint_and_dispatch
|
||||
|
||||
model_checkpoint = "your-local-model-folder"
|
||||
model = load_checkpoint_and_dispatch(
|
||||
model, checkpoint=model_checkpoint, device_map="auto", no_split_module_classes=['Block']
|
||||
model, checkpoint="mistralai/Mixtral-8x7B-Instruct-v0.1", device_map="auto", no_split_module_classes=['Block']
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
@ -21,7 +21,7 @@ This tutorial will show you how to use Big Model Inference in Accelerate and the
|
||||
|
||||
## Accelerate
|
||||
|
||||
A typical workflow for loading a PyTorch model is shown below. `ModelClass` is a model that exceeds the GPU memory of your device (mps or cuda or xpu).
|
||||
A typical workflow for loading a PyTorch model is shown below. `ModelClass` is a model that exceeds the GPU memory of your device (mps or cuda).
|
||||
|
||||
```py
|
||||
import torch
|
||||
@ -41,7 +41,7 @@ with init_empty_weights():
|
||||
|
||||
Next, the weights are loaded into the model for inference.
|
||||
|
||||
The [`load_checkpoint_and_dispatch`] method loads a checkpoint inside your empty model and dispatches the weights for each layer across all available devices, starting with the fastest devices (GPU, MPS, XPU, NPU, MLU, SDAA, MUSA) first before moving to the slower ones (CPU and hard drive).
|
||||
The [`load_checkpoint_and_dispatch`] method loads a checkpoint inside your empty model and dispatches the weights for each layer across all available devices, starting with the fastest devices (GPU, MPS, XPU, NPU, MLU, MUSA) first before moving to the slower ones (CPU and hard drive).
|
||||
|
||||
Setting `device_map="auto"` automatically fills all available space on the GPU(s) first, then the CPU, and finally, the hard drive (the absolute slowest option) if there is still not enough memory.
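If you also want to cap how much memory `device_map="auto"` may claim on each device, [`load_checkpoint_and_dispatch`] accepts a `max_memory` mapping as well. A minimal sketch (the checkpoint path and memory limits below are illustrative assumptions):

```py
from accelerate import load_checkpoint_and_dispatch

model = load_checkpoint_and_dispatch(
    model,
    checkpoint="your-local-model-folder",     # hypothetical path
    device_map="auto",
    max_memory={0: "10GiB", "cpu": "30GiB"},  # leave headroom on GPU 0, spill the rest to CPU
    no_split_module_classes=["Block"],
)
```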
|
||||
|
||||
@ -64,8 +64,7 @@ Now that the model is fully dispatched, you can perform inference.
|
||||
|
||||
```py
|
||||
input = torch.randn(2,3)
|
||||
device_type = next(iter(model.parameters())).device.type
|
||||
input = input.to(device_type)
|
||||
input = input.to("cuda")
|
||||
output = model(input)
|
||||
```
|
||||
|
||||
@ -92,8 +91,7 @@ model = load_checkpoint_and_dispatch(
|
||||
)
|
||||
|
||||
input = torch.randn(2,3)
|
||||
device_type = next(iter(model.parameters())).device.type
|
||||
input = input.to(device_type)
|
||||
input = input.to("cuda")
|
||||
output = model(input)
|
||||
```
|
||||
|
||||
|
||||
@ -1,76 +0,0 @@
|
||||
# Compilation
|
||||
|
||||
## Overview
|
||||
|
||||
PyTorch 2.0 introduced `torch.compile`, a powerful feature that makes PyTorch code run faster by JIT-compiling it into optimized kernels. Key features of `torch.compile` include:
|
||||
|
||||
- **Performance Improvement**: Significantly speeds up model execution by optimizing the computation graph.
|
||||
- **Ease of Use**: Requires minimal code changes to implement, making it highly accessible.
|
||||
- **Compatibility**: Works seamlessly with existing PyTorch code and models.
|
||||
|
||||
When used with Accelerate, `torch.compile` integrates smoothly into distributed training workflows, allowing you to benefit from both distributed execution and compilation optimizations simultaneously.
|
||||
|
||||
The first execution of compiled code typically takes longer as it includes the compilation time, but subsequent runs are significantly faster. For optimal performance in different scenarios, `torch.compile` offers various modes like `"default"`, `"reduce-overhead"` (which uses CUDA graphs to further reduce overhead), and `"max-autotune"` (which performs extensive autotuning to find the best kernels for your model).
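For reference, outside of Accelerate these modes are passed directly to `torch.compile`; a minimal sketch with a toy model:

```python
import torch

model = torch.nn.Linear(8, 8)
# "reduce-overhead" uses CUDA graphs to cut per-call overhead on GPU;
# "max-autotune" spends extra compile time searching for the fastest kernels.
compiled_model = torch.compile(model, mode="reduce-overhead")
output = compiled_model(torch.randn(2, 8))
```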
|
||||
|
||||
## Using `torch.compile` with Accelerate
|
||||
|
||||
Accelerate provides `TorchDynamoPlugin` for easy and seamless integration of `torch.compile` into your training scripts.
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import TorchDynamoPlugin
|
||||
|
||||
# Configure the compilation backend
|
||||
dynamo_plugin = TorchDynamoPlugin(
|
||||
backend="inductor", # Options: "inductor", "aot_eager", "aot_nvfuser", etc.
|
||||
mode="default", # Options: "default", "reduce-overhead", "max-autotune"
|
||||
fullgraph=True,
|
||||
dynamic=False
|
||||
)
|
||||
|
||||
# Initialize accelerator with the plugin
|
||||
accelerator = Accelerator(dynamo_plugin=dynamo_plugin)
|
||||
# This will apply torch.compile to your model
|
||||
model = accelerator.prepare(model)
|
||||
```
|
||||
|
||||
It is compatible with all other features and plugins of Accelerate, including mixed precision, distributed training (DDP, FSDP, Deepspeed), etc.
|
||||
|
||||
## Regional Compilation
|
||||
|
||||
Instead of trying to compile the whole model, which usually has a big problem space for optimization, regional compilation targets repeated blocks of the same class and compiles them sequentially to hit the compiler's cache. For example, in `GPT2LMHeadModel`, the repeated block/class is `GPT2Block`, which can be accessed as `model.transformer.h[0]`. The rest of the model (e.g. `model.lm_head`) is compiled separately.
|
||||
|
||||
This allows us to reduce the compilation overhead / cold start of models like LLMs and Transformers in general.
|
||||
See <https://pytorch.org/tutorials/recipes/regional_compilation.html> for more details.
|
||||
|
||||
### How to Use Regional Compilation
|
||||
|
||||
It can be enabled by setting `use_regional_compilation=True` in the `TorchDynamoPlugin` configuration:
|
||||
|
||||
```python
|
||||
# Configure the compilation backend
|
||||
dynamo_plugin = TorchDynamoPlugin(
|
||||
use_regional_compilation=True,
|
||||
... # other parameters
|
||||
)
|
||||
# Initialize accelerator with the plugin
|
||||
accelerator = Accelerator(dynamo_plugin=dynamo_plugin)
|
||||
# This will apply compile_regions to your model
|
||||
model = accelerator.prepare(model)
|
||||
```
|
||||
|
||||
You can also use the `accelerate.utils.compile_regions` utility directly, the same way you would use `torch.compile`.
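For example, a sketch assuming `model` is already defined and that keyword arguments are forwarded to `torch.compile`:

```python
from accelerate.utils import compile_regions

# Compiles each repeated block (and the remaining modules) separately instead of
# compiling the whole model as one graph.
model = compile_regions(model, mode="reduce-overhead")
```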
|
||||
|
||||
### Benefits of Regional Compilation
|
||||
|
||||
We have conducted extensive benchmarks comparing full compilation and regional compilation using the `torch.compile` feature in PyTorch. The full results are available in the [accelerate repository](https://github.com/huggingface/accelerate/tree/main/benchmarks/torch.compile/regional_compilation). The key findings from our benchmarks are:
|
||||
|
||||
1. **Comparable Performance**: Regional compilation delivers performance speedups similar to full compilation, especially for larger models.
|
||||
2. **Faster Compilation**: Regional compilation significantly reduces the time taken to compile models, making it a more efficient choice for deployment.
|
||||
3. **Batch Size Impact**: The performance difference between compilation strategies diminishes with larger batch sizes, indicating that the overhead of compilation is less impactful in those scenarios.
|
||||
4. **Model Size Consideration**: The benefits of regional compilation are more pronounced in larger models, where the compilation time savings can be substantial.
|
||||
5. **Practical Application**: For real-world applications, regional compilation is a practical choice for optimizing training cold start times, especially when working with large models.
|
||||
|
||||
## Conclusion
|
||||
|
||||
Both full and regional compilation can significantly speed up your models. Regional compilation offers a practical balance between compilation time and runtime performance, especially for training large models with substantial batch sizes.
|
||||
@ -34,10 +34,6 @@ In this tutorial, you will see how to quickly set up DDP communication hooks and
|
||||
import torch
|
||||
from torch.nn.parallel import DistributedDataParallel as DDP
|
||||
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks
|
||||
from accelerate.test_utils.testing import get_backend
|
||||
|
||||
device_type, _, _ = get_backend()
|
||||
device_id = getattr(torch, device_type, torch.cuda).current_device()
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
@ -48,7 +44,7 @@ class MyModel(torch.nn.Module):
|
||||
return self.layer(x)
|
||||
|
||||
model = MyModel()
|
||||
model = DDP(model, device_ids=[device_id])
|
||||
model = DDP(model, device_ids=[torch.cuda.current_device()])
|
||||
model.register_comm_hook(state=None, hook=default_hooks.fp16_compress_hook)
|
||||
|
||||
# Training loop
|
||||
@ -112,10 +108,6 @@ BF16 Compression Hook API is experimental, and it requires NCCL version later th
|
||||
import torch
|
||||
from torch.nn.parallel import DistributedDataParallel as DDP
|
||||
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks
|
||||
from accelerate.test_utils.testing import get_backend
|
||||
|
||||
device_type, _, _ = get_backend()
|
||||
device_id = getattr(torch, device_type, torch.cuda).current_device()
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
@ -126,7 +118,7 @@ class MyModel(torch.nn.Module):
|
||||
return self.layer(x)
|
||||
|
||||
model = MyModel()
|
||||
model = DDP(model, device_ids=[device_id])
|
||||
model = DDP(model, device_ids=[torch.cuda.current_device()])
|
||||
model.register_comm_hook(state=None, hook=default_hooks.bf16_compress_hook)
|
||||
|
||||
# Training loop
|
||||
@ -190,10 +182,6 @@ PowerSGD typically requires extra memory of the same size as the model’s gradi
|
||||
import torch
|
||||
from torch.nn.parallel import DistributedDataParallel as DDP
|
||||
from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook
|
||||
from accelerate.test_utils.testing import get_backend
|
||||
|
||||
device_type, _, _ = get_backend()
|
||||
device_id = getattr(torch, device_type, torch.cuda).current_device()
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
@ -204,7 +192,7 @@ class MyModel(torch.nn.Module):
|
||||
return self.layer(x)
|
||||
|
||||
model = MyModel()
|
||||
model = DDP(model, device_ids=[device_id])
|
||||
model = DDP(model, device_ids=[torch.cuda.current_device()])
|
||||
state = powerSGD_hook.PowerSGDState(process_group=None)
|
||||
model.register_comm_hook(state=state, hook=powerSGD_hook.powerSGD_hook)
|
||||
|
||||
|
||||
@ -15,7 +15,7 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# DeepSpeed
|
||||
|
||||
[DeepSpeed](https://github.com/deepspeedai/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Some of the salient optimizations are:
|
||||
[DeepSpeed](https://github.com/microsoft/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Some of the salient optimizations are:
|
||||
|
||||
1. Optimizer state partitioning (ZeRO stage 1)
|
||||
2. Gradient partitioning (ZeRO stage 2)
|
||||
@ -33,7 +33,7 @@ DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no
|
||||
DeepSpeed ZeRO-3 can be used for inference as well since it allows huge models to be loaded on multiple GPUs, which
|
||||
won't be possible on a single GPU.
|
||||
|
||||
Accelerate integrates [DeepSpeed](https://github.com/deepspeedai/DeepSpeed) via 2 options:
|
||||
Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:
|
||||
|
||||
1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. Most of
|
||||
this document is focused on this feature. This supports all the core features of DeepSpeed and gives the user a lot of flexibility.
|
||||
@ -74,7 +74,7 @@ Inference:
|
||||
|
||||
## How it works?
|
||||
|
||||
**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Installation details](https://github.com/deepspeedai/DeepSpeed#installation)
|
||||
**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Installation details](https://github.com/microsoft/DeepSpeed#installation)
|
||||
for more information.
|
||||
|
||||
We will first look at the easy-to-use integration via `accelerate config`.
|
||||
@ -167,7 +167,7 @@ Currently, `Accelerate` supports following config through the CLI:
|
||||
`deepspeed_hostfile`: DeepSpeed hostfile for configuring multi-node compute resources.
|
||||
`deepspeed_exclusion_filter`: DeepSpeed exclusion filter string when using multi-node setup.
|
||||
`deepspeed_inclusion_filter`: DeepSpeed inclusion filter string when using multi-node setup.
|
||||
`deepspeed_multinode_launcher`: DeepSpeed multi-node launcher to use, e.g. `pdsh`, `standard`, `openmpi`, `mvapich`, `mpich`, `slurm`, `nossh` (requires DeepSpeed >= 0.14.5). If unspecified, will default to `pdsh`.
|
||||
`deepspeed_multinode_launcher`: DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.
|
||||
`deepspeed_config_file`: path to the DeepSpeed config file in `json` format. See the next section for more details on this.
|
||||
```
|
||||
To be able to tweak more options, you will need to use a DeepSpeed config file.
|
||||
@ -194,7 +194,7 @@ For instance, here is how you would run the NLP example `examples/by_feature/dee
|
||||
```bash
|
||||
compute_environment: LOCAL_MACHINE
|
||||
deepspeed_config:
|
||||
deepspeed_config_file: /home/ubuntu/accelerate/examples/deepspeed_config_templates/zero_stage2_config.json
|
||||
deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage2_config.json
|
||||
zero3_init_flag: true
|
||||
distributed_type: DEEPSPEED
|
||||
fsdp_config: {}
|
||||
@ -275,7 +275,7 @@ accelerate launch examples/by_feature/deepspeed_with_config_support.py \
|
||||
```bash
|
||||
compute_environment: LOCAL_MACHINE
|
||||
deepspeed_config:
|
||||
deepspeed_config_file: /home/ubuntu/accelerate/examples/deepspeed_config_templates/zero_stage3_offload_config.json
|
||||
deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage3_offload_config.json
|
||||
zero3_init_flag: true
|
||||
distributed_type: DEEPSPEED
|
||||
fsdp_config: {}
|
||||
@ -710,18 +710,11 @@ model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
|
||||
2. Current integration doesn’t support `mpu`, limiting the tensor parallelism which is supported in Megatron-LM.
|
||||
3. Current integration doesn’t support multiple models.
|
||||
|
||||
## Multi-node DeepSpeed
|
||||
DeepSpeed supports multi-node inference and training over a variety of different launchers. You can specify a different launcher by setting the `deepspeed_multinode_launcher` config in the CLI or in the DeepSpeed config file.
|
||||
|
||||
Currently, accelerate supports passing configuration for the following DeepSpeed multi-node launchers: `pdsh` (default), `standard`, `openmpi`, `mvapich`, `mpich`, `slurm`, `nossh` (requires DeepSpeed >= 0.14.5).
|
||||
|
||||
Please read the [DeepSpeed documentation](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node) for more information on the different launchers. By default, DeepSpeed will attempt to use passwordless SSH from the main machine node to the other nodes to perform the launcher command. In this configuration, the accelerate launch command only needs to be run on the main node. If using the `nossh` launcher, you will need to run the accelerate launch command on every node using copied configuration.
|
||||
|
||||
## DeepSpeed Resources
|
||||
|
||||
The documentation for the internals related to deepspeed can be found [here](../package_reference/deepspeed).
|
||||
|
||||
- [Project's github](https://github.com/deepspeedai/DeepSpeed)
|
||||
- [Project's github](https://github.com/microsoft/deepspeed)
|
||||
- [Usage docs](https://www.deepspeed.ai/getting-started/)
|
||||
- [API docs](https://deepspeed.readthedocs.io/en/latest/index.html)
|
||||
- [Blog posts](https://www.microsoft.com/en-us/research/search/?q=deepspeed)
|
||||
@ -735,7 +728,7 @@ Papers:
|
||||
|
||||
|
||||
Finally, please, remember that `Accelerate` only integrates DeepSpeed, therefore if you
|
||||
have any problems or questions with regards to DeepSpeed usage, please, file an issue with [DeepSpeed GitHub](https://github.com/deepspeedai/DeepSpeed/issues).
|
||||
have any problems or questions with regards to DeepSpeed usage, please, file an issue with [DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues).
|
||||
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -69,7 +69,6 @@ to be padded) for you to use right away.
|
||||
Let's rewrite the above example using this context manager:
|
||||
|
||||
```python
|
||||
import torch
|
||||
from accelerate import PartialState # Can also be Accelerator or AcceleratorState
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
@ -126,7 +125,6 @@ needs to be the same length. Basic inference does not require this.
|
||||
For instance:
|
||||
|
||||
```python
|
||||
import torch
|
||||
from accelerate import PartialState # Can also be Accelerator or AcceleratorState
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
|
||||
@ -1,38 +0,0 @@
|
||||
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Intel Gaudi
|
||||
|
||||
Users can take advantage of Intel Gaudi AI accelerators for significantly faster and more cost-effective model training and inference.
|
||||
The Intel Gaudi AI accelerator family currently includes three product generations: [Intel Gaudi 1](https://habana.ai/products/gaudi/), [Intel Gaudi 2](https://habana.ai/products/gaudi2/), and [Intel Gaudi 3](https://habana.ai/products/gaudi3/). Each server is equipped with 8 devices, known as Habana Processing Units (HPUs), providing 128GB of memory on Gaudi 3, 96GB on Gaudi 2, and 32GB on the first-gen Gaudi. For more details on the underlying hardware architecture, check out the [Gaudi Architecture Overview](https://docs.habana.ai/en/latest/Gaudi_Overview/Gaudi_Architecture.html).
|
||||
|
||||
## How it works out of the box
|
||||
|
||||
It is enabled by default if an Intel Gaudi device is detected.
|
||||
To disable it, pass the `--cpu` flag to the `accelerate launch` command or answer the corresponding question in the `accelerate config` questionnaire.
|
||||
|
||||
You can directly run the following script to test it out on Intel Gaudi:
|
||||
|
||||
```bash
|
||||
accelerate launch /examples/cv_example.py --data_dir images
|
||||
```
|
||||
|
||||
## Limitations
|
||||
|
||||
The following features are not part of the Accelerate library and require [Optimum for Intel Gaudi](https://huggingface.co/docs/optimum/main/en/habana/index):
|
||||
|
||||
- `fast_ddp` which implements DDP by applying an all-reduce on gradients instead of the Torch DDP wrapper.
|
||||
- `minimize_memory` which is used for fp8 training and enables keeping fp8 weights in memory between the forward and backward passes, leading to a smaller memory footprint at the cost of additional fp8 casts.
|
||||
- `context_parallel_size` which is used for Context/Sequence Parallelism (CP/SP) and partitions the network inputs and activations along sequence dimension to reduce memory footprint and increase throughput.
|
||||
@ -187,46 +187,38 @@ set_seed(0)
|
||||
x = torch.tensor([1., 2., 3., 4., 5., 6., 7., 8.])
|
||||
y = torch.tensor([2., 4., 6., 8., 10., 12., 14., 16.])
|
||||
gradient_accumulation_steps = 4
|
||||
per_device_batch_size = len(x) // gradient_accumulation_steps
|
||||
batch_size = len(x) // gradient_accumulation_steps
|
||||
|
||||
# define dataset and dataloader
|
||||
dataset = TensorDataset(x, y)
|
||||
dataloader = DataLoader(dataset, batch_size=per_device_batch_size)
|
||||
dataloader = DataLoader(dataset, batch_size=batch_size)
|
||||
|
||||
# define model, optimizer and loss function
|
||||
class SimpleLinearModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super(SimpleLinearModel, self).__init__()
|
||||
self.weight = torch.nn.Parameter(torch.zeros((1, 1)))
|
||||
|
||||
def forward(self, inputs):
|
||||
return inputs @ self.weight
|
||||
|
||||
model = SimpleLinearModel()
|
||||
model = torch.zeros((1, 1), requires_grad=True)
|
||||
model_clone = copy.deepcopy(model)
|
||||
criterion = torch.nn.MSELoss()
|
||||
model_optimizer = torch.optim.SGD(model.parameters(), lr=0.02)
|
||||
model_optimizer = torch.optim.SGD([model], lr=0.02)
|
||||
accelerator = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)
|
||||
model, model_optimizer, dataloader = accelerator.prepare(model, model_optimizer, dataloader)
|
||||
model_clone_optimizer = torch.optim.SGD(model_clone.parameters(), lr=0.02)
|
||||
print(f"initial model weight is {model.weight.mean().item():.5f}")
|
||||
print(f"initial model weight is {model_clone.weight.mean().item():.5f}")
|
||||
model_clone_optimizer = torch.optim.SGD([model_clone], lr=0.02)
|
||||
print(f"initial model weight is {model.mean().item():.5f}")
|
||||
print(f"initial model weight is {model_clone.mean().item():.5f}")
|
||||
for i, (inputs, labels) in enumerate(dataloader):
|
||||
with accelerator.accumulate(model):
|
||||
inputs = inputs.view(-1, 1)
|
||||
print(i, inputs.flatten())
|
||||
labels = labels.view(-1, 1)
|
||||
outputs = model(inputs)
|
||||
outputs = inputs @ model
|
||||
loss = criterion(outputs, labels)
|
||||
accelerator.backward(loss)
|
||||
model_optimizer.step()
|
||||
model_optimizer.zero_grad()
|
||||
loss = criterion(x.view(-1, 1) @ model_clone.weight, y.view(-1, 1))
|
||||
loss = criterion(x.view(-1, 1) @ model_clone, y.view(-1, 1))
|
||||
model_clone_optimizer.zero_grad()
|
||||
loss.backward()
|
||||
model_clone_optimizer.step()
|
||||
print(f"w/ accumulation, the final model weight is {model.weight.mean().item():.5f}")
|
||||
print(f"w/o accumulation, the final model weight is {model_clone.weight.mean().item():.5f}")
|
||||
print(f"w/ accumulation, the final model weight is {model.mean().item():.5f}")
|
||||
print(f"w/o accumulation, the final model weight is {model_clone.mean().item():.5f}")
|
||||
```
|
||||
```
|
||||
initial model weight is 0.00000
|
||||
@ -238,233 +230,3 @@ initial model weight is 0.00000
|
||||
w/ accumulation, the final model weight is 2.04000
|
||||
w/o accumulation, the final model weight is 2.04000
|
||||
```
|
||||
|
||||
## Gradient accumulation on training samples of variable size
|
||||
|
||||
As pointed out in this [blog-post](https://huggingface.co/blog/gradient_accumulation), a common error occurs when performing gradient accumulation on training samples of variable size:
|
||||
|
||||
> [...] for gradient accumulation across token-level tasks like causal LM training, the correct loss should be computed by the **total loss across all batches in a gradient accumulation step** divided by the **total number of all non padding tokens in those batches**. This is not the same as the average of the per-batch loss values.
|
||||
|
||||
In other words, some adjustments must be made to losses that operate on a token-level basis.
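A tiny worked example with made-up numbers shows why the two quantities differ:

```python
# Two accumulated micro-batches with very different numbers of non-padding tokens.
tokens = [10, 90]             # non-padding tokens per micro-batch
summed_losses = [10.0, 45.0]  # token-level loss summed within each micro-batch

correct = sum(summed_losses) / sum(tokens)                       # 0.55
naive = sum(l / t for l, t in zip(summed_losses, tokens)) / 2    # 0.75, overweights the short batch
print(correct, naive)
```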
|
||||
|
||||
### Skeleton code
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
import math
|
||||
import contextlib
|
||||
|
||||
gradient_accumulation_steps = 2
|
||||
accelerator = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)
|
||||
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
|
||||
model, optimizer, training_dataloader, scheduler
|
||||
)
|
||||
|
||||
training_iterator = iter(training_dataloader)
|
||||
num_samples_in_epoch = len(training_dataloader)
|
||||
remainder = num_samples_in_epoch % gradient_accumulation_steps
|
||||
remainder = remainder if remainder != 0 else gradient_accumulation_steps
|
||||
total_updates = math.ceil(num_samples_in_epoch / gradient_accumulation_steps)
|
||||
|
||||
|
||||
total_batched_samples = 0
|
||||
for update_step in range(total_updates):
|
||||
# In order to correctly compute the total number of non-padded tokens on which we'll compute the cross-entropy loss
|
||||
# we need to pre-load the full local batch - i.e. the next per_device_batch_size * accumulation_steps samples
|
||||
batch_samples = []
|
||||
num_batches_in_step = gradient_accumulation_steps if update_step != (total_updates - 1) else remainder
|
||||
for _ in range(num_batches_in_step):
|
||||
batch_samples += [next(training_iterator)]
|
||||
|
||||
# get local num items in batch
|
||||
num_items_in_batch = sum([(batch["labels"].ne(-100)).sum() for batch in batch_samples])
|
||||
# to compute it correctly in a multi-device DDP training, we need to gather the total number of items in the full batch.
|
||||
num_items_in_batch = accelerator.gather(num_items_in_batch).sum().item()
|
||||
|
||||
for i, batch in enumerate(batch_samples):
|
||||
# if we perform gradient accumulation in a multi-device set-up, we want to avoid unnecessary communications when accumulating
|
||||
# cf: https://muellerzr.github.io/blog/gradient_accumulation.html
|
||||
if (i < len(batch_samples) - 1 and accelerator.num_processes > 1):
|
||||
ctx = model.no_sync
|
||||
else:
|
||||
ctx = contextlib.nullcontext
|
||||
|
||||
total_batched_samples += 1
|
||||
|
||||
with ctx():
|
||||
inputs, targets = batch
|
||||
outputs = model(inputs)
|
||||
loss = loss_function(outputs, targets) # the loss function should sum over samples rather than averaging
|
||||
|
||||
# We multiply by num_processes because the DDP calculates the average gradient across all devices whereas dividing by num_items_in_batch already takes into account all devices
|
||||
# Same reason for gradient_accumulation_steps, but this time it's Accelerate that calculates the average gradient across the accumulated steps
|
||||
loss = (loss * gradient_accumulation_steps * accelerator.num_processes) / num_items_in_batch
|
||||
|
||||
accelerator.backward(loss)
|
||||
|
||||
# Sync gradients and perform optimization steps once every gradient_accumulation_steps
|
||||
optimizer.step()
|
||||
scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
### Self-contained causal LM example
|
||||
|
||||
```py
|
||||
import torch
|
||||
import copy
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import set_seed
|
||||
from accelerate.logging import get_logger
|
||||
from torch.utils.data import Dataset, DataLoader
|
||||
import math
|
||||
import contextlib
|
||||
|
||||
# seed
|
||||
set_seed(0)
|
||||
logger = get_logger(__name__)
|
||||
|
||||
class MyDataset(Dataset):
|
||||
def __init__(self, num_samples):
|
||||
super().__init__()
|
||||
self.len = num_samples
|
||||
|
||||
def __getitem__(self, index):
|
||||
input_ids = torch.arange(1, index+2, dtype=torch.float32)
|
||||
labels = torch.remainder(input_ids, 2)
|
||||
return {"input_ids": input_ids, "labels": labels}
|
||||
|
||||
def __len__(self):
|
||||
return self.len
|
||||
|
||||
def collate_fn(features):
|
||||
input_ids = torch.nn.utils.rnn.pad_sequence([f["input_ids"] for f in features], batch_first=True, padding_value=-100)
|
||||
labels = torch.nn.utils.rnn.pad_sequence([f["labels"] for f in features], batch_first=True, padding_value=-100)
|
||||
return {"input_ids": input_ids[..., None], "labels": labels[..., None]}
|
||||
|
||||
# define toy inputs and labels
|
||||
gradient_accumulation_steps = 2
|
||||
per_device_batch_size = 4
|
||||
|
||||
# define accelerator
|
||||
accelerator = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)
|
||||
|
||||
# define dataset and dataloader
|
||||
# for this toy example, we'll compute gradient descent over one single global batch
|
||||
dataset = MyDataset(per_device_batch_size*gradient_accumulation_steps*accelerator.num_processes)
|
||||
dataloader = DataLoader(dataset, batch_size=per_device_batch_size, collate_fn=collate_fn)
|
||||
|
||||
# define model, model_optimizer and loss function
|
||||
model = torch.nn.Linear(1, 2, bias=False)
|
||||
model_clone = copy.deepcopy(model)
|
||||
criterion = torch.nn.CrossEntropyLoss(reduction="sum") # must sum over samples rather than averaging
|
||||
model_optimizer = torch.optim.SGD(model.parameters(), lr=0.08)
|
||||
|
||||
|
||||
logger.warning(f"initial model weight is {model.weight.detach().cpu().squeeze()}")
|
||||
logger.warning(f"initial model clone weight is {model_clone.weight.detach().cpu().squeeze()}")
|
||||
|
||||
# prepare artifacts - accelerator handles device placement and dataloader splitting
|
||||
model, model_optimizer = accelerator.prepare(model, model_optimizer)
|
||||
dataloader = accelerator.prepare_data_loader(dataloader, device_placement=True)
|
||||
training_iterator = iter(dataloader)
|
||||
|
||||
num_samples_in_epoch = len(dataloader)
|
||||
remainder = num_samples_in_epoch % gradient_accumulation_steps
|
||||
remainder = remainder if remainder != 0 else gradient_accumulation_steps
|
||||
total_gradient_updates = math.ceil(num_samples_in_epoch / gradient_accumulation_steps)
|
||||
|
||||
total_batched_samples = 0
|
||||
for update_step in range(total_gradient_updates):
|
||||
# In order to correctly compute the total number of non-padded tokens on which we'll compute the cross-entropy loss
|
||||
# we need to pre-load the full local batch - i.e. the next per_device_batch_size * accumulation_steps samples
|
||||
batch_samples = []
|
||||
num_batches_in_step = gradient_accumulation_steps if update_step != (total_gradient_updates - 1) else remainder
|
||||
for _ in range(num_batches_in_step):
|
||||
batch_samples += [next(training_iterator)]
|
||||
|
||||
# get local num items in batch
|
||||
local_num_items_in_batch = sum([(batch["labels"].ne(-100)).sum() for batch in batch_samples])
|
||||
logger.warning(f"Step {update_step} - Device {accelerator.process_index} - num items in the local batch {local_num_items_in_batch}", main_process_only=False)
|
||||
|
||||
# to compute it correctly in a multi-device DDP training, we need to gather the total number of items in the full batch.
|
||||
num_items_in_batch = accelerator.gather(local_num_items_in_batch).sum().item()
|
||||
logger.warning(f"Total num items {num_items_in_batch}")
|
||||
|
||||
for i, batch in enumerate(batch_samples):
|
||||
inputs, labels = batch["input_ids"], batch["labels"]
|
||||
total_batched_samples += 1
|
||||
# if we perform gradient accumulation in a multi-device set-up, we want to avoid unnecessary communications when accumulating
|
||||
# cf: https://muellerzr.github.io/blog/gradient_accumulation.html
|
||||
if (i < len(batch_samples) - 1 and accelerator.num_processes > 1):
|
||||
ctx = model.no_sync
|
||||
else:
|
||||
ctx = contextlib.nullcontext
|
||||
with ctx():
|
||||
|
||||
outputs = model(inputs)
|
||||
loss = criterion(outputs.view(-1, 2), labels.view(-1).to(torch.int64))
|
||||
|
||||
# We multiply by num_processes because the DDP calculates the average gradient across all devices whereas dividing by num_items_in_batch already takes into account all devices
|
||||
# Same reason for gradient_accumulation_steps, but this time it's Accelerate that calculates the average gradient across the accumulated steps
|
||||
loss = (loss * gradient_accumulation_steps * accelerator.num_processes) / num_items_in_batch
|
||||
accelerator.backward(loss)
|
||||
model_optimizer.step()
|
||||
model_optimizer.zero_grad()
|
||||
|
||||
|
||||
logger.warning(f"Device {accelerator.process_index} - w/ accumulation, the final model weight is {accelerator.unwrap_model(model).weight.detach().cpu().squeeze()}", main_process_only=False)
|
||||
|
||||
# We now do the same operation but on a single device and without gradient accumulation
|
||||
|
||||
if accelerator.is_main_process:
|
||||
# prepare one single entire batch
|
||||
dataloader = DataLoader(dataset, batch_size=len(dataset), collate_fn=collate_fn)
|
||||
full_batch_without_accum = next(iter(dataloader))
|
||||
total_inputs, total_labels = full_batch_without_accum["input_ids"], full_batch_without_accum["labels"]
|
||||
model_clone_optimizer = torch.optim.SGD(model_clone.parameters(), lr=0.08)
|
||||
|
||||
# train the cloned model
|
||||
loss = torch.nn.CrossEntropyLoss(reduction="mean")(model_clone(total_inputs).view(-1, 2), total_labels.view(-1).to(torch.int64))
|
||||
model_clone_optimizer.zero_grad()
|
||||
loss.backward()
|
||||
model_clone_optimizer.step()
|
||||
|
||||
# We should have the same final weights.
|
||||
logger.warning(f"w/o accumulation, the final model weight is {model_clone.weight.detach().cpu().squeeze()}")
|
||||
|
||||
```
|
||||
|
||||
Results on a single device - gradient accumulation steps set to 1 and batch_size set to 8:
|
||||
```
|
||||
initial model weight is tensor([-0.0075, 0.5364])
|
||||
initial model clone weight is tensor([-0.0075, 0.5364])
|
||||
Step 0 - Device 0 - num items in the local batch 36
|
||||
Total num items 36
|
||||
Device 0 - w/ accumulation, the final model weight is tensor([0.0953, 0.4337])
|
||||
w/o accumulation, the final model weight is tensor([0.0953, 0.4337])
|
||||
```
|
||||
|
||||
Results on a two-device set-up - gradient accumulation steps set to 2 and batch_size set to 4.
|
||||
```
|
||||
initial model weight is tensor([-0.0075, 0.5364])
|
||||
initial model clone weight is tensor([-0.0075, 0.5364])
|
||||
Step 0 - Device 0 - num items in the local batch 52
|
||||
Step 0 - Device 1 - num items in the local batch 84
|
||||
Total num items 136
|
||||
Device 1 - w/ accumulation, the final model weight is tensor([0.2117, 0.3172])
|
||||
Device 0 - w/ accumulation, the final model weight is tensor([0.2117, 0.3172])
|
||||
w/o accumulation, the final model weight is tensor([0.2117, 0.3172])
|
||||
```
|
||||
|
||||
### To go further:
|
||||
|
||||
Please find a complete example script on a real world training run in the examples folder at the path [`accelerate/examples/by_feature/gradient_accumulation_for_autoregressive_models.py`](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/gradient_accumulation_for_autoregressive_models.py).
|
||||
|
||||
Running it on several training configurations with constant global batch size equal to 32 gives the following graph:
|
||||
|
||||
<div style="text-align: center">
|
||||
<img src="https://huggingface.co/datasets/hf-audio/gradient_accumulation_example/resolve/main/training_losses.png">
|
||||
</div>
|
||||
|
||||
Note that the training losses are exactly the same up to training step 20. The small deviation after this training step occurs at the very end of the first epoch, because, by [default](https://huggingface.co/docs/accelerate/en/package_reference/torch_wrappers#accelerate.data_loader.prepare_data_loader.even_batches), the dataloader duplicates the samples at the beginning of the dataset when the total batch size doesn't exactly divide the size of the dataset.
|
||||
|
||||
@ -94,9 +94,6 @@ use_cpu: true
|
||||
accelerate launch examples/nlp_example.py
|
||||
```
|
||||
|
||||
> [!CAUTION]
|
||||
> `accelerator.prepare` can currently only handle simultaneously preparing multiple models (and no optimizer) OR a single model-optimizer pair for training. Other attempts (e.g., two model-optimizer pairs) will raise a verbose error. To work around this limitation, consider separately using `accelerator.prepare` for each model-optimizer pair.
|
||||
|
||||
**Scenario 2**: Acceleration of distributed CPU training
|
||||
We use Intel oneCCL for communication, combined with the Intel® MPI library, to deliver flexible, efficient, scalable cluster messaging on Intel® architecture. You can refer to the installation guide [here](https://huggingface.co/docs/transformers/perf_train_cpu_many).
|
||||
|
||||
|
||||
@ -92,7 +92,7 @@ Under the hood, the Local SGD code **disables** automatic gradient synchronizati
|
||||
|
||||
## Limitations
|
||||
|
||||
The current implementation works only with basic multi-GPU (or multi-CPU) training without, e.g., [DeepSpeed.](https://github.com/deepspeedai/DeepSpeed).
|
||||
The current implementation works only with basic multi-GPU (or multi-CPU) training without, e.g., [DeepSpeed.](https://github.com/microsoft/DeepSpeed).
|
||||
|
||||
## References
|
||||
|
||||
|
||||
@ -15,7 +15,7 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# Low Precision Training Methods
|
||||
|
||||
Accelerate provides integrations to train on lower precision methods using specified supported hardware through the `TransformersEngine`, `MS-AMP`, and `torchao` packages. This documentation will help guide you through what hardware is supported, how to configure your [`Accelerator`] to leverage the low precision methods, and what you can expect when training.
|
||||
Accelerate provides integrations to train on lower precision methods using specified supported hardware through the `TransformersEngine` and `MS-AMP` packages. This documentation will help guide you through what hardware is supported, how to configure your [`Accelerator`] to leverage the low precision methods, and what you can expect when training.
|
||||
|
||||
## What training on FP8 means
|
||||
|
||||
@ -26,11 +26,11 @@ This is only enabled on specific NVIDIA hardware, namely:
|
||||
* Anything after the 3000 series consumer graphics cards (such as the 4090)
|
||||
* Hopper-based GPU architectures (such as the `H100` and `H200`)
|
||||
|
||||
What this will result in is some reduction in the memory used (as we've cut the needed memory in half for some parts of training) and an increase in throughput *should* be seen as well for larger models that can replace certain layers with FP8-enabled ones.
|
||||
What this will result in is some gain in the memory used (as we've cut the needed memory in half for some parts of training) and an increase in throughput *should* be seen as well for larger models that can replace certain layers with FP8-enabled ones.
|
||||
|
||||
## Configuring the Accelerator
|
||||
|
||||
Currently three different backends for FP8 are supported (`TransformersEngine`, `torchao`, and `MS-AMP`), each with different capabilities and configurations.
|
||||
Currently two different backends for FP8 are supported (`TransformersEngine` and `MS-AMP`), each with different capabilities and configurations.
|
||||
|
||||
To use either, the same core API is used. Just pass `mixed_precision="fp8"` to either the [`Accelerator`], during `accelerate config` when prompted about mixed precision, or as part of your `config.yaml` file in the `mixed_precision` key:
|
||||
|
||||
@ -39,29 +39,27 @@ from accelerate import Accelerator
|
||||
accelerator = Accelerator(mixed_precision="fp8")
|
||||
```
|
||||
|
||||
By default, if `MS-AMP` is available in your environment, Accelerate will automatically utilize it as a backend. To specify it yourself (and customize other parts of the FP8 mixed precision setup), you can utilize one of the `RecipeKwargs` dataclasses such as [`utils.AORecipeKwargs`], [`utils.TERecipeKwargs`], or [`utils.MSAMPRecipeKwargs`]; you can also clarify it in your config `yaml`/during `accelerate launch`:
|
||||
By default, if `MS-AMP` is available in your environment, Accelerate will automatically utilize it as a backend. To specify it yourself (and customize other parts of the FP8 mixed precision setup), you can utilize the [`utils.FP8RecipeKwargs`] or clarify it in your config `yaml`/during `accelerate launch`:
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import MSAMPRecipeKwargs
|
||||
kwargs = [MSAMPRecipeKwargs()]
|
||||
from accelerate.utils import FP8RecipeKwargs
|
||||
kwargs = [FP8RecipeKwargs(backend="msamp")]
|
||||
# Or to specify the backend as `TransformersEngine` even if MS-AMP is installed
|
||||
# kwargs = [TERecipeKwargs()]
|
||||
# Or to use torchao
|
||||
# kwargs = [AORecipeKwargs()]
|
||||
# kwargs = [FP8RecipeKwargs(backend="te")]
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
|
||||
```
|
||||
|
||||
```{yaml}
|
||||
mixed_precision: fp8
|
||||
fp8_config:
|
||||
amax_compute_algo: max
|
||||
amax_history_len: 1024
|
||||
amax_compute_algorithm: max
|
||||
amax_history_length: 1024
|
||||
backend: TE
|
||||
fp8_format: HYBRID
|
||||
interval: 1
|
||||
margin: 0
|
||||
override_linear_precision: (false, false, false)
|
||||
override_linear_precision: false
|
||||
use_autocast_during_eval: false
|
||||
```
|
||||
|
||||
@ -96,7 +94,7 @@ fp8_config:
|
||||
|
||||
## Configuring TransformersEngine
|
||||
|
||||
TransformersEngine has many options for customizing how and what FP8 calculations are performed. A full list of supported arguments and what they mean are available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html), however they are restated as part of [`FP8KwargsHandler`]'s docstring for your convenience.
|
||||
TransformersEngine has much more available for customizing how and what FP8 calculations are performed. A full list of supported arguments and what they mean are available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html), however they are restated as part of [`FP8KwargsHandler`]'s docstring for your convenience.
|
||||
|
||||
Accelerate tries to set sensible defaults, but exploring and tweaking the various parameters yourself can potentially lead to better performance.
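For instance, a hedged sketch of overriding a couple of the TransformersEngine defaults through [`utils.TERecipeKwargs`] (the field names are assumptions mirroring the `fp8_config` keys shown below; check the docstring for the exact set):

```{python}
from accelerate import Accelerator
from accelerate.utils import TERecipeKwargs

# Assumed fields corresponding to the yaml keys fp8_format, amax_history_len and amax_compute_algo.
kwargs = [TERecipeKwargs(fp8_format="HYBRID", amax_history_len=1024, amax_compute_algo="max")]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```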
|
||||
|
||||
@ -116,32 +114,16 @@ Similarly this can be set in your `config.yaml`:
|
||||
```{yaml}
|
||||
mixed_precision: fp8
|
||||
fp8_config:
|
||||
amax_compute_algo: max
|
||||
amax_history_len: 1024
|
||||
amax_compute_algorithm: max
|
||||
amax_history_length: 1024
|
||||
backend: TE
|
||||
fp8_format: HYBRID
|
||||
interval: 1
|
||||
margin: 0
|
||||
override_linear_precision: (false, false, false)
|
||||
override_linear_precision: false
|
||||
use_autocast_during_eval: false
|
||||
```
|
||||
|
||||
## Configuring `torchao`
|
||||
|
||||
`torchao` is a [PyTorch-driven](https://github.com/pytorch/ao/tree/main/torchao/float8) hackable FP8 backend, aiming to be more approachable than the prior two engines. One of the core differences with `ao` compared to the prior two is that, for numerical stability, it's generally better to keep the first *and* last layers in the model at regular precision (be it FP32 or BF16), and quantize the other layers down to FP8. As a result, a config for `ao` looks a bit different:
|
||||
|
||||
> Note: this API is experimental and is subject to change
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import AORecipeKwargs
|
||||
kwargs = [AORecipeKwargs()]
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
|
||||
```
|
||||
|
||||
To learn more about the specific parameters to be used, please see the official `torchao` repo.
|
||||
|
||||
|
||||
## Example Zoo
|
||||
|
||||
We have examples showcasing training with FP8 both with accelerate and its underlying implementation available in the accelerate repo.
|
||||
@ -161,4 +143,3 @@ To learn more about training in FP8 please check out the following resources:
|
||||
* [Our concept guide](../concept_guides/low_precision_training) detailing into more about both TransformersEngine and MS-AMP
|
||||
* [The `transformers-engine` documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html)
|
||||
* [The `MS-AMP` documentation](https://azure.github.io/MS-AMP/docs/)
|
||||
* [The `torchao` documentation](https://github.com/pytorch/ao/tree/main/torchao/float8)
|
||||
|
||||
@ -19,7 +19,7 @@ rendered properly in your Markdown viewer.
|
||||
[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) enables training large transformer language models at scale.
|
||||
It provides efficient tensor, pipeline and sequence based model parallelism for pre-training transformer based
|
||||
Language Models such as [GPT](https://arxiv.org/abs/2005.14165) (Decoder Only), [BERT](https://arxiv.org/pdf/1810.04805.pdf) (Encoder Only) and [T5](https://arxiv.org/abs/1910.10683) (Encoder-Decoder).
|
||||
For detailed information and how things work behind the scene please refer to the github [repo](https://github.com/NVIDIA/Megatron-LM).
|
||||
For detailed information and how things work behind the scene please refer the github [repo](https://github.com/NVIDIA/Megatron-LM).
|
||||
|
||||
## What is integrated?
|
||||
|
||||
@ -30,7 +30,7 @@ a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional
|
||||
Each tensor is split into multiple chunks with each shard residing on separate GPU. At each step, the same mini-batch of data is processed
|
||||
independently and in parallel by each shard followed by syncing across all GPUs (`all-reduce` operation).
|
||||
In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path.
|
||||
For more details, please refer to the research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using
|
||||
For more details, please refer research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using
|
||||
Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and
|
||||
this section of blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism).
|
||||
|
||||
@ -45,7 +45,7 @@ this section of blogpost [The Technology Behind BLOOM Training](https://huggingf
|
||||
|
||||
c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.
|
||||
It reduces the activation memory required as it prevents the same copies from being on the tensor parallel ranks
|
||||
post `all-reduce` by replacing them with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`.
|
||||
post `all-reduce` by replacing then with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`.
|
||||
As `all-reduce = reduce-scatter + all-gather`, this saves a ton of activation memory at no added communication cost.
|
||||
To put it simply, it shards the outputs of each transformer layer along sequence dimension, e.g.,
|
||||
if the sequence length is `1024` and the TP size is `4`, each GPU will have `256` tokens (1024/4) for each sample.
|
||||
@ -56,7 +56,7 @@ d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces the memory footp
|
||||
(versus the traditional method of replicating the optimizer state across data parallel ranks).
|
||||
For example, when using Adam optimizer with mixed-precision training, each parameter accounts for 12 bytes of memory.
|
||||
This gets distributed equally across the GPUs, i.e., each parameter would account for 3 bytes (12/4) if we have 4 GPUs.
|
||||
For more details, please refer to the research paper [ZeRO: Memory Optimizations Toward Training Trillion
|
||||
For more details, please refer the research paper [ZeRO: Memory Optimizations Toward Training Trillion
|
||||
Parameter Models](https://arxiv.org/pdf/1910.02054.pdf) and following section of blog
|
||||
[The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#zero-data-parallelism).
|
||||
|
||||
@ -66,7 +66,7 @@ For example, for GPT-3, this leads to 70% reduction in required memory for activ
|
||||
only 2.7% FLOPs overhead for recomputation of activations. For more details, please refer to the research paper
|
||||
[Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf).
|
||||
|
||||
f. **Fused Kernels**: Fused Softmax, Mixed Precision Fused Layer Norm and Fused gradient accumulation to weight gradient computation of linear layer.
|
||||
PyTorch JIT compiled Fused GeLU and Fused Bias+Dropout+Residual addition.
|
||||
|
||||
g. **Support for Indexed datasets**: Efficient binary format of datasets for large scale training. Support for the `mmap`, `cached` index file and the `lazy` loader format.
|
||||
@ -445,7 +445,7 @@ python checkpoint_utils/megatgron_gpt2/checkpoint_reshaping_and_interoperability
|
||||
## Megatron-LM GPT models support returning logits and `megatron_generate` function for text generation
|
||||
|
||||
1. Returning logits requires setting `require_logits=True` in MegatronLMPlugin as shown below.
|
||||
These would be available in the last stage of pipeline.
|
||||
These would be available on the in the last stage of pipeline.
|
||||
```python
|
||||
megatron_lm_plugin = MegatronLMPlugin(return_logits=True)
|
||||
```
|
||||
@ -569,7 +569,7 @@ setting is synonymous with gradient accumulation.
|
||||
|
||||
7. When using Megatron-LM, use `accelerator.save_state` and `accelerator.load_state` for saving and loading checkpoints.
|
||||
|
||||
8. Below are the mapping from Megatron-LM model architectures to the equivalent transformers model architectures.
|
||||
8. Below are the mapping from Megatron-LM model architectures to the the equivalent transformers model architectures.
|
||||
Only these transformers model architectures are supported.
|
||||
|
||||
a. Megatron-LM [BertModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/bert_model.py) :
|
||||
|
||||
@ -44,7 +44,10 @@ accelerate launch /examples/cv_example.py --data_dir images
|
||||
|
||||
## A few caveats to be aware of
|
||||
|
||||
1. Distributed setups `gloo` and `nccl` are not working with `mps` device.
|
||||
1. We strongly recommend to install PyTorch >= 1.13 (nightly version at the time of writing) on your MacOS machine.
|
||||
It has major fixes related to model correctness and performance improvements for transformer based models.
|
||||
Please refer to https://github.com/pytorch/pytorch/issues/82707 for more details.
|
||||
2. Distributed setups `gloo` and `nccl` are not working with `mps` device.
|
||||
This means that currently only single GPU of `mps` device type can be used.
|
||||
|
||||
Finally, please, remember that, `Accelerate` only integrates MPS backend, therefore if you
|
||||
|
||||
@ -185,8 +185,6 @@ prof.export_chrome_trace("trace.json")
|
||||
<hfoption id="Accelerate">
|
||||
|
||||
```python
|
||||
model = models.resnet18()
|
||||
inputs = torch.randn(5, 3, 224, 224).cuda()
|
||||
profile_kwargs = ProfileKwargs(
|
||||
activities=["cpu", "cuda"],
|
||||
output_trace_dir="trace"
|
||||
@ -200,7 +198,6 @@ with accelerator.profile() as prof:
|
||||
|
||||
# The trace will be saved to the specified directory
|
||||
```
|
||||
For other hardware accelerators, e.g. XPU, you can change `cuda` to `xpu` in the above example code.
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
@ -221,7 +218,7 @@ To illustrate how the API works, consider the following example:
|
||||
from torch.profiler import schedule
|
||||
|
||||
my_schedule = schedule(
|
||||
skip_first=1,
|
||||
skip_first=10,
|
||||
wait=5,
|
||||
warmup=1,
|
||||
active=3,
|
||||
@ -254,7 +251,7 @@ def trace_handler(p):
|
||||
|
||||
profile_kwargs = ProfileKwargs(
|
||||
activities=["cpu", "cuda"],
|
||||
schedule_option={"wait": 5, "warmup": 1, "active": 3, "repeat": 2, "skip_first": 1},
|
||||
schedule_option={"wait": 5, "warmup": 1, "active": 3, "repeat": 2, "skip_first": 10},
|
||||
on_trace_ready=trace_handler
|
||||
)
|
||||
|
||||
|
||||
@ -30,8 +30,6 @@ You will need to install the following requirements:
|
||||
```bash
|
||||
pip install bitsandbytes
|
||||
```
|
||||
For non-cuda devices, you can refer to the bitsandbytes installation guide [here](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend).
|
||||
|
||||
- Install latest `accelerate` from source
|
||||
```bash
|
||||
pip install git+https://github.com/huggingface/accelerate.git
|
||||
@ -86,7 +84,7 @@ To quantize your empty model with the selected configuration, you need to use [`
|
||||
|
||||
```py
|
||||
from accelerate.utils import load_and_quantize_model
|
||||
quantized_model = load_and_quantize_model(empty_model, weights_location=weights_location, bnb_quantization_config=bnb_quantization_config)
|
||||
quantized_model = load_and_quantize_model(empty_model, weights_location=weights_location, bnb_quantization_config=bnb_quantization_config, device_map = "auto")
|
||||
```
|
||||
|
||||
### Saving and loading 8-bit model
|
||||
@ -135,4 +133,4 @@ Note that you don’t need to pass `device_map` when loading the model for train
|
||||
|
||||
### Example demo - running GPT2 1.5b on a Google Colab
|
||||
|
||||
Check out the Google Colab [demo](https://colab.research.google.com/drive/1T1pOgewAWVpR9gKpaEWw4orOrzPFb3yM?usp=sharing) for running quantized models on a GPT2 model. The GPT2-1.5B model checkpoint is in FP32 which uses 6GB of memory. After quantization, it uses 1.6GB with 8-bit modules and 1.2GB with 4-bit modules.
|
||||
Check out the Google Colab [demo](https://colab.research.google.com/drive/1T1pOgewAWVpR9gKpaEWw4orOrzPFb3yM?usp=sharing) for running quantized models on a GTP2 model. The GPT2-1.5B model checkpoint is in FP32 which uses 6GB of memory. After quantization, it uses 1.6GB with 8-bit modules and 1.2GB with 4-bit modules.
|
||||
|
||||
@ -145,8 +145,8 @@ image_uri: null
|
||||
mixed_precision: fp16
|
||||
num_machines: 1
|
||||
profile: xxxxx
|
||||
py_version: py10
|
||||
pytorch_version: 2.5.0
|
||||
py_version: py38
|
||||
pytorch_version: 1.10.2
|
||||
region: us-east-1
|
||||
transformers_version: 4.17.0
|
||||
use_cpu: false
|
||||
|
||||
@ -15,7 +15,7 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# Experiment trackers
|
||||
|
||||
There are a large number of experiment tracking APIs available, however getting them all to work in a multi-processing environment can oftentimes be complex.
|
||||
There are a large number of experiment tracking API's available, however getting them all to work with in a multi-processing environment can oftentimes be complex.
|
||||
Accelerate provides a general tracking API that can be used to log useful items during your script through [`Accelerator.log`]
|
||||
|
||||
## Integrated Trackers
|
||||
@ -71,12 +71,12 @@ config = {
|
||||
|
||||
accelerator.init_trackers("example_project", config=config)
|
||||
|
||||
my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)
|
||||
my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader)
|
||||
device = accelerator.device
|
||||
my_model.to(device)
|
||||
|
||||
for iteration in range(config["num_iterations"]):
|
||||
for step, batch in enumerate(my_training_dataloader):
|
||||
for iteration in config["num_iterations"]:
|
||||
for step, batch in my_training_dataloader:
|
||||
my_optimizer.zero_grad()
|
||||
inputs, targets = batch
|
||||
inputs = inputs.to(device)
|
||||
@ -184,7 +184,7 @@ wandb_tracker = accelerator.get_tracker("wandb")
|
||||
From there you can interact with `wandb`'s `run` object like normal:
|
||||
|
||||
```python
|
||||
wandb_tracker.log_artifact(some_artifact_to_log)
|
||||
wandb_run.log_artifact(some_artifact_to_log)
|
||||
```
|
||||
|
||||
<Tip>
|
||||
@ -208,10 +208,10 @@ if accelerator.is_main_process:
|
||||
If a library has an API that does not follow a strict `.log` with an overall dictionary such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement:
|
||||
```diff
|
||||
from accelerate import Accelerator
|
||||
+ import neptune
|
||||
+ import neptune.new as neptune
|
||||
|
||||
accelerator = Accelerator()
|
||||
+ run = neptune.init_run(...)
|
||||
+ run = neptune.init(...)
|
||||
|
||||
my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader)
|
||||
device = accelerator.device
|
||||
|
||||
@ -225,7 +225,7 @@ In [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we must specify the
|
||||
|
||||
In [/slurm/submit_multicpu.sh](./slurm/submit_multicpu.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many CPU processes we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip` which will be the address of the master node, and the `--main_process_port`. `mpirun_hostfile` specifies that the job should be run using MPIRun.
|
||||
|
||||
In both scripts, we run `activateEnvironment.sh` at the beginning. This script should contain the necessary instructions to initialize the environment for execution. Below, we show an example that loads the necessary libraries ([Environment modules](https://github.com/cea-hpc/modules)), activates the Python environment, and sets up various environment variables, most of them to run the scripts in offline mode in case we don't have internet connection from the cluster.
|
||||
In both scripts, we run `activateEnviroment.sh` at the beginning. This script should contain the necessary instructions to initialize the environment for execution. Below, we show an example that loads the necessary libraries ([Environment modules](https://github.com/cea-hpc/modules)), activates the Python environment, and sets up various environment variables, most of them to run the scripts in offline mode in case we don't have internet connection from the cluster.
|
||||
|
||||
```bash
|
||||
# activateEnvironment.sh
|
||||
|
||||
@ -12,6 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
from typing import List
|
||||
|
||||
import evaluate
|
||||
import numpy as np
|
||||
@ -60,7 +61,7 @@ EVAL_BATCH_SIZE = 32
|
||||
|
||||
|
||||
def get_fold_dataloaders(
|
||||
accelerator: Accelerator, dataset: DatasetDict, train_idxs: list[int], valid_idxs: list[int], batch_size: int = 16
|
||||
accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
|
||||
):
|
||||
"""
|
||||
Gets a set of train, valid, and test dataloaders for a particular fold
|
||||
|
||||
@ -1,341 +0,0 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
import contextlib
|
||||
import math
|
||||
import os
|
||||
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, get_constant_schedule, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate
|
||||
# and perform gradient accumulation on samples of variable size
|
||||
#
|
||||
# This example trains a SmolLM base model on WikiText-2 v1
|
||||
# in any of the following settings (with the same script):
|
||||
# - single CPU or single GPU
|
||||
# - multi GPUS (using PyTorch distributed mode)
|
||||
# - (multi) TPUs
|
||||
# - fp16 (mixed-precision) or fp32 (normal precision)
|
||||
#
|
||||
# To run it in each of these various modes, follow the instructions
|
||||
# in the readme for examples:
|
||||
# https://github.com/huggingface/accelerate/tree/main/examples
|
||||
#
|
||||
########################################################################
|
||||
|
||||
|
||||
EVAL_BATCH_SIZE = 32
|
||||
|
||||
|
||||
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, max_training_samples=500):
|
||||
"""
|
||||
Creates a set of `DataLoader`s for the `Salesforce/wikitext` dataset,
|
||||
using "HuggingFaceTB/SmolLM-360M" as the tokenizer.
|
||||
|
||||
Args:
|
||||
accelerator (`Accelerator`):
|
||||
An `Accelerator` object
|
||||
batch_size (`int`, *optional*):
|
||||
The batch size for the train and validation DataLoaders.
|
||||
"""
|
||||
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM-360M")
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
with accelerator.local_main_process_first():
|
||||
datasets = load_dataset("Salesforce/wikitext", "wikitext-2-v1")
|
||||
datasets["train"] = datasets["train"].select(range(max_training_samples))
|
||||
|
||||
def tokenize_function(examples):
|
||||
# max_length=None => use the model max length (it's actually the default)
|
||||
outputs = tokenizer(examples["text"], truncation=True, max_length=None, return_attention_mask=False)
|
||||
return outputs
|
||||
|
||||
# Filter out empty texts
|
||||
with accelerator.main_process_first():
|
||||
datasets = datasets.filter(
|
||||
lambda x: len(x) > 0,
|
||||
input_columns="text",
|
||||
)
|
||||
|
||||
# Apply the method we just defined to all the examples in all the splits of the dataset
|
||||
# starting with the main process first:
|
||||
with accelerator.main_process_first():
|
||||
tokenized_datasets = datasets.map(
|
||||
tokenize_function,
|
||||
batched=True,
|
||||
remove_columns=["text"],
|
||||
)
|
||||
|
||||
# Filter out empty samples
|
||||
with accelerator.main_process_first():
|
||||
tokenized_datasets = tokenized_datasets.filter(
|
||||
lambda x: len(x) > 0,
|
||||
input_columns="input_ids",
|
||||
)
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = (
|
||||
128
|
||||
if accelerator.distributed_type == DistributedType.XLA
|
||||
else max([len(e["input_ids"]) for e in examples])
|
||||
)
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
batch = tokenizer.pad(
|
||||
examples,
|
||||
padding="max_length",
|
||||
max_length=max_length + 1,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
batch["labels"] = batch["input_ids"][:, 1:]
|
||||
batch["input_ids"] = batch["input_ids"][:, :-1]
|
||||
|
||||
batch["labels"] = torch.where(batch["labels"] == tokenizer.pad_token_id, -100, batch["labels"])
|
||||
|
||||
return batch
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
tokenized_datasets["train"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
|
||||
)
|
||||
eval_dataloader = DataLoader(
|
||||
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
|
||||
)
|
||||
|
||||
return train_dataloader, eval_dataloader
|
||||
|
||||
|
||||
# For testing only
|
||||
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
|
||||
from accelerate.test_utils.training import mocked_dataloaders_for_autoregressive_models
|
||||
|
||||
get_dataloaders = mocked_dataloaders_for_autoregressive_models # noqa: F811
|
||||
|
||||
|
||||
def training_function(config, args):
|
||||
# For testing only
|
||||
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
|
||||
config["num_epochs"] = 2
|
||||
|
||||
gradient_accumulation_steps = int(args.gradient_accumulation_steps)
|
||||
# Initialize accelerator
|
||||
if args.with_wandb_tracking:
|
||||
accelerator = Accelerator(
|
||||
cpu=args.cpu,
|
||||
mixed_precision=args.mixed_precision,
|
||||
gradient_accumulation_steps=gradient_accumulation_steps,
|
||||
log_with="wandb",
|
||||
)
|
||||
else:
|
||||
accelerator = Accelerator(
|
||||
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
|
||||
)
|
||||
if accelerator.distributed_type == DistributedType.XLA and gradient_accumulation_steps > 1:
|
||||
raise NotImplementedError(
|
||||
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
|
||||
)
|
||||
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
|
||||
lr = config["lr"]
|
||||
num_epochs = int(config["num_epochs"])
|
||||
seed = int(config["seed"])
|
||||
batch_size = int(config["batch_size"])
|
||||
max_grad_norm = config["max_grad_norm"]
|
||||
|
||||
# We need to initialize the trackers we use, and also store our configuration
|
||||
if args.with_wandb_tracking:
|
||||
run = os.path.split(__file__)[-1].split(".")[0]
|
||||
run_name = f"{accelerator.num_processes}GPU-grad{gradient_accumulation_steps}-bs{batch_size}"
|
||||
accelerator.init_trackers(
|
||||
run,
|
||||
config,
|
||||
init_kwargs={"wandb": {"name": run_name}},
|
||||
)
|
||||
|
||||
set_seed(seed)
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
|
||||
model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/SmolLM-360M")
|
||||
|
||||
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
|
||||
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
|
||||
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
|
||||
model = model.to(accelerator.device)
|
||||
|
||||
# Instantiate optimizer
|
||||
optimizer = AdamW(params=model.parameters(), lr=lr)
|
||||
|
||||
# Instantiate scheduler
|
||||
lr_scheduler = get_constant_schedule(
|
||||
optimizer=optimizer,
|
||||
)
|
||||
|
||||
# Prepare everything
|
||||
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
|
||||
# prepare method.
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
|
||||
num_samples_in_epoch = len(train_dataloader)
|
||||
remainder = num_samples_in_epoch % gradient_accumulation_steps
|
||||
remainder = remainder if remainder != 0 else gradient_accumulation_steps
|
||||
total_gradient_updates = math.ceil(num_samples_in_epoch / gradient_accumulation_steps)
|
||||
|
||||
total_batched_samples = 0
|
||||
# Now we train the model
|
||||
for epoch in range(num_epochs):
|
||||
model.train()
|
||||
training_iterator = iter(train_dataloader)
|
||||
for update_step in range(total_gradient_updates):
|
||||
# In order to correctly compute the total number of non-padded tokens on which we'll compute the cross-entropy loss
|
||||
# we need to pre-load the full local batch - i.e the next per_device_batch_size * accumulation_steps samples
|
||||
batch_samples = []
|
||||
num_batches_in_step = (
|
||||
gradient_accumulation_steps if update_step != (total_gradient_updates - 1) else remainder
|
||||
)
|
||||
for _ in range(num_batches_in_step):
|
||||
batch_samples += [next(training_iterator)]
|
||||
# get local num items in batch
|
||||
local_num_items_in_batch = sum([(batch["labels"].ne(-100)).sum() for batch in batch_samples])
|
||||
|
||||
# to compute it correctly in a multi-device DDP training, we need to gather the total number of items in the full batch.
|
||||
num_items_in_batch = accelerator.gather(local_num_items_in_batch).sum().item()
|
||||
losses = []
|
||||
for i, batch in enumerate(batch_samples):
|
||||
# if we perform gradient accumulation in a multi-device set-up, we want to avoid unnecessary communications when accumulating
|
||||
# cf: https://muellerzr.github.io/blog/gradient_accumulation.html
|
||||
ctx = (
|
||||
model.no_sync
|
||||
if (i < len(batch_samples) - 1 and accelerator.num_processes > 1)
|
||||
else contextlib.nullcontext
|
||||
)
|
||||
with ctx():
|
||||
total_batched_samples += 1
|
||||
|
||||
outputs = model(**batch, use_cache=False, num_items_in_batch=num_items_in_batch)
|
||||
loss = outputs.loss
|
||||
|
||||
# We multiply by num_processes because the DDP calculates the average gradient across all devices whereas dividing by num_items_in_batch already takes into account all devices
|
||||
# The same reasoning applies to gradient_accumulation_steps, but this time it's Accelerate that calculates the average gradient across the accumulated steps
|
||||
# Because the loss is already divided by `num_items_in_batch` in the `transformers` code, we don't need to do it again
|
||||
loss = loss * gradient_accumulation_steps * accelerator.num_processes
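# Illustrative assumption: with accelerator.num_processes == 2 and gradient_accumulation_steps == 4, DDP's gradient averaging (x1/2) and Accelerate's accumulation averaging (x1/4) shrink the gradient by 8x, so multiplying the loss by 4 * 2 = 8 restores the intended 1/num_items_in_batch scale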
|
||||
accelerator.backward(loss)
|
||||
losses.append(loss.detach())
|
||||
|
||||
# Sync gradients and perform optimization steps once every gradient_accumulation_steps
|
||||
grad_norm = accelerator.clip_grad_norm_(model.parameters(), max_grad_norm)
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
losses = accelerator.gather(sum(losses)).sum().item() / (
|
||||
accelerator.num_processes * gradient_accumulation_steps
|
||||
)
|
||||
|
||||
grad_norm = grad_norm.detach().item() if isinstance(grad_norm, torch.Tensor) else grad_norm
|
||||
accelerator.print(
|
||||
f"epoch {epoch} - update step {update_step}:: grad norm: {grad_norm} ::train loss: {losses}"
|
||||
)
|
||||
if args.with_wandb_tracking:
|
||||
accelerator.log(
|
||||
{
|
||||
"train/grad_norm": grad_norm,
|
||||
"train/epoch": epoch,
|
||||
"train/loss": losses,
|
||||
},
|
||||
step=update_step + total_gradient_updates * epoch,
|
||||
)
|
||||
model.eval()
|
||||
losses = []
|
||||
for step, batch in enumerate(eval_dataloader):
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch, use_cache=False)
|
||||
eval_loss = outputs.loss
|
||||
losses.append(accelerator.gather_for_metrics(eval_loss.repeat(EVAL_BATCH_SIZE)))
|
||||
|
||||
losses = torch.cat(losses)
|
||||
try:
|
||||
eval_loss = torch.mean(losses)
|
||||
perplexity = math.exp(eval_loss)
|
||||
except OverflowError:
|
||||
perplexity = float("inf")
|
||||
|
||||
# Use accelerator.print to print only on the main process.
|
||||
accelerator.print(f"epoch {epoch}:: eval perplexity: {perplexity} eval_loss: {eval_loss}")
|
||||
if args.with_wandb_tracking:
|
||||
accelerator.log(
|
||||
{
|
||||
"eval/perplexity": perplexity,
|
||||
"eval/loss": eval_loss,
|
||||
"eval/epoch": epoch,
|
||||
},
|
||||
step=update_step + total_gradient_updates * epoch,
|
||||
)
|
||||
accelerator.end_training()
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Simple example of training script.")
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--gradient_accumulation_steps",
|
||||
type=int,
|
||||
default=1,
|
||||
help="The number of minibatches to be ran before gradients are accumulated.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--per_device_batch_size",
|
||||
type=int,
|
||||
default=2,
|
||||
help="The size of each minibatch",
|
||||
)
|
||||
|
||||
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
|
||||
parser.add_argument(
|
||||
"--with_wandb_tracking",
|
||||
action="store_true",
|
||||
help="Whether to load in wandb from the environment and use them for logging.",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": args.per_device_batch_size, "max_grad_norm": 1.0}
|
||||
training_function(config, args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@ -252,7 +252,7 @@ def main():
|
||||
|
||||
if args.with_tracking:
|
||||
accelerator_log_kwargs["log_with"] = args.report_to
|
||||
accelerator_log_kwargs["project_dir"] = args.output_dir
|
||||
accelerator_log_kwargs["logging_dir"] = args.output_dir
|
||||
|
||||
accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)
|
||||
|
||||
@ -611,7 +611,7 @@ def main():
|
||||
|
||||
if isinstance(checkpointing_steps, int):
|
||||
if completed_steps % checkpointing_steps == 0:
|
||||
output_dir = f"step_{completed_steps}"
|
||||
output_dir = f"step_{completed_steps }"
|
||||
if args.output_dir is not None:
|
||||
output_dir = os.path.join(args.output_dir, output_dir)
|
||||
accelerator.save_state(output_dir)
|
||||
|
||||
@ -7,11 +7,11 @@ fp8_config:
|
||||
backend: TE # Can be TE | MS-AMP
|
||||
# The following are TE specific arguments.
|
||||
# See https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html#common-api for more details
|
||||
amax_history_len: 1024
|
||||
amax_history_length: 1024
|
||||
fp8_format: E4M3
|
||||
interval: 1
|
||||
margin: 0
|
||||
override_linear_precision: (false, false, false)
|
||||
override_linear_precision: false
|
||||
# Generally this should always be set to `false` to have the most realistic fp8 eval performance
|
||||
use_autocast_during_eval: false
|
||||
# If using MS-AMP, we ignore all of the prior settings and set an opt_level
|
||||
|
||||
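For reference, a hedged Python sketch of passing an equivalent TE recipe programmatically via kwargs handlers (argument names mirror the YAML keys above; exact parameter availability depends on your `accelerate` version):

```python
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

fp8_kwargs = FP8RecipeKwargs(
    backend="TE",
    amax_history_len=1024,
    fp8_format="E4M3",
    interval=1,
    margin=0,
    override_linear_precision=(False, False, False),
)
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[fp8_kwargs])
```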
@ -24,7 +24,6 @@ from torch.utils.data import DataLoader, Dataset
|
||||
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import set_seed
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -94,7 +93,10 @@ def training_function(config, args):
|
||||
label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
|
||||
|
||||
# Set the seed before splitting the data.
|
||||
set_seed(seed)
|
||||
np.random.seed(seed)
|
||||
torch.manual_seed(seed)
|
||||
torch.cuda.manual_seed_all(seed)
|
||||
|
||||
# Split our filenames between train and validation
|
||||
random_perm = np.random.permutation(len(file_names))
|
||||
cut = int(0.8 * len(file_names))
|
||||
|
||||
@ -1,58 +0,0 @@
|
||||
# FSDP2 Examples
|
||||
|
||||
This folder contains examples of using FSDP2 with Accelerate, utilizing extra methods to improve training speed, performance or accuracy.
|
||||
|
||||
## FSDP2 + ao Float8Linear (`fsdp2_fp8.py`)
|
||||
|
||||
In `fsdp2_fp8.py` we use `Float8Linear` from `ao` to train a model partially in FP8 precision. We utilize `AORecipeKwargs` to pass the `Float8LinearConfig` to the accelerator,
which replaces the default `torch.nn.Linear` with `Float8Linear`. We also utilize `TorchDynamoPlugin` together with regional compilation to compile the model,
gaining even more speed and memory savings; since `ao` doesn't ship with any kernels by default, the performance gains come from compiling the model (a minimal sketch of this wiring follows below).
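A minimal sketch of that wiring (the model id and optimizer are placeholders; the complete version lives in `fsdp2_fp8.py`):

```python
import torch
from torchao.float8 import Float8LinearConfig
from transformers import AutoModelForCausalLM

from accelerate import Accelerator
from accelerate.utils import AORecipeKwargs, TorchDynamoPlugin

dynamo_plugin = TorchDynamoPlugin(backend="inductor", use_regional_compilation=True)
fp8_config = Float8LinearConfig(enable_fsdp_float8_all_gather=True)

accelerator = Accelerator(
    dynamo_plugin=dynamo_plugin,
    kwargs_handlers=[AORecipeKwargs(config=fp8_config)],
)

model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/SmolLM-360M", torch_dtype=torch.bfloat16)  # placeholder model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)

# `prepare` is where eligible nn.Linear layers are swapped for Float8Linear and regional compilation is applied.
model, optimizer = accelerator.prepare(model, optimizer)
```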
Replacing linear layers with `Float8Linear` can greatly improve performance if used correctly and on hardware that supports FP8 tensor cores. This highly depends on the model dimensions and the sequence length used for training.
You can view the performance of `Float8Linear` as a function of matrix dimensions in [this document](https://github.com/pytorch/ao/blob/main/torchao/float8/README.md#performance).

In our example, we use an 8B Llama3.1 model, which has a hidden dimension of 4096, and we train on a sequence length of 8192. In the images below, we can see that this improves performance by ~25% compared to `bf16`, reaching ~10000 tokens per second per device on 8x H100 GPUs, compared to ~8000 tokens per second using `bf16`, while the loss curve stays roughly the same. We can also see that the FLOPS increase when using FP8.
|
||||
|
||||
<div style="display: flex; gap: 25px;">
|
||||
<div style="text-align: center; width: 49%;">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/fp8_tps.png" alt="tps" style="width: 100%;">
|
||||
<p style="text-align: center; margin-top: 8px;">TPs per device, bf16 vs fp8</p>
|
||||
</div>
|
||||
<div style="text-align: center; width: 49%;">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/fp8_tflops.png" alt="tflops" style="width: 100%;">
|
||||
<p style="text-align: center; margin-top: 8px;">TFLOPS per device, bf16 vs fp8. We cannot really compare MFU as fp8 tensor cores are used as well.</p>
|
||||
</div>
|
||||
|
||||
<div style="text-align: center; width: 49%;">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/fp8_loss.png" alt="loss" style="width: 100%; max-width: 900px;">
|
||||
<p style="text-align: center; margin-top: 8px;">Loss curve, bf16 vs fp8, it's hard to see the difference as the curves mostly overlap</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
The figures above were generated on 8x H100 SXM GPUs, with 8192 sequence length and 1000 steps. To run the example, you can use the following command, where you can specify the precision to train in:
|
||||
|
||||
```bash
|
||||
accelerate launch fsdp2_fp8.py --sequence_length 8192 --num_steps 1000 --log_with wandb --precision [fp8 | bf16]
|
||||
```
|
||||
|
||||
## FSDP2 + context parallelism (`fsdp2_context_parallel.py`)
|
||||
|
||||
In this file, we showcase the integration of context parallelism with FSDP2. Context parallelism is a technique that allows us to scale training to sequence lengths of up to a million tokens. With the `accelerator.context_parallel` context manager, we replace the attention implementation with a context parallel version, which enables us to train on a sequence length of up to 128k tokens on 8x H100 GPUs, with the possibility of near-endless scaling if we have enough GPUs.
|
||||
|
||||
For a detailed explanation and more details, please refer to [this guide](https://huggingface.co/docs/accelerate/concept_guides/context_parallel). You can run the example with the following command:
|
||||
|
||||
```bash
|
||||
accelerate launch fsdp2_context_parallel.py --sequence_length 128000 --num_steps 1000 --log_with wandb --cp_size 8 --cp_comm_strategy allgather
|
||||
```
|
||||
|
||||
More details about the context parallelism can be found in the [concept guide](https://huggingface.co/docs/accelerate/concept_guides/context_parallel). You can see some results below:
|
||||
|
||||
<p align="center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/cp_perf.png" alt="context parallelism memory usage" />
|
||||
<br>
|
||||
<em>Figure 1: Memory usage and speed of context parallelism for up-to 256k context size.</em>
|
||||
</p>
|
||||
|
||||
|
||||
|
||||
|
||||
@ -1,179 +0,0 @@
|
||||
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Example of training with Context Parallel using FSDP2 via Accelerate.
|
||||
This example demonstrates how to use Accelerate's context_parallel feature for efficient long sequence training.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
|
||||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForCausalLM
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import FullyShardedDataParallelPlugin, set_seed
|
||||
from utils import PerformanceTracker, create_collate_fn, get_dataset, setup_tokenizer
|
||||
|
||||
|
||||
MODEL_ID = "NousResearch/Hermes-3-Llama-3.1-8B"
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--sequence-length", type=int, default=128_000, help="Sequence length for the dataset")
|
||||
parser.add_argument("--num-steps", type=int, default=100, help="Number of training steps")
|
||||
parser.add_argument("--log-with", type=str, default="wandb", help="Logging service to use")
|
||||
parser.add_argument("--cp-size", type=int, default=8, help="Context parallel size")
|
||||
parser.add_argument("--cp-comm-strategy", type=str, default="allgather", help="Context parallel shard rotation")
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def training_step(batch, model, optimizer, accelerator: Accelerator):
|
||||
"""
|
||||
Perform a single training step with context parallel.
|
||||
|
||||
Args:
|
||||
batch: Input batch containing input_ids and labels
|
||||
model: The model to train
|
||||
optimizer: Optimizer
|
||||
accelerator: Accelerator instance
|
||||
|
||||
Returns:
|
||||
loss: Training loss
|
||||
"""
|
||||
|
||||
# Use context parallel for efficient long sequence processing
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.AVG)
|
||||
|
||||
return loss
|
||||
|
||||
|
||||
def main():
|
||||
set_seed(42)
|
||||
args = parse_args()
|
||||
|
||||
# Configure FSDP2 plugin
|
||||
fsdp_plugin = FullyShardedDataParallelPlugin(
|
||||
auto_wrap_policy="transformer_based_wrap",
|
||||
transformer_cls_names_to_wrap=["LlamaDecoderLayer"],
|
||||
cpu_ram_efficient_loading=True,
|
||||
activation_checkpointing=True,
|
||||
fsdp_version=2,
|
||||
cp_size=args.cp_size,
|
||||
cp_comm_strategy=args.cp_comm_strategy,
|
||||
)
|
||||
|
||||
# Initialize accelerator
|
||||
accelerator = Accelerator(
|
||||
log_with=args.log_with,
|
||||
fsdp_plugin=fsdp_plugin,
|
||||
mixed_precision="bf16",
|
||||
)
|
||||
|
||||
accelerator.init_trackers(
|
||||
project_name="FSDP2_context_parallel",
|
||||
config={
|
||||
"sequence_length": args.sequence_length,
|
||||
"num_steps": args.num_steps,
|
||||
"cp_size": args.cp_size,
|
||||
"cp_comm_strategy": args.cp_comm_strategy,
|
||||
},
|
||||
)
|
||||
|
||||
# Prepare model and optimizer
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
MODEL_ID,
|
||||
torch_dtype=torch.bfloat16,
|
||||
use_cache=False,
|
||||
)
|
||||
|
||||
tokenizer = setup_tokenizer(MODEL_ID)
|
||||
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)
|
||||
|
||||
model, optimizer = accelerator.prepare(model, optimizer)
|
||||
|
||||
accelerator.print("Preparing dataset... this might take a while")
|
||||
dataset = get_dataset(
|
||||
accelerator,
|
||||
tokenizer,
|
||||
args.sequence_length,
|
||||
processing_batch_size=args.sequence_length
|
||||
// 20, # we need to override the default processing batch size to avoid empty packed sequences
|
||||
)
|
||||
dataloader = DataLoader(dataset, batch_size=1, collate_fn=create_collate_fn())
|
||||
dataloader = accelerator.prepare(dataloader)
|
||||
|
||||
model.train()
|
||||
|
||||
total_num_steps = min(args.num_steps, len(dataloader))
|
||||
performance_tracker = PerformanceTracker(warmup_steps=10)
|
||||
|
||||
accelerator.print(f"Starting training with context parallel for {total_num_steps} steps...")
|
||||
accelerator.print(f"Sequence length: {args.sequence_length}")
|
||||
accelerator.print("Warming up for 10 steps...")
|
||||
|
||||
accelerator.print(
|
||||
"Each step takes ~10 seconds with default settings on 8x H100 SXM GPUs, seeing logs takes a while"
|
||||
)
|
||||
for step, batch in enumerate(dataloader):
|
||||
print(f"Step {step}")
|
||||
if step >= total_num_steps:
|
||||
break
|
||||
|
||||
# get number of tokens before context_parallel shards the batch
|
||||
batch_tokens = batch["input_ids"].shape[0] * batch["input_ids"].shape[1]
|
||||
|
||||
loss = training_step(batch, model, optimizer, accelerator)
|
||||
|
||||
# each batch gets the same data, we divide by the number of processes to get the number of tokens per process
|
||||
metrics = performance_tracker.step(batch_tokens // accelerator.num_processes)
|
||||
|
||||
log_metrics = {"loss": loss.item()}
|
||||
|
||||
if "warmup_completed" in metrics:
|
||||
accelerator.print("Warmup completed! Starting performance tracking...")
|
||||
elif metrics:
|
||||
log_metrics.update(
|
||||
{
|
||||
"tokens_per_second": int(metrics["tokens_per_second"]),
|
||||
"steps_per_second": metrics["steps_per_second"],
|
||||
}
|
||||
)
|
||||
|
||||
if (step % 10 == 0 or step == total_num_steps - 1) and metrics:
|
||||
accelerator.print(
|
||||
f"Step {step}/{total_num_steps} | "
|
||||
f"Loss: {loss.item():.4f} | "
|
||||
f"Tokens/s: {int(metrics['tokens_per_second'])} | "
|
||||
f"Steps/s: {metrics['steps_per_second']:.2f} | "
|
||||
)
|
||||
|
||||
accelerator.log(log_metrics)
|
||||
|
||||
accelerator.wait_for_everyone()
|
||||
accelerator.end_training()
|
||||
accelerator.print("Training completed!")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@ -1,157 +0,0 @@
|
||||
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Minimal example of training with FP8 precision using FSDP2 via Accelerate.
|
||||
This example demonstrates how to use torchao's Float8LinearConfig with Accelerate's AORecipeKwargs.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
|
||||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
from torchao.float8 import Float8LinearConfig
|
||||
from transformers import AutoConfig, AutoModelForCausalLM
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import AORecipeKwargs, FullyShardedDataParallelPlugin, TorchDynamoPlugin, set_seed
|
||||
from utils import PerformanceTracker, create_collate_fn, get_dataset, get_model_flops_per_token, setup_tokenizer
|
||||
|
||||
|
||||
MODEL_ID = "NousResearch/Hermes-3-Llama-3.1-8B"
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
parser.add_argument("--sequence-length", type=int, default=8192, help="Sequence length for the dataset")
|
||||
parser.add_argument("--num-steps", type=int, default=1000, help="Number of steps to train for")
|
||||
parser.add_argument("--precision", type=str, default="fp8", choices=["fp8", "bf16"], help="Precision to train in")
|
||||
parser.add_argument("--log-with", type=str, default="wandb", help="Log with wandb or tensorboard")
|
||||
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Main function to train the model.
|
||||
"""
|
||||
set_seed(42)
|
||||
|
||||
args = parse_args()
|
||||
|
||||
fsdp2_plugin = FullyShardedDataParallelPlugin(
|
||||
fsdp_version=2,
|
||||
cpu_ram_efficient_loading=False, # CPU RAM efficient loading CANNOT work with fp8 torchao
|
||||
auto_wrap_policy="transformer_based_wrap",
|
||||
transformer_cls_names_to_wrap=["LlamaDecoderLayer"],
|
||||
)
|
||||
fsdp2_plugin.set_mixed_precision(args.precision)
|
||||
|
||||
dynamo_plugin = TorchDynamoPlugin(
|
||||
backend="inductor",
|
||||
use_regional_compilation=True, # We use regional compilation to compile the model way faster
|
||||
)
|
||||
|
||||
fp8_config = Float8LinearConfig(
|
||||
enable_fsdp_float8_all_gather=True, # extra saving by gathering parameters in fp8 and upcasting after
|
||||
force_recompute_fp8_weight_in_bwd=True,
|
||||
)
|
||||
|
||||
kwargs = []
|
||||
if args.precision == "fp8":
|
||||
kwargs = [AORecipeKwargs(config=fp8_config)]
|
||||
|
||||
accelerator = Accelerator(
|
||||
fsdp_plugin=fsdp2_plugin,
|
||||
dynamo_plugin=dynamo_plugin,
|
||||
kwargs_handlers=kwargs,
|
||||
log_with=args.log_with,
|
||||
)
|
||||
accelerator.init_trackers(
|
||||
project_name="FSDP2_torchao_fp8",
|
||||
config={"sequence_length": args.sequence_length, "num_steps": args.num_steps},
|
||||
)
|
||||
|
||||
model = AutoModelForCausalLM.from_config(
|
||||
AutoConfig.from_pretrained(MODEL_ID, use_cache=False),
|
||||
torch_dtype=torch.bfloat16,
|
||||
)
|
||||
|
||||
tokenizer = setup_tokenizer(MODEL_ID)
|
||||
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)
|
||||
|
||||
model, optimizer = accelerator.prepare(model, optimizer)
|
||||
|
||||
dataset = get_dataset(accelerator, tokenizer, args.sequence_length)
|
||||
dataloader = DataLoader(dataset, batch_size=1, collate_fn=create_collate_fn())
|
||||
dataloader = accelerator.prepare(dataloader)
|
||||
|
||||
model.train()
|
||||
|
||||
total_num_steps = min(args.num_steps, len(dataloader))
|
||||
model_flops_per_token = get_model_flops_per_token(model, args.sequence_length)
|
||||
performance_tracker = PerformanceTracker(warmup_steps=10)
|
||||
|
||||
accelerator.print(f"Starting training with {args.precision} precision for {total_num_steps} steps...")
|
||||
accelerator.print(f"Sequence length: {args.sequence_length}")
|
||||
accelerator.print("Warming up for 10 steps...")
|
||||
|
||||
for step, batch in enumerate(dataloader):
|
||||
if step >= total_num_steps:
|
||||
break
|
||||
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
batch_tokens = batch["input_ids"].shape[1]
|
||||
metrics = performance_tracker.step(batch_tokens)
|
||||
|
||||
print_msg = f"Step {step}/{total_num_steps}, Loss: {loss.item():.4f}"
|
||||
log_metrics = {"loss": loss.item()}
|
||||
|
||||
if "warmup_completed" in metrics:
|
||||
accelerator.print("Warm up completed! Starting performance tracking...")
|
||||
elif metrics:
|
||||
tps = metrics["tokens_per_second"]
|
||||
tflops = metrics["total_tokens"] * model_flops_per_token / (metrics["total_time"] * 1e12)
|
||||
|
||||
# it's rather hard to get a good estimate of MFU as we train with FP8, so both FP8 and BF16 tensor cores are used, therefore we just report TFLOPS (Tera floating point operations per second)
|
||||
# Given H100 SXM, the theoretical peak flops are ~990 TFLOPS for bf16 and ~1980 TFLOPS for fp8 [https://resources.nvidia.com/en-us-gpu-resources/h100-datasheet-24306]
|
||||
# This is WITH sparsity, so we divide by 2 to get the answer w/o sparsity
|
||||
print_msg += f" | Average steps/s: {metrics['steps_per_second']:.2f} | TPS per device: {tps:.2f} | TFLOPS per device: {tflops:.2f}"
|
||||
log_metrics.update(
|
||||
{
|
||||
"steps_per_second": metrics["steps_per_second"],
|
||||
"tps_per_device": tps,
|
||||
"tflops_per_device": tflops,
|
||||
}
|
||||
)
|
||||
|
||||
if step % 10 == 0 or step == total_num_steps - 1:
|
||||
accelerator.print(print_msg)
|
||||
|
||||
accelerator.log(log_metrics)
|
||||
|
||||
accelerator.wait_for_everyone()
|
||||
accelerator.end_training()
|
||||
accelerator.print("Training completed!")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@ -1,181 +0,0 @@
|
||||
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Common utilities for FSDP2 examples.
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
import torch
|
||||
from datasets import Dataset, load_dataset
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
from accelerate import Accelerator
|
||||
|
||||
|
||||
def get_dataset(
|
||||
accelerator: Accelerator,
|
||||
tokenizer: AutoTokenizer,
|
||||
seq_len: int,
|
||||
processing_batch_size: int = 1000,
|
||||
) -> Dataset:
|
||||
"""
|
||||
Load and prepare TinyStories dataset.
|
||||
|
||||
Args:
|
||||
accelerator (Accelerator): Accelerate accelerator instance
|
||||
tokenizer (AutoTokenizer): Hugging Face tokenizer
|
||||
seq_len (int): Sequence length for the dataset
|
||||
processing_batch_size (int): Batch size for processing the dataset
|
||||
|
||||
Returns:
|
||||
Dataset: Packed dataset
|
||||
"""
|
||||
raw_dataset = load_dataset("roneneldan/TinyStories", split="train[:50%]")
|
||||
|
||||
def tokenize_function(examples):
|
||||
tokenized_batch = tokenizer(
|
||||
examples["text"],
|
||||
padding=False,
|
||||
truncation=True,
|
||||
max_length=seq_len,
|
||||
return_tensors=None,
|
||||
)
|
||||
tokenized_batch["labels"] = tokenized_batch["input_ids"].copy()
|
||||
return tokenized_batch
|
||||
|
||||
with accelerator.main_process_first():
|
||||
tokenized_dataset = raw_dataset.map(
|
||||
tokenize_function, batched=True, remove_columns=["text"], batch_size=processing_batch_size
|
||||
)
|
||||
|
||||
def create_packed_sequences(examples):
|
||||
all_tokens = []
|
||||
for input_ids in examples["input_ids"]:
|
||||
all_tokens.extend(input_ids)
|
||||
|
||||
num_sequences = len(all_tokens) // (seq_len + 1)
|
||||
packed_input_ids = []
|
||||
packed_labels = []
|
||||
|
||||
for i in range(num_sequences):
|
||||
start_idx = i * (seq_len + 1)
|
||||
end_idx = start_idx + (seq_len + 1)
|
||||
full_sequence = all_tokens[start_idx:end_idx]
|
||||
packed_input_ids.append(full_sequence[:-1])
|
||||
packed_labels.append(full_sequence[1:])
|
||||
|
||||
return {"input_ids": packed_input_ids, "labels": packed_labels}
|
||||
|
||||
with accelerator.main_process_first():
|
||||
packed_dataset = tokenized_dataset.map(
|
||||
create_packed_sequences,
|
||||
batched=True,
|
||||
remove_columns=tokenized_dataset.column_names,
|
||||
batch_size=processing_batch_size,
|
||||
)
|
||||
|
||||
return packed_dataset.shuffle(seed=42)
|
||||
|
||||
|
||||
def get_model_flops_per_token(model: AutoModelForCausalLM, seq_len: int) -> float:
|
||||
"""
|
||||
Get the number of flops per token for the model.
|
||||
|
||||
Args:
|
||||
model (AutoModelForCausalLM): Model to get the flops for
|
||||
seq_len (int): Sequence length
|
||||
"""
|
||||
cfg = model.config
|
||||
head_dim = cfg.hidden_size // cfg.num_attention_heads
|
||||
|
||||
# MLP: 3 matmuls
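# (assumption for clarity: the factor 18 = 3 weight matrices x 2 FLOPs per multiply-add x 3 for forward plus backward, treating backward as roughly 2x forward)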
|
||||
mlp_flops = 18 * cfg.hidden_size * cfg.intermediate_size
|
||||
|
||||
# Attn (w/o dotproduct)
|
||||
attn_flops = 12 * head_dim * (cfg.num_attention_heads + cfg.num_key_value_heads)
|
||||
|
||||
# attn (dotproduct) - this scales quadratically with sequence length
|
||||
attn_dotproduct_flops = 12 * cfg.num_attention_heads * head_dim * seq_len
|
||||
|
||||
# we also ignore embeddings and layernorms, etc
|
||||
return (mlp_flops + attn_flops + attn_dotproduct_flops) * cfg.num_hidden_layers
|
||||
|
||||
|
||||
def create_collate_fn():
|
||||
"""Create a collate function for batching."""
|
||||
|
||||
def collate_fn(batch):
|
||||
input_ids = torch.tensor([item["input_ids"] for item in batch], dtype=torch.long)
|
||||
labels = torch.tensor([item["labels"] for item in batch], dtype=torch.long)
|
||||
return {"input_ids": input_ids, "labels": labels}
|
||||
|
||||
return collate_fn
|
||||
|
||||
|
||||
class PerformanceTracker:
|
||||
"""Track training performance metrics."""
|
||||
|
||||
def __init__(self, warmup_steps: int = 10):
|
||||
self.warmup_steps = warmup_steps
|
||||
self.reset()
|
||||
|
||||
def reset(self):
|
||||
"""Reset all tracking variables."""
|
||||
self.start_time = None
|
||||
self.num_tokens = 0
|
||||
self.is_in_warmup = True
|
||||
self.step_count = 0
|
||||
|
||||
def step(self, batch_tokens: int) -> dict:
|
||||
"""
|
||||
Update performance tracking with a new step.
|
||||
|
||||
Args:
|
||||
batch_tokens (int): Number of tokens in current batch
|
||||
|
||||
Returns:
|
||||
dict: Performance metrics if past warmup, empty dict otherwise
|
||||
"""
|
||||
self.step_count += 1
|
||||
|
||||
if self.step_count == self.warmup_steps:
|
||||
self.start_time = time.perf_counter()
|
||||
self.num_tokens = 0
|
||||
self.is_in_warmup = False
|
||||
return {"warmup_completed": True}
|
||||
|
||||
if not self.is_in_warmup and self.start_time is not None:
|
||||
self.num_tokens += batch_tokens
|
||||
total_time = time.perf_counter() - self.start_time
|
||||
steps_from_warmup = self.step_count - self.warmup_steps
|
||||
|
||||
if total_time > 0 and steps_from_warmup > 0:
|
||||
return {
|
||||
"tokens_per_second": self.num_tokens / total_time,
|
||||
"steps_per_second": steps_from_warmup / total_time,
|
||||
"total_tokens": self.num_tokens,
|
||||
"total_time": total_time,
|
||||
}
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
def setup_tokenizer(model_id: str) -> AutoTokenizer:
|
||||
"""Setup tokenizer with proper padding token."""
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
||||
if tokenizer.pad_token is None:
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
return tokenizer
|
||||
@ -1,234 +0,0 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
import json
|
||||
import os
|
||||
import pathlib
|
||||
import queue
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from typing import Union
|
||||
|
||||
import fire
|
||||
import scipy.io.wavfile
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoTokenizer, VitsModel
|
||||
|
||||
from accelerate import PartialState
|
||||
from accelerate.utils import tqdm
|
||||
|
||||
|
||||
"""
|
||||
Requirements: transformers accelerate fire scipy datasets
|
||||
pip install transformers accelerate fire scipy datasets
|
||||
Example usage:
|
||||
accelerate launch distributed_speech_generation.py --output_path outputs --batch_size 8 --num_workers 2 --dataset_split train
|
||||
"""
|
||||
|
||||
"""
|
||||
To run the speech generation
|
||||
import scipy.io.wavfile
|
||||
import numpy as np
|
||||
from IPython.display import Audio
|
||||
sample_rate, audio_data = scipy.io.wavfile.read('path_to_your_wav_file.wav')
|
||||
audio_data = audio_data.astype(np.float32) / 32768.0
|
||||
Audio(audio_data, rate=sample_rate)
|
||||
"""
|
||||
|
||||
|
||||
def load_pokemon_data(split: str, max_text_length: int):
|
||||
"""Load Pokemon descriptions from the dataset"""
|
||||
ds = load_dataset("svjack/pokemon-blip-captions-en-zh", split=split)
|
||||
|
||||
# Create dataset of dictionaries
|
||||
dataset = []
|
||||
for idx, text in enumerate(ds["en_text"]):
|
||||
if len(text.strip()) > 0: # Skip empty descriptions
|
||||
dataset.append(
|
||||
{
|
||||
"id": f"pokemon_{idx:06d}",
|
||||
"text": text.strip()[:max_text_length], # Truncate long descriptions
|
||||
"original_text": text.strip(), # Keep original for metadata
|
||||
}
|
||||
)
|
||||
return dataset
|
||||
|
||||
|
||||
class ExistsFilter:
|
||||
def __init__(self, output_dir: Union[pathlib.Path, str]):
|
||||
current_files = [f.split(".wav")[0] for f in os.listdir(output_dir) if f.endswith(".wav")]
|
||||
self.processed_files = set(current_files)
|
||||
print(f"Existing audio files found: {len(self.processed_files)}.")
|
||||
|
||||
def __call__(self, x):
|
||||
return x["id"] not in self.processed_files
|
||||
|
||||
|
||||
def preprocess_fn(sample, tokenizer, max_text_length: int):
|
||||
inputs = tokenizer(sample["text"], padding=False, truncation=True, max_length=max_text_length, return_tensors="pt")
|
||||
|
||||
return {
|
||||
"input_ids": inputs["input_ids"][0].tolist(),
|
||||
"attention_mask": inputs["attention_mask"][0].tolist(),
|
||||
"id": sample["id"],
|
||||
"text": sample["text"],
|
||||
"original_text": sample["original_text"],
|
||||
}
|
||||
|
||||
|
||||
def collate_fn(examples, tokenizer):
|
||||
"""Collate batch of examples with proper padding"""
|
||||
# Find max length in this batch
|
||||
max_length = max(len(example["input_ids"]) for example in examples)
|
||||
|
||||
# Pad sequences to max_length
|
||||
input_ids_list = []
|
||||
attention_mask_list = []
|
||||
|
||||
for example in examples:
|
||||
# Get current lengths
|
||||
curr_len = len(example["input_ids"])
|
||||
padding_length = max_length - curr_len
|
||||
|
||||
# Pad sequences
|
||||
padded_input_ids = example["input_ids"] + [tokenizer.pad_token_id] * padding_length
|
||||
padded_attention_mask = example["attention_mask"] + [0] * padding_length
|
||||
|
||||
input_ids_list.append(padded_input_ids)
|
||||
attention_mask_list.append(padded_attention_mask)
|
||||
|
||||
# Convert to tensors
|
||||
input_ids = torch.tensor(input_ids_list, dtype=torch.long)
|
||||
attention_mask = torch.tensor(attention_mask_list, dtype=torch.long)
|
||||
|
||||
ids = [example["id"] for example in examples]
|
||||
texts = [example["text"] for example in examples]
|
||||
original_texts = [example["original_text"] for example in examples]
|
||||
|
||||
return {
|
||||
"input_ids": input_ids,
|
||||
"attention_mask": attention_mask,
|
||||
"ids": ids,
|
||||
"texts": texts,
|
||||
"original_texts": original_texts,
|
||||
}
|
||||
|
||||
|
||||
def create_dataloader(dataset, batch_size, distributed_state, tokenizer):
|
||||
"""Create dataloader with preprocessing"""
|
||||
processed_dataset = [preprocess_fn(item, tokenizer, max_text_length=200) for item in dataset]
|
||||
|
||||
# Split dataset for distributed processing
|
||||
if distributed_state.num_processes > 1:
|
||||
chunk_size = len(processed_dataset) // distributed_state.num_processes
|
||||
start_idx = distributed_state.process_index * chunk_size
|
||||
end_idx = (
|
||||
start_idx + chunk_size
|
||||
if distributed_state.process_index < distributed_state.num_processes - 1
|
||||
else len(processed_dataset)
|
||||
)
|
||||
processed_dataset = processed_dataset[start_idx:end_idx]
|
||||
|
||||
# Create batches
|
||||
batches = []
|
||||
for i in range(0, len(processed_dataset), batch_size):
|
||||
batch = processed_dataset[i : i + batch_size]
|
||||
batches.append(collate_fn(batch, tokenizer))
|
||||
return batches
|
||||
|
||||
|
||||
def save_results(output_queue: queue.Queue, output_dir: pathlib.Path, sampling_rate: int):
|
||||
while True:
|
||||
try:
|
||||
item = output_queue.get(timeout=5)
|
||||
if item is None:
|
||||
break
|
||||
waveforms, ids, texts, original_texts = item
|
||||
|
||||
# Save each audio file and its metadata
|
||||
for waveform, file_id, text, original_text in zip(waveforms, ids, texts, original_texts):
|
||||
# Save audio
|
||||
wav_path = output_dir / f"{file_id}.wav"
|
||||
scipy.io.wavfile.write(wav_path, rate=sampling_rate, data=waveform.cpu().float().numpy())
|
||||
|
||||
# Save metadata with both truncated and original text
|
||||
metadata = {
|
||||
"text_used": text,
|
||||
"original_text": original_text,
|
||||
"model": "facebook/mms-tts-eng",
|
||||
"sampling_rate": sampling_rate,
|
||||
}
|
||||
metadata_path = output_dir / f"{file_id}_metadata.json"
|
||||
with metadata_path.open("w") as f:
|
||||
json.dump(metadata, f, indent=4)
|
||||
|
||||
except queue.Empty:
|
||||
continue
|
||||
|
||||
|
||||
def main(
|
||||
output_path: str = "speech_data",
|
||||
batch_size: int = 8,
|
||||
num_workers: int = 2,
|
||||
dataset_split: str = "train",
|
||||
model_name: str = "facebook/mms-tts-eng",
|
||||
max_text_length: int = 200,
|
||||
):
|
||||
output_dir = pathlib.Path(output_path)
|
||||
output_dir.mkdir(parents=True, exist_ok=True)
|
||||
distributed_state = PartialState()
|
||||
|
||||
# Load model and tokenizer
|
||||
model = VitsModel.from_pretrained(
|
||||
model_name,
|
||||
device_map=distributed_state.device,
|
||||
torch_dtype=torch.float32,
|
||||
)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||
|
||||
# Load and filter data
|
||||
dataset = load_pokemon_data(dataset_split, max_text_length)
|
||||
exist_filter = ExistsFilter(output_dir)
|
||||
dataset = [item for item in dataset if exist_filter(item)]
|
||||
|
||||
distributed_state.print(f"Processing {len(dataset)} Pokemon descriptions")
|
||||
|
||||
# Create dataloader
|
||||
batches = create_dataloader(dataset, batch_size, distributed_state, tokenizer)
|
||||
|
||||
# Setup output queue and save thread
|
||||
output_queue = queue.Queue()
|
||||
save_thread = ThreadPoolExecutor(max_workers=num_workers)
|
||||
save_future = save_thread.submit(save_results, output_queue, output_dir, model.config.sampling_rate)
|
||||
|
||||
try:
|
||||
for batch in tqdm(batches, desc="Generating Pokemon descriptions"):
|
||||
with torch.no_grad():
|
||||
outputs = model(
|
||||
input_ids=batch["input_ids"].to(distributed_state.device, dtype=torch.long),
|
||||
attention_mask=batch["attention_mask"].to(distributed_state.device, dtype=torch.long),
|
||||
).waveform
|
||||
|
||||
output_queue.put((outputs, batch["ids"], batch["texts"], batch["original_texts"]))
|
||||
finally:
|
||||
output_queue.put(None)
|
||||
save_thread.shutdown(wait=True)
|
||||
|
||||
save_future.result()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
fire.Fire(main)
|
||||
@ -1,202 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import pathlib
import queue
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import Union

import fire
import torch
import webdataset as wds
from huggingface_hub.utils import insecure_hashlib
from PIL import Image
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoProcessor

from accelerate import PartialState


"""
Additional requirements: flash_attn einops timm webdataset fire tqdm huggingface_hub
pip install flash_attn einops timm webdataset fire tqdm huggingface_hub

Example:

accelerate launch --num_processes=2 florence2.py --data_path "https://huggingface.co/datasets/pixparse/cc3m-wds/resolve/main/cc3m-train-0000.tar" --output_path outputs --batch_size 12 --num_workers 1 --prompt "<CAPTION>"
"""


def main(
    data_path: str,
    output_path: str,
    batch_size: int,
    num_workers: int,
    prompt: str = "<MORE_DETAILED_CAPTION>",
    model_name: str = "microsoft/Florence-2-large",
    max_new_tokens: int = 1024,
    num_beams: int = 3,
):
    output_dir = pathlib.Path(output_path)

    distributed_state = PartialState()

    if distributed_state.is_main_process:
        output_dir.mkdir(exist_ok=True)

    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map=distributed_state.device,
        torch_dtype=torch.float16,
        trust_remote_code=True,
    )

    processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True, clean_up_tokenization_spaces=True)

    class ExistsFilter:
        def __init__(self, output_dir: Union[pathlib.Path, str]):
            current_training_img_hashes = [f.split(".jpg")[0] for f in os.listdir(output_dir) if f.endswith(".jpg")]
            self.current_training_img_hashes = set(current_training_img_hashes)
            if distributed_state.is_main_process:
                print(f"Existing images found: {len(self.current_training_img_hashes)}.")

        def __call__(self, x):
            if len(self.current_training_img_hashes) > 0:
                if x["img_hash"] in self.current_training_img_hashes:
                    return False
                else:
                    return True
            else:
                return True

    def preprocess_fn(sample, processor):
        image: Image.Image = sample["jpg"].convert("RGB")
        img_hash = insecure_hashlib.sha1(image.tobytes()).hexdigest()
        inputs = processor(
            text=prompt,
            images=image,
            return_tensors="pt",
        )
        return {
            "input_ids": inputs["input_ids"],
            "pixel_values": inputs["pixel_values"],
            "image": image,
            "img_hash": img_hash,
            "original_caption": sample["txt"],
        }

    def collate_fn(examples):
        input_ids = torch.cat([example["input_ids"] for example in examples])
        pixel_values = torch.cat([example["pixel_values"] for example in examples])
        images = [example["image"] for example in examples]
        img_hashes = [example["img_hash"] for example in examples]
        captions = [example["original_caption"] for example in examples]
        return {
            "input_ids": input_ids,
            "pixel_values": pixel_values,
            "images": images,
            "img_hashes": img_hashes,
            "original_captions": captions,
        }

    exist_filter = ExistsFilter(output_dir)
    dataset = (
        wds.WebDataset(
            data_path,
            handler=wds.warn_and_continue,
            nodesplitter=None,
            shardshuffle=False,
            empty_check=False,
        )
        .decode("pil", handler=wds.warn_and_continue)
        .map(partial(preprocess_fn, processor=processor), handler=wds.warn_and_continue)
    )
    if len(exist_filter.current_training_img_hashes) > 0:
        dataset = dataset.select(exist_filter)
    dataset = dataset.batched(
        batch_size,
        partial=False,
        collation_fn=collate_fn,
    )
    dataloader = wds.WebLoader(
        dataset,
        batch_size=None,
        num_workers=num_workers,
        pin_memory=True,
        persistent_workers=True,
    )

    def save_results(output_queue: queue.Queue, output_dir: pathlib.Path, processor):
        while True:
            try:
                item = output_queue.get(timeout=5)
                if item is None:
                    break
                original_captions, predictions, images, img_hashes = item
                predicted_captions = processor.batch_decode(
                    predictions,
                    skip_special_tokens=False,
                )
                for caption, pred_caption, image, img_hash in zip(
                    original_captions, predicted_captions, images, img_hashes
                ):
                    processed_caption = processor.post_process_generation(
                        pred_caption, task=prompt, image_size=(image.width, image.height)
                    )[prompt]
                    img_path = output_dir.joinpath(f"{img_hash}.jpg")
                    image.save(img_path)

                    caption_dict = {"original": caption, "predicted": processed_caption}
                    with output_dir.joinpath(f"{img_hash}_caption.json").open("w") as f:
                        json.dump(caption_dict, f, indent=4)

            except queue.Empty:
                continue

    output_queue = queue.Queue()
    save_thread = ThreadPoolExecutor(max_workers=num_workers)
    save_future = save_thread.submit(save_results, output_queue, output_dir, processor)

    try:
        for _, batch_raw in tqdm(
            enumerate(dataloader),
            disable=not distributed_state.is_main_process,
        ):
            with distributed_state.split_between_processes(batch_raw) as batch:
                outputs = model.generate(
                    input_ids=batch["input_ids"].to(distributed_state.device),
                    pixel_values=batch["pixel_values"].to(distributed_state.device, model.dtype),
                    max_new_tokens=max_new_tokens,
                    num_beams=num_beams,
                )
                output_queue.put(
                    (
                        batch["original_captions"],
                        outputs,
                        batch["images"],
                        batch["img_hashes"],
                    )
                )
    finally:
        output_queue.put(None)
        save_thread.shutdown(wait=True)

    save_future.result()


if __name__ == "__main__":
    fire.Fire(main)
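The script above shards each incoming batch with distributed_state.split_between_processes. A minimal sketch of that call in isolation (assuming a launch such as accelerate launch --num_processes=2 script.py; the prompt list is purely illustrative):

from accelerate import PartialState

state = PartialState()
prompts = ["a cat", "a dog", "a bird", "a fish"]
with state.split_between_processes(prompts) as shard:
    # With two processes, rank 0 sees the first two prompts and rank 1 the last two.
    print(f"process {state.process_index} handles {shard}")
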
@ -1,192 +0,0 @@
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import pathlib
import queue
import time
from concurrent.futures import ThreadPoolExecutor

import av
import fire
import numpy as np
import torch
from huggingface_hub import snapshot_download
from tqdm import tqdm
from transformers import LlavaNextVideoForConditionalGeneration, LlavaNextVideoProcessor

from accelerate import PartialState


START_TIME = time.strftime("%Y%m%d_%H%M%S")
DTYPE_MAP = {"fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16}


"""
Example:

accelerate launch llava_next_video.py
"""


def save_results(output_queue: queue.Queue, output_dir: pathlib.Path):
    count = 0
    while True:
        try:
            item = output_queue.get(timeout=5)
            if item is None:
                break
            prompt, video, generated_text = item
            example_file = f"example_{count}"
            temp_dir = os.path.join(output_dir, example_file)

            metadata = {"prompt": prompt, "video": video, "generated_text": generated_text}
            with open(temp_dir, "w") as f:
                json.dump(metadata, f, indent=4)
            count += 1

        except queue.Empty:
            continue


def get_batches(processed_videos, batch_size):
    num_batches = (len(processed_videos) + batch_size - 1) // batch_size
    batches = []

    for i in range(num_batches):
        start_index = i * batch_size
        end_index = min((i + 1) * batch_size, len(processed_videos))
        batch = processed_videos[start_index:end_index]
        batches.append(batch)

    return batches


def read_video_pyav(container, indices):
    """
    Decode the video with PyAV decoder.
    Args:
        container (`av.container.input.InputContainer`): PyAV container.
        indices (`List[int]`): List of frame indices to decode.
    Returns:
        result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
    """
    frames = []
    container.seek(0)
    start_index = indices[0]
    end_index = indices[-1]
    for i, frame in enumerate(container.decode(video=0)):
        if i > end_index:
            break
        if i >= start_index and i in indices:
            frames.append(frame)
    return np.stack([x.to_ndarray(format="rgb24") for x in frames])


def get_video_paths(video_dir):
    """Get paths to all video files in the directory and its subdirectories."""
    video_extensions = (".mp4", ".avi", ".mov", ".mkv")  # Add more extensions if needed
    video_paths = []

    for root, _, files in os.walk(video_dir):
        for file in files:
            if file.lower().endswith(video_extensions):
                video_paths.append(os.path.join(root, file))

    return video_paths


def process_videos(video_paths, processor, prompt, frames_per_video):
    """Process a batch of videos and prepare them for the model."""
    batch_inputs = []

    for video_path in video_paths:
        try:
            with av.open(video_path) as container:
                total_frames = container.streams.video[0].frames
                indices = np.arange(0, total_frames, total_frames / frames_per_video).astype(int)
                clip = read_video_pyav(container, indices)

            processed = processor(text=prompt, videos=clip, return_tensors="pt")
            batch_inputs.append(
                {
                    "input_ids": processed["input_ids"],
                    "pixel_values_videos": processed["pixel_values_videos"],
                    "video": video_path,
                }
            )

        except Exception as e:
            print(f"Error processing video {video_path}: {str(e)}")
            continue

    return batch_inputs


def main(
    model_name: str = "llava-hf/LLaVA-NeXT-Video-7B-hf",
    save_dir: str = "./evaluation/examples",
    prompt: str = "USER: <video>\nGenerate caption ASSISTANT:",
    frames_per_video: int = 8,
    max_new_tokens: int = 100,
    batch_size: int = 4,
    dtype: str = "fp16",
    num_workers: int = 1,
    low_mem: bool = True,
):
    # Start up the distributed environment without needing the Accelerator.
    distributed_state = PartialState()

    processor = LlavaNextVideoProcessor.from_pretrained(model_name)
    model = LlavaNextVideoForConditionalGeneration.from_pretrained(
        model_name, torch_dtype=DTYPE_MAP[dtype], low_cpu_mem_usage=low_mem, device_map=distributed_state.device
    )

    if distributed_state.is_main_process:
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
            print(f"Directory '{save_dir}' created successfully.")
        else:
            print(f"Directory '{save_dir}' already exists.")

    videos_dir = snapshot_download(repo_id="malterei/LLaVA-Video-small-swift", repo_type="dataset")
    video_paths = get_video_paths(videos_dir)
    processed_videos = process_videos(video_paths, processor, prompt, frames_per_video)
    batches = get_batches(processed_videos, batch_size)

    output_queue = queue.Queue()
    save_thread = ThreadPoolExecutor(max_workers=num_workers)
    save_future = save_thread.submit(save_results, output_queue, save_dir)
    for _, batch_raw in tqdm(enumerate(batches), total=len(batches)):
        try:
            with distributed_state.split_between_processes(batch_raw) as batched_inputs:
                for batch in batched_inputs:
                    output = model.generate(
                        input_ids=batch["input_ids"].to(distributed_state.device),
                        pixel_values_videos=batch["pixel_values_videos"].to(distributed_state.device, model.dtype),
                        max_new_tokens=max_new_tokens,
                    )
                    generated_text = processor.batch_decode(output, skip_special_tokens=True)
                    output_queue.put((prompt, batch["video"], generated_text))
        finally:
            output_queue.put(None)
            save_thread.shutdown(wait=True)

    save_future.result()
    distributed_state.destroy_process_group()


if __name__ == "__main__":
    fire.Fire(main)
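process_videos above picks frames_per_video evenly spaced frames from each clip before handing them to read_video_pyav; a small sketch of that index computation with illustrative numbers:

import numpy as np

total_frames, frames_per_video = 100, 8
indices = np.arange(0, total_frames, total_frames / frames_per_video).astype(int)
print(indices)  # [ 0 12 25 37 50 62 75 87]
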
@ -18,7 +18,7 @@ from diffusers import DiffusionPipeline
from accelerate import PartialState  # Can also be Accelerator or AcceleratorState


pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
distributed_state = PartialState()
pipe.to(distributed_state.device)

@ -17,12 +17,9 @@ import torch
from transformers import AutoModelForMaskedLM

from accelerate import PartialState, prepare_pippy
from accelerate.test_utils import torch_device
from accelerate.utils import set_seed


synchronize_func = getattr(torch, torch_device, torch.cuda).synchronize

# Set the random seed to have reproducible outputs
set_seed(42)

@ -63,25 +60,25 @@ input = torch.randint(
)

# Move the inputs to the first device
input = input.to(torch_device)
input = input.to("cuda:0")

# Take an average of 5 times
# Measure first batch
synchronize_func()
torch.cuda.synchronize()
start_time = time.time()
with torch.no_grad():
    output = model(input)
synchronize_func()
torch.cuda.synchronize()
end_time = time.time()
first_batch = end_time - start_time

# Now that hpu is init, measure after
synchronize_func()
# Now that CUDA is init, measure after
torch.cuda.synchronize()
start_time = time.time()
for i in range(5):
    with torch.no_grad():
        output = model(input)
synchronize_func()
torch.cuda.synchronize()
end_time = time.time()

# The outputs are only on the final process by default

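The hunk above (and the matching one below) swaps the synchronization calls used by the pippy benchmark. The underlying measurement pattern is to time the first batch separately, since it absorbs one-time initialization, then average several steady-state runs. A minimal sketch of that pattern, with device_sync standing in for the relevant torch.<backend>.synchronize:

import time

def benchmark(fn, device_sync, steps=5):
    # First call: includes one-time initialization cost.
    device_sync()
    start = time.time()
    fn()
    device_sync()
    first_batch = time.time() - start

    # Steady state: average over several more calls.
    device_sync()
    start = time.time()
    for _ in range(steps):
        fn()
    device_sync()
    average = (time.time() - start) / steps
    return first_batch, average
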
@ -17,12 +17,9 @@ import torch
from transformers import AutoModelForSequenceClassification

from accelerate import PartialState, prepare_pippy
from accelerate.test_utils import torch_device
from accelerate.utils import set_seed


synchronize_func = getattr(torch, torch_device, torch.cuda).synchronize

# Set the random seed to have reproducible outputs
set_seed(42)

@ -62,25 +59,25 @@ input = torch.randint(
)

# Move the inputs to the first device
input = input.to(torch_device)
input = input.to("cuda:0")

# Take an average of 5 times
# Measure first batch
synchronize_func()
torch.cuda.synchronize()
start_time = time.time()
with torch.no_grad():
    output = model(input)
synchronize_func()
torch.cuda.synchronize()
end_time = time.time()
first_batch = end_time - start_time

# Now that device/backend is init, measure after
synchronize_func()
# Now that CUDA is init, measure after
torch.cuda.synchronize()
start_time = time.time()
for i in range(5):
    with torch.no_grad():
        output = model(input)
synchronize_func()
torch.cuda.synchronize()
end_time = time.time()

# The outputs are only on the final process by default

@ -8,7 +8,7 @@
#SBATCH --error=E-%x.%j

######################
### Set environment ###
### Set enviroment ###
######################
source activateEnvironment.sh


@ -11,7 +11,7 @@
#SBATCH --time=01:59:00 # maximum execution time (HH:MM:SS)

######################
### Set environment ###
### Set enviroment ###
######################
source activateEnvironment.sh
export GPUS_PER_NODE=4

@ -11,7 +11,7 @@
#SBATCH --time=01:59:00 # maximum execution time (HH:MM:SS)

######################
### Set environment ###
### Set enviroment ###
######################
source activateEnvironment.sh
export GPUS_PER_NODE=4

@ -11,7 +11,7 @@
#SBATCH --time=01:59:00 # maximum execution time (HH:MM:SS)

######################
### Set environment ###
### Set enviroment ###
######################
source activateEnvironment.sh
export GPUS_PER_NODE=4
@ -25,7 +25,7 @@ head_node_ip=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
export ACCELERATE_DIR="${ACCELERATE_DIR:-/accelerate}"

export LAUNCHER="accelerate launch \
    --config_file ${ACCELERATE_DIR}/examples/slurm/fsdp_config.yaml \
    --config ${ACCELERATE_DIR}/examples/slurm/fsdp_config.yaml \
    --num_processes $((SLURM_NNODES * GPUS_PER_NODE)) \
    --num_machines $SLURM_NNODES \
    --rdzv_backend c10d \

@ -1,6 +1,6 @@
[tool.ruff]
line-length = 119
target-version = "py39"
target-version = "py38"

[tool.ruff.lint]
preview = true

28
setup.py
@ -19,10 +19,10 @@ extras = {}
extras["quality"] = [
    "black ~= 23.1",  # hf-doc-builder has a hidden dependency on `black`
    "hf-doc-builder >= 0.3.0",
    "ruff ~= 0.11.2",
    "ruff ~= 0.6.4",
]
extras["docs"] = []
extras["test_prod"] = ["pytest>=7.2.0,<=8.0.0", "pytest-xdist", "pytest-subtests", "parameterized", "pytest-order"]
extras["test_prod"] = ["pytest>=7.2.0,<=8.0.0", "pytest-xdist", "pytest-subtests", "parameterized"]
extras["test_dev"] = [
    "datasets",
    "diffusers",
@ -40,8 +40,7 @@ extras["testing"] = extras["test_prod"] + extras["test_dev"]
extras["deepspeed"] = ["deepspeed"]
extras["rich"] = ["rich"]

extras["test_fp8"] = ["torchao"]  # note: TE for now needs to be done via pulling down the docker image directly
extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard", "dvclive", "mlflow", "matplotlib"]
extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard", "dvclive"]
extras["dev"] = extras["quality"] + extras["testing"] + extras["rich"]

extras["sagemaker"] = [
@ -50,7 +49,7 @@ extras["sagemaker"] = [

setup(
    name="accelerate",
    version="1.8.0.dev0",
    version="1.0.0",
    description="Accelerate",
    long_description=open("README.md", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
@ -70,13 +69,13 @@ setup(
            "accelerate-merge-weights=accelerate.commands.merge:main",
        ]
    },
    python_requires=">=3.9.0",
    python_requires=">=3.8.0",
    install_requires=[
        "numpy>=1.17,<3.0.0",
        "packaging>=20.0",
        "psutil",
        "pyyaml",
        "torch>=2.0.0",
        "torch>=1.10.0",
        "huggingface_hub>=0.21.0",
        "safetensors>=0.4.3",
    ],
@ -89,7 +88,7 @@ setup(
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.8",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
)
@ -104,15 +103,20 @@ setup(
# git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi'
# Push the tag and release commit to git: git push --tags origin vXX.xx-release
# 5. Run the following commands in the top-level directory:
#      make prepare_release
#      rm -rf dist
#      rm -rf build
#      python setup.py bdist_wheel
#      python setup.py sdist
# 6. Upload the package to the pypi test server first:
#      make target=testpypi upload_release
#      twine upload dist/* -r testpypi
# 7. Check that you can install it in a virtualenv by running:
#      make install_test_release
#      pip install accelerate
#      pip uninstall accelerate
#      pip install -i https://testpypi.python.org/pypi accelerate
#      accelerate env
#      accelerate test
# 8. Upload the final version to actual pypi:
#      make target=pypi upload_release
#      twine upload dist/* -r pypi
# 9. Add release notes to the tag in github once everything is looking hunky-dory.
# 10. Go back to the main branch and update the version in __init__.py, setup.py to the new version ".dev" and push to
#      main.

@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "1.8.0.dev0"
__version__ = "1.0.0"

from .accelerator import Accelerator
from .big_modeling import (

Some files were not shown because too many files have changed in this diff