Compare commits


11 Commits

296 changed files with 5646 additions and 29557 deletions

View File

@ -37,11 +37,11 @@ members/contributors who may be interested in your PR.
If you know how to use git blame, that is the easiest way; otherwise, here is a rough guide of **who to tag**.
- Big modeling: @SunMarc
- Fully-Sharded Data Parallelism: @SunMarc @zach-huggingface
- DeepSpeed: @SunMarc @zach-huggingface
- Command Line Interface: @SunMarc @zach-huggingface
- Documentation: @SunMarc @zach-huggingface
- Core parts of the library: @BenjaminBossan @SunMarc @zach-huggingface
- Maintained examples: @SunMarc or @zach-huggingface
- Fully-Sharded Data Parallelism: @pacman100
- DeepSpeed: @pacman100
- Command Line Interface: @muellerzr
- Documentation: @muellerzr
- Core parts of the library: @muellerzr @BenjaminBossan
- Maintained examples: @muellerzr or @pacman100
-->

View File

@ -15,14 +15,13 @@ jobs:
outputs:
version: ${{ steps.step1.outputs.version }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3.1.0
- id: step1
run: echo "version=$(python setup.py --version)" >> $GITHUB_OUTPUT
version-cpu:
name: "Latest Accelerate CPU [version]"
runs-on:
group: aws-general-8-plus
runs-on: [self-hosted, intel-cpu, 8-cpu, ci]
needs: get-version
steps:
- name: Set up Docker Buildx
@ -38,12 +37,11 @@ jobs:
with:
file: docker/accelerate-cpu/Dockerfile
push: true
tags: huggingface/accelerate:cpu-release-${{ needs.get-version.outputs.version }}
tags: huggingface/accelerate-cpu:${{needs.get-version.outputs.version}}
version-cuda:
name: "Latest Accelerate GPU [version]"
runs-on:
group: aws-g6-4xlarge-plus
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
needs: get-version
steps:
- name: Set up Docker Buildx
@ -59,46 +57,4 @@ jobs:
with:
file: docker/accelerate-gpu/Dockerfile
push: true
tags: huggingface/accelerate:gpu-release-${{needs.get-version.outputs.version}}
version-cuda-deepspeed:
name: "Latest Accelerate GPU DeepSpeed [version]"
runs-on:
group: aws-g6-4xlarge-plus
needs: get-version
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-gpu-deepspeed/Dockerfile
push: true
tags: huggingface/accelerate:gpu-deepspeed-release-${{needs.get-version.outputs.version}}
version-cuda-fp8-transformerengine:
name: "Latest Accelerate GPU FP8 TransformerEngine [version]"
runs-on:
group: aws-g6-4xlarge-plus
needs: get-version
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-gpu/Dockerfile
push: true
tags: huggingface/accelerate:gpu-fp8-transformerengine-release-${{needs.get-version.outputs.version}}
tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}

View File

@ -16,13 +16,13 @@ jobs:
outputs:
changed: ${{ steps.was_changed.outputs.changed }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3.1.0
with:
fetch-depth: "2"
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@3f54ebb830831fc121d3263c1857cfbdc310cdb9 #v42
uses: tj-actions/changed-files@v41
- name: Was setup changed
id: was_changed
@ -47,4 +47,4 @@ jobs:
run-integration-tests:
needs: build-docker-containers
if: always()
uses: ./.github/workflows/self_hosted_integration_tests.yml
uses: ./.github/workflows/self_hosted_integration_tests.yml

View File

@ -13,8 +13,7 @@ concurrency:
jobs:
latest-cpu:
name: "Latest Accelerate CPU [dev]"
runs-on:
group: aws-general-8-plus
runs-on: [self-hosted, intel-cpu, 8-cpu, ci]
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
@ -23,23 +22,16 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Get current date
id: date
run: |
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
- name: Build and Push CPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-cpu/Dockerfile
file: docker/accelerate-cpu/Dockerfile
push: true
tags: |
huggingface/accelerate:cpu-nightly
huggingface/accelerate:cpu-nightly-${{ env.date }}
tags: huggingface/accelerate-cpu
latest-cuda:
name: "Latest Accelerate GPU [dev]"
runs-on:
group: aws-g6-4xlarge-plus
runs-on: [self-hosted, nvidia-gpu, t4, ci]
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
@ -48,69 +40,10 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Get current date
id: date
run: |
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-gpu/Dockerfile
push: true
tags: |
huggingface/accelerate:gpu-nightly
huggingface/accelerate:gpu-nightly-${{ env.date }}
latest-cuda-deepspeed:
name: "Latest Accelerate GPU DeepSpeed [dev]"
runs-on:
group: aws-g6-4xlarge-plus
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Get current date
id: date
run: |
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-gpu-deepspeed/Dockerfile
file: docker/accelerate-gpu/Dockerfile
push: true
tags: |
huggingface/accelerate:gpu-deepspeed-nightly
huggingface/accelerate:gpu-deepspeed-nightly-${{ env.date }}
latest-cuda-fp8-transformerengine:
name: "Latest Accelerate GPU FP8 TransformerEngine [dev]"
runs-on:
group: aws-g6-4xlarge-plus
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Get current date
id: date
run: |
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
# Get the previous month
echo "base_year=$(date -d 'last month' '+%y')" >> $GITHUB_ENV
echo "base_month=$(date -d 'last month' '+%m')" >> $GITHUB_ENV
- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: benchmarks/fp8/transformer_engine/Dockerfile
push: true
tags: huggingface/accelerate:gpu-fp8-transformerengine-nightly-${{ env.date }}
build-args: |
BASE_YEAR=${{ env.base_year }}
BASE_MONTH=${{ env.base_month }}
tags: huggingface/accelerate-gpu

View File

@ -13,6 +13,5 @@ jobs:
with:
commit_sha: ${{ github.sha }}
package: accelerate
custom_container: huggingface/transformers-doc-builder
secrets:
hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}

View File

@ -14,4 +14,3 @@ jobs:
commit_sha: ${{ github.event.pull_request.head.sha }}
pr_number: ${{ github.event.number }}
package: accelerate
custom_container: huggingface/transformers-doc-builder

View File

@ -1,37 +0,0 @@
name: Test FP8 Runner
on:
workflow_dispatch:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
jobs:
set-prev-day:
runs-on: ubuntu-latest
outputs:
prev-day: ${{ steps.set-prev-day.outputs.prev-day }}
steps:
- name: Set PREV_DAY
id: set-prev-day
run: |
PREV_DAY=$(date -d "yesterday" '+%Y-%m-%d')
echo "prev-day=$PREV_DAY" >> $GITHUB_OUTPUT
run-fp8-tests:
needs: set-prev-day
runs-on:
group: aws-g6e-12xlarge
container:
image: huggingface/accelerate:gpu-fp8-transformerengine-nightly-${{ needs.set-prev-day.outputs.prev-day }}
options: --gpus all --shm-size "16gb"
steps:
- uses: actions/checkout@v3
- name: Install the library
run: |
pip install -e .[test_prod,test_fp8]
- name: Show installed libraries
run: |
pip freeze
- name: Run TE FP8 tests
run: |
python -m pytest -s -v ./tests/test_fp8.py

View File

@ -1,82 +0,0 @@
name: Gaudi3 tests (scheduled)
on:
workflow_dispatch:
schedule: # every day at 6 AM UTC
- cron: "0 6 * * *"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
run-gaudi3-tests:
runs-on:
group: itac-bm-emr-gaudi3-dell-2gaudi
container:
image: docker://vault.habana.ai/gaudi-docker/1.20.0/ubuntu22.04/habanalabs/pytorch-installer-2.6.0:latest
options: --runtime=habana --shm-size=64G --cap-add=sys_nice --env HABANA_VISIBLE_DEVICES
env:
OMPI_MCA_btl_vader_single_copy_mechanism: none
PT_ENABLE_INT64_SUPPORT: 1
PT_HPU_LAZY_MODE: 0
RUN_SLOW: 1
steps:
- name: HL-SMI (1)
run: |
hl-smi
echo "HABANA_VISIBLE_DEVICES=${HABANA_VISIBLE_DEVICES}"
echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}"
- name: Extract HPU visible modules
id: add-modules
run: |
export HABANA_VISIBLE_MODULES=$(hl-smi -Q module_id -f csv,noheader | tr '\n' ',' | sed 's/,$//')
echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}" >> $GITHUB_ENV
- name: HL-SMI (2)
run: |
hl-smi
echo "HABANA_VISIBLE_DEVICES=${HABANA_VISIBLE_DEVICES}"
echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}"
- name: Checkout to Accelerate
uses: actions/checkout@v4
- name: Install Accelerate with Transformers & DeepSpeed
run: |
pip install -e .[testing] \
git+https://github.com/HabanaAI/DeepSpeed.git@1.20.0 \
git+https://github.com/huggingface/transformers.git
- name: Run CLI tests
if: ${{ !cancelled() && (success() || failure()) }}
run: |
make test_cli
- name: Run Core tests
if: ${{ !cancelled() && (success() || failure()) }}
run: |
make test_core
- name: Run Big Modeling tests
if: ${{ !cancelled() && (success() || failure()) }}
run: |
make test_big_modeling
- name: Run FSDP integration tests
if: ${{ !cancelled() && (success() || failure()) }}
run: |
make test_fsdp
- name: Run DeepSpeed integration tests
if: ${{ !cancelled() && (success() || failure()) }}
run: |
make test_deepspeed
- name: Run Examples tests
if: ${{ !cancelled() && (success() || failure()) }}
run: |
make test_examples

View File

@ -26,13 +26,11 @@ jobs:
strategy:
fail-fast: false
steps:
- uses: actions/checkout@v4
- name: Set up python 3.9
uses: actions/setup-python@v5
- uses: actions/checkout@v3.1.0
- name: Set up python 3.8
uses: actions/setup-python@v3
with:
python-version: 3.9
cache: 'pip'
cache-dependency-path: 'setup.py'
python-version: 3.8
- name: Install Accelerate from source
run: |

View File

@ -12,14 +12,13 @@ env:
jobs:
run_core_tests_single_gpu:
runs-on:
group: aws-g6-4xlarge-plus
run_all_tests_single_gpu:
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
env:
CUDA_VISIBLE_DEVICES: "0"
TEST_TYPE: "single_gpu"
container:
image: huggingface/accelerate:gpu-nightly
image: huggingface/accelerate-gpu:latest
options: --gpus all --shm-size "16gb"
defaults:
run:
@ -34,17 +33,12 @@ jobs:
pip install -e . --no-deps
pip install pytest-reportlog tabulate
- name: Show installed libraries
run: |
source activate accelerate;
pip freeze
- name: Run test on GPUs
working-directory: accelerate
run: |
source activate accelerate
make test
- name: Run examples on GPUs
working-directory: accelerate
if: always()
@ -52,7 +46,7 @@ jobs:
source activate accelerate
pip uninstall comet_ml -y
make test_examples
- name: Generate Report
working-directory: accelerate
if: always()
@ -60,69 +54,13 @@ jobs:
pip install slack_sdk tabulate
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
run_deepspeed_tests_single_gpu:
runs-on:
group: aws-g6-4xlarge-plus
env:
CUDA_VISIBLE_DEVICES: "0"
TEST_TYPE: "single_gpu_deepspeed"
container:
image: huggingface/accelerate:gpu-deepspeed-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
shell: bash
steps:
- name: Update clone & pip install
run: |
source activate accelerate
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e . --no-deps
pip install pytest-reportlog tabulate
- name: Show installed libraries
run: |
source activate accelerate;
pip freeze
- name: Run test on GPUs
working-directory: accelerate
run: |
source activate accelerate
make test_deepspeed
- name: Run Integration tests on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
make test_integrations
- name: Run examples on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
make test_examples
- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install slack_sdk tabulate
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
run_core_tests_multi_gpu:
runs-on:
group: aws-g6-12xlarge-plus
run_all_tests_multi_gpu:
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
env:
CUDA_VISIBLE_DEVICES: "0,1"
TEST_TYPE: "multi_gpu"
container:
image: huggingface/accelerate:gpu-nightly
image: huggingface/accelerate-gpu:latest
options: --gpus all --shm-size "16gb"
defaults:
run:
@ -137,11 +75,6 @@ jobs:
pip install -e . --no-deps
pip install pytest-reportlog tabulate
- name: Show installed libraries
run: |
source activate accelerate;
pip freeze
- name: Run core and big modeling tests on GPUs
working-directory: accelerate
run: |
@ -172,62 +105,7 @@ jobs:
pip install slack_sdk tabulate
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
run_deepspeed_tests_multi_gpu:
runs-on:
group: aws-g6-12xlarge-plus
env:
CUDA_VISIBLE_DEVICES: "0,1"
TEST_TYPE: "multi_gpu_deepspeed"
container:
image: huggingface/accelerate:gpu-deepspeed-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
shell: bash
steps:
- name: Update clone
run: |
source activate accelerate
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e . --no-deps
pip install pytest-reportlog tabulate
- name: Show installed libraries
run: |
source activate accelerate;
pip freeze
- name: Run DeepSpeed tests
working-directory: accelerate
run: |
source activate accelerate
make test_deepspeed
- name: Run Integration tests on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
make test_integrations
- name: Run examples on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
make test_examples
- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install slack_sdk tabulate
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
run-integration-tests:
if: always()
uses: ./.github/workflows/self_hosted_integration_tests.yml
uses: ./.github/workflows/self_hosted_integration_tests.yml

View File

@ -1,19 +0,0 @@
# To run this bot, comment "@bot /style" on a PR
name: Style Bot
on:
issue_comment:
types: [created]
permissions:
contents: write
pull-requests: write
jobs:
style:
uses: huggingface/huggingface_hub/.github/workflows/style-bot-action.yml@main
with:
python_quality_dependencies: "[quality]"
style_command_type: "default"
secrets:
bot_token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -6,13 +6,11 @@ jobs:
quality:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.9
uses: actions/setup-python@v5
- uses: actions/checkout@v3.1.0
- name: Set up Python 3.8
uses: actions/setup-python@v3
with:
python-version: 3.9
cache: 'pip'
cache-dependency-path: 'setup.py'
python-version: 3.8
- name: Install Python dependencies
run: pip install -e .[quality]
- name: Run Quality check

View File

@ -9,13 +9,12 @@ env:
IS_GITHUB_CI: "1"
jobs:
run_core_tests_single_gpu:
runs-on:
group: aws-g6-4xlarge-plus
run_all_tests_single_gpu:
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
env:
CUDA_VISIBLE_DEVICES: "0"
container:
image: huggingface/accelerate:gpu-nightly
image: huggingface/accelerate-gpu:latest
options: --gpus all --shm-size "16gb"
defaults:
run:
@ -30,17 +29,12 @@ jobs:
pip install -e .[testing,test_trackers] -U;
pip install pytest-reportlog tabulate ;
- name: Show installed libraries
run: |
source activate accelerate;
pip freeze
- name: Run CLI tests (use make cli)
working-directory: accelerate
run: |
source activate accelerate;
make test_cli
- name: Run test on GPUs
working-directory: accelerate
if: always()
@ -62,53 +56,12 @@ jobs:
pip install tabulate;
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
run_deepspeed_tests_single_gpu:
runs-on:
group: aws-g6-4xlarge-plus
env:
CUDA_VISIBLE_DEVICES: "0"
container:
image: huggingface/accelerate:gpu-deepspeed-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
shell: bash
steps:
- name: Install accelerate
run: |
source activate accelerate;
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e .[testing,test_trackers] -U;
pip install pytest-reportlog tabulate ;
- name: Show installed libraries
run: |
source activate accelerate;
pip freeze
- name: Run test on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate;
make test_deepspeed
- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install tabulate;
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
run_core_tests_multi_gpu:
runs-on:
group: aws-g6-12xlarge-plus
run_all_tests_multi_gpu:
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
env:
CUDA_VISIBLE_DEVICES: 0,1
container:
image: huggingface/accelerate:gpu-nightly
image: huggingface/accelerate-gpu:latest
options: --gpus all --shm-size "16gb"
defaults:
run:
@ -123,11 +76,6 @@ jobs:
pip install -e .[testing,test_trackers] -U;
pip install pytest-reportlog tabulate
- name: Show installed libraries
run: |
source activate accelerate;
pip freeze
- name: Run test on GPUs
working-directory: accelerate
run: |
@ -148,41 +96,3 @@ jobs:
run: |
source activate accelerate;
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
run_deepspeed_tests_multi_gpu:
runs-on:
group: aws-g6-12xlarge-plus
container:
image: huggingface/accelerate:gpu-deepspeed-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
shell: bash
steps:
- name: Install accelerate
run: |
source activate accelerate;
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e .[testing,test_trackers] -U;
pip install pytest-reportlog tabulate ;
- name: Show installed libraries
run: |
source activate accelerate;
pip freeze
- name: Run test on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate;
make test_deepspeed
- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install tabulate;
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

View File

@ -1,7 +1,7 @@
# CI for specifically ensuring integrations work fine (`transformers` mainly) on GPUs
# Useful tips:
# - `working-directory` should be set to the root of the repo, which is cloned on the actual CI runner.
# It follows the directory structure of `actions-runner/_work/{repo_name}/{repo_name}/{cloned_repo}` on
# It follows the directory structure of `actions-runner/_work/{repo_name}/{repo_name}/{cloned_repo}` on
# prem, but in Actions setting `working-directory` looks just in the `{repo_name}` level.
# - Each new integration to test should have its own job, and follow a strategy where we check both
# the pypi and github versions.
@ -23,15 +23,14 @@ defaults:
jobs:
run-trainer-tests:
container:
image: huggingface/accelerate:gpu-deepspeed-nightly
image: huggingface/accelerate-gpu:latest
options: --gpus all --shm-size "16gb"
runs-on:
group: aws-g6-12xlarge-plus
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
strategy:
fail-fast: false
matrix:
cuda_visible_devices: [
"0",
"0",
"0,1"
]
steps:
@ -52,7 +51,7 @@ jobs:
pip install -e .[testing];
pip uninstall comet_ml wandb dvclive -y
cd ..;
- name: Show installed libraries
run: |
source activate accelerate;
@ -89,15 +88,14 @@ jobs:
run-skorch-tests:
container:
image: huggingface/accelerate:gpu-nightly
image: huggingface/accelerate-gpu:latest
options: --gpus all --shm-size "16gb"
runs-on:
group: aws-g6-12xlarge-plus
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
strategy:
fail-fast: false
steps:
- name: Install accelerate
run:
run:
source activate accelerate;
git clone https://github.com/huggingface/accelerate;
cd accelerate;
@ -124,4 +122,4 @@ jobs:
working-directory: skorch/
run: |
source activate accelerate;
pytest -sv -k TestAccelerate
pytest -sv -k TestAccelerate

View File

@ -10,24 +10,19 @@ jobs:
name: Close Stale Issues
if: github.repository == 'huggingface/accelerate'
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3.1.0
- name: Setup Python
uses: actions/setup-python@v5
uses: actions/setup-python@v3
with:
python-version: 3.9
cache: 'pip'
cache-dependency-path: 'setup.py'
python-version: 3.8
- name: Install requirements
run: |
pip install PyGithub
- name: Close stale issues
run: |
python utils/stale.py
python utils/stale.py

View File

@ -38,25 +38,28 @@ jobs:
test_rest
]
steps:
- uses: actions/checkout@v4
- name: Set up python 3.9
uses: actions/setup-python@v5
- uses: actions/checkout@v3.1.0
- name: Set up python 3.8
uses: actions/setup-python@v3
with:
python-version: 3.9
cache: 'pip'
cache-dependency-path: 'setup.py'
python-version: 3.8
- name: Activate python cache
uses: actions/cache@v3
with:
path: |
${{ env.pythonLocation }}
${{ env.HF_HOME }}
key: ${{ env.pythonLocation }}-${{ matrix.pytorch-version }}-${{ matrix.test-kind }}-${{ hashFiles('setup.py') }}
- name: Install the library
run: |
pip install --upgrade pip
if [[ ${{ matrix.test-kind }} = test_prod ]]; then pip install -e .[test_prod]; fi
if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi
if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi
if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torchvision==0.18.1 torch==2.3.1; fi
pip install pytest-reportlog tabulate setuptools importlib_metadata
- name: Show installed libraries
run: |
pip freeze
if [[ ${{ matrix.test-kind }} = minimum ]]; then pip install torch==1.10.0; fi
pip install pytest-reportlog tabulate
- name: Run Tests
env:
@ -67,4 +70,4 @@ jobs:
- name: Generate Report
if: always()
run: |
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

View File

@ -1,55 +0,0 @@
name: Run Import Tests
on:
pull_request:
paths:
- "src/**"
- "tests/**"
- ".github/**"
- "examples/**"
- "setup.py"
types: [opened, synchronize, reopened]
env:
HF_HOME: ~/hf_cache
TESTING_MOCKED_DATALOADERS: "1"
IS_GITHUB_CI: "1"
jobs:
run-tests:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
pytorch-version: [
latest,
minimum,
]
steps:
- uses: actions/checkout@v4
- name: Set up python 3.9
uses: actions/setup-python@v5
with:
python-version: 3.9
cache: 'pip'
cache-dependency-path: 'setup.py'
- name: Install the library
run: |
pip install -e .
pip install pytest-reportlog tabulate setuptools git+https://github.com/muellerzr/import-timer
- name: Show installed libraries
run: |
pip freeze
- name: Run Import Tests
env:
PYTORCH_VERSION: ${{ matrix.pytorch-version }}
run: |
pytest -sv tests/test_imports.py
- name: Generate Report
if: always()
run: |
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

View File

@ -1,15 +0,0 @@
on:
push:
name: Secret Leaks
jobs:
trufflehog:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Secret Scanning
uses: trufflesecurity/trufflehog@main

View File

@ -1,13 +0,0 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.2.1
hooks:
- id: ruff
args:
- --fix
- id: ruff-format
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: check-merge-conflict
- id: check-yaml

View File

@ -123,15 +123,12 @@ Follow these steps to start contributing:
4. Set up a development environment by running the following command in a conda or a virtual environment you've created for working on this library:
```bash
$ pip install -e ".[dev]"
$ pip install -e ".[quality]"
```
This will install all testing and linting/code quality dependencies for the library (see `quality`, `test_dev`,
`test_prod` targets in [`setup.py`](./setup.py)).
(If accelerate was already installed in the virtual environment, remove
it with `pip uninstall accelerate` before reinstalling it in editable
mode with the `-e` flag).
mode with the `-e` flag.)
Alternatively, if you are using [Visual Studio Code](https://code.visualstudio.com/Download), the fastest way to get set up is by using
the provided Dev Container. Documentation on how to get started with dev containers is available [here](https://code.visualstudio.com/docs/remote/containers).
@ -155,7 +152,7 @@ Follow these steps to start contributing:
$ make test
```
`accelerate` relies on `ruff` to format its source code
`accelerate` relies on `black` and `ruff` to format its source code
consistently. After you make changes, apply automatic style corrections and code verifications
that can't be automated in one go with:
@ -175,14 +172,6 @@ Follow these steps to start contributing:
$ make quality
```
You can also set up [`pre-commit`](https://pre-commit.com/) to run these checks
automatically as Git commit hooks.
```bash
$ pip install pre-commit
$ pre-commit install
```
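If you want to run the hooks on demand without making a commit (a standard `pre-commit` feature, not something added in this diff), you can also do:
```bash
$ pre-commit run --all-files
```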
Once you're happy with your changes, add changed files using `git add` and
make a commit with `git commit` to record your changes locally:
@ -246,4 +235,4 @@ $ python -m pytest -sv ./tests
In fact, that's how `make test` is implemented (sans the `pip install` line)!
You can specify a smaller set of tests in order to test only the feature
you're working on.
you're working on.
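For instance, an illustrative way to narrow the run to a single test module or a keyword expression (the path and keyword below are placeholders, not files referenced by this diff):
```bash
$ python -m pytest -sv ./tests/test_utils.py
$ python -m pytest -sv ./tests -k "keyword_matching_your_feature"
```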

View File

@ -1,6 +1,6 @@
.PHONY: quality style test docs utils
check_dirs := .
check_dirs := tests src examples benchmarks utils
# Check that source code meets quality standards
@ -12,23 +12,26 @@ extra_quality_checks:
# this target runs checks on all files
quality:
ruff check $(check_dirs)
ruff format --check $(check_dirs)
black --required-version 23 --check $(check_dirs)
ruff $(check_dirs)
doc-builder style src/accelerate docs/source --max_len 119 --check_only
# Format source code automatically and check if there are any problems left that need manual fixing
style:
ruff check $(check_dirs) --fix
ruff format $(check_dirs)
black --required-version 23 $(check_dirs)
ruff $(check_dirs) --fix
doc-builder style src/accelerate docs/source --max_len 119
# Run tests for the library
test:
python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_all.log",)
test_big_modeling:
python -m pytest -s -v ./tests/test_big_modeling.py ./tests/test_modeling_utils.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_big_modeling.log",)
test_core:
python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \
--ignore=./tests/fsdp --ignore=./tests/tp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_core.log",)
--ignore=./tests/fsdp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_core.log",)
test_cli:
python -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_cli.log",)
@ -39,25 +42,12 @@ test_deepspeed:
test_fsdp:
python -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_fsdp.log",)
test_tp:
python -m pytest -s -v ./tests/tp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_tp.log",)
# Since the new version of pytest will *change* how things are collected, we need `deepspeed` to
# run after test_core and test_cli
test:
$(MAKE) test_core
$(MAKE) test_cli
$(MAKE) test_big_modeling
$(MAKE) test_deepspeed
$(MAKE) test_fsdp
$(MAKE) test_tp
test_examples:
python -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_examples.log",)
# Broken down example tests for the CI runners
test_integrations:
python -m pytest -s -v ./tests/deepspeed ./tests/fsdp ./tests/tp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_integrations.log",)
python -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_integrations.log",)
test_example_differences:
python -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_example_diff.log",)
@ -74,21 +64,3 @@ test_prod:
test_rest:
python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "not by_step and not by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_rest.log",)
# For developers to prepare a release
prepare_release:
rm -rf dist build
python setup.py bdist_wheel sdist
# Make sure this is ran in a fresh venv of some form
install_test_release:
pip uninstall accelerate -y
pip install -i https://testpypi.python.org/pypi --extra-index-url https://pypi.org/simple accelerate$(if $(version),==$(version),)
# Run as `make target=testpypi upload_release`
upload_release:
@if [ "$(target)" != "testpypi" ] && [ "$(target)" != "pypi" ]; then \
echo "Error: target must be either 'testpypi' or 'pypi'"; \
exit 1; \
fi
twine upload dist/* -r $(target)
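The release helpers above are meant to be chained from a fresh environment; a minimal sketch of the intended flow (the version value here is illustrative only):
```bash
make prepare_release                      # build sdist and wheel into dist/
make target=testpypi upload_release       # upload to TestPyPI first
make install_test_release version=1.2.3   # verify the upload in a fresh venv
make target=pypi upload_release           # then publish to PyPI
```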

View File

@ -22,12 +22,22 @@ limitations under the License.
<p align="center">
<!-- Uncomment when CircleCI is set up
<a href="https://circleci.com/gh/huggingface/accelerate"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master"></a>
<a href="https://circleci.com/gh/huggingface/accelerate">
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master">
</a>
-->
<a href="https://github.com/huggingface/accelerate/blob/main/LICENSE"><img alt="License" src="https://img.shields.io/github/license/huggingface/accelerate.svg?color=blue"></a>
<a href="https://huggingface.co/docs/accelerate/index.html"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/accelerate/index.html.svg?down_color=red&down_message=offline&up_message=online"></a>
<a href="https://github.com/huggingface/accelerate/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/accelerate.svg"></a>
<a href="https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
<a href="https://github.com/huggingface/accelerate/blob/main/LICENSE">
<img alt="License" src="https://img.shields.io/github/license/huggingface/accelerate.svg?color=blue">
</a>
<a href="https://huggingface.co/docs/accelerate/index.html">
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/accelerate/index.html.svg?down_color=red&down_message=offline&up_message=online">
</a>
<a href="https://github.com/huggingface/accelerate/releases">
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/accelerate.svg">
</a>
<a href="https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md">
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
</a>
</p>
<h3 align="center">
@ -157,21 +167,11 @@ accelerate launch --multi_gpu --num_processes 2 examples/nlp_example.py
To learn more, check the CLI documentation available [here](https://huggingface.co/docs/accelerate/package_reference/cli).
Or view the configuration zoo [here](https://github.com/huggingface/accelerate/blob/main/examples/config_yaml_templates/)
## Launching multi-CPU run using MPI
🤗 Here is another way to launch a multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well.
Once you have MPI set up on your cluster, just run:
```bash
accelerate config
```
Answer the questions that are asked, selecting to run using multi-CPU, and answer "yes" when asked if you want accelerate to launch mpirun.
Then, use `accelerate launch` with your script like:
```bash
accelerate launch examples/nlp_example.py
```
Alternatively, you can use mpirun directly, without using the CLI, like:
```bash
mpirun -np 2 python examples/nlp_example.py
```
@ -258,7 +258,7 @@ pip install accelerate
- multi-GPU on several nodes (machines)
- TPU
- FP16/BFloat16 mixed precision
- FP8 mixed precision with [Transformer Engine](https://github.com/NVIDIA/TransformerEngine) or [MS-AMP](https://github.com/Azure/MS-AMP/)
- FP8 mixed precision with [Transformer Engine](https://github.com/NVIDIA/TransformerEngine)
- DeepSpeed support (Experimental)
- PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental)
- Megatron-LM support (Experimental)
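As a quick, hedged sketch of what enabling mixed precision through `Accelerator` looks like in practice (the toy model, data, and hyperparameters below are stand-ins, not code from this repository):
```python
import torch
from accelerate import Accelerator

# "bf16" is shown here (hardware/PyTorch support permitting); "fp16" works the
# same way, and "fp8" additionally requires a backend such as Transformer Engine
accelerator = Accelerator(mixed_precision="bf16")

model = torch.nn.Linear(16, 2)                              # toy stand-in model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

for _ in range(4):                                          # toy training loop
    inputs = torch.randn(8, 16, device=accelerator.device)
    labels = torch.randint(0, 2, (8,), device=accelerator.device)
    loss = torch.nn.functional.cross_entropy(model(inputs), labels)
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()
```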

View File

@ -1,5 +1,46 @@
# Benchmarks
# Big model inference benchmarks
The folders below contain suites to test various functionalities in Accelerate.
Running inference with Accelerate on big models.
See their relevant README.md files for more information.
## Setup
These benchmarks use the `transformers` library:
```bash
pip install transformers
```
To reproduce or test a new setup, run
```py
python inference_acc.py model_name
```
This script supports `gpt-j-6b`, `gpt-neox`, `opt` (30B version) and `T0pp` out of the box, but you can specify any valid checkpoint for `model_name`.
To force a different `torch_dtype` than the one in the config: `--torch_dtype xxx`.
If you get an error linked to disk offload, you need to add the option `--disk-offload`.
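Putting these options together, an illustrative invocation (the model choice and flags are just an example) would be:
```bash
python inference_acc.py opt --torch_dtype float16 --disk-offload
```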
## Results
On a setup with two Titan RTX GPUs (24GB of memory each) and 32GB of CPU RAM, we get the following benchmarks (T0pp does not run in float16, which is why it's not included).
| Model | Model load time | Generation time | dtype | GPU 0 use | GPU 1 use | CPU use | Disk offload |
|:-----:|:---------------:|:---------------:|:-----:|:---------:|:---------:|:-------:|:------------:|
| GPT-J-6B | 8.7s | 0.05s per token | float16 | 11.7GB | 0GB | 0GB | no |
| GPT-J-6B | 12.4s | 0.06s per token | float32 | 21.9GB | 1.5GB | 0GB | no |
| GPT-Neo-X-20B | 30.9s | 0.08s per token | float16 | 21.5GB | 18GB | 0GB | no |
| GPT-Neo-X-20B | 78.2s | 10.72s per token | float32 | 20.3GB | 22.7 GB | 24.4GB | yes |
| T0pp (11B) | 29.4s | 0.05s per token | float32 | 21.1GB | 21.3GB | 0GB | no |
| OPT-30B | 34.5s | 2.37s per token | float16 | 20.7GB | 22.3GB | 14.1GB | no |
| OPT-30B | 112.3s | 33.9s per token | float32 | 20.2GB | 21.2GB | 23.5GB | yes |
Note on the results:
- using two GPUs instead of one does not slow down generation
- using CPU offload slows down a bit (see OPT-30b)
- using disk offload slows down a lot (need to implement prefetching)
You will also note that Accelerate does not use any more GPU and CPU RAM than necessary:
- peak GPU memory is exactly the size of the model put on a given GPU
- peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded on CPU, whichever is bigger.

View File

@ -1,46 +0,0 @@
# Big model inference benchmarks
Running inference with Accelerate on big models.
## Setup
These benchmarks use the `transformers` library:
```bash
pip install transformers
```
To reproduce or test a new setup, run
```py
python big_model_inference.py model_name
```
This script supports `gpt-j-6b`, `gpt-neox`, `opt` (30B version) and `T0pp` out of the box, but you can specify any valid checkpoint for `model_name`.
To force a different `torch_dtype` than the one in the config: `--torch_dtype xxx`.
If you get an error linked to disk offload, you need to add the option `--disk-offload`.
## Results
On a setup with two Titan RTX GPUs (24GB of memory each) and 32GB of CPU RAM, we get the following benchmarks (T0pp does not run in float16, which is why it's not included).
| Model | Model load time | Generation time | dtype | GPU 0 use | GPU 1 use | CPU use | Disk offload |
|:-----:|:---------------:|:---------------:|:-----:|:---------:|:---------:|:-------:|:------------:|
| GPT-J-6B | 8.7s | 0.05s per token | float16 | 11.7GB | 0GB | 0GB | no |
| GPT-J-6B | 12.4s | 0.06s per token | float32 | 21.9GB | 1.5GB | 0GB | no |
| GPT-Neo-X-20B | 30.9s | 0.08s per token | float16 | 21.5GB | 18GB | 0GB | no |
| GPT-Neo-X-20B | 78.2s | 10.72s per token | float32 | 20.3GB | 22.7 GB | 24.4GB | yes |
| T0pp (11B) | 29.4s | 0.05s per token | float32 | 21.1GB | 21.3GB | 0GB | no |
| OPT-30B | 34.5s | 2.37s per token | float16 | 20.7GB | 22.3GB | 14.1GB | no |
| OPT-30B | 112.3s | 33.9s per token | float32 | 20.2GB | 21.2GB | 23.5GB | yes |
Note on the results:
- using two GPUs instead of one does not slow down generation
- using CPU offload slows down a bit (see OPT-30b)
- using disk offload slows down a lot (need to implement prefetching)
You will also note that Accelerate does not use any more GPU and CPU RAM than necessary:
- peak GPU memory is exactly the size of the model put on a given GPU
- peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded on CPU, whichever is bigger.

View File

@ -1,12 +0,0 @@
FROM ghcr.io/azure/msamp
RUN pip install transformers evaluate datasets
RUN git clone https://github.com/huggingface/accelerate
RUN cd accelerate && \
pip install -e . && \
cd benchmarks/fp8
CMD ["bash"]

View File

@ -1,123 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script tests to ensure that `accelerate` performs at the same level as raw `MS-AMP`.
This particular script verifies this for DDP training.
"""
import evaluate
import msamp
import torch
from fp8_utils import evaluate_model, get_training_utilities
from torch.nn.parallel import DistributedDataParallel as DDP
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, get_grad_scaler, set_seed
MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")
def train_baseline(opt_level="O2"):
set_seed(42)
scaler = get_grad_scaler()
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
accelerator = Accelerator()
device = accelerator.device
model, optimizer = msamp.initialize(model, optimizer, opt_level=opt_level)
model.to(device)
# Convert the model to DDP
device_ids, output_device = [accelerator.local_process_index], accelerator.local_process_index
model = DDP(model, device_ids=device_ids, output_device=output_device)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for i, batch in enumerate(train_dataloader):
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
outputs = model(**batch)
loss = outputs.loss
scaler.scale(loss).backward()
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
def train_integration(opt_level="O2"):
kwargs_handlers = [FP8RecipeKwargs(backend="msamp", opt_level=opt_level)]
AcceleratorState()._reset_state(True)
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
model, optimizer = accelerator.prepare(model, optimizer)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for i, batch in enumerate(train_dataloader):
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
if __name__ == "__main__":
for opt_level in ["O1", "O2"]:
baseline_not_trained, baseline_trained = train_baseline(opt_level)
accelerator_not_trained, accelerator_trained = train_integration(opt_level)
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
f"Accuracy not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
)
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
f"F1 not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
)
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
f"Accuracy not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
)
assert baseline_trained["f1"] == accelerator_trained["f1"], (
f"F1 not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained['f1']} == {accelerator_trained['f1']}"
)

View File

@ -1,161 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script tests to ensure that `accelerate` performs at the same level as raw `MS-AMP`.
This particular script verifies this for DeepSpeed training.
NOTE: MS-AMP does *not* support ZeRO-3.
"""
# import msamp.deepspeed as msamp_deepspeed
import evaluate
import torch
from fp8_utils import evaluate_model, get_training_utilities
from msamp import deepspeed as msamp_deepspeed
from accelerate import Accelerator, DeepSpeedPlugin
from accelerate.state import AcceleratorState
from accelerate.utils import set_seed
MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")
def train_baseline(zero_stage: int = 1, opt_level: str = "O1"):
set_seed(42)
accelerator = Accelerator()
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
import numpy as np
config = {
"train_batch_size": 32,
"train_micro_batch_size_per_gpu": 16,
"gradient_accumulation_steps": 1,
"zero_optimization": {
"stage": zero_stage,
"offload_optimizer": {"device": "none", "nvme_path": None},
"offload_param": {"device": "none", "nvme_path": None},
},
"gradient_clipping": 1.0,
"steps_per_print": np.inf,
"bf16": {"enabled": True},
"fp16": {"enabled": False},
"zero_allow_untested_optimizer": True,
"msamp": {
"enabled": True,
"opt_level": opt_level,
},
}
(
model,
optimizer,
_,
_,
) = msamp_deepspeed.initialize(
model=model,
optimizer=optimizer,
config_params=config,
)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for _ in range(2):
for batch in train_dataloader:
outputs = model(**batch)
loss = outputs.loss
model.backward(loss)
model.step()
for _ in range(accelerator.num_processes):
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.destroy()
torch.cuda.empty_cache()
AcceleratorState()._reset_state(True)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
def train_integration(zero_stage: int = 1, opt_level: str = "O1"):
set_seed(42)
deepspeed_plugin = DeepSpeedPlugin(
zero_stage=zero_stage,
enable_msamp=True,
msamp_opt_level=opt_level,
)
accelerator = Accelerator(mixed_precision="fp8", deepspeed_plugin=deepspeed_plugin)
accelerator.state.deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = 16
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for _ in range(2):
for batch in train_dataloader:
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.destroy()
torch.cuda.empty_cache()
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
AcceleratorState()._reset_state(True)
return base_model_results, trained_model_results
if __name__ == "__main__":
for zero_stage in [1, 2]:
for opt_level in ["O1", "O2", "O3"]:
baseline_not_trained, baseline_trained = train_baseline(zero_stage, opt_level)
accelerator_not_trained, accelerator_trained = train_integration(zero_stage, opt_level)
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
f"ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
)
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
f"ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
)
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
f"ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
)
assert baseline_trained["f1"] == accelerator_trained["f1"], (
f"ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
)
torch.distributed.destroy_process_group()

View File

@ -1,118 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def get_dataloaders(model_name: str, batch_size: int = 16):
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
return tokenizer.pad(
examples,
padding="longest",
pad_to_multiple_of=16, # Specific for FP8
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"],
shuffle=False,
collate_fn=collate_fn,
batch_size=16,
drop_last=True,
)
return train_dataloader, eval_dataloader
def get_training_utilities(model_name: str, batch_size: int = 16, accelerator=None):
"""
Returns a tuple of:
- Model
- Optimizer
- Train dataloader (prepared)
- Eval dataloader (prepared)
- LR Scheduler
Suitable for training on the MRPC dataset
"""
from torch.optim import AdamW
from transformers import AutoModelForSequenceClassification, get_linear_schedule_with_warmup
from accelerate import Accelerator
if accelerator is None:
accelerator = Accelerator()
model = AutoModelForSequenceClassification.from_pretrained(model_name)
train_dataloader, eval_dataloader = get_dataloaders(model_name, batch_size)
optimizer = AdamW(model.parameters(), lr=0.0001)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=len(train_dataloader) * 2,
)
train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader)
return model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
def get_named_parameters(model):
"""
Same thing as `Accelerator.get_named_parameters`. Returns a dict of the named parameters of the model (extracted
from the parallel wrapper, if any)
"""
from accelerate.utils import extract_model_from_parallel
model = extract_model_from_parallel(model)
return {n: p for n, p in model.named_parameters()}
def evaluate_model(model, dataloader, metric, accelerator=None):
"Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
model.eval()
for step, batch in enumerate(dataloader):
with torch.no_grad():
# W/ MS-AMP, we need to cast while evaluating
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
references = batch["labels"]
if accelerator is not None and accelerator.num_processes > 1:
predictions, references = accelerator.gather_for_metrics((predictions, references))
metric.add_batch(predictions=predictions, references=references)
return metric.compute()

View File

@ -1,118 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script tests to ensure that `accelerate` performs at the same level as raw `MS-AMP`.
This particular script verifies this for single GPU training.
"""
import evaluate
import msamp
import torch
from fp8_utils import evaluate_model, get_training_utilities
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, get_grad_scaler, set_seed
MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")
def train_baseline(opt_level="O2"):
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
model, optimizer = msamp.initialize(model, optimizer, opt_level=opt_level)
model.to("cuda")
base_model_results = evaluate_model(model, eval_dataloader, METRIC)
model.train()
scaler = get_grad_scaler()
for batch in train_dataloader:
batch = batch.to("cuda")
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
outputs = model(**batch)
loss = outputs.loss
loss = scaler.scale(loss)
loss.backward()
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
def train_integration(opt_level="O2"):
kwargs_handlers = [FP8RecipeKwargs(backend="msamp", opt_level=opt_level)]
AcceleratorState()._reset_state(True)
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
base_model_results = evaluate_model(model, eval_dataloader, METRIC)
model.train()
for batch in train_dataloader:
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
if __name__ == "__main__":
for opt_level in ["O1", "O2"]:
baseline_not_trained, baseline_trained = train_baseline(opt_level)
accelerator_not_trained, accelerator_trained = train_integration(opt_level)
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
)
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
)
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
)
assert baseline_trained["f1"] == accelerator_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
)

View File

@ -1,12 +0,0 @@
FROM nvcr.io/nvidia/pytorch:24.07-py3
RUN pip install transformers evaluate datasets
RUN git clone https://github.com/huggingface/accelerate.git
RUN cd accelerate && \
pip install -e . && \
cd benchmarks/fp8
RUN /bin/bash

View File

@ -1,32 +0,0 @@
# FP8 Benchmarks
Comparing and running [torchao](https://github.com/pytorch/ao/tree/main/torchao/float8) FP8 with accelerate
## Overview
This repo provides scripts that compare native `torchao` model training against `accelerate`'s own integration. Each training setup is segmented out into its own script, supporting the following:
* Single GPU training (`non_distributed.py`)
* Multi-GPU training via DistributedDataParallelism (`ddp.py`)
* Fully Sharded Data Parallelism (`fsdp.py`)
* DeepSpeed ZeRO 1-3 (`deepspeed.py`)
To run them, it is recommended to use a Docker image (see the attached `Dockerfile`) rather than installing `torchao` manually.
## Running:
Official Docker images are available at `huggingface/accelerate:gpu-fp8-torchao-nightly`.
All scripts can be run with the core `accelerate launch` command; no `accelerate config` is needed.
For single GPU, run it via `python`:
```bash
python non_distributed.py
```
For the rest, run it via `accelerate launch`:
```bash
accelerate launch ddp.py # or fsdp.py, deepspeed.py
```
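As a point of reference, the `accelerate` side of this comparison boils down to enabling the torchao FP8 recipe on the `Accelerator`. The snippet below is a minimal sketch of that pattern, not one of the benchmark scripts themselves: the toy model, data, and learning rate are illustrative stand-ins for the BERT/MRPC setup, the layer sizes are kept divisible by 16 because FP8 kernels require it, and an FP8-capable GPU is assumed.
```python
import torch

from accelerate import Accelerator
from accelerate.utils import AORecipeKwargs, set_seed

set_seed(42)

# Toy stand-ins for the BERT/MRPC setup used by the real scripts; sizes are kept
# divisible by 16 because FP8 kernels require it.
model = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.ReLU(), torch.nn.Linear(64, 16))
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
dataset = torch.utils.data.TensorDataset(torch.randn(256, 64), torch.randint(0, 16, (256,)))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=16, drop_last=True)

# `mixed_precision="fp8"` together with `AORecipeKwargs` asks accelerate to convert
# eligible linear layers to torchao float8 training when the model is prepared.
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()])
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

loss_fn = torch.nn.CrossEntropyLoss()
model.train()
for inputs, labels in dataloader:
    loss = loss_fn(model(inputs), labels)
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()
```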

View File

@ -1,158 +0,0 @@
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script tests to ensure that `accelerate` performs at the same level as raw `torchao`.
This particular script verifies this for DDP training.
"""
from functools import partial
import evaluate
import torch
from fp8_utils import get_training_utilities
from torch.nn.parallel import DistributedDataParallel as DDP
from torchao.float8 import convert_to_float8_training
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import AORecipeKwargs, set_seed
MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")
def evaluate_model(model, dataloader, metric, accelerator=None):
"Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
model.eval()
for step, batch in enumerate(dataloader):
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
references = batch["labels"]
if accelerator is not None and accelerator.num_processes > 1:
predictions, references = accelerator.gather_for_metrics((predictions, references))
metric.add_batch(predictions=predictions, references=references)
return metric.compute()
def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None):
if isinstance(module, torch.nn.Linear):
if module.in_features % 16 != 0 or module.out_features % 16 != 0:
return False
# For stability reasons, we skip the first and last linear layers
# Otherwise can lead to the model not training or converging properly
if fqn in (first_layer_name, last_layer_name):
return False
return True
def train_baseline():
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
first_linear = None
last_linear = None
for name, module in model.named_modules():
if isinstance(module, torch.nn.Linear):
if first_linear is None:
first_linear = name
last_linear = name
func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear)
accelerator = Accelerator()
device = accelerator.device
model.to(device)
convert_to_float8_training(model, module_filter_fn=func)
# Convert the model to DDP
device_ids, output_device = [accelerator.local_process_index], accelerator.local_process_index
model = DDP(model, device_ids=device_ids, output_device=output_device)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for batch in train_dataloader:
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
batch = batch.to(device)
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
def train_integration():
AcceleratorState()._reset_state(True)
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()])
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
model, optimizer = accelerator.prepare(model, optimizer)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for batch in train_dataloader:
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
if __name__ == "__main__":
baseline_not_trained, baseline_trained = train_baseline()
accelerator_not_trained, accelerator_trained = train_integration()
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
)
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
)
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
)
assert baseline_trained["f1"] == accelerator_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
)
torch.distributed.destroy_process_group()

View File

@ -1,213 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script tests to ensure that `accelerate` performs at the same level as raw `torchao`.
This particular script verifies this for DeepSpeed training.
"""
from functools import partial
from unittest.mock import patch
import deepspeed
import evaluate
import torch
from fp8_utils import evaluate_model, get_training_utilities
from torchao.float8 import convert_to_float8_training
from transformers.integrations import HfDeepSpeedConfig
from accelerate import Accelerator, DeepSpeedPlugin
from accelerate.state import AcceleratorState
from accelerate.utils import AORecipeKwargs, set_seed
MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")
def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None):
if isinstance(module, torch.nn.Linear):
if module.in_features % 16 != 0 or module.out_features % 16 != 0:
return False
# For stability reasons, we skip the first and last linear layers
# Otherwise can lead to the model not training or converging properly
if fqn in (first_layer_name, last_layer_name):
return False
return True
def train_baseline(zero_stage: int = 1):
set_seed(42)
# This forces transformers to think Zero-3 Init should be used
with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock:
mock.return_value = zero_stage == 3
config = HfDeepSpeedConfig(
{
"train_micro_batch_size_per_gpu": 16,
"gradient_accumulation_steps": 1,
"zero_optimization": {"stage": zero_stage},
}
)
plugin = DeepSpeedPlugin(hf_ds_config=config)
accelerator = Accelerator(deepspeed_plugin=plugin)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
first_linear = None
last_linear = None
for name, module in model.named_modules():
if isinstance(module, torch.nn.Linear):
if first_linear is None:
first_linear = name
last_linear = name
func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear)
convert_to_float8_training(model, module_filter_fn=func)
import numpy as np
config = {
"train_batch_size": 32,
"train_micro_batch_size_per_gpu": 16,
"gradient_accumulation_steps": 1,
"zero_optimization": {
"stage": zero_stage,
"offload_optimizer": {"device": "none", "nvme_path": None},
"offload_param": {"device": "none", "nvme_path": None},
"stage3_gather_16bit_weights_on_model_save": False,
},
"gradient_clipping": 1.0,
"steps_per_print": np.inf,
"bf16": {"enabled": True},
"fp16": {"enabled": False},
"zero_allow_untested_optimizer": True,
}
(
model,
optimizer,
_,
lr_scheduler,
) = deepspeed.initialize(
model=model,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
config_params=config,
)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
model_outputs = []
data = []
for batch in train_dataloader:
outputs = model(**batch)
data.append(batch.to("cpu"))
model_outputs.append(outputs.logits.to("cpu"))
loss = outputs.loss
model.backward(loss)
model.step()
for _ in range(accelerator.num_processes):
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.destroy()
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
del config
return base_model_results, trained_model_results, model_outputs, data
def train_integration(zero_stage: int = 1):
set_seed(42)
AcceleratorState()._reset_state(True)
config = HfDeepSpeedConfig(
{
"train_micro_batch_size_per_gpu": 16,
"gradient_accumulation_steps": 1,
"zero_optimization": {"stage": zero_stage},
}
)
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=config,
)
# This forces transformers to think Zero-3 Init should be used
with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock:
mock.return_value = zero_stage == 3
accelerator = Accelerator(
mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()], deepspeed_plugin=deepspeed_plugin
)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
model, optimizer, lr_scheduler, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, lr_scheduler, train_dataloader, eval_dataloader
)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
model_outputs = []
data = []
for batch in train_dataloader:
outputs = model(**batch)
data.append(batch.to("cpu"))
model_outputs.append(outputs.logits.to("cpu"))
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.destroy()
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
del config
return base_model_results, trained_model_results, model_outputs, data
if __name__ == "__main__":
for zero_stage in [1, 2, 3]:
baseline_not_trained, baseline_trained, baseline_outputs, baseline_data = train_baseline(zero_stage)
accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration(
zero_stage
)
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
)
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
)
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
)
assert baseline_trained["f1"] == accelerator_trained["f1"], (
f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
)
AcceleratorState()._reset_state(True)
torch.distributed.destroy_process_group()

View File

@ -1,116 +0,0 @@
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def get_dataloaders(model_name: str, batch_size: int = 16):
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
return tokenizer.pad(
examples,
padding="longest",
pad_to_multiple_of=16, # Specific for FP8
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"],
shuffle=False,
collate_fn=collate_fn,
batch_size=16,
drop_last=True,
)
return train_dataloader, eval_dataloader
def get_training_utilities(model_name: str, batch_size: int = 16, accelerator=None, prepare=True):
"""
Returns a tuple of:
- Model
- Optimizer
- Train dataloader (prepared)
- Eval dataloader (prepared)
- LR Scheduler
Suitable for training on the MRPC dataset
"""
from torch.optim import AdamW
from transformers import AutoModelForSequenceClassification, get_linear_schedule_with_warmup
from accelerate import Accelerator
if accelerator is None:
accelerator = Accelerator()
model = AutoModelForSequenceClassification.from_pretrained(model_name)
train_dataloader, eval_dataloader = get_dataloaders(model_name, batch_size)
optimizer = AdamW(model.parameters(), lr=0.0001)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=len(train_dataloader) * 2,
)
train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader)
return model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
def get_named_parameters(model):
"""
Same as `Accelerator.get_named_parameters`. Returns a dict of the model's named parameters
(with any parallel wrapper stripped).
"""
from accelerate.utils import extract_model_from_parallel
model = extract_model_from_parallel(model)
return {n: p for n, p in model.named_parameters()}
def evaluate_model(model, dataloader, metric, accelerator=None):
"Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
model.eval()
for step, batch in enumerate(dataloader):
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
references = batch["labels"]
if accelerator is not None and accelerator.num_processes > 1:
predictions, references = accelerator.gather_for_metrics((predictions, references))
metric.add_batch(predictions=predictions, references=references)
return metric.compute()

View File

@ -1,173 +0,0 @@
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script tests to ensure that `accelerate` performs at the same level as raw `torchao`.
This particular script verifies this for FSDP training.
"""
from functools import partial
import evaluate
import torch
from fp8_utils import get_training_utilities
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import MixedPrecision
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from torchao.float8 import convert_to_float8_training
from transformers.models.bert import BertLayer
from accelerate import Accelerator
from accelerate import FullyShardedDataParallelPlugin as FSDPPlugin
from accelerate.state import AcceleratorState
from accelerate.utils import AORecipeKwargs, set_seed
MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")
FSDP_WRAP_POLICY = partial(transformer_auto_wrap_policy, transformer_layer_cls={BertLayer})
def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None):
if isinstance(module, torch.nn.Linear):
if module.in_features % 16 != 0 or module.out_features % 16 != 0:
return False
# For stability reasons, we skip the first and last linear layers
# Otherwise can lead to the model not training or converging properly
if fqn in (first_layer_name, last_layer_name):
return False
return True
def evaluate_model(model, dataloader, metric, accelerator=None):
"Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
model.eval()
for step, batch in enumerate(dataloader):
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
references = batch["labels"]
if accelerator is not None and accelerator.num_processes > 1:
predictions, references = accelerator.gather_for_metrics((predictions, references))
metric.add_batch(predictions=predictions, references=references)
return metric.compute()
def train_baseline():
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
first_linear = None
last_linear = None
for name, module in model.named_modules():
if isinstance(module, torch.nn.Linear):
if first_linear is None:
first_linear = name
last_linear = name
func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear)
accelerator = Accelerator()
device = accelerator.device
model.to(device)
convert_to_float8_training(model, module_filter_fn=func)
# Convert the model to FSDP
model = FSDP(
model,
use_orig_params=True,
mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
auto_wrap_policy=FSDP_WRAP_POLICY,
)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for batch in train_dataloader:
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
batch = batch.to(device)
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
def train_integration():
AcceleratorState()._reset_state(True)
fsdp_plugin = FSDPPlugin(
auto_wrap_policy=FSDP_WRAP_POLICY,
use_orig_params=True,
mixed_precision_policy=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
)
accelerator = Accelerator(mixed_precision="fp8", fsdp_plugin=fsdp_plugin, kwargs_handlers=[AORecipeKwargs()])
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
model, optimizer = accelerator.prepare(model, optimizer)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for batch in train_dataloader:
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
if __name__ == "__main__":
baseline_not_trained, baseline_trained = train_baseline()
accelerator_not_trained, accelerator_trained = train_integration()
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
)
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
)
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
)
assert baseline_trained["f1"] == accelerator_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
)
torch.distributed.destroy_process_group()

View File

@ -1,145 +0,0 @@
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script tests to ensure that `accelerate` performs at the same level as raw `torchao`.
This particular script verifies this for single GPU training.
"""
from functools import partial
import evaluate
import torch
from fp8_utils import get_training_utilities
from torchao.float8 import convert_to_float8_training
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import AORecipeKwargs, set_seed
MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")
def evaluate_model(model, dataloader, metric, accelerator=None):
"Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
model.eval()
for step, batch in enumerate(dataloader):
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
references = batch["labels"]
if accelerator is not None and accelerator.num_processes > 1:
predictions, references = accelerator.gather_for_metrics((predictions, references))
metric.add_batch(predictions=predictions, references=references)
return metric.compute()
def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None):
if isinstance(module, torch.nn.Linear):
if module.in_features % 16 != 0 or module.out_features % 16 != 0:
return False
# For stability reasons, we skip the first and last linear layers
# Otherwise can lead to the model not training or converging properly
if fqn in (first_layer_name, last_layer_name):
return False
return True
def train_baseline():
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
first_linear = None
last_linear = None
for name, module in model.named_modules():
if isinstance(module, torch.nn.Linear):
if first_linear is None:
first_linear = name
last_linear = name
func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear)
model.to("cuda")
convert_to_float8_training(model, module_filter_fn=func)
base_model_results = evaluate_model(model, eval_dataloader, METRIC)
model.train()
for batch in train_dataloader:
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
def train_integration():
set_seed(42)
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()])
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
model = accelerator.prepare(model)
base_model_results = evaluate_model(model, eval_dataloader, METRIC)
model.train()
for batch in train_dataloader:
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
if __name__ == "__main__":
baseline_not_trained, baseline_trained = train_baseline()
AcceleratorState._reset_state(True)
accelerator_not_trained, accelerator_trained = train_integration()
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
)
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
)
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
)
assert baseline_trained["f1"] == accelerator_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
)

View File

@ -1,15 +0,0 @@
ARG BASE_YEAR=25
ARG BASE_MONTH=03
FROM nvcr.io/nvidia/pytorch:${BASE_YEAR}.${BASE_MONTH}-py3
RUN pip install transformers evaluate datasets
RUN git clone https://github.com/huggingface/accelerate.git
RUN cd accelerate && \
pip install -e . && \
cd benchmarks/fp8
RUN /bin/bash

View File

@ -1,32 +0,0 @@
# FP8 Benchmarks
Comparing and running [TransformerEngine](https://github.com/NVIDIA/TransformerEngine) FP8 with accelerate
## Overview
This repo provides scripts that compare native TransformerEngine model training against `accelerate`'s own integration. Each training setup has its own script, covering the following:
* Single GPU training (`non_distributed.py`)
* Multi-GPU training via DistributedDataParallelism (`ddp.py`)
* Fully Sharded Data Parallelism (`fsdp.py`)
* DeepSpeed ZeRO 1-3 (`deepspeed.py`)
To run them, it is recommended to use a Docker image (see the attached `Dockerfile`) rather than installing `TransformerEngine` manually.
## Running:
Official Docker images are available at `huggingface/accelerate:gpu-fp8-transformerengine-nightly`.
All scripts can be run with the core `accelerate launch` command; no `accelerate config` is needed.
For single GPU, run it via `python`:
```bash
python non_distributed.py
```
For the rest, run it via `accelerate launch`:
```bash
accelerate launch ddp.py # or fsdp.py, deepspeed.py
```
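For reference, the `accelerate` side of this comparison is enabled through the TransformerEngine FP8 recipe handler. Below is a minimal sketch of that pattern, not one of the benchmark scripts: the toy model, data, and learning rate are illustrative stand-ins for the BERT/MRPC setup (layer sizes kept divisible by 16 for FP8), while the recipe values mirror the ones the benchmark scripts pass to TransformerEngine directly.
```python
import torch

from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs, set_seed

set_seed(42)

# Toy stand-ins for the BERT/MRPC setup used by the real scripts.
model = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.ReLU(), torch.nn.Linear(64, 16))
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
dataset = torch.utils.data.TensorDataset(torch.randn(256, 64), torch.randint(0, 16, (256,)))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=16, drop_last=True)

# Same recipe values the benchmark scripts hand to TransformerEngine themselves.
kwargs_handlers = [FP8RecipeKwargs(backend="TE", fp8_format="HYBRID", amax_history_len=32, amax_compute_algo="max")]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

loss_fn = torch.nn.CrossEntropyLoss()
model.train()
for inputs, labels in dataloader:
    loss = loss_fn(model(inputs), labels)
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()
```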

View File

@ -1,144 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script tests to ensure that `accelerate` performs at the same level as raw `TransformerEngine`.
This particular script verifies this for DDP training.
"""
import evaluate
import torch
import transformer_engine.common.recipe as te_recipe
import transformer_engine.pytorch as te
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
from torch.nn.parallel import DistributedDataParallel as DDP
from transformer_engine.common.recipe import DelayedScaling
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, set_seed
from accelerate.utils.transformer_engine import convert_model
MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")
def train_baseline():
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
accelerator = Accelerator()
device = accelerator.device
model.to(device)
# Convert the model to TE
old_named_params = get_named_parameters(model)
with torch.no_grad():
convert_model(model)
FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)
new_named_params = get_named_parameters(model)
# Convert the model to DDP
device_ids, output_device = [accelerator.local_process_index], accelerator.local_process_index
model = DDP(model, device_ids=device_ids, output_device=output_device)
mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
for param_group in optimizer.param_groups:
param_group["params"] = [mapping[p] for p in param_group["params"]]
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for _ in range(2):
for batch in train_dataloader:
with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
batch = batch.to(device)
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
def train_integration():
FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
AcceleratorState()._reset_state(True)
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
model, optimizer = accelerator.prepare(model, optimizer)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for _ in range(2):
for batch in train_dataloader:
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
if __name__ == "__main__":
baseline_not_trained, baseline_trained = train_baseline()
accelerator_not_trained, accelerator_trained = train_integration()
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
)
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
)
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
)
assert baseline_trained["f1"] == accelerator_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
)
torch.distributed.destroy_process_group()

View File

@ -1,191 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script tests to ensure that `accelerate` performs at the same level as raw `TransformerEngine`.
This particular script verifies this for DeepSpeed training.
"""
from unittest.mock import patch
import deepspeed
import evaluate
import torch
import transformer_engine.common.recipe as te_recipe
import transformer_engine.pytorch as te
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
from transformer_engine.common.recipe import DelayedScaling
from accelerate import Accelerator, DeepSpeedPlugin
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, set_seed
from accelerate.utils.transformer_engine import convert_model
MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")
def train_baseline(zero_stage: int = 1):
# This forces transformers to think Zero-3 Init should be used
with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock:
mock.return_value = zero_stage == 3
set_seed(42)
accelerator = Accelerator()
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
# Convert the model to TE
old_named_params = get_named_parameters(model)
with torch.no_grad():
convert_model(model)
new_named_params = get_named_parameters(model)
mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
for param_group in optimizer.param_groups:
param_group["params"] = [mapping[p] for p in param_group["params"]]
FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)
import numpy as np
config = {
"train_batch_size": 16,
"train_micro_batch_size_per_gpu": 16,
"gradient_accumulation_steps": 1,
"zero_optimization": {
"stage": zero_stage,
"offload_optimizer": {"device": "none", "nvme_path": None},
"offload_param": {"device": "none", "nvme_path": None},
"stage3_gather_16bit_weights_on_model_save": False,
},
"gradient_clipping": 1.0,
"steps_per_print": np.inf,
"bf16": {"enabled": True},
"fp16": {"enabled": False},
"zero_allow_untested_optimizer": True,
}
(
model,
optimizer,
_,
_,
) = deepspeed.initialize(
model=model,
optimizer=optimizer,
config_params=config,
)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
model_outputs = []
data = []
for _ in range(2):
for batch in train_dataloader:
with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
outputs = model(**batch)
data.append(batch.to("cpu"))
model_outputs.append(outputs.logits.to("cpu"))
loss = outputs.loss
model.backward(loss)
model.step()
for _ in range(accelerator.num_processes):
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.destroy()
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results, model_outputs, data
def train_integration(zero_stage: int = 1):
set_seed(42)
FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
AcceleratorState()._reset_state(True)
deepspeed_plugin = DeepSpeedPlugin(
zero_stage=zero_stage,
zero3_init_flag=zero_stage == 3,
)
accelerator = Accelerator(
mixed_precision="fp8", kwargs_handlers=kwargs_handlers, deepspeed_plugin=deepspeed_plugin
)
accelerator.state.deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = 16
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
model_outputs = []
data = []
for _ in range(2):
for batch in train_dataloader:
outputs = model(**batch)
data.append(batch.to("cpu"))
model_outputs.append(outputs.logits.to("cpu"))
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.destroy()
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results, model_outputs, data
if __name__ == "__main__":
for zero_stage in [1, 2, 3]:
baseline_not_trained, baseline_trained, baseline_outputs, baseline_data = train_baseline(zero_stage)
accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration(
zero_stage
)
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
)
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
)
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
)
assert baseline_trained["f1"] == accelerator_trained["f1"], (
f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
)
torch.distributed.destroy_process_group()

View File

@ -1,116 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def get_dataloaders(model_name: str, batch_size: int = 16):
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
return tokenizer.pad(
examples,
padding="longest",
pad_to_multiple_of=16, # Specific for FP8
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"],
shuffle=False,
collate_fn=collate_fn,
batch_size=16,
drop_last=True,
)
return train_dataloader, eval_dataloader
def get_training_utilities(model_name: str, batch_size: int = 16, accelerator=None):
"""
Returns a tuple of:
- Model
- Optimizer
- Train dataloader (prepared)
- Eval dataloader (prepared)
- LR Scheduler
Suitable for training on the MRPC dataset
"""
from torch.optim import AdamW
from transformers import AutoModelForSequenceClassification, get_linear_schedule_with_warmup
from accelerate import Accelerator
if accelerator is None:
accelerator = Accelerator()
model = AutoModelForSequenceClassification.from_pretrained(model_name)
train_dataloader, eval_dataloader = get_dataloaders(model_name, batch_size)
optimizer = AdamW(model.parameters(), lr=0.0001)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=len(train_dataloader) * 2,
)
train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader)
return model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
def get_named_parameters(model):
"""
Same as `Accelerator.get_named_parameters`. Returns a dict of the model's named parameters
(with any parallel wrapper stripped).
"""
from accelerate.utils import extract_model_from_parallel
model = extract_model_from_parallel(model)
return {n: p for n, p in model.named_parameters()}
def evaluate_model(model, dataloader, metric, accelerator=None):
"Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
model.eval()
for step, batch in enumerate(dataloader):
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
references = batch["labels"]
if accelerator is not None and accelerator.num_processes > 1:
predictions, references = accelerator.gather_for_metrics((predictions, references))
metric.add_batch(predictions=predictions, references=references)
return metric.compute()

View File

@ -1,161 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script tests to ensure that `accelerate` performs at the same level as raw `TransformerEngine`.
This particular script verifies this for FSDP training.
"""
from functools import partial
import evaluate
import torch
import transformer_engine.common.recipe as te_recipe
import transformer_engine.pytorch as te
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import MixedPrecision
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from transformer_engine.common.recipe import DelayedScaling
from transformers.models.bert import BertLayer
from accelerate import Accelerator
from accelerate import FullyShardedDataParallelPlugin as FSDPPlugin
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, set_seed
from accelerate.utils.transformer_engine import convert_model
MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")
FSDP_WRAP_POLICY = partial(transformer_auto_wrap_policy, transformer_layer_cls={BertLayer})
def train_baseline():
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
accelerator = Accelerator()
device = accelerator.device
model.to(device)
# Convert the model to TE
old_named_params = get_named_parameters(model)
with torch.no_grad():
convert_model(model)
FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)
new_named_params = get_named_parameters(model)
# Convert the model to FSDP
model = FSDP(
model,
use_orig_params=True,
mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
auto_wrap_policy=FSDP_WRAP_POLICY,
)
mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
for param_group in optimizer.param_groups:
param_group["params"] = [mapping[p] for p in param_group["params"]]
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for _ in range(2):
for batch in train_dataloader:
with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
batch = batch.to(device)
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
def train_integration():
FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
AcceleratorState()._reset_state(True)
fsdp_plugin = FSDPPlugin(
auto_wrap_policy=FSDP_WRAP_POLICY,
use_orig_params=True,
mixed_precision_policy=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
)
accelerator = Accelerator(mixed_precision="fp8", fsdp_plugin=fsdp_plugin, kwargs_handlers=kwargs_handlers)
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
model, optimizer = accelerator.prepare(model, optimizer)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for _ in range(2):
for batch in train_dataloader:
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
if __name__ == "__main__":
baseline_not_trained, baseline_trained = train_baseline()
accelerator_not_trained, accelerator_trained = train_integration()
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
)
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
)
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
)
assert baseline_trained["f1"] == accelerator_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
)
torch.distributed.destroy_process_group()

View File

@ -1,132 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script tests to ensure that `accelerate` performs at the same level as raw `TransformerEngine`.
This particular script verifies this for single GPU training.
"""
import evaluate
import torch
import transformer_engine.common.recipe as te_recipe
import transformer_engine.pytorch as te
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
from transformer_engine.common.recipe import DelayedScaling
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, set_seed
from accelerate.utils.transformer_engine import convert_model
MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")
def train_baseline():
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
# Convert the model to TE
old_named_params = get_named_parameters(model)
with torch.no_grad():
convert_model(model)
new_named_params = get_named_parameters(model)
mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
for param_group in optimizer.param_groups:
param_group["params"] = [mapping[p] for p in param_group["params"]]
FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)
model.to("cuda")
base_model_results = evaluate_model(model, eval_dataloader, METRIC)
model.train()
for batch in train_dataloader:
with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
batch = batch.to("cuda")
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
def train_integration():
FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
AcceleratorState()._reset_state(True)
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
base_model_results = evaluate_model(model, eval_dataloader, METRIC)
model.train()
for batch in train_dataloader:
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
)
assert trained_model_results["f1"] > base_model_results["f1"], (
f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
)
return base_model_results, trained_model_results
if __name__ == "__main__":
baseline_not_trained, baseline_trained = train_baseline()
accelerator_not_trained, accelerator_trained = train_integration()
assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
)
assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
)
assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
)
assert baseline_trained["f1"] == accelerator_trained["f1"], (
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
)

View File

@ -1,74 +0,0 @@
# FSDP2 Benchmarks
This benchmark showcases `FSDP2` in 🤗 `accelerate` and compares it to a raw `torch` baseline.
## Overview
This benchmark consists of two parts:
- `main.py` is the main script that runs the benchmark
- `visualize.py` is the script that visualizes the results (if `--output_dir` was specified for the previous command)
## Motivation
We want to showcase that 🤗 `accelerate`'s integration of `FSDP2` is on par with raw PyTorch, and to highlight a "broken" behavior in PyTorch: creating an optimizer before applying `FSDP2` **doesn't result in a working training loop**. (more on this later)
This script showcases **matching memory usage and convergence between `accelerate` and the `torch` baseline.**
To deal with this breaking change (and maintain backward compatibility with FSDP1 in terms of the API), `accelerate` had to come up with a workaround, since `accelerate` assumes that the user will nearly always create the model, optimizer, scheduler, etc. beforehand and bring them along. Without a fix, creating the optimizer beforehand leads to a stark increase in memory as well as a model that doesn't train at all.
To work around this, we replace the parameters inside the optimizer with the newly created FSDP2 sharded ones. More about this can be found in this [blog post (TBD)](TODO)
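The core of this fix can be sketched without FSDP at all. In the toy example below, a plain `nn.Linear` stands in for the real model and an in-place re-allocation stands in for what `fully_shard` does to the parameters; the optimizer is created against the original parameters and then re-pointed at the newly allocated ones through a `data_ptr` mapping, which is the same bookkeeping the utilities in this benchmark use:
```python
import torch
from torch import nn

model = nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

# 1. Remember the original parameter objects (the real utilities keep only their data_ptrs).
old_named = dict(model.named_parameters())

# 2. Stand-in for `fully_shard`: the model's parameters are replaced by new allocations.
model.weight = nn.Parameter(model.weight.detach().clone())
model.bias = nn.Parameter(model.bias.detach().clone())

# 3. The optimizer still references the old parameters; re-point it at the new ones.
new_named = dict(model.named_parameters())
mapping = {old.data_ptr(): new_named[name] for name, old in old_named.items()}
for group in optimizer.param_groups:
    group["params"] = [mapping[p.data_ptr()] for p in group["params"]]

# A training step now updates the parameters the model actually uses.
model(torch.randn(2, 4)).sum().backward()
optimizer.step()
```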
> [!WARNING]
> This script is intended to fit on 2x 24GB GPUs, though with so few GPUs it's not possible to see the memory difference (discrepancies in grad allocation result in lower memory usage in the non-fixed case), only the difference in convergence. Results from 8x H100 GPUs, where the difference is visible, are attached below.
> TLDR: more GPUs = bigger memory difference between fixed and non-fixed cases.
## Results
Here are the results from running the benchmark on 8x H100 GPUs:
<p align="center">
<img src="imgs/allocated_memory.png" width="80%" alt="Allocated Memory Usage">
</p>
<p align="center">
<img src="imgs/reserved_memory.png" width="80%" alt="Reserved Memory Usage">
</p>
As you can see, the memory usage of `accelerate` and `torch_optimizer_after_fsdp` (the **intended** way) is very similar, while `torch_optimizer_before_fsdp_not_fixed` uses significantly more memory. Our fix in `torch_optimizer_before_fsdp_fixed` brings the memory usage back in line with the **intended** approach.
> [!WARNING]
> Timing discrepancies are due to the benchmarks being run in a single script.
## Running
To run the benchmark, you can either use `accelerate launch` or `torchrun`:
```bash
accelerate launch main.py
```
```bash
# For two GPUs
torchrun --nproc_per_node 2 main.py
```
The script supports multiple configurable options; you can learn about them by running:
```bash
python3 main.py --help
```
This script will run 4 different benchmarks:
- `torch_optimizer_after_fsdp`: `torch` baseline where optimizer is created after applying `FSDP2`, this is the **intended** way to do it
- `torch_optimizer_before_fsdp_not_fixed`: `torch` baseline where optimizer is created before applying `FSDP2` without fixing the optimizer parameters
- `torch_optimizer_before_fsdp_fixed`: `torch` baseline where optimizer is created before applying `FSDP2` with our fix to the optimizer
- `accelerate`: `accelerate`'s own integration of `FSDP2` where optimizer is created before applying `FSDP2`, but we apply our fix to the optimizer
Memory results are saved in the folder specified by the `--output_dir` argument.
Optionally, you can specify `--save_memory_snapshot` to also save the torch memory snapshot, which can then be viewed with [`torch memory viz`](https://pytorch.org/memory_viz).
## Visualizing results
To visualize the results, you can run:
```bash
python3 visualize.py --dir <path_to_output_dir>
```
This will create two plots showcasing the allocated and reserved memory usage across all the benchmarks discussed above.

Binary file not shown (Before: 124 KiB)

Binary file not shown (Before: 56 KiB)

View File

@ -1,122 +0,0 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Callable
import torch
from accelerate import Accelerator
from utils import parse_args, prepare_accelerate, prepare_torch
MODEL_NAME = "Qwen/Qwen2.5-1.5B-Instruct"
LEARNING_RATE = 3e-5
CONFIG = {
"model_name": MODEL_NAME,
"learning_rate": LEARNING_RATE,
}
def train(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
train_dataloader: torch.utils.data.DataLoader,
accelerator: Accelerator,
) -> torch.Tensor:
losses = []
for batch in train_dataloader:
optimizer.zero_grad()
outputs = model(**batch, use_cache=False)
loss = outputs.loss
losses.append(loss.item())
accelerator.backward(loss)
optimizer.step()
return torch.tensor(losses)
def evaluate(args, config: dict, init_fn: Callable, run_name: str) -> torch.Tensor:
model, optimizer, dataloader, accelerator, memory_tracker = init_fn(args, config)
loss = train(model, optimizer, dataloader, accelerator)
memory_tracker.stop()
msg = f"""Results for {run_name} (rank 0):
Loss: {loss[-1].item()}
Peak Allocated Memory: {float(memory_tracker.peak_allocated_memory):.2f} MB
Peak Reserved Memory: {float(memory_tracker.peak_reserved_memory):.2f} MB
{"-" * 34}"""
accelerator.print(msg)
return loss
def main():
args = parse_args()
evaluations = [
functools.partial(
evaluate,
init_fn=functools.partial(prepare_torch, post_shard_optimizer=False, apply_optimizer_fix=True),
run_name="Optimizer Before FSDP (w/ fix)",
),
functools.partial(
evaluate,
init_fn=functools.partial(prepare_torch, post_shard_optimizer=False, apply_optimizer_fix=False),
run_name="Optimizer Before FSDP (w/o fix)",
),
functools.partial(
evaluate,
init_fn=functools.partial(prepare_torch, post_shard_optimizer=True),
run_name="Optimizer After FSDP",
),
functools.partial(evaluate, init_fn=prepare_accelerate, run_name="Accelerate"),
]
labels = [
"Optimizer Before FSDP (w/ fix)",
"Optimizer Before FSDP (w/o fix)",
"Optimizer After FSDP",
"Accelerate",
]
results = {}
torch.use_deterministic_algorithms(True)
for evaluation, label in zip(evaluations, labels):
results[label] = evaluation(args, CONFIG)
torch.testing.assert_close(
results["Optimizer After FSDP"],
results["Optimizer Before FSDP (w/ fix)"],
msg="Optimizer After FSDP and Optimizer Before FSDP (w/ fix) should be the same",
)
torch.testing.assert_close(
results["Optimizer After FSDP"],
results["Accelerate"],
msg="Optimizer After FSDP and Accelerate should be the same",
)
torch.testing.assert_close(
results["Accelerate"],
results["Optimizer Before FSDP (w/ fix)"],
msg="Accelerate and Optimizer Before FSDP (w/ fix) should be the same",
)
torch.distributed.destroy_process_group()
if __name__ == "__main__":
main()

View File

@ -1,130 +0,0 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import json
import os
import threading
import time
import psutil
import torch
from accelerate import PartialState
class MemoryTracker:
def __init__(
self,
device: torch.device,
output_directory: str,
run_name: str,
save_memory_snapshot: bool,
log_interval: float = 0.01,
):
"""Class for tracking gpu and cpu memory usage of the process.
Args:
device (`torch.device`):
PyTorch device to monitor.
output_directory (`str`):
Directory to save the memory usage data to, will be created if it doesn't exist.
run_name (`str`):
Name of the run, will be used to name the output files.
save_memory_snapshot (`bool`):
Whether to also save `torch.cuda.memory._dump_snapshot` to the output directory.
log_interval (`float`, *optional*):
Interval in seconds between memory measurements. Defaults to 0.01.
"""
self.log_interval = log_interval
self.save_memory_snapshot = save_memory_snapshot
self.output_directory = output_directory
self.run_name = run_name
self.timestamps = []
self.allocated_memory = []
self.reserved_memory = []
self.virtual_memory = []
self.start_time = None
self.running = False
self._thread = None
self._state = PartialState()
self._process = psutil.Process()
self._device = device
self.torch_accelerator_module = getattr(torch, device.type, torch.cuda)
def _monitor(self):
self.start_time = time.time()
while self.running:
allocated = self.torch_accelerator_module.memory_allocated(self._device) / (1024 * 1024)
reserved = self.torch_accelerator_module.memory_reserved(self._device) / (1024 * 1024)
virtual_memory = self._process.memory_info().rss / (1024 * 1024)
self.allocated_memory.append(allocated)
self.reserved_memory.append(reserved)
self.virtual_memory.append(virtual_memory)
self.timestamps.append(time.time() - self.start_time)
time.sleep(self.log_interval)
def start(self):
gc.collect()
self.torch_accelerator_module.empty_cache()
if self.output_directory:
os.makedirs(self.output_directory, exist_ok=True)
if self.save_memory_snapshot:
self.torch_accelerator_module.memory._record_memory_history()
self.running = True
self._thread = threading.Thread(target=self._monitor)
self._thread.daemon = True
self._thread.start()
def stop(self):
self.running = False
if self._thread:
self._thread.join()
if self.save_memory_snapshot and self._state.is_main_process and self.output_directory:
output_file = os.path.join(self.output_directory, f"{self.run_name}_memory_snapshot.pkl")
self.torch_accelerator_module.memory._dump_snapshot(output_file)
if self._state.is_main_process and self.output_directory:
path = os.path.join(self.output_directory, f"{self.run_name}_memory_usage.json")
with open(path, "w") as f:
json.dump(
{
"timestamps": self.timestamps,
"allocated_memory": self.allocated_memory,
"reserved_memory": self.reserved_memory,
"virtual_memory": self.virtual_memory,
},
f,
)
if self.save_memory_snapshot:
self.torch_accelerator_module.memory._record_memory_history(False)
self.torch_accelerator_module.empty_cache()
@property
def peak_allocated_memory(self):
return max(self.allocated_memory)
@property
def peak_reserved_memory(self):
return max(self.reserved_memory)
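# A minimal usage sketch of this tracker (device, directory, and run name below are illustrative;
# a CUDA device is assumed, and values are reported in MB):
#
#     from measure_utils import MemoryTracker
#     tracker = MemoryTracker(torch.device("cuda:0"), "results", "demo", save_memory_snapshot=False)
#     tracker.start()
#     ...  # run the code being measured
#     tracker.stop()
#     print(tracker.peak_allocated_memory, tracker.peak_reserved_memory)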

View File

@ -1,290 +0,0 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from types import MethodType
from typing import Union
import torch
from datasets import load_dataset
from measure_utils import MemoryTracker
from torch.distributed.fsdp import MixedPrecisionPolicy, fully_shard
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, DataCollatorForLanguageModeling
from transformers.models.qwen2.modeling_qwen2 import Qwen2DecoderLayer
from accelerate import Accelerator, FullyShardedDataParallelPlugin
from accelerate.state import AcceleratorState, is_initialized
from accelerate.utils import convert_outputs_to_fp32, set_seed
SEED = 421
def get_named_parameters(model: torch.nn.Module, drop_refs: bool = False) -> dict[str, Union[torch.Tensor, int]]:
"""
This function returns a dictionary mapping parameter names to their data pointers (if `drop_refs` is `True`)
or to the original parameters (if `drop_refs` is `False`).
It is used to get the original parameter names before `fully_shard` is applied.
When only the data pointers are returned, the references to the original parameters are dropped,
and `fully_shard` will then trigger a new allocation for the sharded ones.
Args:
model (`torch.nn.Module`): Model instance to get the named parameters from
drop_refs (`bool`, *optional*, defaults to `False`): Whether to drop the references to the original parameters
Returns:
`dict[str, Union[torch.Tensor, int]]`: Dictionary mapping the parameter names to their data pointers or the original parameters if `drop_refs` is `False`
"""
named_parameters = {}
for n, p in model.named_parameters():
# We only preserve the data pointers to have the unique 1:1 mapping between the original and the sharded parameters
named_parameters[n] = p.data_ptr() if drop_refs else p
return named_parameters
def replace_optimizer_params(optimizer: torch.optim.Optimizer):
"""
This function is called before using `fully_shard` on the model. It replaces the parameters of the optimizer with
empty tensors, so `fully_shard` can trigger a new allocation for the sharded ones. After this, we swap the parameters
`data_ptr` to the original one, so we can reuse that later to map the sharded parameters to the original ones.
This function modifies the optimizer in-place.
Args:
optimizer (torch.optim.Optimizer): Optimizer instance which contains the original model parameters
"""
for param_group in optimizer.param_groups:
for i, p in enumerate(param_group["params"]):
# We drop a reference to the original param here, so that _move_states_to_device triggers a reallocation
# This is required or else the `fully_shard` -> `_move_states_to_device` uses the original memory address
# for the sharded parameters, and we get a weird/undefined behavior.
param_group["params"][i] = torch.empty_like(p)
# We save the original data_ptr, so we can swap back the parameters later
param_group["params"][i].data_ptr = p.data_ptr()
def swap_back_optimizer_params(
model: torch.nn.Module, optimizer: torch.optim.Optimizer, old_named_parameter_pointers: dict[str, int]
):
"""
This function is the counterpart of `replace_optimizer_params`. It is called after `fully_shard` being applied to
the model. It swaps the parameters of the optimizer to their sharded counterparts.
It is done using the `data_ptr` mapping prepared in `replace_optimizer_params` and `get_named_parameters`.
Args:
model (`torch.nn.Module`): Model instance to get the new named parameters from
optimizer (`torch.optim.Optimizer`): Optimizer instance to swap the parameters of
old_named_parameter_pointers (`dict[str, int]`): Dictionary mapping the original parameter names: data_ptrs to the new ones
"""
# We get the new named parameters after `fully_shard` being applied
# We don't drop the references as we need the sharded parameters now
new_named_parameters = get_named_parameters(model, drop_refs=False)
# We create a mapping from the original data_ptr to the new sharded param corresponding to it
mapping = {p: new_named_parameters[n] for n, p in old_named_parameter_pointers.items()}
for param_group in optimizer.param_groups:
# We swap the parameters of the optimizer to the new sharded ones
param_group["params"] = [mapping[p.data_ptr] for p in param_group["params"]]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_dir",
type=str,
help="Directory to save the benchmarking results.",
)
parser.add_argument(
"--save_memory_snapshot",
action="store_true",
default=False,
help="If True, `torch.cuda.memory._dump_snapshot` will be used to additionaly save the memory trace.",
)
######################
# Training arguments #
######################
parser.add_argument(
"--batch_size",
type=int,
default=2,
help="Batch size for the training loop.",
)
parser.add_argument(
"--block_size",
type=int,
default=128,
help="The maximum sequence length to use with the model.",
)
parser.add_argument(
"--dataset_fraction",
type=float,
default=1.0,
help="Fraction of the dataset to use.",
)
return parser.parse_args()
def prepare_dataloader(tokenizer, args, accelerator: Accelerator) -> DataLoader:
dataset = load_dataset("tiny_shakespeare", split="train", trust_remote_code=True)
def tokenize_function(example):
return tokenizer(
example["text"],
)
dataset = dataset.map(
tokenize_function,
batched=True,
remove_columns=["text"],
)
block_size = min(tokenizer.model_max_length, args.block_size)
def group_texts(examples):
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
total_length = (total_length // block_size) * block_size
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
dataset = dataset.map(group_texts, batched=True)
dataset = dataset.select(range(int(len(dataset) * args.dataset_fraction)))
def collate_fn(examples):
return DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm=False,
)(examples)
dataloader = DataLoader(
dataset,
batch_size=args.batch_size,
collate_fn=collate_fn,
)
dataloader = accelerator.prepare(dataloader)
return dataloader
def get_model(model_name: str):
# We require the model to be loaded in fp32, otherwise the benchmarks don't match, as accelerate upcasts parameters to fp32
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True, torch_dtype=torch.float32)
model = AutoModelForCausalLM.from_config(config)
return model
def get_tokenizer(model_name: str):
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
return tokenizer
def prepare_torch(
args, config: dict, post_shard_optimizer: bool = False, apply_optimizer_fix: bool = False
) -> tuple[torch.nn.Module, torch.optim.Optimizer, torch.utils.data.DataLoader, Accelerator, MemoryTracker]:
mp_policy = MixedPrecisionPolicy(
param_dtype=torch.bfloat16,
reduce_dtype=torch.bfloat16,
output_dtype=torch.bfloat16,
)
accelerator = Accelerator(mixed_precision="bf16")
set_seed(SEED)
is_fixed = "fixed" if apply_optimizer_fix else "not_fixed"
is_post_shard = "optimizer_after_fsdp" if post_shard_optimizer else "optimizer_before_fsdp"
run_name = f"torch_{is_post_shard}" if post_shard_optimizer else f"torch_{is_post_shard}_{is_fixed}"
tokenizer = get_tokenizer(config["model_name"])
train_dataloader = prepare_dataloader(tokenizer, args, accelerator)
memory_tracker = MemoryTracker(accelerator.device, args.output_dir, run_name, args.save_memory_snapshot)
memory_tracker.start()
model = get_model(config["model_name"])
optimizer = None
if not post_shard_optimizer:
optimizer = AdamW(model.parameters(), lr=config["learning_rate"])
if apply_optimizer_fix:
# We drop the references to the original parameters, so that `fully_shard` can trigger a new allocation
# Then we get the `module_name: data_ptr` mapping, so we can swap back the parameters later
old_named_parameters = get_named_parameters(model, drop_refs=True)
# We replace the parameters of the optimizer with empty tensors, so that `fully_shard` can trigger a new allocation
# We also change the `data_ptr` of the parameters to the original ones, so we can swap back the parameters later
replace_optimizer_params(optimizer)
for module in model.modules():
if isinstance(module, Qwen2DecoderLayer):
fully_shard(module, mp_policy=mp_policy)
fully_shard(model, mp_policy=mp_policy)
# We do this to imitate how accelerate forces outputs to be in fp32 via `convert_outputs_to_fp32`
autocast_context = torch.autocast(device_type=accelerator.state.device.type, dtype=torch.bfloat16)
model_forward_func = model.forward.__func__
new_forward = autocast_context(model_forward_func)
model.forward = MethodType(new_forward, model)
model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model)
if post_shard_optimizer:
optimizer = AdamW(model.parameters(), lr=config["learning_rate"])
if not post_shard_optimizer and apply_optimizer_fix:
# We swap back the parameters of the optimizer to the original ones
swap_back_optimizer_params(model, optimizer, old_named_parameters)
return model, optimizer, train_dataloader, accelerator, memory_tracker
def prepare_accelerate(
args, config: dict
) -> tuple[torch.nn.Module, torch.optim.Optimizer, torch.utils.data.DataLoader, Accelerator, MemoryTracker]:
if is_initialized():
AcceleratorState()._reset_state(True)
fsdp_plugin = FullyShardedDataParallelPlugin(
fsdp_version=2,
auto_wrap_policy="transformer_based_wrap",
transformer_cls_names_to_wrap=["Qwen2DecoderLayer"],
)
accelerator = Accelerator(
fsdp_plugin=fsdp_plugin,
mixed_precision="bf16",
)
set_seed(SEED)
tokenizer = get_tokenizer(config["model_name"])
train_dataloader = prepare_dataloader(tokenizer, args, accelerator)
memory_tracker = MemoryTracker(accelerator.device, args.output_dir, "accelerate", args.save_memory_snapshot)
memory_tracker.start()
model = get_model(config["model_name"])
optimizer = AdamW(model.parameters(), lr=config["learning_rate"])
model, optimizer = accelerator.prepare(model, optimizer)
return model, optimizer, train_dataloader, accelerator, memory_tracker

View File

@ -1,114 +0,0 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import matplotlib.pyplot as plt
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--dir", type=str, help="Directory containing the memory usage data")
parser.add_argument(
"--memory_threshold",
type=int,
default=0,
help="Memory threshold to filter data that is below this value (only filters 1st `--filter_partition` of the points which should roughtly correspond to the model loading)",
)
parser.add_argument(
"--filter_partition",
type=float,
default=1 / 3,
help="Partition to drop data from that are below the memory threshold",
)
return parser.parse_args()
def filter_data(data, memory_threshold, filter_partition, key):
timestamps = data["timestamps"]
memory = data[key]
mid_point = int(len(timestamps) * filter_partition)
filtered_times = []
filtered_memory = []
for i, (t, m) in enumerate(zip(timestamps, memory)):
if i < mid_point and m < memory_threshold:
continue
filtered_times.append(t)
filtered_memory.append(m)
return filtered_times, filtered_memory
def compare_memory_usage(data, labels, memory_threshold, filter_partition):
plt.style.use("seaborn-v0_8")
colors = ["#2ecc71", "#e74c3c", "#3498db", "#f1c40f"]
fig1, ax1 = plt.subplots(figsize=(15, 5))
for data_item, label, color in zip(data, labels, colors):
timestamps, allocated = filter_data(data_item, memory_threshold, filter_partition, "allocated_memory")
ax1.plot(timestamps, allocated, label=label, color=color, linewidth=2)
ax1.set_xlabel("Time (s)", fontsize=12)
ax1.set_ylabel("Allocated Memory (GB)", fontsize=12)
ax1.set_title("Allocated Memory Usage Over Time", fontsize=14, pad=15)
ax1.grid(True, linestyle="--", alpha=0.7)
ax1.legend(frameon=True, fancybox=True, shadow=True, fontsize=10)
ax1.spines["top"].set_visible(False)
ax1.spines["right"].set_visible(False)
plt.tight_layout()
fig2, ax2 = plt.subplots(figsize=(15, 5))
for data_item, label, color in zip(data, labels, colors):
timestamps, reserved = filter_data(data_item, memory_threshold, filter_partition, "reserved_memory")
ax2.plot(timestamps, reserved, label=label, color=color, linewidth=2)
ax2.set_xlabel("Time (s)", fontsize=12)
ax2.set_ylabel("Reserved Memory (GB)", fontsize=12)
ax2.set_title("Reserved Memory Usage Over Time", fontsize=14, pad=15)
ax2.grid(True, linestyle="--", alpha=0.7)
ax2.legend(frameon=True, fancybox=True, shadow=True, fontsize=10)
ax2.spines["top"].set_visible(False)
ax2.spines["right"].set_visible(False)
plt.tight_layout()
return fig1, fig2
if __name__ == "__main__":
args = parse_args()
DIR = args.dir
with open(f"{DIR}/torch_optimizer_before_fsdp_not_fixed_memory_usage.json") as f:
optimizer_before_fsdp_not_fixed = json.load(f)
with open(f"{DIR}/torch_optimizer_after_fsdp_memory_usage.json") as f:
optimizer_after_fsdp = json.load(f)
with open(f"{DIR}/torch_optimizer_before_fsdp_fixed_memory_usage.json") as f:
optimizer_before_fsdp_fixed = json.load(f)
with open(f"{DIR}/accelerate_memory_usage.json") as f:
accelerate = json.load(f)
data = [optimizer_before_fsdp_not_fixed, optimizer_before_fsdp_fixed, optimizer_after_fsdp, accelerate]
labels = [
"Optimizer Before FSDP (w/o fix)",
"Optimizer Before FSDP (w/ fix)",
"Optimizer After FSDP",
"Accelerate",
]
fig1, fig2 = compare_memory_usage(data, labels, args.memory_threshold, args.filter_partition)
fig1.savefig(f"{DIR}/allocated_memory.png")
fig2.savefig(f"{DIR}/reserved_memory.png")

View File

@ -1,16 +1,3 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import threading
import time
@ -18,12 +5,6 @@ import time
import psutil
import torch
from accelerate.test_utils.testing import get_backend
torch_device_type, _, _ = get_backend()
torch_accelerator_module = getattr(torch, torch_device_type, torch.cuda)
class PeakCPUMemory:
def __init__(self):
@ -60,16 +41,16 @@ def start_measure():
measures = {"time": time.time()}
gc.collect()
torch_accelerator_module.empty_cache()
torch.cuda.empty_cache()
# CPU mem
measures["cpu"] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch_accelerator_module.device_count()):
measures[str(i)] = torch_accelerator_module.memory_allocated(i)
torch_accelerator_module.reset_peak_memory_stats()
for i in range(torch.cuda.device_count()):
measures[str(i)] = torch.cuda.memory_allocated(i)
torch.cuda.reset_peak_memory_stats()
return measures
@ -79,16 +60,16 @@ def end_measure(start_measures):
measures = {"time": time.time() - start_measures["time"]}
gc.collect()
torch_accelerator_module.empty_cache()
torch.cuda.empty_cache()
# CPU mem
measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch_accelerator_module.device_count()):
measures[str(i)] = (torch_accelerator_module.memory_allocated(i) - start_measures[str(i)]) / 2**20
measures[f"{i}-peak"] = (torch_accelerator_module.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
for i in range(torch.cuda.device_count()):
measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
return measures
@ -96,9 +77,9 @@ def end_measure(start_measures):
def log_measures(measures, description):
print(f"{description}:")
print(f"- Time: {measures['time']:.2f}s")
for i in range(torch_accelerator_module.device_count()):
print(f"- {torch_device_type} {i} allocated: {measures[str(i)]:.2f}MiB")
for i in range(torch.cuda.device_count()):
print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
peak = measures[f"{i}-peak"]
print(f"- {torch_device_type} {i} peak: {peak:.2f}MiB")
print(f"- GPU {i} peak: {peak:.2f}MiB")
print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")

View File

@ -1,111 +0,0 @@
# Regional Compilation Benchmark
This benchmark compares different compilation strategies using PyTorch's `torch.compile` and Accelerate's `compile_regions` utility, which is based on the recipe in [PyTorch documentation](https://pytorch.org/tutorials/recipes/regional_compilation.html).
## Overview
The benchmark evaluates three approaches:
- **Baseline**: No compilation, standard PyTorch eager execution.
- **Full compilation**: Using PyTorch's `torch.compile()` on the entire model.
- **Regional compilation**: Using `accelerate.utils.compile_regions()` which targets specific blocks of the model to optimize compilation time.
Each approach is tested with different batch sizes (1 and 4) and a sequence length of 128 on various LLaMA-based models ranging from 1B to 13B parameters. We purposefully run the forward pass outside of the `torch.no_grad()` context to simulate performance in a training environment, where gradients are needed.
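A condensed sketch of the three strategies (mirroring the benchmark script below; only the model configuration is downloaded, not the weights, and device placement/dtype handling is omitted):
```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

from accelerate.utils import compile_regions

config = AutoConfig.from_pretrained("NousResearch/Llama-3.2-1B")
model = AutoModelForCausalLM.from_config(config).eval()

baseline_model = model                    # eager execution
full_model = torch.compile(model)         # compile the entire model
regional_model = compile_regions(model)   # compile each repeated block separately

input_ids = torch.randint(0, 1000, (1, 128))
_ = full_model(input_ids, use_cache=False)      # first call pays the full compilation cost
_ = regional_model(input_ids, use_cache=False)  # first call pays only the per-block cost
```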
## Usage
To run this benchmark:
```bash
python regional_compilation.py
```
The script will automatically download the model configurations, create models, and benchmark both compilation and inference times across different scenarios.
## Requirements
- Suitable GPU memory for the models being tested.
- PyTorch with CUDA support.
- Transformers library.
- Accelerate library.
## Results
The benchmark results are summarized in the following figures:
- Compilation time is how long it takes to run the first forward pass.
- Speedup factor is the ratio of non-compiled baseline inference time to the fully/regionally compiled inference time.
<p align="center">
<img src="imgs/compilation_time.png" width="80%" alt="Compilation Time">
</p>
<p align="center">
<img src="imgs/speedup_factor.png" width="80%" alt="Speedup Factor">
</p>
Full results are available in the tables below:
```markdown
[-------------------------------------------------- NousResearch/Llama-3.2-1B ---------------------------------------------------]
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
1 threads: -----------------------------------------------------------------------------------------------------------------------
Baseline | 18.3 | 18.4 | |
Full compilation | 6.3 | 10.0 | 10696.4 | 10248.0
Regional compilation | 9.7 | 10.0 | 1952.7 | 2903.9
Times are in milliseconds (ms).
[---------------------------------------------- NousResearch/Hermes-3-Llama-3.2-3B ----------------------------------------------]
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
1 threads: -----------------------------------------------------------------------------------------------------------------------
Baseline | 33.4 | 33.6 | |
Full compilation | 11.2 | 23.9 | 17857.5 | 17736.5
Regional compilation | 17.3 | 23.7 | 2993.2 | 2478.8
Times are in milliseconds (ms).
[---------------------------------------------- NousResearch/Hermes-3-Llama-3.1-8B ----------------------------------------------]
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
1 threads: -----------------------------------------------------------------------------------------------------------------------
Baseline | 40.3 | 59.5 | |
Full compilation | 18.9 | 54.4 | 20437.8 | 20152.3
Regional compilation | 19.7 | 54.0 | 2903.1 | 2438.0
Times are in milliseconds (ms).
[--------------------------------------------- NousResearch/Nous-Hermes-Llama2-13b ----------------------------------------------]
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
1 threads: -----------------------------------------------------------------------------------------------------------------------
Baseline | 45.5 | 100.4 | |
Full compilation | 29.4 | 89.7 | 23099.4 | 22885.9
Regional compilation | 29.4 | 87.5 | 2945.5 | 2526.2
Times are in milliseconds (ms).
```
## Results Summary
### Compilation Time
Regional compilation provides significantly faster compilation times compared to full model compilation:
- **Full compilation**: Takes ~10-23 seconds depending on model size.
- **Regional compilation**: Takes only ~2-3 seconds across all model sizes.
- **Speed improvement**: Regional compilation is **5-9x faster** to compile.
### Inference Time
Regional compilation delivers inference performance close to full compilation:
- For batch size 1:
- For smaller models (1B-3B): Full compilation has a slight edge over regional compilation.
- For larger models (8B-13B): Regional compilation performs similarly to full compilation.
- For batch size 4: Regional compilation performs similarly to full compilation across all models.
## Key Takeaways
1. **Comparable Performance**: Regional compilation delivers performance speedups similar to full compilation, especially for larger models.
2. **Faster Compilation**: Regional compilation significantly reduces the time taken to compile models, making it a more efficient choice for deployment.
3. **Batch Size Impact**: At batch size 4, full compilation and regional compilation perform nearly identically.
4. **Model Size Impact**: Even with a small batch size, full compilation and regional compilation perform similarly for larger models (8B-13B).
5. **Practical Application**: For real-world applications, regional compilation is a practical choice for optimizing training cold start times, especially when working with large models.

Binary file not shown (Before: 242 KiB)

Binary file not shown (Before: 218 KiB)

View File

@ -1,77 +0,0 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.benchmark import Compare, Timer
from transformers import AutoConfig, AutoModelForCausalLM
from accelerate.test_utils.testing import get_backend
from accelerate.utils import compile_regions
torch.set_float32_matmul_precision("high")
COMPILE_ITERS = 2
INFERENCE_ITERS = 100
BASELINE = "Baseline"
COMPILE_TIME = "Compile time"
INFERENCE_TIME = "Inference time"
FULL_COMPILATION = "Full compilation"
REGIONAL_COMPILATION = "Regional compilation"
INFERENCE_STMT = "model(input_ids, use_cache=False)"
COMPILE_STMT = f"torch._dynamo.reset(); torch._inductor.utils.clear_inductor_caches(); {INFERENCE_STMT}"
torch_device_type, _, _ = get_backend()
results = []
for model_id in [
# non-gated llama models
"NousResearch/Llama-3.2-1B",
"NousResearch/Hermes-3-Llama-3.2-3B",
"NousResearch/Hermes-3-Llama-3.1-8B",
"NousResearch/Nous-Hermes-Llama2-13b",
]:
with torch.device(torch_device_type):
config = AutoConfig.from_pretrained(model_id)
model = AutoModelForCausalLM.from_config(config).to(dtype=torch.float16).eval()
full_compilation_model = torch.compile(model)
regional_compilation_model = compile_regions(model)
for model, sub_label, description, stmt, iters in [
(model, BASELINE, INFERENCE_TIME, INFERENCE_STMT, INFERENCE_ITERS),
(full_compilation_model, FULL_COMPILATION, COMPILE_TIME, COMPILE_STMT, COMPILE_ITERS),
(full_compilation_model, FULL_COMPILATION, INFERENCE_TIME, INFERENCE_STMT, INFERENCE_ITERS),
(regional_compilation_model, REGIONAL_COMPILATION, COMPILE_TIME, COMPILE_STMT, COMPILE_ITERS),
(regional_compilation_model, REGIONAL_COMPILATION, INFERENCE_TIME, INFERENCE_STMT, INFERENCE_ITERS),
]:
for batch_size, sequence_length in [(1, 128), (4, 128)]:
input_ids = torch.randint(
0, 1000, size=(batch_size, sequence_length), dtype=torch.int64, device=torch_device_type
)
results.append(
Timer(
label=model_id,
sub_label=sub_label,
description=f"{description} ({batch_size}x{sequence_length})",
globals={"model": model, "input_ids": input_ids},
stmt=stmt,
).timeit(number=iters)
)
compare = Compare(results)
compare.colorize()
compare.print()

View File

@ -1,74 +0,0 @@
<!---
Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Official Hugging Face Accelerate Docker Images
Accelerate publishes a variety of docker versions as part of our CI that users can also use. These are stable images that Accelerate can run off of, each with a different setup configuration, all of which are officially hosted on [Docker Hub](https://hub.docker.com/r/huggingface/accelerate).
A breakdown of each is given below.
## Naming Conventions
Accelerate docker images follow a tagging convention of:
```bash
huggingface/accelerate:{accelerator}-{nightly,release}
```
`accelerator` in this instance is one of several applicable pre-configured backends:
* `gpu`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes`. Runs on Python 3.9.
* `cpu`: Comes compiled off of `python:3.9-slim` and is designed for non-CUDA based workloads.
* `gpu-deepspeed`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes` as well as the latest `deepspeed` version. Runs on Python 3.10.
* `gpu-fp8-transformerengine`: Comes compiled off of `nvcr.io/nvidia/pytorch` and is specifically for running the `benchmarks/fp8` scripts on devices which support FP8 operations using the `TransformerEngine` library (RTX 4090, H100, etc.)
* More to come soon
## Nightlies vs Releases
With each release, a new build is pushed with the version number included in the name. For a GPU-supported image of version 0.28.0, for instance, it would look like the following:
```bash
huggingface/accelerate:gpu-release-0.28.0
```
Nightlies contain two different image tags. There is a general `nightly` tag which is built each night, and a `nightly-YYYY-MM-DD` which corresponds to a build from a particular date.
For instance, here is an example nightly CPU image from 3/14/2024
```bash
huggingface/accelerate:cpu-nightly-2024-03-14
```
## Running the images
Each image comes compiled with `conda` and an `accelerate` environment that contains all of the installed dependencies.
To pull down the latest nightly, run:
```bash
docker pull huggingface/accelerate:gpu-nightly
```
To then run it in interactive mode with GPU-memory available, run:
```bash
docker container run --gpus all -it huggingface/accelerate:gpu-nightly
```
## DEPRECATED IMAGES
CPU and GPU docker images were previously hosted at `huggingface/accelerate-gpu` and `huggingface/accelerate-cpu`. These builds are now outdated and will no longer receive updates.
The builds at the corresponding `huggingface/accelerate:{gpu,cpu}` use the same `Dockerfile`, so switching is as simple as changing the docker image to the desired one from above. We will keep these legacy images up for posterity, but they will not receive any updates going forward.

View File

@ -1,7 +1,7 @@
# Builds CPU-only Docker image of PyTorch
# Uses multi-staged approach to reduce size
# Stage 1
FROM python:3.9-slim as compile-image
FROM python:3.8-slim as compile-image
ARG DEBIAN_FRONTEND=noninteractive
@ -25,7 +25,7 @@ RUN python3 -m pip install --no-cache-dir \
--extra-index-url https://download.pytorch.org/whl/cpu
# Stage 2
FROM python:3.9-slim AS build-image
FROM python:3.8-slim AS build-image
COPY --from=compile-image /opt/venv /opt/venv
RUN useradd -ms /bin/bash user
USER user

View File

@ -1,46 +0,0 @@
# Builds GPU docker image of PyTorch specifically
# Uses multi-staged approach to reduce size
# Stage 1
# Use base conda image to reduce time
FROM continuumio/miniconda3:latest AS compile-image
# Specify py version
# Note: DeepSpeed beyond v0.12.6 requires py 3.10
ENV PYTHON_VERSION=3.10
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
# Create our conda env
RUN conda create --name accelerate python=${PYTHON_VERSION} ipython jupyter pip
# We don't install pytorch here yet since CUDA isn't available
# instead we use the direct torch wheel
ENV PATH /opt/conda/envs/accelerate/bin:$PATH
# Activate our bash shell
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
# Activate the conda env, install mpi4py, and install torch + accelerate
RUN source activate accelerate && conda install -c conda-forge mpi4py
RUN source activate accelerate && \
python3 -m pip install --no-cache-dir \
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers,deepspeed] \
--extra-index-url https://download.pytorch.org/whl/cu126
RUN python3 -m pip install --no-cache-dir bitsandbytes
# Stage 2
FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu22.04 AS build-image
COPY --from=compile-image /opt/conda /opt/conda
ENV PATH /opt/conda/bin:$PATH
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
RUN echo "source activate accelerate" >> ~/.profile
# Activate the virtualenv
CMD ["/bin/bash"]

View File

@ -1,10 +1,10 @@
# Builds GPU docker image of PyTorch specifically
# Builds GPU docker image of PyTorch
# Uses multi-staged approach to reduce size
# Stage 1
# Use base conda image to reduce time
FROM continuumio/miniconda3:latest AS compile-image
# Specify py version
ENV PYTHON_VERSION=3.9
ENV PYTHON_VERSION=3.8
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
@ -19,17 +19,16 @@ ENV PATH /opt/conda/envs/accelerate/bin:$PATH
# Activate our bash shell
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
# Activate the conda env, install mpi4py, and install torch + accelerate
RUN source activate accelerate && conda install -c conda-forge mpi4py
# Activate the conda env and install torch + accelerate
RUN source activate accelerate && \
python3 -m pip install --no-cache-dir \
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \
--extra-index-url https://download.pytorch.org/whl/cu126
--extra-index-url https://download.pytorch.org/whl/cu117
RUN python3 -m pip install --no-cache-dir bitsandbytes
# Stage 2
FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu22.04 AS build-image
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 AS build-image
COPY --from=compile-image /opt/conda /opt/conda
ENV PATH /opt/conda/bin:$PATH

View File

@ -10,76 +10,53 @@
- local: basic_tutorials/overview
title: Overview
- local: basic_tutorials/migration
title: Add Accelerate to your code
- local: basic_tutorials/execution
title: Execution process
- local: basic_tutorials/tpu
title: TPU training
title: Migrating to 🤗 Accelerate
- local: basic_tutorials/launch
title: Launching Accelerate scripts
title: Launching distributed code
- local: basic_tutorials/notebook
title: Launching distributed training from Jupyter Notebooks
- local: basic_tutorials/troubleshooting
title: Troubleshooting guide
title: Tutorials
- sections:
- isExpanded: true
sections:
- local: usage_guides/explore
title: Start Here!
- local: usage_guides/model_size_estimator
title: Model memory estimator
- local: usage_guides/quantization
title: Model quantization
- local: usage_guides/tracking
title: Experiment trackers
- local: usage_guides/profiler
title: Profiler
- local: usage_guides/checkpoint
title: Checkpointing
- local: basic_tutorials/troubleshooting
title: Troubleshoot
- local: usage_guides/training_zoo
title: Example Zoo
title: Accelerate
- isExpanded: true
sections:
- local: usage_guides/gradient_accumulation
title: Gradient accumulation
- local: usage_guides/local_sgd
title: Local SGD
- local: usage_guides/low_precision_training
title: Low precision (FP8) training
- local: usage_guides/deepspeed
title: DeepSpeed
- local: usage_guides/deepspeed_multiple_model
title: Using multiple models with DeepSpeed
- local: usage_guides/ddp_comm_hook
title: DDP Communication Hooks
- local: usage_guides/fsdp
title: Fully Sharded Data Parallel
- local: usage_guides/megatron_lm
title: Megatron-LM
- local: usage_guides/sagemaker
title: Amazon SageMaker
- local: usage_guides/mps
title: Apple M1 GPUs
- local: usage_guides/intel_cpu
title: Intel CPU
- local: usage_guides/gaudi
title: Intel Gaudi
- local: usage_guides/compilation
title: Compilation
title: Training
- isExpanded: true
sections:
- local: usage_guides/big_modeling
title: Big Model Inference
- local: usage_guides/distributed_inference
title: Distributed inference
title: Inference
title: How to guides
- local: usage_guides/explore
title: Start Here!
- local: usage_guides/training_zoo
title: Example Zoo
- local: usage_guides/big_modeling
title: How to perform inference on large models with small resources
- local: usage_guides/model_size_estimator
title: Knowing how big of a model you can fit into memory
- local: usage_guides/quantization
title: How to quantize model
- local: usage_guides/distributed_inference
title: How to perform distributed inference with normal resources
- local: usage_guides/gradient_accumulation
title: Performing gradient accumulation
- local: usage_guides/local_sgd
title: Accelerating training with local SGD
- local: usage_guides/checkpoint
title: Saving and loading training states
- local: usage_guides/tracking
title: Using experiment trackers
- local: usage_guides/mps
title: How to use Apple Silicon M1 GPUs
- local: usage_guides/low_precision_training
title: How to train in low precision (FP8)
- local: usage_guides/deepspeed
title: How to use DeepSpeed
- local: usage_guides/fsdp
title: How to use Fully Sharded Data Parallelism
- local: usage_guides/megatron_lm
title: How to use Megatron-LM
- local: usage_guides/sagemaker
title: How to use 🤗 Accelerate with SageMaker
- local: usage_guides/ipex
title: How to use 🤗 Accelerate with Intel® Extension for PyTorch for cpu
title: How-To Guides
- sections:
- local: concept_guides/internal_mechanism
title: Accelerate's internal mechanism
title: 🤗 Accelerate's internal mechanism
- local: concept_guides/big_model_inference
title: Loading big models into memory
- local: concept_guides/performance
@ -88,44 +65,36 @@
title: Executing and deferring jobs
- local: concept_guides/gradient_synchronization
title: Gradient synchronization
- local: concept_guides/fsdp_and_deepspeed
title: FSDP vs DeepSpeed
- local: concept_guides/fsdp1_vs_fsdp2
title: FSDP1 vs FSDP2
- local: concept_guides/low_precision_training
title: Low precision training methods
title: How training in low-precision environments is possible (FP8)
- local: concept_guides/training_tpu
title: Training on TPUs
title: TPU best practices
title: Concepts and fundamentals
- sections:
- sections:
- local: package_reference/accelerator
title: Accelerator
title: Main Accelerator class
- local: package_reference/state
title: Stateful classes
title: Stateful configuration classes
- local: package_reference/cli
title: The Command Line
- local: package_reference/torch_wrappers
title: DataLoaders, Optimizers, Schedulers
title: Torch wrapper classes
- local: package_reference/tracking
title: Experiment trackers
- local: package_reference/launchers
title: Launchers
title: Distributed launchers
- local: package_reference/deepspeed
title: DeepSpeed utilities
- local: package_reference/logging
title: Logging
- local: package_reference/big_modeling
title: Working with large models
- local: package_reference/inference
title: Pipeline parallelism
- local: package_reference/kwargs
title: Kwargs handlers
- local: package_reference/fp8
title: FP8
- local: package_reference/utilities
title: Utility functions and classes
- local: package_reference/megatron_lm
title: Megatron-LM utilities
title: Megatron-LM Utilities
- local: package_reference/fsdp
title: Fully Sharded Data Parallel utilities
title: Fully Sharded Data Parallelism Utilities
title: "Reference"

View File

@ -1,128 +0,0 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Execution process
When working with distributed training systems, it is important to manage how and when processes are executed across GPUs. Some processes are completed faster than others, and some processes shouldn't begin if others haven't finished yet. Accelerate provides tools for orchestrating when processes are executed to ensure everything remains synchronized across all devices.
This tutorial will teach you how to execute a process on only one machine and how to delay execution until all processes have reached a certain point.
## Execute on one process
Certain code only needs to be run once on a given machine, such as printing a log statement or only displaying one progress bar on the local main process.
<hfoptions id="local-execution">
<hfoption id="statements">
You should use `accelerator.is_local_main_process` to indicate code that should only be executed once.
```py
from tqdm.auto import tqdm
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
```
You could also wrap a statement with `accelerator.is_local_main_process`.
> [!TIP]
> For standalone `print` statements that aren't wrapped in `accelerator.is_local_main_process`, replace `print` with Accelerate's [`~Accelerator.print`] method to only print once per process.
```py
if accelerator.is_local_main_process:
print("Accelerate is the best")
```
</hfoption>
<hfoption id="function">
For a function that should only be executed once, use [`~Accelerator.on_local_main_process`].
```py
@accelerator.on_local_main_process
def do_my_thing():
"Something done once per server"
do_thing_once_per_server()
```
</hfoption>
</hfoptions>
You could also direct Accelerate to execute code once across *all processes* regardless of the number of machines. This is useful if you're uploading a final model to the Hub.
<hfoptions id="main-execution">
<hfoption id="statement">
You should use `accelerator.is_main_process` to indicate code that should only be executed once across all processes.
```py
if accelerator.is_main_process:
repo.push_to_hub()
```
</hfoption>
<hfoption id="function">
For a function that should only be executed once across all processes, use [`~Accelerator.on_main_process`].
```py
@accelerator.on_main_process
def do_my_thing():
"Something done once per server"
do_thing_once()
```
</hfoption>
</hfoptions>
## Execute on a specific process
Accelerate can also help you execute functions that should only be executed on a specific process or a local process index.
<hfoptions id="specific-execution">
<hfoption id="specific process">
Use the [`~Accelerator.on_process`] method and specify the process index to execute a function on.
```py
@accelerator.on_process(process_index=0)
def do_my_thing():
"Something done on process index 0"
do_thing_on_index_zero()
```
</hfoption>
<hfoption id="local process">
Use the [`~Accelerator.on_local_process`] method and specify the local process index to execute a function on.
```py
@accelerator.on_local_process(local_process_idx=0)
def do_my_thing():
"Something done on process index 0 on each server"
do_thing_on_index_zero_on_each_server()
```
</hfoption>
</hfoptions>
## Defer execution
When you run your script on several GPUs at the same time, some code may be executed faster than others. You might need to wait for all processes to reach a certain point before executing the next set of instructions. For instance, you shouldn't save a model before making sure every process is done with training.
To do this, add [`~Accelerator.wait_for_everyone`] in your code. This blocks all processes that have finished first from continuing until all remaining processes have reached the same point (this has no effect if you're running on a single GPU or CPU).
```py
accelerator.wait_for_everyone()
```
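A typical place for this is right before saving, so that no process writes a checkpoint while others are still training (a sketch; the model and file name are illustrative):
```py
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
accelerator.save(unwrapped_model.state_dict(), "model.pt")
```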

View File

@ -13,29 +13,31 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Installation
# Installation and Configuration
Before you start, you will need to setup your environment, install the appropriate packages, and configure Accelerate. Accelerate is tested on **Python 3.8+**.
Before you start, you will need to setup your environment, install the appropriate packages, and configure 🤗 Accelerate. 🤗 Accelerate is tested on **Python 3.8+**.
Accelerate is available on pypi and conda, as well as on GitHub. Details to install from each are below:
## Installing 🤗 Accelerate
## pip
🤗 Accelerate is available on pypi and conda, as well as on GitHub. Details to install from each are below:
To install Accelerate from pypi, perform:
### pip
To install 🤗 Accelerate from pypi, perform:
```bash
pip install accelerate
```
## conda
### conda
Accelerate can also be installed with conda with:
🤗 Accelerate can also be installed with conda with:
```bash
conda install -c conda-forge accelerate
```
## Source
### Source
New features are added every day that haven't been released yet. To try them out yourself, install
from the GitHub repository:
@ -54,9 +56,9 @@ cd accelerate
pip install -e .
```
## Configuration
## Configuring 🤗 Accelerate
After installing, you need to configure Accelerate for how the current system is setup for training.
After installing, you need to configure 🤗 Accelerate for how the current system is setup for training.
To do so run the following and answer the questions prompted to you:
```bash
@ -68,8 +70,7 @@ To write a barebones configuration that doesn't include options such as DeepSpee
```bash
python -c "from accelerate.utils import write_basic_config; write_basic_config(mixed_precision='fp16')"
```
Accelerate will automatically utilize the maximum number of GPUs available and set the mixed precision mode.
🤗 Accelerate will automatically utilize the maximum number of GPUs available and set the mixed precision mode.
To check that your configuration looks fine, run:
@ -79,36 +80,23 @@ accelerate env
An example output is shown below, which describes two GPUs on a single machine with no mixed precision being used:
```bash
- `Accelerate` version: 1.2.0.dev0
- Platform: Linux-6.8.0-47-generic-x86_64-with-glibc2.35
- `accelerate` bash location: /home/zach/miniconda3/envs/accelerate/bin/accelerate
- Python version: 3.10.13
- Numpy version: 1.26.4
- PyTorch version (GPU?): 2.5.1+cu124 (True)
- PyTorch XPU available: False
- PyTorch NPU available: False
- PyTorch MLU available: False
- PyTorch MUSA available: False
- System RAM: 187.91 GB
- GPU type: NVIDIA GeForce RTX 4090
- `Accelerate` version: 0.11.0.dev0
- Platform: Linux-5.10.0-15-cloud-amd64-x86_64-with-debian-11.3
- Python version: 3.7.12
- Numpy version: 1.19.5
- PyTorch version (GPU?): 1.12.0+cu102 (True)
- `Accelerate` default config:
- compute_environment: LOCAL_MACHINE
- distributed_type: MULTI_GPU
- mixed_precision: no
- use_cpu: False
- debug: False
- num_processes: 2
- machine_rank: 0
- num_machines: 1
- gpu_ids: all
- rdzv_backend: static
- same_network: True
- main_process_ip: None
- main_process_port: None
- main_training_function: main
- enable_cpu_affinity: False
- downcast_bf16: no
- tpu_use_cluster: False
- tpu_use_sudo: False
- tpu_env: []
```
- deepspeed_config: {}
- fsdp_config: {}
```

View File

@ -13,9 +13,9 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Launching Accelerate scripts
# Launching your 🤗 Accelerate scripts
In the previous tutorial, you were introduced to how to modify your current training script to use Accelerate.
In the previous tutorial, you were introduced to how to modify your current training script to use 🤗 Accelerate.
The final version of that code is shown below:
```python
@ -69,14 +69,14 @@ Next, you need to launch it with `accelerate launch`.
<Tip warning={true}>
It's recommended you run `accelerate config` before using `accelerate launch` to configure your environment to your liking.
Otherwise Accelerate will use very basic defaults depending on your system setup.
Otherwise 🤗 Accelerate will use very basic defaults depending on your system setup.
</Tip>
## Using accelerate launch
Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.
🤗 Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.
This command wraps around all of the different commands needed to launch your script on various platforms, without you having to remember what each of them is.
<Tip>
@ -97,14 +97,11 @@ Since this runs the various torch spawn methods, all of the expected environment
For example, here is how to use `accelerate launch` with a single GPU:
```bash
# for cuda device:
CUDA_VISIBLE_DEVICES="0" accelerate launch {script_name.py} --arg1 --arg2 ...
# for xpu device:
ZE_AFFINITY_MASK="0" accelerate launch {script_name.py} --arg1 --arg2 ...
```
You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.
In this case, Accelerate will make some hyperparameter decisions for you, e.g., if GPUs are available, it will use all of them by default without the mixed precision.
In this case, 🤗 Accelerate will make some hyperparameter decisions for you, e.g., if GPUs are available, it will use all of them by default without the mixed precision.
Here is how you would use all GPUs and train with mixed precision disabled:
```bash
@ -132,14 +129,14 @@ accelerate launch -h
<Tip>
Even if you are not using Accelerate in your code, you can still use the launcher for starting your scripts!
Even if you are not using 🤗 Accelerate in your code, you can still use the launcher for starting your scripts!
</Tip>
For a visualization of this difference, the earlier `accelerate launch` on multi-GPU would look something like this with `torchrun`:
```bash
MIXED_PRECISION="fp16" torchrun --nproc_per_node=2 --nnodes=1 {script_name.py} {--arg1} {--arg2} ...
MIXED_PRECISION="fp16" torchrun --nproc_per_node=2 --num_machines=1 {script_name.py} {--arg1} {--arg2} ...
```
You can also launch your script utilizing the launch CLI as a Python module itself, enabling you to pass in other Python-specific
@ -181,7 +178,7 @@ accelerate launch {script_name.py} {--arg1} {--arg2} ...
## Custom Configurations
As briefly mentioned earlier, `accelerate launch` should be mostly used through combining set configurations
made with the `accelerate config` command. These configs are saved to a `default_config.yaml` file in your cache folder for Accelerate.
made with the `accelerate config` command. These configs are saved to a `default_config.yaml` file in your cache folder for 🤗 Accelerate.
This cache folder is located at (with decreasing order of priority):
- The content of your environment variable `HF_HOME` suffixed with `accelerate`.
@ -214,7 +211,7 @@ accelerate launch --config_file {path/to/config/my_config_file.yaml} {script_nam
```
## Multi-node training
Multi-node training with Accelerate is similar to [multi-node training with torchrun](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). The simplest way to launch a multi-node training run is to do the following:
Multi-node training with 🤗Accelerate is similar to [multi-node training with torchrun](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). The simplest way to launch a multi-node training run is to do the following:
- Copy your codebase and data to all nodes. (or place them on a shared filesystem)
- Setup your python packages on all nodes.

View File

@ -13,11 +13,21 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Add Accelerate to your code
# Migrating your code to 🤗 Accelerate
Each distributed training framework has its own way of doing things, which can require writing a lot of custom code to adapt it to your PyTorch training code and training environment. Accelerate offers a friendly way to interface with these distributed training frameworks without having to learn the specific details of each one. Accelerate takes care of those details for you, so you can focus on the training code and scale it to any distributed training environment.
This tutorial will detail how to easily convert existing PyTorch code to use 🤗 Accelerate!
You'll see that by just changing a few lines of code, 🤗 Accelerate can perform its magic and get you on
your way toward running your code on distributed systems with ease!
In this tutorial, you'll learn how to adapt your existing PyTorch code with Accelerate and get on your way toward training on distributed systems with ease! You'll start with a basic PyTorch training loop (it assumes all the training objects like `model` and `optimizer` have been set up already) and progressively integrate Accelerate into it.
## The base training loop
To begin, write out a very basic PyTorch training loop.
<Tip>
We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand.
</Tip>
```python
device = "cuda"
@ -35,44 +45,50 @@ for batch in training_dataloader:
scheduler.step()
```
## Accelerator
The [`Accelerator`] is the main class for adapting your code to work with Accelerate. It knows about the distributed setup you're using such as the number of different processes and your hardware type. This class also provides access to many of the necessary methods for enabling your PyTorch code to work in any distributed training environment and for managing and executing processes across devices.
That's why you should always start by importing and creating an [`Accelerator`] instance in your script.
## Add in 🤗 Accelerate
To start using 🤗 Accelerate, first import and create an [`Accelerator`] instance:
```python
from accelerate import Accelerator
accelerator = Accelerator()
```
[`Accelerator`] is the main force behind utilizing all the possible options for distributed training!
The [`Accelerator`] also knows which device to move your PyTorch objects to, so it is recommended to let Accelerate handle this for you.
### Setting the right device
The [`Accelerator`] class knows the right device to move any PyTorch object to at any time, so you should
change the definition of `device` to come from [`Accelerator`]:
```diff
- device = "cuda"
- device = 'cuda'
+ device = accelerator.device
model.to(device)
```
## Prepare PyTorch objects
### Preparing your objects
Next, you need to prepare your PyTorch objects (model, optimizer, scheduler, etc.) for distributed training. The [`~Accelerator.prepare`] method takes care of placing your model in the appropriate container (like single GPU or multi-GPU) for your training setup, adapting the optimizer and scheduler to use Accelerate's [`~optimizer.AcceleratedOptimizer`] and [`~scheduler.AcceleratedScheduler`], and creating a new dataloader that can be sharded across processes.
Next, you need to pass all of the important objects related to training into [`~Accelerator.prepare`]. 🤗 Accelerate will
make sure everything is setup in the current environment for you to start training:
> [!TIP]
> Accelerate only prepares objects that inherit from their respective PyTorch classes such as `torch.optim.Optimizer`.
The PyTorch objects are returned in the same order they're sent.
```py
```
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
model, optimizer, training_dataloader, scheduler
)
```
These objects are returned in the same order they were sent in. By default when using `device_placement=True`, all of the objects that can be sent to the right device will be.
If you need to work with data that isn't passed to [`~Accelerator.prepare`] but should be on the active device, you should pass in the `device` you made earlier.
## Training loop
<Tip warning={true}>
Finally, remove the `to(device)` calls to the inputs and targets in the training loop because Accelerate's DataLoader classes automatically place them on the right device. You should also replace the usual `backward()` pass with Accelerate's [`~Accelerator.backward`] method which scales the gradients for you and uses the appropriate `backward()` method depending on your distributed setup (for example, DeepSpeed or Megatron).
Accelerate will only prepare objects that inherit from their respective PyTorch classes (such as `torch.optim.Optimizer`).
</Tip>
### Modifying the training loop
Finally, three lines of code need to be changed in the training loop. 🤗 Accelerate's DataLoader classes will automatically handle the device placement by default,
and [`~Accelerator.backward`] should be used for performing the backward pass:
```diff
- inputs = inputs.to(device)
@ -83,13 +99,17 @@ Finally, remove the `to(device)` calls to the inputs and targets in the training
+ accelerator.backward(loss)
```
Put everything together and your new Accelerate training loop should now look like this!
With that, your training loop is now ready to use 🤗 Accelerate!
## The finished code
Below is the final version of the converted code:
```python
from accelerate import Accelerator
accelerator = Accelerator()
device = accelerator.device
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
model, optimizer, training_dataloader, scheduler
)
@ -104,121 +124,6 @@ for batch in training_dataloader:
scheduler.step()
```
## Training features
## More Resources
Accelerate offers additional features, like gradient accumulation, gradient clipping, and mixed precision training, that you can add to your script to improve your training run. Let's explore these three features.
### Gradient accumulation
Gradient accumulation enables you to train on larger batch sizes by accumulating the gradients over multiple batches before updating the weights. This can be useful for getting around memory limitations. To enable this feature in Accelerate, specify the `gradient_accumulation_steps` parameter in the [`Accelerator`] class and add the [`~Accelerator.accumulate`] context manager to your script.
```diff
+ accelerator = Accelerator(gradient_accumulation_steps=2)
model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader)
for input, label in training_dataloader:
+ with accelerator.accumulate(model):
predictions = model(input)
loss = loss_function(predictions, label)
accelerator.backward(loss)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
```
### Gradient clipping
Gradient clipping is a technique to prevent "exploding gradients", and Accelerate offers:
* [`~Accelerator.clip_grad_value_`] to clip gradients to a minimum and maximum value
* [`~Accelerator.clip_grad_norm_`] for normalizing gradients to a certain value
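As a rough sketch (assuming a training loop like the one above and a maximum gradient norm of 1.0), clipping is applied after the backward pass and before the optimizer step:
```py
for input, label in training_dataloader:
    predictions = model(input)
    loss = loss_function(predictions, label)
    accelerator.backward(loss)
    # Only clip when gradients are actually being synchronized
    # (relevant when gradient accumulation is enabled)
    if accelerator.sync_gradients:
        accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()
```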
### Mixed precision
Mixed precision accelerates training by using a lower precision data type like fp16 (half-precision) to calculate the gradients. For the best performance with Accelerate, the loss should be computed inside your model (like in Transformers models) because computations outside of the model are computed in full precision.
Set the mixed precision type to use in the [`Accelerator`], and then use the [`~Accelerator.autocast`] context manager to automatically cast the values to the specified data type.
> [!WARNING]
> Accelerate enables automatic mixed precision, so [`~Accelerator.autocast`] is only needed if there are other mixed precision operations besides those performed on loss by [`~Accelerator.backward`] which already handles the scaling.
```diff
+ accelerator = Accelerator(mixed_precision="fp16")
+ with accelerator.autocast():
loss = complex_loss_function(outputs, target)
```
## Save and load
Accelerate can also save and load a *model* once training is complete or you can also save the model and optimizer *state* which could be useful for resuming training.
### Model
Once all processes are complete, unwrap the model with the [`~Accelerator.unwrap_model`] method before saving it because the [`~Accelerator.prepare`] method wrapped your model into the proper interface for distributed training. If you don't unwrap the model, saving the model state dictionary also saves any potential extra layers from the larger model and you won't be able to load the weights back into your base model.
You should use the [`~Accelerator.save_model`] method to unwrap and save the model state dictionary. This method can also save a model into sharded checkpoints or into the [safetensors](https://hf.co/docs/safetensors/index) format.
<hfoptions id="save">
<hfoption id="single checkpoint">
```py
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory)
```
<Tip>
For models from the [Transformers](https://hf.co/docs/transformers/index) library, save the model with the [`~transformers.PreTrainedModel.save_pretrained`] method so that it can be reloaded with the [`~transformers.PreTrainedModel.from_pretrained`] method.
```py
from transformers import AutoModel
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
"path/to/my_model_directory",
is_main_process=accelerator.is_main_process,
save_function=accelerator.save,
)
model = AutoModel.from_pretrained("path/to/my_model_directory")
```
</Tip>
To load your weights, use the [`~Accelerator.unwrap_model`] method to unwrap the model first before loading the weights. All model parameters are references to tensors, so this loads your weights inside `model`.
```py
unwrapped_model = accelerator.unwrap_model(model)
path_to_checkpoint = os.path.join(save_directory,"pytorch_model.bin")
unwrapped_model.load_state_dict(torch.load(path_to_checkpoint))
```
</hfoption>
<hfoption id="sharded checkpoint">
Set `safe_serialization=True` to save the model in the safetensor format.
```py
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
```
To load a sharded checkpoint or a safetensor formatted checkpoint, use the [`~accelerate.load_checkpoint_in_model`] method. This method allows you to load a checkpoint onto a specific device.
```py
load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"":device})
```
</hfoption>
</hfoptions>
### State
During training, you may want to save the current state of the model, optimizer, random generators, and potentially learning rate schedulers so they can be restored in the *same script*. You should add the [`~Accelerator.save_state`] and [`~Accelerator.load_state`] methods to your script to save and load states.
To further customize where and how states are saved through [`~Accelerator.save_state`], use the [`~utils.ProjectConfiguration`] class. For example, if `automatic_checkpoint_naming` is enabled, each saved checkpoint is stored at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.
Any other stateful items to be stored should be registered with the [`~Accelerator.register_for_checkpointing`] method so they can be saved and loaded. Every object passed to this method to be stored must have a `load_state_dict` and `state_dict` function.
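A minimal sketch of these pieces together, with a hypothetical `RunningStats` object standing in for your own stateful item:
```py
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

# Checkpoints are stored under my_project/checkpoints/checkpoint_{n}
project_config = ProjectConfiguration(project_dir="my_project", automatic_checkpoint_naming=True)
accelerator = Accelerator(project_config=project_config)

class RunningStats:
    """Hypothetical custom object; anything with `state_dict`/`load_state_dict` can be registered."""
    def __init__(self):
        self.steps = 0
    def state_dict(self):
        return {"steps": self.steps}
    def load_state_dict(self, state):
        self.steps = state["steps"]

stats = RunningStats()
accelerator.register_for_checkpointing(stats)

accelerator.save_state()  # saves prepared objects, RNG states, and anything registered above
accelerator.load_state("my_project/checkpoints/checkpoint_0")  # restores them in the same script
```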
> [!TIP]
> If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, you can additionally pass `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`]. This extends Accelerate's DataLoader classes with a `load_state_dict` and `state_dict` function, and makes it so `Accelerator.save_state` and `Accelerator.load_state` also track how far into the training dataset it has read when persisting the model.
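A short sketch of opting into this behavior, assuming `torchdata>=0.8.0` is installed:
```py
from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=True)
accelerator = Accelerator(dataloader_config=dataloader_config)
```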
To check out more ways on how to migrate to 🤗 Accelerate, check out our [interactive migration tutorial](https://huggingface.co/docs/accelerate/usage_guides/explore) which showcases other items that need to be watched for when using Accelerate and how to do so quickly.

View File

@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Launching distributed training from Jupyter Notebooks
# Launching Multi-GPU Training from a Jupyter Environment
This tutorial teaches you how to fine tune a computer vision model with 🤗 Accelerate from a Jupyter Notebook on a distributed system.
You will also learn how to setup a few requirements needed for ensuring your environment is configured properly, your data has been prepared properly, and finally how to launch training.
@ -26,13 +26,13 @@ You will also learn how to setup a few requirements needed for ensuring your env
## Configuring the Environment
Before any training can be performed, an Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts:
Before any training can be performed, a 🤗 Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts:
```bash
accelerate config
```
However, if general defaults are fine and you are *not* running on a TPU, Accelerate has a utility to quickly write your GPU configuration into a config file via [`utils.write_basic_config`].
However, if general defaults are fine and you are *not* running on a TPU, 🤗Accelerate has a utility to quickly write your GPU configuration into a config file via [`utils.write_basic_config`].
The following code will restart Jupyter after writing the configuration, as CUDA code was called to perform this.
@ -52,7 +52,7 @@ os._exit(00) # Restart the notebook
## Preparing the Dataset and Model
Next you should prepare your dataset. As mentioned earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU.
Next you should prepare your dataset. As mentioned at earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU.
If you do, it is recommended to put that specific code into a function and call that from within the notebook launcher interface, which will be shown later.
@ -186,7 +186,7 @@ Here is a basic training loop for the animal classification problem:
<Tip>
The code has been split up to allow for explanations on each section. A full version that can be copy and pasted will be available at the end
The code has been split up to allow for explainations on each section. A full version that can be copy and pasted will be available at the end
</Tip>
@ -327,7 +327,7 @@ def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64):
# Build dataloaders
train_dataloader, eval_dataloader = get_dataloaders(batch_size)
# Instantiate the model (you build the model here so that the seed also controls new weight initializations)
# Instantiate the model (you build the model here so that the seed also controls new weight initaliziations)
model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
# Freeze the base model
@ -344,7 +344,7 @@ def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64):
mean = mean.to(accelerator.device)
std = std.to(accelerator.device)
# Instantiate the optimizer
# Intantiate the optimizer
optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25)
# Instantiate the learning rate scheduler
@ -430,17 +430,6 @@ args = (model, "fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=8)
```
To launch the training process with elasticity, enabling fault tolerance, you can use the `elastic_launch` feature provided by PyTorch. This requires setting additional parameters such as `rdzv_backend` and `max_restarts`. Here is an example of how to use `notebook_launcher` with elastic capabilities:
```python
notebook_launcher(
training_loop,
args,
num_processes=2,
max_restarts=3
)
```
As it's running, it will print the progress as well as state how many devices you ran on. This tutorial was run with two GPUs:
```python out
@ -454,12 +443,6 @@ epoch 4: 94.71
And that's it!
Please note that [`notebook_launcher`] ignores the Accelerate config file; to launch based on the config, use:
```bash
accelerate launch
```
## Debugging
A common issue when running the `notebook_launcher` is receiving a CUDA has already been initialized issue. This usually stems

View File

@ -15,10 +15,10 @@ rendered properly in your Markdown viewer.
# Overview
Welcome to the Accelerate tutorials! These introductory guides will help catch you up to speed on working with Accelerate.
Welcome to the 🤗 Accelerate tutorials! These introductory guides will help catch you up to speed on working with 🤗 Accelerate.
You'll learn how to modify your code to have it work with the API seamlessly, how to launch your script properly,
and more!
These tutorials assume some basic knowledge of Python and familiarity with the PyTorch framework.
If you have any questions about Accelerate, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/accelerate/18).
If you have any questions about 🤗 Accelerate, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/accelerate/18).

View File

@ -1,38 +0,0 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# TPU training
A [TPU (Tensor Processing Unit)](https://cloud.google.com/tpu/docs/intro-to-tpu) is a type of hardware specifically designed for training models efficiently. Accelerate supports TPU training, but there are a few things you should be aware of, namely graph compilation. This tutorial briefly discusses compilation, and for more details, take a look at the [Training on TPUs with Accelerate](../concept_guides/training_tpu) guide.
## Compilation
A TPU creates a graph of all the operations in the training step such as the forward pass, backward pass and optimizer step. This is why the first training step always takes a while because building and compiling this graph takes time. But once compilation is complete, it is cached and all subsequent steps are much faster.
The key is to avoid compiling your code again or else training is super slow. This means all your operations must be exactly the same:
* all tensors in your batches must have the same length (for example, no dynamic padding for NLP tasks)
* your code must be static (for example, no layers with for loops that have different lengths depending on the input such as a LSTM)
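For example, a minimal sketch (with a hypothetical `MAX_LENGTH`) of padding every batch to the same length so the compiled graph can be reused:
```py
import torch.nn.functional as F

MAX_LENGTH = 128  # hypothetical fixed sequence length

def pad_batch(input_ids, pad_token_id=0):
    # Pad every batch to the same length so the TPU graph is compiled only once
    return F.pad(input_ids, (0, MAX_LENGTH - input_ids.shape[-1]), value=pad_token_id)
```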
## Weight tying
A common language model design is to tie the weights of the embedding and softmax layers. However, moving the model to a TPU (either yourself or passing it to the [`~Accelerator.prepare`] method) breaks the weight tying and you'll need to retie the weights.
To add special behavior (like weight tying) in your script for TPUs, set [`~Accelerator.distributed_type`] to `DistributedType.TPU` first. Then you can use the [`~transformers.PreTrainedModel.tie_weights`] method to tie the weights.
```py
if accelerator.distributed_type == DistributedType.TPU:
model.tie_weights()
```

View File

@ -13,82 +13,77 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Troubleshoot
# Troubleshooting guide
This guide provides solutions to some issues you might encounter when using Accelerate. Not all errors are covered because Accelerate is an active library that is continuously evolving and there are many different use cases and distributed training setups. If the solutions described here don't help with your specific error, please take a look at the [Ask for help](#ask-for-help) section to learn where and how to get help.
This guide aims to provide you with the tools and knowledge required to navigate some common issues. However,
as 🤗 Accelerate continuously evolves and the use cases and setups are diverse, you might encounter an issue not covered in this
guide. If the suggestions listed in this guide do not cover your situation, please refer to the final section of
the guide, [Asking for Help](#ask-for-help), to learn where to find help with your specific issue.
## Logging
Logging can help you identify where an error is coming from. In a distributed setup with multiple processes, logging can be a challenge, but Accelerate provides the [`~accelerate.logging`] utility to ensure logs are synchronized.
When facing an error, logging can help narrow down where it is coming from. In a distributed setup with multiple processes,
logging can be a challenge, but 🤗 Accelerate provides a utility that streamlines the logging process and ensures that
logs are synchronized and managed effectively across the distributed setup.
To troubleshoot an issue, use [`~accelerate.logging`] instead of the standard Python [`logging`](https://docs.python.org/3/library/logging.html#module-logging) module. Set the verbosity level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`) with the `log_level` parameter, and then you can either:
To troubleshoot an issue, use `accelerate.logging` instead of the standard Python `logging` module:
1. Export the `log_level` as the `ACCELERATE_LOG_LEVEL` environment variable.
2. Pass the `log_level` directly to `get_logger`.
```diff
- import logging
+ from accelerate.logging import get_logger
- logger = logging.getLogger(__name__)
+ logger = get_logger(__name__)
```
For example, to set `log_level="INFO"`:
To set the log level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`), export it as the `ACCELERATE_LOG_LEVEL` environment variable,
or pass it as `log_level` to `get_logger`:
```py
```python
from accelerate.logging import get_logger
logger = get_logger(__name__, log_level="DEBUG")
logger = get_logger(__name__, log_level="INFO")
```
By default, the log is called on the main process only. To call it on all processes, pass `main_process_only=False`.
If a log should be called on all processes and in order, also pass `in_order=True`.
```py
from accelerate.logging import get_logger
logger = get_logger(__name__, log_level="DEBUG")
# log all processes
logger.debug("thing_to_log", main_process_only=False)
# log all processes in order
logger.debug("thing_to_log", main_process_only=False, in_order=True)
```
## Hanging code and timeout errors
There can be many reasons why your code is hanging. Let's take a look at how to solve some of the most common issues that can cause your code to hang.
### Mismatched tensor shapes
### Mismatched tensor shapes
If your code seems to be hanging for a significant amount of time on a distributed setup, a common cause is mismatched shapes of tensors on different
devices.
Mismatched tensor shapes is a common issue that can cause your code to hang for a significant amount of time on a distributed setup.
When running scripts in a distributed fashion, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are
necessary to grab tensors across devices to perform operations on them collectively. These (and other) functions rely on
`torch.distributed` performing a `gather` operation, which requires that tensors have the **exact same shape** across all processes.
When the tensor shapes don't match, you will experience hanging code, and eventually hit a timeout exception.
When running scripts in a distributed setup, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are necessary to grab tensors across devices to collectively perform operations on them. These (and other) functions rely on `torch.distributed` to perform a `gather` operation, which requires tensors to have the **exact same shape** across all processes. When the tensor shapes don't match, your code hangs and you'll eventually hit a timeout exception.
If you suspect this to be the case, use Accelerate's operational debug mode to immediately catch the issue.
You can use Accelerate's operational debug mode to immediately catch this issue. We recommend enabling this mode during the `accelerate config` setup, but you can also enable it from the CLI, as an environment variable, or by manually editing the `config.yaml` file.
The recommended way to enable Accelerate's operational debug mode is during `accelerate config` setup.
Alternative ways to enable debug mode are:
<hfoptions id="mismatch">
<hfoption id="CLI">
* From the CLI:
```bash
accelerate launch --debug {my_script.py} --arg1 --arg2
```
</hfoption>
<hfoption id="environment variable">
If enabling debug mode as an environment variable, you don't need to call `accelerate launch`.
* As an environment variable (which avoids the need for `accelerate launch`):
```bash
ACCELERATE_DEBUG_MODE="1" torchrun {my_script.py} --arg1 --arg2
```
</hfoption>
<hfoption id="config.yaml">
* Manually changing the `config.yaml` file:
Add `debug: true` to your `config.yaml` file.
```yaml
compute_environment: LOCAL_MACHINE
debug: true
```diff
compute_environment: LOCAL_MACHINE
+debug: true
```
</hfoption>
</hfoptions>
Once you enable debug mode, you should get a traceback that points to the tensor shape mismatch issue.
Once you enable the debug mode, you should get a similar traceback that points to the tensor shape mismatch issue:
```py
Traceback (most recent call last):
@ -105,58 +100,57 @@ Operation: `accelerate.utils.operations.broadcast`
Input shapes:
- Process 0: [1, 5]
- Process 1: [1, 2, 5]
```
```
### Early stopping
### Early stopping leads to hanging
For early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss), it may not be synchronized across all processes. As a result, a break can happen on process 0 but not on process 1 which will cause your code to hang indefinitely until a timeout occurs.
When doing early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss),
it may not be synchronized across all of them. As a result, a break can happen on process 0 but not on process 1.
This will cause the code to hang indefinitely until a timeout occurs.
If you have early stopping conditionals, use the `set_trigger` and `check_trigger` methods to make sure all the processes
are ended correctly.
If you have early stopping conditionals, use `set_breakpoint` and `check_breakpoint` methods to make sure all the processes
are ended correctly:
```py
# Assume `should_do_breakpoint` is a custom defined function that returns a conditional,
# and that conditional might be true only on process 1
if should_do_breakpoint(loss):
accelerator.set_trigger()
accelerator.set_breakpoint()
# Later in the training script when we need to check for the breakpoint
if accelerator.check_trigger():
if accelerator.check_breakpoint():
break
```
### Low kernel versions on Linux
### Hanging on low kernel versions on Linux
On Linux with kernel version < 5.5, hanging processes have been reported. To avoid this problem, upgrade your system to a later kernel version.
This is a known issue. On Linux with kernel version < 5.5, hanging processes have been reported. To avoid
encountering this problem, we recommend upgrading your system to a later kernel version.
### MPI
## CUDA out of memory
If your distributed CPU training job using MPI is hanging, ensure that you have
[passwordless SSH](https://www.open-mpi.org/faq/?category=rsh#ssh-keys) setup (using keys) between the nodes. This means
that for all nodes in your hostfile, you should be able to SSH from one node to another without being prompted for a password.
One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory",
as the entire script needs to be restarted, progress is lost, and typically a developer would want to simply
start their script and let it run.
Next, try to run the `mpirun` command as a sanity check. For example, the command below should print out the
hostnames for each of the nodes.
To address this problem, `Accelerate` offers a utility `find_executable_batch_size` that is heavily based on [toma](https://github.com/BlackHC/toma).
The utility retries code that fails due to OOM (out-of-memory) conditions and lowers batch sizes automatically.
```bash
mpirun -f hostfile -n {number of nodes} -ppn 1 hostname
```
### find_executable_batch_size
## Out-of-Memory
One of the most frustrating errors when it comes to running training scripts is hitting "Out-of-Memory" on devices like CUDA, XPU or CPU. The entire script needs to be restarted and any progress is lost.
To address this problem, Accelerate provides the [`find_executable_batch_size`] utility that is heavily based on [toma](https://github.com/BlackHC/toma).
This utility retries code that fails due to OOM (out-of-memory) conditions and automatically lowers batch sizes. For each OOM condition, the algorithm decreases the batch size by half and retries the code until it succeeds.
To use [`find_executable_batch_size`], restructure your training function to include an inner function with `find_executable_batch_size` and build your dataloaders inside it. At a minimum, this only takes 4 new lines of code.
This algorithm operates with exponential decay, decreasing the batch size by half after each failed run of the
training script. To use it, restructure your training function to include an inner function that includes this wrapper,
and build your dataloaders inside it. At a minimum, this could look like 4 new lines of code.
<Tip warning={true}>
The inner function **must** take batch size as the first parameter, but we do not pass one to it when called. The wrapper will handle this for you. Any object (models, optimizers) that consumes device memory and is passed to the [`Accelerator`] also **must** be declared inside the inner function.
The inner function *must* take in the batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for us.
</Tip>
It should also be noted that anything which will consume CUDA memory and is passed to the `accelerator` **must** be declared inside the inner function,
such as models and optimizers.
```diff
def training_function(args):
accelerator = Accelerator()
@ -181,31 +175,48 @@ def training_function(args):
+ inner_training_loop()
```
To find out more, check the documentation [here](../package_reference/utilities#accelerate.find_executable_batch_size).
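A more complete, self-contained sketch of this pattern, with a dummy model and dataset standing in for your own objects:
```py
import torch
from accelerate import Accelerator
from accelerate.utils import find_executable_batch_size


def training_function():
    accelerator = Accelerator()

    @find_executable_batch_size(starting_batch_size=128)
    def inner_training_loop(batch_size):
        accelerator.free_memory()  # drop references held by a previous, failed attempt

        # Dummy objects standing in for your own model and data
        model = torch.nn.Linear(512, 2)
        optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
        dataset = torch.utils.data.TensorDataset(
            torch.randn(1024, 512), torch.randint(0, 2, (1024,))
        )
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)

        model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
        for inputs, targets in dataloader:
            optimizer.zero_grad()
            loss = torch.nn.functional.cross_entropy(model(inputs), targets)
            accelerator.backward(loss)
            optimizer.step()

    inner_training_loop()
```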
## Non-reproducible results between device setups
If you changed the device setup and observe different model performance, it is likely you didn't update your script when moving from one setup to another. Even if you're using the same script with the same batch size, the results will still be different on a TPU, multi-GPU, and single GPU.
If you have changed the device setup and are observing different model performance, this is likely due to the fact that
you have not updated your script when moving from one setup to another. The same script with the same batch size across TPU,
multi-GPU, and single-GPU with Accelerate will have different results.
For example, if you were training on a single GPU with a batch size of 16 and you move to a dual GPU setup, you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**.
For example, if you were previously training on a single GPU with a batch size of 16, when moving to two GPU setup,
you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate,
the batch size passed to the dataloader is the **batch size per GPU**.
To make sure you can reproduce the results between the setups, make sure to use the same seed, adjust the batch size accordingly, and consider scaling the learning rate.
To make sure you can reproduce the results between the setups, make sure to use the same seed, adjust the batch size
accordingly, and consider scaling the learning rate.
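A small sketch (with hypothetical numbers) of deriving the per-device batch size from the effective batch size used on a single GPU:
```py
from accelerate import Accelerator

accelerator = Accelerator()

effective_batch_size = 16  # what the single-GPU run used
# The batch size passed to the dataloader is per GPU, so divide by the number of processes
per_device_batch_size = effective_batch_size // accelerator.num_processes
```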
For more details and a quick reference for batch sizes, check out the [Comparing performance between different device setups](../concept_guides/performance) guide.
## Performance issues on different GPUs
If your multi-GPU setup consists of different GPUs, you may encounter some performance issues:
If your multi-GPU setup consists of different GPUs, you may hit some limitations:
- There may be an imbalance in GPU memory between the GPUs. In this case, the GPU with the smaller memory will limit the batch size or the size of the model that can be loaded onto the GPUs.
- If you are using GPUs with different performance profiles, the performance will be driven by the slowest GPU you are using because the other GPUs will have to wait for it to complete its workload.
- There may be an imbalance in GPU memory between the GPUs. In this case, the GPU with smaller memory will limit the batch size or the size of the model that can be loaded onto the GPUs.
- If you are using GPUs with different performance profiles, the performance will be driven by the slowest GPU that you are using as the other GPUs will have to wait for it to complete its workload.
Vastly different GPUs within the same setup can lead to performance bottlenecks.
## Ask for help
If none of the solutions and advice here helped resolve your issue, you can always reach out to the community and Accelerate team for help.
If the above troubleshooting tools and advice did not help you resolve your issue, reach out for help to the community
and the team.
- Ask for help on the Hugging Face forums by posting your question in the [Accelerate category](https://discuss.huggingface.co/c/accelerate/18). Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved!
### Forums
- Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you.
Ask for help on the Hugging Face forums - post your question in the [🤗Accelerate category](https://discuss.huggingface.co/c/accelerate/18)
Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved!
- Create an Issue on the Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you think you've found a bug related to the library. Include context regarding the bug and details about your distributed setup to help us better figure out what's wrong and how we can fix it.
### Discord
Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you.
### GitHub Issues
Create an Issue on the 🤗 Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you suspect
you have found a bug related to the library. Include context regarding the bug and details about your distributed setup
to help us better figure out what's wrong and how we can fix it.

View File

@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Loading big models into memory
# Handling big models for inference
When loading a pre-trained model in PyTorch, the usual workflow looks like this:
@ -46,7 +46,7 @@ This API is quite new and still in its experimental stage. While we strive to pr
### Instantiating an empty model
The first tool Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM so that step 1 can be done on models of any size. Here is how it works:
The first tool 🤗 Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM so that step 1 can be done on models of any size. Here is how it works:
```py
from accelerate import init_empty_weights
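from torch import nn

# Sketch of the pattern: any model built under this context manager allocates no
# real weight storage, so even very large architectures can be instantiated safely.
with init_empty_weights():
    model = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])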
@ -74,7 +74,7 @@ initializes an empty model with a bit more than 100B parameters. Behind the scen
It's possible your model is so big that even a single copy won't fit in RAM. That doesn't mean it can't be loaded: if you have one or several GPUs, there is more memory available to store your model. In this case, it's better if your checkpoint is split into several smaller files that we call checkpoint shards.
Accelerate will handle sharded checkpoints as long as you follow the following format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in the JSON format that contains a dictionary mapping parameter names to the file containing their weights. You can easily shard your model with [`~Accelerator.save_model`]. For instance, we could have a folder containing:
🤗 Accelerate will handle sharded checkpoints as long as you follow the following format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in the JSON format that contains a dictionary mapping parameter names to the file containing their weights. You can easily shard your model with [`~Accelerator.save_model`]. For instance, we could have a folder containing:
```bash
first_state_dict.bin
@ -97,9 +97,9 @@ and `first_state_dict.bin` containing the weights for `"linear1.weight"` and `"l
### Loading weights
The second tool Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard.
The second tool 🤗 Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard.
If you want to use big model inference with Transformers models, check out this [documentation](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading).
If you want to use big model inference with 🤗 Transformers models, check out this [documentation](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading).
Here is how we can use this to load the [GPT2-1.5B](https://huggingface.co/marcsun13/gpt2-xl-linear-sharded) model.
@ -145,7 +145,7 @@ model = load_checkpoint_and_dispatch(
)
```
By passing `device_map="auto"`, we tell Accelerate to determine automatically where to put each layer of the model depending on the available resources:
By passing `device_map="auto"`, we tell 🤗 Accelerate to determine automatically where to put each layer of the model depending on the available resources:
- first, we use the maximum space available on the GPU(s)
- if we still need space, we store the remaining weights on the CPU
- if there is not enough RAM, we store the remaining weights on the hard drive as memory-mapped tensors
@ -159,7 +159,7 @@ include a residual connection of some kind.
#### The `device_map`
You can see the `device_map` that Accelerate picked by accessing the `hf_device_map` attribute of your model:
You can see the `device_map` that 🤗 Accelerate picked by accessing the `hf_device_map` attribute of your model:
```py
model.hf_device_map
@ -210,7 +210,7 @@ outputs = model.generate(x1, max_new_tokens=10, do_sample=False)[0]
tokenizer.decode(outputs.cpu().squeeze())
```
Behind the scenes, Accelerate added hooks to the model, so that:
Behind the scenes, 🤗 Accelerate added hooks to the model, so that:
- at each layer, the inputs are put on the right device (so even if your model is spread across several GPUs, it works)
- for the weights offloaded on the CPU, they are put on a GPU just before the forward pass and cleaned up just after
- for the weights offloaded on the hard drive, they are loaded in RAM then put on a GPU just before the forward pass and cleaned up just after
@ -225,7 +225,7 @@ This way, your model can run for inference even if it doesn't fit on one of the
### Designing a device map
You can let Accelerate handle the device map computation by setting `device_map` to one of the supported options (`"auto"`, `"balanced"`, `"balanced_low_0"`, `"sequential"`) or create one yourself if you want more control over where each layer should go.
You can let 🤗 Accelerate handle the device map computation by setting `device_map` to one of the supported options (`"auto"`, `"balanced"`, `"balanced_low_0"`, `"sequential"`) or create one yourself if you want more control over where each layer should go.
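For instance, a hand-written map might look like the following sketch (the module names are hypothetical and must match your model's submodules):
```py
# 0 means GPU 0; "cpu" and "disk" offload a submodule to CPU RAM or to disk
device_map = {
    "transformer.wte": 0,
    "transformer.h": 0,
    "transformer.ln_f": "cpu",
    "lm_head": "disk",
}
```
The resulting dictionary can then be passed as `device_map` to [`load_checkpoint_and_dispatch`].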
<Tip>

View File

@ -13,9 +13,9 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Executing and deferring jobs
# Deferring Executions
When you run your usual script, instructions are executed in order. Using Accelerate to deploy your script on several
When you run your usual script, instructions are executed in order. Using 🤗 Accelerate to deploy your script on several
GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be
faster than others.
@ -127,4 +127,4 @@ for (x,y) in data_loader:
# Later in the training script when we need to check for the breakpoint
if accelerator.check_trigger():
break
```
```

View File

@ -1,105 +0,0 @@
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# FSDP1 vs FSDP2
This guide explains the key differences between `FSDP1` and `FSDP2` and helps you migrate your existing code to use `FSDP2` with minimal changes.
## How is FSDP2 better than FSDP1?
First, let's look at how `FSDP1` and `FSDP2` work internally to understand the differences between them. This also helps us see the limitations of `FSDP1` and how `FSDP2` solves them.
We'll be discussing a scenario where we have a single `Layer` that contains 3 `Linear` layers and is wrapped using `FSDP` to be sharded across 2 GPUs.
<div align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/layer.png" alt="Layer">
</div>
### FSDP1
First, we have to understand the original `FSDP1` and the limitations it brings. It represents each `FSDP` module as a single `FlatParameter`, a 1D tensor that contains all of the module parameters and is then sharded across ranks. That is, if you wrap the `Layer` with `FSDP1`, you'd get something like this:
<div align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/fsdp1.png" alt="FSDP1">
</div>
You might notice a problem. The whole `Layer` gets flattened into a single `FlatParameter`, which then gets sharded across ranks. But if it's a single `FlatParameter` object, how do we store metadata? That is one of the limitations. Properly storing per-parameter metadata such as `dtype`, `requires_grad`, etc. is not possible without some ugly hacks.
### FSDP2
This is why `FSDP2` was introduced. It doesn't use `FlatParameter`; instead it uses `DTensor`, which is short for "Distributed Tensor". Each `DTensor` represents a vanilla `torch.Tensor` that has been sharded across ranks. It contains metadata about the original `torch.Tensor`, how it's sharded, what the [placement type](https://pytorch.org/docs/stable/distributed.tensor.html#module-torch.distributed.tensor.placement_types) is, and so on. This is why it's called `per-parameter sharding`. The following figure shows the difference:
<div align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/fsdp2.png" alt="FSDP2">
</div>
Each Parameter of the original `Layer` is sharded across the 0th dimension, and split between 2 GPUs. Now, each `Linear` layer is a separate `DTensor` and storing metadata per-parameter is possible and straightforward.
> [!TIP]
> In the image above, the tensors were sharded across the 1st dimension only for the sake of fitting the image on the screen; in reality, they are sharded across the 0th dimension, as stated above
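As a rough illustration (assuming `model` has already been wrapped with FSDP2), you can inspect this per-parameter metadata directly:
```py
def show_sharding(model):
    # With FSDP2 each parameter is a DTensor; its `placements` attribute describes
    # how the tensor is sharded across ranks (e.g. along the 0th dimension)
    for name, param in model.named_parameters():
        print(name, type(param).__name__, getattr(param, "placements", None))
```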
## What does FSDP2 offer?
`FSDP2` is a new and improved version of PyTorch's fully-sharded data parallel training API. Its main advantage is using `DTensor` to represent sharded parameters. Compared to `FSDP1`, it offers:
- Simpler internal implementation, where each `Parameter` is a separate `DTensor`
- Enables simple partial parameter freezing because of the above, which makes methods such as [`LoRA`](https://arxiv.org/abs/2106.09685) work out of the box
- With `DTensor`, `FSDP2` supports mixing `fp8` and other parameter types in the same model out of the box
- Faster and simpler checkpointing without extra communication across ranks using `SHARDED_STATE_DICT` and [`torch.distributed.checkpoint`](https://pytorch.org/docs/stable/distributed.checkpoint.html); this way, each rank only saves its own shard and the corresponding metadata
- For loading, it uses a `state_dict` of the sharded model to directly load the sharded parameters
- Support for asynchronous checkpointing, where parameters are first copied to CPU memory; after this, the main thread continues training while another thread stores the parameters on disk
- Memory efficiency and deterministic memory usage: `FSDP2` no longer uses `recordStream` and instead uses stream-to-stream synchronization (for more technical details see [this forum post](https://dev-discuss.pytorch.org/t/fsdp-cudacachingallocator-an-outsider-newb-perspective/1486) and [this issue](https://github.com/pytorch/pytorch/issues/114299))
- In the future, optimizations of the communication patterns via `torch.compile` are planned, further improving the performance and memory efficiency
## API Differences
We have already discussed the internal differences; now let's discuss the differences that you, as a user, will need to know.
Here are the main changes in configuration options when using `FSDP2` through the `accelerate` CLI:
Previous (`FSDP1`) | New (`FSDP2`) | What Changed
-- | -- | --
`--fsdp_sharding_strategy` | `--fsdp_reshard_after_forward` | replaces `--fsdp_sharding_strategy`, changed to `true` (previously `FULL_SHARD`) or `false` (previously `SHARD_GRAD_OP`)
`--fsdp_backward_prefetch` | \*\***REMOVED**\*\* | `FSDP2` uses previous `BACKWARD_PRE` option by default, as only this allows communication and computation overlap
`--fsdp_forward_prefetch` | \*\***NOT YET IMPLEMENTED**\*\* | How to implement this is under active discussion, for now it is not supported in `FSDP2`
`--fsdp_sync_module_states` | \*\***REMOVED**\*\* | with `FSDP2`, this parameter becomes redundant
`--fsdp_cpu_ram_efficient_loading` | `--fsdp_cpu_ram_efficient_loading` | if `true`, `FSDP2` will similarly load the model only on rank 0, and then parameters get synced to other ranks; this is the same behavior as `FSDP1`, however, setting `--fsdp_sync_module_states` isn't required anymore
`--fsdp_state_dict_type` | `--fsdp_state_dict_type` | `LOCAL_STATE_DICT` becomes obsolete and with `FSDP2` `SHARDED_STATE_DICT` is the default option, which results in no extra communication and each rank saving its own shard, other possible option is `FULL_STATE_DICT` which results in extra communication and spike in memory usage but saves the full model from rank 0.
`--fsdp_use_orig_params` | \*\***REMOVED**\*\* | `FSDP2` uses a `DTensor` class on the background, which means it *always* uses the original parameters by default
\*\***NEW**\*\* | `--fsdp_version` | `1` is the default option, to not break existing code, set to `2` to use `FSDP2`
For all other options that remain unchanged, see the [`FSDP` documentation](../usage_guides/fsdp.md).
## How to Switch to FSDP2
### If using Python code:
Simply set `fsdp_version=2` when creating your plugin and replace options according to the table above.
```python
from accelerate import FullyShardedDataParallelPlugin, Accelerator
fsdp_plugin = FullyShardedDataParallelPlugin(
fsdp_version=2
# other options...
)
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```
### If using YAML config:
Use our conversion tool:
```bash
accelerate to-fsdp2 --config_file config.yaml --output_file new_config.yaml
```
This will automatically convert all FSDP1 settings to their FSDP2 equivalents. Use `--overwrite` to update the existing file instead of creating a new one.

View File

@ -1,192 +0,0 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# FSDP vs DeepSpeed
Accelerate offers flexibility in training frameworks by integrating two extremely powerful tools for distributed training, namely [Pytorch FSDP](../usage_guides/fsdp) and [Microsoft DeepSpeed](../usage_guides/deepspeed). The aim of this tutorial is to draw parallels, as well as to outline potential differences, to empower the user to switch seamlessly between these two frameworks.
<Tip>
To switch between the frameworks, we recommend launching your code with `accelerate launch`, passing in the correct config file with `--config_file`, or passing in the respective arguments directly for [FSDP and DeepSpeed](../package_reference/cli#accelerate-launch).
Example Accelerate configurations can be found here for [DeepSpeed](../usage_guides/deepspeed#accelerate-deepspeed-plugin) and [FSDP](../usage_guides/fsdp#how-it-works-out-of-the-box), or in the [example zoo under "Launch Configurations"](../usage_guides/explore)
</Tip>
<Tip warning={true}>
This tutorial is for single-node, multi-GPU, scenarios only.
</Tip>
## Configuring Functionalities
Model tensors are split into different GPUs in an attempt to scale up model sizes; this is termed *sharding* in FSDP, and *partitioning* in DeepSpeed. FSDP sharding and DeepSpeed ZeRO (partitioning) stages are configured by `--fsdp_sharding_strategy`, and `--zero_stage`, respectively. In particular, FSDP `FULL_SHARD` maps to DeepSpeed ZeRO stage `3`; see this [comprehensive mapping between FSDP sharding and DeepSpeed ZeRO settings](../usage_guides/fsdp#mapping-between-fsdp-sharding-strategies-and-deepspeed-zero-stages). The below table summarizes and groups similar settings:
Group | Framework | Configuration | Example | Restrictions (if any)
--|--|--|--|--
sharding / partitioning | FSDP<br>DeepSpeed | `--fsdp_sharding_strategy`<br>`--zero_stage` | `1` (`FULL_SHARD`) <br>`3` |
offload | FSDP<br>DeepSpeed | `--fsdp_offload_params`<br>`--offload_param_device`<br>`--offload_optimizer_device` | `true`<br>`cpu`<br>`cpu` | all or nothing <br><br>
model loading | FSDP<br>DeepSpeed | <span style="white-space:nowrap;">`--fsdp_cpu_ram_efficient_loading`</span><br>`--zero3_init_flag` | `true`<br>`true` | <br>only ZeRO 3
efficient checkpointing | FSDP<br>DeepSpeed | `--fsdp_state_dict_type`<br>`--zero3_save_16bit_model` | `SHARDED_STATE_DICT`<br>`true` | <br>only ZeRO 3
weights prefetching | FSDP<br><br>DeepSpeed | `--fsdp_forward_prefetch`<br>`--fsdp_backward_prefetch`<br>None | `true`<br>`BACKWARD_PRE` | <br><br>
model | FSDP<br><br>DeepSpeed | `--fsdp_auto_wrap_policy`<br><span style="white-space:nowrap;">`--fsdp_transformer_layer_cls_to_wrap`</span><br>None | `TRANSFORMER_BASED_WRAP`<br><Layer Class> |<br>Usually not needed <br>Transparent to user.
parameters summoning | FSDP<br>DeepSpeed | `--fsdp_use_orig_params`<br>None | `true` | required for `torch.compile`<br>Transparent to user
parameters syncing | FSDP<br>DeepSpeed | `--fsdp_sync_module_states`<br>None | `true` |
training | FSDP<br>DeepSpeed | None<br>`--gradient_accumulation_steps`<br>`--gradient_clipping` | <br>`auto`<br>`auto` | Transparent to user
For detailed descriptions of the above, refer to [`Accelerate` launch documentation](../package_reference/cli#accelerate-launch).
<Tip>
To access other DeepSpeed configurations, such as mixed precision settings,
you need to pass in a `--deepspeed_config_file`, see the [documentation](../usage_guides/deepspeed#deepspeed-config-file).
DeepSpeed can also be configured via [`DeepSpeedPlugin`], e.g., `DeepSpeedPlugin.zero_stage` is equivalent to `--zero_stage`, and `DeepSpeedPlugin.hf_ds_config` can be used to pass `--deepspeed_config_file`.
</Tip>
<Tip>
FSDP can also be configured via [`FullyShardedDataParallelPlugin`], e.g., `FullyShardedDataParallelPlugin.sharding_strategy` is equivalent to `--fsdp_sharding_strategy`.
</Tip>
### Checkpointing
Do note that FSDP can be configured via `--fsdp_state_dict_type` to save either full or sharded checkpoints.
<Tip>
For DeepSpeed Zero3, one could pass a `--zero3_save_16bit_model true`, which conveniently consolidates the model to a single rank and saves; this is the FSDP equivalent of `fsdp_state_dict_type: FULL_STATE_DICT`.
</Tip>
<Tip warning={true}>
For large models, consolidating the model to a single rank can be very slow.
</Tip>
<Tip>
For quicker checkpointing, for FSDP use `fsdp_state_dict_type: SHARDED_STATE_DICT`, and for DeepSpeed Zero3 [use the `zero_to_fp32.py` script to post-convert sharded checkpoints](https://www.deepspeed.ai/tutorials/zero/#extracting-weights).
</Tip>
### Offloading
FSDP only allows *all-or-nothing* offload (i.e., either offload parameters, gradients, and optimizer, or keep them all in GPU), but DeepSpeed can offload parameters and optimizer differently. Furthermore, DeepSpeed also supports [offloading to NVME](https://www.deepspeed.ai/docs/config-json/#parameter-offloading).
### Prefetching
FSDP allows two prefetching configurations `--fsdp_forward_prefetch` and `--fsdp_backward_prefetch` to improve overlap of comms / computation at a cost of extra memory, see [FSDP documentation](https://pytorch.org/docs/stable/fsdp.html).
For DeepSpeed, prefetching is turned on when needed, depending on certain hyper-parameters like `stage3_param_persistence_threshold`, `stage3_max_reuse_distance`, etc., [that can be configured for Zero3](https://www.deepspeed.ai/docs/config-json/#parameter-offloading); `accelerate` may set these hyper-parameters automatically if you don't set them explicitly in the DeepSpeed config file.
<Tip>
For FSDP, set `fsdp_backward_prefetch: BACKWARD_PRE` for improved throughput if memory allows.
</Tip>
### Model Loading
While FSDP requires an explicit `--fsdp_cpu_ram_efficient_loading true` to activate efficient model loading, `transformers` will activate a similar feature whenever DeepSpeed Zero3 is used.
<Tip>
For FSDP, whenever setting `--fsdp_cpu_ram_efficient_loading true`, `accelerate` will automatically set `sync_module_states` to true.
For RAM-efficient loading, the weights are loaded only on a single rank, and thus `sync_module_states` is required to broadcast the weights to the other ranks.
</Tip>
### Model
FSDP requires an explicit `--fsdp_auto_wrap_policy` for the algorithm to decide how to schedule the all-gather and reduce-scatter operations. But for DeepSpeed this is transparent to the user.
<Tip>
For FSDP, simply set `fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP`. With the latest [`transformers`] versions, we try our best to figure out the suitable `fsdp_transformer_layer_cls_to_wrap` for HF transformers models. However, if you get an error regarding it, please specify this.
</Tip>
### Parameters Summoning
FSDP requires an explicit `--fsdp_use_orig_params` flag if using `torch.compile`, see [the pytorch documentation](https://pytorch.org/docs/stable/fsdp.html#module-torch.distributed.fsdp). For DeepSpeed this is transparent to the user.
<Tip>
For FSDP, when using `torch.compile` please set `fsdp_use_orig_params: True`.
</Tip>
## Training
DeepSpeed requires explicit `--gradient_accumulation_steps` and `--gradient_clipping` flags. For FSDP this is transparent to the user.
<Tip>
When using DeepSpeed, set `gradient_accumulation_steps: "auto"` and `gradient_clipping: "auto"` to automatically pick up values set in the [`Accelerator`] or [`TrainingArguments`] (if using `transformers`).
</Tip>
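If you prefer configuring DeepSpeed from Python instead of a config file, a minimal sketch using [`DeepSpeedPlugin`] could look like the following; the numeric values are placeholders:
```python
from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin

# Placeholder values; these mirror the --zero_stage, --gradient_accumulation_steps
# and --gradient_clipping flags discussed above
deepspeed_plugin = DeepSpeedPlugin(
    zero_stage=3,
    gradient_accumulation_steps=2,
    gradient_clipping=1.0,
)
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)
```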
## On Differences in Data Precision Handling
To discuss how data precision is handled in both FSDP and DeepSpeed, it is instructive to first give an overview of how model parameters are handled in these frameworks. Before the model / optimizer parameters are distributed across GPUs, parameter preparation first "flattens" them into one-dimensional [`torch.Tensor`](https://pytorch.org/docs/stable/tensors.html#torch-tensor)s. FSDP and DeepSpeed differ in the `dtype` in which these "flattened" parameters are stored, and there are ramifications with regard to how [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) allocates its `dtype`s. The table below outlines the processes for both frameworks; the "Local" column indicates the process occurring at a per-GPU level, so any memory overhead from upcasting should be understood as amortized by the number of GPUs used.
<Tip>
As a rule of thumb, for stable training with automatic mixed precision, all the trainable parameters have to be in `torch.float32`.
</Tip>
Process | Local | Framework | Details
--|--|--|--
Loading, i.e., [`AutoModel.from_pretrained(..., torch_dtype=torch_dtype)`] |
Preparation, i.e., creation of "flat params" | ✅ | FSDP<br>DeepSpeed | created in `torch_dtype`.<br> disregards `torch_dtype`, created in `float32`.
Optimizer initialization | ✅ | FSDP<br>DeepSpeed | creates parameters in `torch_dtype`<br> creates parameters in `float32`
Training Step, i.e, forward, backward, reduction | | FSDP<br>DeepSpeed | follows [`MixedPrecision`](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.MixedPrecision)<br> follows `deepspeed_config_file` mixed precision settings.
Optimizer (Pre-Step) | ✅ | FSDP<br>DeepSpeed | upcasting (if any) to `torch_dtype`<br>upcasted to `float32`
Optimizer (Actual Step) | ✅ | FSDP<br>DeepSpeed | occurs in `torch_dtype` <br> occurs in `float32`.
<Tip warning={true}>
Therefore, when using DeepSpeed with a small number of GPUs, be aware of potentially significant memory overheads due to the upcasting during preparation.
</Tip>
<Tip>
With FSDP, in the absence of mixed precision, it is possible to operate the [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) in low precision `torch_dtype`, which may be helpful when using a small number of GPUs.
</Tip>
<Tip warning={true}>
With mixed precision, FSDP and DeepSpeed will upcast in the model preparation step (cf. table above). But do note that FSDP will then save checkpoints in the upcasted precision; DeepSpeed may still save low precision checkpoints if `--zero3_save_16bit_model` is specified.
</Tip>
To clarify the above table, consider the concrete examples below; the optimizer pre-step and actual step are combined for brevity. With FSDP it is possible to operate in the two modes shown below, but DeepSpeed can only operate in one. A short code sketch follows the table.
Framework | Model Loading (`torch_dtype`) | Mixed Precision | Preparation (Local) | Training | Optimizer (Local)
--|--|--|--|--|--
FSDP | bf16 | default (none) | bf16 | bf16 | bf16
FSDP | bf16 | bf16 | fp32 | bf16 | fp32
DeepSpeed | bf16 | bf16 | fp32 | bf16 | fp32
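To make the second FSDP row (and the DeepSpeed row) concrete, a rough sketch of that setup could look like the following; the model name and dtype are illustrative only:
```python
import torch
from accelerate import Accelerator
from transformers import AutoModelForCausalLM

# bf16 loading + bf16 mixed precision: per the table, the "flat params" are upcast
# to fp32 locally while forward/backward run in bf16
accelerator = Accelerator(mixed_precision="bf16")
model = AutoModelForCausalLM.from_pretrained("gpt2", torch_dtype=torch.bfloat16)
model = accelerator.prepare(model)
```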

View File

@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Gradient synchronization
# Gradient Synchronization
PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.
This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints
@ -28,7 +28,7 @@ from torch.nn.parallel import DistributedDataParallel
model = nn.Linear(10, 10)
ddp_model = DistributedDataParallel(model)
```
In Accelerate this conversion happens automatically when calling [`~Accelerator.prepare`] and passing in your model.
In 🤗 Accelerate this conversion happens automatically when calling [`~Accelerator.prepare`] and passing in your model.
```diff
+ from accelerate import Accelerator
@ -90,7 +90,7 @@ for index, batch in enumerate(dataloader):
optimizer.step()
```
In Accelerate to make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!),
In 🤗 Accelerate to make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!),
`ddp_model.no_sync` gets replaced with [`~Accelerator.no_sync`] and operates the same way:
```diff
@ -167,18 +167,3 @@ As you can see, if you are not careful about how you set up your gradient synchr
If you are worried about making sure everything is done properly, we highly recommend utilizing the [`~Accelerator.accumulate`] function and passing in
`gradient_accumulation_steps` or `gradient_accumulation_plugin` to the [`Accelerator`] object so Accelerate can handle this for you.
### `no_sync` requires additional GPU memory when using FSDP
Be aware that not syncing gradients can have adverse effects while performing FSDP training. As it has been warned in `torch`, the [`no_sync` context manager for FSDP](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.no_sync) will require additional memory.
Therefore, in memory-intensive situations while using FSDP, we recommend setting `sync_each_batch` to `True` in the [`~utils.GradientAccumulationPlugin`] to disable `no_sync` (a short sketch appears at the end of this section).
See the example below where we fine-tune Mixtral (47B parameters) on 8 A100-80GB GPUs. We see that even for a modest `gradient_accumulation_steps=2` we quickly go out-of-memory (OOM) if `no_sync` is enabled. Again, this is due to additional memory overheads due to FSDP's `no_sync`. However, if `no_sync` is disabled via `sync_each_batch=True`, then the memory consumption for `gradient_accumulation_steps=16` reverts to that of `gradient_accumulation_steps=1`.
| Model | `no_sync` (accum=1) | `no_sync` (accum=2) | `no_sync` disabled (accum=16)
| :-------------: | :-----------------: | :-----------------: | :-----------------:
mixtral 8x7B | 69G | OOM | 69G
> [!WARNING]
> Disabling `no_sync` means there _will be slowdown_ due to the extra data syncs, as explained in the earlier sections of this guide.
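As a minimal sketch of the recommendation above (the number of accumulation steps is illustrative):
```python
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# sync_each_batch=True disables `no_sync`, trading some speed for FSDP memory savings
plugin = GradientAccumulationPlugin(num_steps=16, sync_each_batch=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
```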

View File

@ -13,9 +13,9 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Accelerate's internal mechanisms
# 🤗 Accelerate's internal mechanisms
Internally, Accelerate works by first analyzing the environment in which the script is launched to determine which
Internally, 🤗 Accelerate works by first analyzing the environment in which the script is launched to determine which
kind of distributed setup is used, how many different processes there are and which one the current script is in. All
that information is stored in the [`~AcceleratorState`].
@ -69,6 +69,4 @@ setting the same seed in the main random number generator in all processes.
</Tip>
If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, and you have passed `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`], these classes will directly inherit from `StatefulDataLoader` instead, and maintain a `state_dict`.
For more details about the internals, see the [Internals page](../package_reference/torch_wrappers).
For more details about the internals, see the [Internals page](package_reference/torch_wrappers).

View File

@ -13,12 +13,12 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Low precision training methods
# Low Precision Training Methods
The release of new kinds of hardware led to the emergence of new training paradigms that better utilize them. Currently, this is in the form of training
in 8-bit precision using packages such as [TransformersEngine](https://github.com/NVIDIA/TransformerEngine) (TE) or [MS-AMP](https://github.com/Azure/MS-AMP/tree/main).
in 8-bit precision using packages such as [TranformersEngine](https://github.com/NVIDIA/TransformerEngine) (TE) or [MS-AMP](https://github.com/Azure/MS-AMP/tree/main).
For an introduction to the topics discussed today, we recommend reviewing the [low-precision usage guide](../usage_guides/low_precision_training) as this documentation will reference it regularly.
For an introduction to the topics discussed today, we recommend reviewing the [low-precision usage guide](../usage_guides/low_precision_training.md) as this documentation will reference it regularly.
## A Quick Chart
@ -34,9 +34,9 @@ MS-AMP O3 | FP8 | FP8 | FP8 | FP16 | FP8 | FP8+FP16
## `TransformersEngine`
`TransformersEngine` is the first solution to trying to train in 8-bit floating point. It works by using drop-in replacement layers for certain ones in a model that utilizes their FP8-engine to reduce the number of bits (such as 32 to 8) without degrading the final accuracy of the model.
`TranformersEngine` is the first solution to trying to train in 8-bit floating point. It works by using drop-in replacement layers for certain ones in a model that utilize their FP8-engine to reduce the number of bits (such as 32 to 8) without degrading the final accuracy of the model.
Specifically, Accelerate will find and replace the following layers with `TransformersEngine` versions:
Specifically, 🤗 Accelerate will find and replace the following layers with `TranformersEngine` versions:
* `nn.LayerNorm` for `te.LayerNorm`
* `nn.Linear` for `te.Linear`
@ -50,7 +50,7 @@ The `TransformerEngine` can receive many different arguments that customize how
* `margin`: The margin to use for the gradient scaling.
* `interval`: The interval to use for how often the scaling factor is recomputed.
* `fp8_format`: The format to use for the FP8 recipe. Must be one of `HYBRID` or `E4M3`. (Generally `HYBRID` for training, `E4M3` for evaluation)
* `fp8_format`: The format to use for the FP8 recipe. Must be one of `E4M3` or `HYBRID`.
* `amax_history_len`: The length of the history to use for the scaling factor computation
* `amax_compute_algo`: The algorithm to use for the scaling factor computation. Must be one of `max` or `most_recent`.
* `override_linear_precision`: Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision.
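These arguments map onto [`utils.FP8RecipeKwargs`]; a hedged sketch of passing them through the [`Accelerator`] (the specific values are placeholders) is:
```python
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

# Placeholder values for the TransformerEngine arguments listed above
fp8_kwargs = FP8RecipeKwargs(
    backend="te",
    fp8_format="HYBRID",
    amax_history_len=32,
    amax_compute_algo="max",
)
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[fp8_kwargs])
```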
@ -65,10 +65,10 @@ MS-AMP takes a different approach to `TransformersEngine` by providing three dif
* The base optimization level (`O1`), passes communications of the weights (such as in DDP) in FP8, stores the weights of the model in FP16, and leaves the optimizer states in FP32. The main benefit of this optimization level is that we can reduce the communication bandwidth by essentially half. Additionally, more GPU memory is saved due to 1/2 of everything being cast in FP8, and the weights being cast to FP16. Notably, both the optimizer states remain in FP32.
* The second optimization level (`O2`) improves upon this by also reducing the precision of the optimizer states. One is in FP8 while the other is in FP16. Generally it's been shown that this will only provide a net-gain of no degraded end accuracy, increased training speed, and reduced memory as now every state is either in FP16 or FP8.
* The second optimization level (`O2`) improves upon this by also reducing the precision of the optimizer states. One is in FP8 while the other is in FP16. Generally it's been shown that this will only provide a net-gain of no degredated end accuracy, increased training speed, and reduced memory as now every state is either in FP16 or FP8.
* Finally, MS-AMP has a third optimization level (`O3`) which helps during DDP scenarios such as DeepSpeed. The weights of the model in memory are fully cast to FP8, and the master weights are now stored in FP16. This fully reduces memory by the highest factor as now not only is almost everything in FP8, only two states are left in FP16. Currently, only DeepSpeed versions up through 0.9.2 are supported, so this capability is not included in the Accelerate integration
* Finally, MS-AMP has a third optimization level (`O3`) which helps during DDP scenarios such as DeepSpeed. The weights of the model in memory are fully cast to FP8, and the master weights are now stored in FP16. This fully reduces memory by the highest factor as now not only is almost everything in FP8, only two states are left in FP16. Currently, only DeepSpeed versions up through 0.9.2 are supported, so this capability is not included in the 🤗 Accelerate integration
## Combining the two
More experiments need to be performed but it's been noted that combining both MS-AMP and TransformersEngine can lead to the highest throughput by relying on NVIDIA's optimized FP8 operators and utilizing how MS-AMP reduces the memory overhead.
More experiments need to be performed but it's been noted that combining both MS-AMP and TransformersEngine can lead to the highest throughput by relying on NVIDIA's optimized FP8 operators and utilizing how MS-AMP reduces the memory overhead.

View File

@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Comparing performance across distributed setups
# Comparing performance between different device setups
Evaluating and comparing the performance from different setups can be quite tricky if you don't know what to look for.
For example, you cannot run the same script with the same batch size across TPU, multi-GPU, and single-GPU with Accelerate
@ -43,13 +43,13 @@ Why is this important? Under the hood this will set **5** different seed setting
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # or torch.xpu.manual_seed_all, etc
torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
if is_torch_xla_available():
if is_tpu_available():
xm.set_rng_state(seed)
```
The random state, numpy's state, torch, torch's device state, and if TPUs are available torch_xla's cuda state.
The random state, numpy's state, torch, torch's cuda state, and if TPUs are available torch_xla's cuda state.
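For reference, a typical call (assuming `set_seed` is imported from `accelerate.utils`) is simply:
```python
from accelerate.utils import set_seed

set_seed(42)  # seeds python, numpy, torch and, where available, the device-specific RNGs listed above
```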
## Observed Batch Sizes

View File

@ -13,9 +13,9 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Training on TPUs
# Training on TPUs with 🤗 Accelerate
Training on TPUs can be slightly different from training on multi-gpu, even with Accelerate. This guide aims to show you
Training on TPUs can be slightly different from training on multi-gpu, even with 🤗 Accelerate. This guide aims to show you
where you should be careful and why, as well as the best practices in general.
## Training in a Notebook
@ -81,7 +81,7 @@ notebook_launcher(training_function)
<Tip>
The `notebook_launcher` will default to 8 processes if Accelerate has been configured for a TPU
The `notebook_launcher` will default to 8 processes if 🤗 Accelerate has been configured for a TPU
</Tip>
@ -128,10 +128,10 @@ And finally calling the training function with:
## Mixed Precision and Global Variables
As mentioned in the [mixed precision tutorial](../usage_guides/mixed_precision), Accelerate supports fp16 and bf16, both of which can be used on TPUs.
As mentioned in the [mixed precision tutorial](../usage_guides/mixed_precision), 🤗 Accelerate supports fp16 and bf16, both of which can be used on TPUs.
That being said, ideally `bf16` should be utilized as it is extremely efficient to use.
There are two "layers" when using `bf16` and Accelerate on TPUs, at the base level and at the operation level.
There are two "layers" when using `bf16` and 🤗 Accelerate on TPUs, at the base level and at the operation level.
At the base level, this is enabled when passing `mixed_precision="bf16"` to `Accelerator`, such as:
```python
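from accelerate import Accelerator

# Assumed completion of the truncated snippet: bf16 enabled at the base level
accelerator = Accelerator(mixed_precision="bf16")
```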

Binary file not shown.

Before

Width:  |  Height:  |  Size: 105 KiB

View File

@ -15,7 +15,7 @@ rendered properly in your Markdown viewer.
# Accelerate
Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.
🤗 Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.
```diff
+ from accelerate import Accelerator
@ -37,7 +37,7 @@ Accelerate is a library that enables the same PyTorch code to be run across any
scheduler.step()
```
Built on `torch_xla` and `torch.distributed`, Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms.
Built on `torch_xla` and `torch.distributed`, 🤗 Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms.
Convert existing codebases to utilize [DeepSpeed](usage_guides/deepspeed), perform [fully sharded data parallelism](usage_guides/fsdp), and have automatic support for mixed-precision training!
<Tip>
@ -56,11 +56,11 @@ accelerate launch {my_script.py}
<div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./basic_tutorials/overview"
><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div>
<p class="text-gray-700">Learn the basics and become familiar with using Accelerate. Start here if you are using Accelerate for the first time!</p>
<p class="text-gray-700">Learn the basics and become familiar with using 🤗 Accelerate. Start here if you are using 🤗 Accelerate for the first time!</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./usage_guides/explore"
><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
<p class="text-gray-700">Practical guides to help you achieve a specific goal. Take a look at these guides to learn how to use Accelerate to solve real-world problems.</p>
<p class="text-gray-700">Practical guides to help you achieve a specific goal. Take a look at these guides to learn how to use 🤗 Accelerate to solve real-world problems.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./concept_guides/gradient_synchronization"
><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div>
@ -68,7 +68,7 @@ accelerate launch {my_script.py}
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./package_reference/accelerator"
><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
<p class="text-gray-700">Technical descriptions of how Accelerate classes and methods work.</p>
<p class="text-gray-700">Technical descriptions of how 🤗 Accelerate classes and methods work.</p>
</a>
</div>
</div>

View File

@ -15,12 +15,197 @@ rendered properly in your Markdown viewer.
# Accelerator
The [`Accelerator`] is the main class for enabling distributed training on any type of training setup. Read the [Add Accelerator to your code](../basic_tutorials/migration) tutorial to learn more about how to add the [`Accelerator`] to your script.
The [`Accelerator`] is the main class provided by 🤗 Accelerate.
It serves as the main entry point for the API.
## Accelerator[[api]]
## Quick adaptation of your code
To quickly adapt your script to work on any kind of setup with 🤗 Accelerate just:
1. Initialize an [`Accelerator`] object (that we will call `accelerator` throughout this page) as early as possible in your script.
2. Pass your dataloader(s), model(s), optimizer(s), and scheduler(s) to the [`~Accelerator.prepare`] method.
3. Remove all the `.cuda()` or `.to(device)` from your code and let the `accelerator` handle the device placement for you.
<Tip>
Step three is optional, but considered a best practice.
</Tip>
4. Replace `loss.backward()` in your code with `accelerator.backward(loss)`
5. Gather your predictions and labels before storing them or using them for metric computation using [`~Accelerator.gather`]
<Tip warning={true}>
Step five is mandatory when using distributed evaluation
</Tip>
In most cases this is all that is needed. The next section lists a few more advanced use cases and nice features
that you should search for in your code and replace with the corresponding methods of your `accelerator`:
## Advanced recommendations
### Printing
`print` statements should be replaced by [`~Accelerator.print`] to be printed once per process:
```diff
- print("My thing I want to print!")
+ accelerator.print("My thing I want to print!")
```
### Executing processes
#### Once on a single server
For statements that should be executed once per server, use [`~Accelerator.is_local_main_process`]:
```python
if accelerator.is_local_main_process:
    do_thing_once_per_server()
```
A function can be wrapped using the [`~Accelerator.on_local_main_process`] function to achieve the same
behavior on a function's execution:
```python
@accelerator.on_local_main_process
def do_my_thing():
    "Something done once per server"
    do_thing_once_per_server()
```
#### Only ever once across all servers
For statements that should only ever be executed once, use [`~Accelerator.is_main_process`]:
```python
if accelerator.is_main_process:
    do_thing_once()
```
A function can be wrapped using the [`~Accelerator.on_main_process`] function to achieve the same
behavior on a function's execution:
```python
@accelerator.on_main_process
def do_my_thing():
    "Something done only once across all servers"
    do_thing_once()
```
#### On specific processes
If a function should be run on a specific overall or local process index, there are similar decorators
to achieve this:
```python
@accelerator.on_local_process(local_process_idx=0)
def do_my_thing():
    "Something done on process index 0 on each server"
    do_thing_on_index_zero_on_each_server()
```
```python
@accelerator.on_process(process_index=0)
def do_my_thing():
    "Something done on process index 0"
    do_thing_on_index_zero()
```
### Synchronicity control
Use [`~Accelerator.wait_for_everyone`] to make sure all processes reach that point before continuing (useful before saving a model, for instance).
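For example, right before saving a model (a sketch, assuming an `accelerator` created as above):
```python
# assumes `accelerator` was created earlier in the script
accelerator.wait_for_everyone()  # every process blocks here until all of them have arrived
# it is now safe to save the model or do other main-process-only work
```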
### Saving and loading
```python
model = MyModel()
model = accelerator.prepare(model)
```
Use [`~Accelerator.save_model`] instead of `torch.save` to save a model. It will remove all model wrappers added during the distributed process, get the state_dict of the model and save it. The state_dict will be in the same precision as the model being trained.
```diff
- torch.save(state_dict, "my_state.pkl")
+ accelerator.save_model(model, save_directory)
```
[`~Accelerator.save_model`] can also save a model into sharded checkpoints or with safetensors format.
Here is an example:
```python
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
```
#### 🤗 Transformers models
If you are using models from the [🤗 Transformers](https://huggingface.co/docs/transformers/) library, you can use the `.save_pretrained()` method.
```python
from transformers import AutoModel
model = AutoModel.from_pretrained("bert-base-cased")
model = accelerator.prepare(model)
# ...fine-tune with PyTorch...
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
"path/to/my_model_directory",
is_main_process=accelerator.is_main_process,
save_function=accelerator.save,
)
```
This will ensure your model stays compatible with other 🤗 Transformers functionality like the `.from_pretrained()` method.
```python
from transformers import AutoModel
model = AutoModel.from_pretrained("path/to/my_model_directory")
```
### Operations
Use [`~Accelerator.clip_grad_norm_`] instead of ``torch.nn.utils.clip_grad_norm_`` and [`~Accelerator.clip_grad_value_`] instead of ``torch.nn.utils.clip_grad_value_``.
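For example (a sketch; the `max_norm` value is arbitrary, and `accelerator`/`model` are assumed to have been prepared as above):
```python
# call after accelerator.backward(loss) and before optimizer.step()
accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
```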
### Gradient Accumulation
To perform gradient accumulation, use [`~Accelerator.accumulate`] and specify `gradient_accumulation_steps`.
When training on multiple devices, this will also automatically ensure the gradients are synced or unsynced as appropriate,
check whether the step should actually be performed, and auto-scale the loss:
```diff
- accelerator = Accelerator()
+ accelerator = Accelerator(gradient_accumulation_steps=2)
  for (input, label) in training_dataloader:
+     with accelerator.accumulate(model):
          predictions = model(input)
          loss = loss_function(predictions, label)
          accelerator.backward(loss)
          optimizer.step()
          scheduler.step()
          optimizer.zero_grad()
```
#### GradientAccumulationPlugin
[[autodoc]] utils.GradientAccumulationPlugin
Instead of passing `gradient_accumulation_steps` you can instantiate a `GradientAccumulationPlugin` and pass it to the [`Accelerator`]'s `__init__`
as `gradient_accumulation_plugin`. You can only pass either one of `gradient_accumulation_plugin` or `gradient_accumulation_steps`; passing both will raise an error.
```diff
from accelerate.utils import GradientAccumulationPlugin
gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2)
- accelerator = Accelerator()
+ accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin)
```
In addition to the number of steps, this also lets you configure whether or not you adjust your learning rate scheduler to account for the change in steps due to accumulation.
## Overall API documentation:
[[autodoc]] Accelerator
## Utilities
[[autodoc]] accelerate.utils.gather_object

View File

@ -15,96 +15,33 @@ rendered properly in your Markdown viewer.
# Working with large models
## Dispatch and offload
### init_empty_weights
## Dispatching and Offloading Models
[[autodoc]] big_modeling.init_empty_weights
### cpu_offload
[[autodoc]] big_modeling.cpu_offload
### cpu_offload_with_hook
[[autodoc]] big_modeling.cpu_offload_with_hook
### disk_offload
[[autodoc]] big_modeling.disk_offload
### dispatch_model
[[autodoc]] big_modeling.dispatch_model
### load_checkpoint_and_dispatch
[[autodoc]] big_modeling.load_checkpoint_and_dispatch
### load_checkpoint_in_model
[[autodoc]] big_modeling.load_checkpoint_in_model
### infer_auto_device_map
[[autodoc]] utils.infer_auto_device_map
## Hooks
## Model Hooks
### ModelHook
### Hook Classes
[[autodoc]] hooks.ModelHook
### AlignDevicesHook
[[autodoc]] hooks.AlignDevicesHook
### SequentialHook
[[autodoc]] hooks.SequentialHook
### LayerwiseCastingHook
[[autodoc]] hooks.LayerwiseCastingHook
## Adding Hooks
### add_hook_to_module
### Adding Hooks
[[autodoc]] hooks.add_hook_to_module
### attach_execution_device_hook
[[autodoc]] hooks.attach_execution_device_hook
### attach_align_device_hook
[[autodoc]] hooks.attach_align_device_hook
### attach_align_device_hook_on_blocks
[[autodoc]] hooks.attach_align_device_hook_on_blocks
### attach_layerwise_casting_hooks
[[autodoc]] big_modeling.attach_layerwise_casting_hooks
## Removing Hooks
### remove_hook_from_module
### Removing Hooks
[[autodoc]] hooks.remove_hook_from_module
### remove_hook_from_submodules
[[autodoc]] hooks.remove_hook_from_submodules
## Utilities
### has_offloaded_params
[[autodoc]] utils.has_offloaded_params
### align_module_device
[[autodoc]] utils.align_module_device
[[autodoc]] hooks.remove_hook_from_submodules

View File

@ -139,17 +139,16 @@ values. They can also be passed in manually.
* `--cpu` (`bool`) -- Whether or not to force the training on the CPU.
* `--multi_gpu` (`bool`) -- Whether or not this should launch a distributed GPU training.
* `--tpu` (`bool`) -- Whether or not this should launch a TPU training.
* `--ipex` (`bool`) -- Whether or not this should launch an Intel Pytorch Extension (IPEX) training. **This argument is deprecated, will be removed in Accelerate v1.10**
* `--ipex` (`bool`) -- Whether or not this should launch an Intel Pytorch Extension (IPEX) training.
**Resource Selection Arguments**:
The following arguments are useful for fine-tuning how available hardware should be used
* `--mixed_precision {no,fp16,bf16,fp8}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.
* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.
* `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel.
* `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.
* `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. Can be tuned for optimal performance.
* `--enable_cpu_affinity` (`bool`) -- Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.
**Training Paradigm Arguments**:
@ -158,34 +157,27 @@ The following arguments are useful for selecting which training paradigm to use.
* `--use_deepspeed` (`bool`) -- Whether or not to use DeepSpeed for training.
* `--use_fsdp` (`bool`) -- Whether or not to use FullyShardedDataParallel for training.
* `--use_megatron_lm` (`bool`) -- Whether or not to use Megatron-LM for training.
* `--use_xpu` (`bool`) -- Whether to use IPEX plugin to speed up training on XPU specifically. **This argument is deprecated and ignored, will be removed in Accelerate v1.10**
* `--use_xpu` (`bool`) -- Whether to use IPEX plugin to speed up training on XPU specifically.
**Distributed GPU Arguments**:
The following arguments are only useful when `multi_gpu` is passed or multi-gpu training is configured through `accelerate config`:
* `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-separated list
* `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-seperated list
* `--same_network` (`bool`) -- Whether all machines used for multinode training exist on the same local network.
* `--machine_rank` (`int`) -- The rank of the machine on which this script is launched.
* `--main_process_ip` (`str`) -- The IP address of the machine of rank 0.
* `--main_process_port` (`int`) -- The port to use to communicate with the machine of rank 0.
* `-t`, `--tee` (`str`) -- Tee std streams into a log file and also to console.
* `--log_dir` (`str`) -- Base directory to use for log files when using torchrun/torch.distributed.run as launcher. Use with --tee to redirect std streams info log files.
* `--role` (`str`) -- User-defined role for the workers.
* `--rdzv_backend` (`str`) -- The rendezvous method to use, such as 'static' (the default) or 'c10d'
* `--machine_rank MACHINE_RANK` (`int`) -- The rank of the machine on which this script is launched.
* `--main_process_ip MAIN_PROCESS_IP` (`str`) -- The IP address of the machine of rank 0.
* `--main_process_port MAIN_PROCESS_PORT` (`int`) -- The port to use to communicate with the machine of rank 0.
* `--rdzv_backend` (`str`) -- The rendezvous method to use, such as "static" or "c10d"
* `--rdzv_conf` (`str`) -- Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).
* `--max_restarts` (`int`) -- Maximum number of worker group restarts before failing.
* `--monitor_interval` (`int`) -- Interval, in seconds, to monitor the state of workers.
* `--monitor_interval` (`float`) -- Interval, in seconds, to monitor the state of workers.
**TPU Arguments**:
The following arguments are only useful when `tpu` is passed or TPU training is configured through `accelerate config`:
* `--tpu_cluster` (`bool`) -- Whether to use a GCP TPU pod for training.
* `--tpu_use_sudo` (`bool`) -- Whether to use `sudo` when running the TPU training script in each pod.
* `--vm` (`str`) -- List of single Compute VM instance names. If not provided we assume usage of instance groups. For TPU pods.
* `--env` (`str`) -- List of environment variables to set on the Compute VM instances. For TPU pods.
* `--main_training_function` (`str`) -- The name of the main function to be executed in your script (only for TPU training).
* `--main_training_function MAIN_TRAINING_FUNCTION` (`str`) -- The name of the main function to be executed in your script.
* `--downcast_bf16` (`bool`) -- Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.
**DeepSpeed Arguments**:
@ -196,16 +188,14 @@ The following arguments are only useful when `use_deepspeed` is passed or `deeps
* `--zero_stage` (`int`) -- DeepSpeed's ZeRO optimization stage.
* `--offload_optimizer_device` (`str`) -- Decides where (none|cpu|nvme) to offload optimizer states.
* `--offload_param_device` (`str`) -- Decides where (none|cpu|nvme) to offload parameters.
* `--offload_optimizer_nvme_path` (`str`) -- Decides Nvme Path to offload optimizer states.
* `--gradient_accumulation_steps` (`int`) -- No of gradient_accumulation_steps used in your training script.
* `--gradient_clipping` (`float`) -- Gradient clipping value used in your training script.
* `--zero3_init_flag` (`str`) -- Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with DeepSpeed ZeRO Stage-3.
* `--zero3_save_16bit_model` (`str`) -- Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. Only applicable with DeepSpeed ZeRO Stage-3.
* `--deepspeed_hostfile` (`str`) -- DeepSpeed hostfile for configuring multi-node compute resources.
* `--deepspeed_exclusion_filter` (`str`) -- DeepSpeed exclusion filter string when using multi-node setup.
* `--deepspeed_inclusion_filter` (`str`) -- DeepSpeed inclusion filter string when using multi-node setup.
* `--deepspeed_exclusion_filter` (`str`) -- DeepSpeed exclusion filter string when using mutli-node setup.
* `--deepspeed_inclusion_filter` (`str`) -- DeepSpeed inclusion filter string when using mutli-node setup.
* `--deepspeed_multinode_launcher` (`str`) -- DeepSpeed multi-node launcher to use.
* `--deepspeed_moe_layer_cls_names` (`str`) -- comma-separated list of transformer MoE layer class names (case-sensitive) to wrap, e.g, `MixtralSparseMoeBlock` `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock`
**Fully Sharded Data Parallelism Arguments**:
@ -218,11 +208,6 @@ The following arguments are only useful when `use_fsdp` is passed or Fully Shard
* `--fsdp_transformer_layer_cls_to_wrap` (`str`) -- Transformer layer class name (case-sensitive) to wrap, e.g, `BertLayer`, `GPTJBlock`, `T5Block` ...
* `--fsdp_backward_prefetch_policy` (`str`) -- FSDP's backward prefetch policy.
* `--fsdp_state_dict_type` (`str`) -- FSDP's state dict type.
* `--fsdp_forward_prefetch` (`str`) -- FSDP forward prefetch.
* `--fsdp_use_orig_params` (`str`) -- If True, allows non-uniform `requires_grad` mixed in a FSDP unit.
* `--fsdp_cpu_ram_efficient_loading` (`str`) -- If true, only the first process loads the pretrained model checkpoint while all other processes have empty weights. When using this, `--fsdp_sync_module_states` needs to be True.
* `--fsdp_sync_module_states` (`str`) -- If true, each individually wrapped FSDP unit will broadcast module parameters from rank 0.
* `--fsdp_activation_checkpointing` (`bool`) -- Decides Whether intermediate activations are freed during the forward pass, and a checkpoint is left as a placeholder
**Megatron-LM Arguments**:
@ -233,21 +218,9 @@ The following arguments are only useful when `use_megatron_lm` is passed or Mega
* `--megatron_lm_num_micro_batches` (``) -- Megatron-LM's number of micro batches when PP degree > 1.
* `--megatron_lm_sequence_parallelism` (``) -- Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1.
* `--megatron_lm_recompute_activations` (``) -- Decides Whether (true|false) to enable Selective Activation Recomputation.
* `--megatron_lm_use_distributed_optimizer` (``) -- Decides Whether (true|false) to use distributed optimizer which shards optimizer state and gradients across Data Parallel (DP) ranks.
* `--megatron_lm_use_distributed_optimizer` (``) -- Decides Whether (true|false) to use distributed optimizer which shards optimizer state and gradients across Data Pralellel (DP) ranks.
* `--megatron_lm_gradient_clipping` (``) -- Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable).
**FP8 Arguments**:
* `--fp8_backend` (`str`) -- Choose a backend to train with FP8 (`te` or `msamp`)
* `--fp8_use_autocast_during_eval` (`bool`) -- Whether to use FP8 autocast during eval mode (useful only when `--fp8_backend=te` is passed). Generally better metrics are found when this is not passed.
* `--fp8_margin` (`int`) -- The margin to use for the gradient scaling (useful only when `--fp8_backend=te` is passed).
* `--fp8_interval` (`int`) -- The interval to use for how often the scaling factor is recomputed (useful only when `--fp8_backend=te` is passed).
* `--fp8_format` (`str`) -- The format to use for the FP8 recipe (useful only when `--fp8_backend=te` is passed).
* `--fp8_amax_history_len` (`int`) -- The length of the history to use for the scaling factor computation (useful only when `--fp8_backend=te` is passed).
* `--fp8_amax_compute_algo` (`str`) -- The algorithm to use for the scaling factor computation. (useful only when `--fp8_backend=te` is passed).
* `--fp8_override_linear_precision` (`Tuple[bool, bool, bool]`) -- Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision.
* `--fp8_opt_level` (`str`) -- What level of 8-bit collective communication should be used with MS-AMP (useful only when `--fp8_backend=msamp` is passed)
**AWS SageMaker Arguments**:
The following arguments are only useful when training in SageMaker

View File

@ -13,32 +13,16 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# DeepSpeed utilities
## DeepSpeedPlugin
## get_active_deepspeed_plugin
[[autodoc]] utils.get_active_deepspeed_plugin
# Utilities for DeepSpeed
[[autodoc]] utils.DeepSpeedPlugin
[[autodoc]] utils.deepspeed.DummyScheduler
[[autodoc]] utils.DummyOptim
## DeepSpeedEngineWrapper
[[autodoc]] utils.DummyScheduler
[[autodoc]] utils.deepspeed.DeepSpeedEngineWrapper
[[autodoc]] utils.DeepSpeedEngineWrapper
## DeepSpeedOptimizerWrapper
[[autodoc]] utils.DeepSpeedOptimizerWrapper
[[autodoc]] utils.deepspeed.DeepSpeedOptimizerWrapper
## DeepSpeedSchedulerWrapper
[[autodoc]] utils.deepspeed.DeepSpeedSchedulerWrapper
## DummyOptim
[[autodoc]] utils.deepspeed.DummyOptim
## DummyScheduler
[[autodoc]] utils.DeepSpeedSchedulerWrapper

View File

@ -1,38 +0,0 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# FP8
Below are functions and classes related to the underlying FP8 implementation.
## FP8RecipeKwargs
[[autodoc]] utils.FP8RecipeKwargs
## convert_model
[[autodoc]] utils.convert_model
## has_transformer_engine_layers
[[autodoc]] utils.has_transformer_engine_layers
## contextual_fp8_autocast
[[autodoc]] utils.contextual_fp8_autocast
## apply_fp8_autowrap
[[autodoc]] utils.apply_fp8_autowrap

View File

@ -13,34 +13,6 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Fully Sharded Data Parallel utilities
# Utilities for Fully Sharded Data Parallelism
## enable_fsdp_ram_efficient_loading
[[autodoc]] utils.enable_fsdp_ram_efficient_loading
## disable_fsdp_ram_efficient_loading
[[autodoc]] utils.disable_fsdp_ram_efficient_loading
## merge_fsdp_weights
[[autodoc]] utils.merge_fsdp_weights
## FullyShardedDataParallelPlugin
[[autodoc]] utils.FullyShardedDataParallelPlugin
## fsdp2_load_full_state_dict
[[autodoc]] utils.fsdp2_load_full_state_dict
## fsdp2_switch_optimizer_parameters
[[autodoc]] utils.fsdp2_switch_optimizer_parameters
## fsdp2_prepare_model
[[autodoc]] utils.fsdp2_prepare_model
## fsdp2_prepare_auto_wrap_policy
[[autodoc]] utils.FullyShardedDataParallelPlugin

View File

@ -1,22 +0,0 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Pipeline parallelism
Accelerate supports pipeline parallelism for large-scale training with the PyTorch [torch.distributed.pipelining](https://pytorch.org/docs/stable/distributed.pipelining.html) API.
## prepare_pippy
[[autodoc]] inference.prepare_pippy
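As a rough usage sketch (the exact arguments shown here are an assumption; refer to the autodoc above for the authoritative signature):
```python
import torch
from accelerate.inference import prepare_pippy
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("gpt2")  # illustrative model
example_inputs = torch.randint(0, 1000, (1, 16))

# split the model into pipeline stages across the available devices
model = prepare_pippy(model, example_args=(example_inputs,))
```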

View File

@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Kwargs handlers
# Kwargs Handlers
The following objects can be passed to the main [`Accelerator`] to customize how some PyTorch objects
related to distributed training or mixed precision are created.
@ -30,10 +30,6 @@ related to distributed training or mixed precision are created.
[[autodoc]] utils.FP8RecipeKwargs
## ProfileKwargs
[[autodoc]] utils.ProfileKwargs
## GradScalerKwargs
[[autodoc]] GradScalerKwargs
@ -41,7 +37,3 @@ related to distributed training or mixed precision are created.
## InitProcessGroupKwargs
[[autodoc]] InitProcessGroupKwargs
## KwargsHandler
[[autodoc]] utils.KwargsHandler

View File

@ -17,10 +17,6 @@ rendered properly in your Markdown viewer.
Functions for launching training on distributed processes.
## notebook_launcher
[[autodoc]] accelerate.notebook_launcher
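For example (a sketch; the number of processes depends on your hardware):
```python
from accelerate import notebook_launcher

def training_function():
    ...  # your training loop, including the Accelerator setup

notebook_launcher(training_function, num_processes=2)
```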
## debug_launcher
[[autodoc]] accelerate.debug_launcher

View File

@ -13,9 +13,9 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Logging
# Logging with Accelerate
Refer to the [Troubleshooting guide](../usage_guides/troubleshooting#logging) or to the example below to learn
how to use Accelerate's logger.
how to use 🤗 Accelerate's logger.
[[autodoc]] logging.get_logger

View File

@ -13,36 +13,20 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Megatron-LM utilities
## MegatronLMPlugin
# Utilities for Megatron-LM
[[autodoc]] utils.MegatronLMPlugin
## MegatronLMDummyScheduler
[[autodoc]] utils.MegatronLMDummyScheduler
## MegatronLMDummyDataLoader
[[autodoc]] utils.MegatronLMDummyDataLoader
## AbstractTrainStep
[[autodoc]] utils.AbstractTrainStep
## GPTTrainStep
[[autodoc]] utils.GPTTrainStep
## BertTrainStep
[[autodoc]] utils.BertTrainStep
## T5TrainStep
[[autodoc]] utils.T5TrainStep
## avg_losses_across_data_parallel_group
[[autodoc]] utils.avg_losses_across_data_parallel_group

View File

@ -21,14 +21,8 @@ instances share the same state, which is initialized on the first instantiation.
These classes are immutable and store information about certain configurations or
states.
## PartialState
[[autodoc]] state.PartialState
## AcceleratorState
[[autodoc]] state.AcceleratorState
## GradientState
[[autodoc]] state.GradientState

View File

@ -13,36 +13,25 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# DataLoaders, Optimizers, and Schedulers
# Wrapper classes for torch Dataloaders, Optimizers, and Schedulers
The internal classes Accelerate uses to prepare objects for distributed training
when calling [`~Accelerator.prepare`].
## DataLoader utilities
## Datasets and DataLoaders
[[autodoc]] data_loader.prepare_data_loader
[[autodoc]] data_loader.skip_first_batches
## BatchSamplerShard
[[autodoc]] data_loader.BatchSamplerShard
## IterableDatasetShard
[[autodoc]] data_loader.IterableDatasetShard
## DataLoaderShard
[[autodoc]] data_loader.DataLoaderShard
## DataLoaderDispatcher
[[autodoc]] data_loader.DataLoaderDispatcher
## AcceleratedOptimizer
## Optimizers
[[autodoc]] optimizer.AcceleratedOptimizer
## AcceleratedScheduler
## Schedulers
[[autodoc]] scheduler.AcceleratedScheduler

View File

@ -13,43 +13,23 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Experiment Trackers
# Experiment Tracking
## GeneralTracker
## The Base Tracker Class
[[autodoc]] tracking.GeneralTracker
## TensorBoardTracker
## Integrated Trackers
[[autodoc]] tracking.TensorBoardTracker
- __init__
## WandBTracker
[[autodoc]] tracking.WandBTracker
- __init__
## CometMLTracker
[[autodoc]] tracking.CometMLTracker
- __init__
## AimTracker
[[autodoc]] tracking.AimTracker
- __init__
## MLflowTracker
[[autodoc]] tracking.MLflowTracker
- __init__
## ClearMLTracker
[[autodoc]] tracking.ClearMLTracker
- __init__
## SwanLabTracker
[[autodoc]] tracking.SwanLabTracker
- __init__

View File

@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Utility functions and classes
# Helpful Utilities
Below are a variety of utility functions that 🤗 Accelerate provides, broken down by use-case.
@ -60,10 +60,12 @@ These are standalone dataclasses used for checks, such as the type of distribute
### Kwargs
These are configurable arguments for specific interactions throughout the PyTorch ecosystem that Accelerate handles under the hood.
These are configurable arguemnts for specific interactions throughout the PyTorch ecosystem that Accelerate handles under the hood.
[[autodoc]] utils.AutocastKwargs
[[autodoc]] utils.DistributedDataParallelKwargs
[[autodoc]] utils.FP8RecipeKwargs
@ -72,12 +74,10 @@ These are configurable arguments for specific interactions throughout the PyTorc
[[autodoc]] utils.InitProcessGroupKwargs
[[autodoc]] utils.KwargsHandler
## Plugins
These are plugins that can be passed to the [`Accelerator`] object. While they are defined elsewhere in the documentation,
for convenience all of them are available to see here:
for convience all of them are available to see here:
[[autodoc]] utils.DeepSpeedPlugin
@ -95,8 +95,6 @@ These are classes which can be configured and passed through to the appropriate
[[autodoc]] utils.BnbQuantizationConfig
[[autodoc]] utils.DataLoaderConfiguration
[[autodoc]] utils.ProjectConfiguration
## Environmental Variables
@ -126,10 +124,6 @@ These include data operations that mimic the same `torch` ops but can be used on
[[autodoc]] utils.gather_object
[[autodoc]] utils.get_grad_scaler
[[autodoc]] utils.get_mixed_precision_context_manager
[[autodoc]] utils.listify
[[autodoc]] utils.pad_across_processes
@ -156,7 +150,7 @@ These functionalities check the state of the current working environment includi
[[autodoc]] utils.is_torch_version
[[autodoc]] utils.is_torch_xla_available
[[autodoc]] utils.is_tpu_available
[[autodoc]] utils.is_xpu_available
@ -170,12 +164,6 @@ These functionalities check the state of the current working environment includi
When setting up 🤗 Accelerate for the first time, rather than running `accelerate config`, [`~utils.write_basic_config`] can be used as an alternative for quick configuration.
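For example (a sketch; the chosen mixed precision is arbitrary):
```python
from accelerate.utils import write_basic_config

# writes a default config file without going through the interactive `accelerate config` prompt
write_basic_config(mixed_precision="fp16")
```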
[[autodoc]] utils.set_numa_affinity
[[autodoc]] utils.environment.override_numa_affinity
[[autodoc]] utils.purge_accelerate_environment
## Memory
[[autodoc]] utils.find_executable_batch_size
@ -208,7 +196,8 @@ These utilities relate to interacting with PyTorch models
[[autodoc]] utils.set_module_tensor_to_device
[[autodoc]] utils.get_module_children_bottom_up
[[autodoc]] utils.shard_checkpoint
## Parallel
@ -218,8 +207,6 @@ These include general utilities that should be used when working in parallel.
[[autodoc]] utils.save
[[autodoc]] utils.load
[[autodoc]] utils.wait_for_everyone

View File

@ -9,80 +9,26 @@ Unless required by applicable law or agreed to in writing, software distributed
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Quicktour
# Quick tour
There are many ways to launch and run your code depending on your training environment ([torchrun](https://pytorch.org/docs/stable/elastic/run.html), [DeepSpeed](https://www.deepspeed.ai/), etc.) and available hardware. Accelerate offers a unified interface for launching and training on different distributed setups, allowing you to focus on your PyTorch training code instead of the intricacies of adapting your code to these different setups. This allows you to easily scale your PyTorch code for training and inference on distributed setups with hardware like GPUs and TPUs. Accelerate also provides Big Model Inference to make loading and running inference with really large models that usually don't fit in memory more accessible.
This guide aims to help you get started with 🤗 Accelerate quickly. It covers the essential steps you need to take to
enable distributed training, as well as the adjustments that you need to make in some common scenarios.
This quicktour introduces the three main features of Accelerate:
To help you navigate, the guide is split into two sections:
* [Getting Started with 🤗 Accelerate](#getting-started-with--accelerate): start here to learn how to modify your script to enable distributed training with 🤗 Accelerate
* [Common adaptations to the base case](#common-adaptations-to-the-base-case): check out this section for common deviations from the baseline scenario and what adjustments may need to be made to support them.
* a unified command line launching interface for distributed training scripts
* a training library for adapting PyTorch training code to run on different distributed setups
* Big Model Inference
## Getting started with 🤗 Accelerate
## Unified launch interface
### Enable distributed training in your script
Accelerate automatically selects the appropriate configuration values for any given distributed training framework (DeepSpeed, FSDP, etc.) through a unified configuration file generated from the [`accelerate config`](package_reference/cli#accelerate-config) command. You could also pass the configuration values explicitly to the command line which is helpful in certain situations like if you're using SLURM.
To use 🤗 Accelerate in your own training script, you have to modify four things:
But in most cases, you should always run [`accelerate config`](package_reference/cli#accelerate-config) first to help Accelerate learn about your training setup.
```bash
accelerate config
```
The [`accelerate config`](package_reference/cli#accelerate-config) command creates and saves a default_config.yaml file in Accelerate's cache folder. This file stores the configuration for your training environment, which helps Accelerate correctly launch your training script based on your machine.
After you've configured your environment, you can test your setup with [`accelerate test`](package_reference/cli#accelerate-test), which launches a short script to test the distributed environment.
```bash
accelerate test
```
> [!TIP]
> Add `--config_file` to the `accelerate test` or `accelerate launch` command to specify the location of the configuration file if it is saved in a non-default location like the cache.
Once your environment is setup, launch your training script with [`accelerate launch`](package_reference/cli#accelerate-launch)!
```bash
accelerate launch path_to_script.py --args_for_the_script
```
To learn more, check out the [Launch distributed code](basic_tutorials/launch) tutorial for more information about launching your scripts.
We also have a [configuration zoo](https://github.com/huggingface/accelerate/blob/main/examples/config_yaml_templates) which showcases a number of premade **minimal** example configurations for a variety of setups you can run.
## Adapt training code
The next main feature of Accelerate is the [`Accelerator`] class which adapts your PyTorch code to run on different distributed setups.
You only need to add a few lines of code to your training script to enable it to run on multiple GPUs or TPUs.
```diff
+ from accelerate import Accelerator
+ accelerator = Accelerator()
+ device = accelerator.device
+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(
+ model, optimizer, training_dataloader, scheduler
+ )
for batch in training_dataloader:
optimizer.zero_grad()
inputs, targets = batch
- inputs = inputs.to(device)
- targets = targets.to(device)
outputs = model(inputs)
loss = loss_function(outputs, targets)
+ accelerator.backward(loss)
optimizer.step()
scheduler.step()
```
1. Import and instantiate the [`Accelerator`] class at the beginning of your training script. The [`Accelerator`] class initializes everything necessary for distributed training, and it automatically detects your training environment (a single machine with a GPU, a machine with several GPUs, several machines with multiple GPUs or a TPU, etc.) based on how the code was launched.
1. Import the [`Accelerator`] main class and instantiate one in an `accelerator` object.
```python
from accelerate import Accelerator
@ -90,19 +36,27 @@ from accelerate import Accelerator
accelerator = Accelerator()
```
2. Remove calls like `.cuda()` on your model and input data. The [`Accelerator`] class automatically places these objects on the appropriate device for you.
Add this at the beginning of your training script as it will initialize everything necessary for distributed training.
You don't need to indicate the kind of environment you are in (a single machine with a GPU, a machine with several GPUs,
or several machines with multiple GPUs or a TPU), the library will detect this automatically.
> [!WARNING]
> This step is *optional* but it is considered best practice to allow Accelerate to handle device placement. You could also deactivate automatic device placement by passing `device_placement=False` when initializing the [`Accelerator`]. If you want to explicitly place objects on a device with `.to(device)`, make sure you use `accelerator.device` instead. For example, if you create an optimizer before placing a model on `accelerator.device`, training fails on a TPU.
2. Remove the `.to(device)` or `.cuda()` calls for your model and input data.
> [!WARNING]
> Accelerate does not use non-blocking transfers by default for its automatic device placement, which can result in potentially unwanted CUDA synchronizations. You can enable non-blocking transfers by passing a [`~utils.dataclasses.DataLoaderConfiguration`] with `non_blocking=True` set as the `dataloader_config` when initializing the [`Accelerator`]. As usual, non-blocking transfers will only work if the dataloader also has `pin_memory=True` set. Be wary that using non-blocking transfers from GPU to CPU may cause incorrect results if it results in CPU operations being performed on non-ready tensors.
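A minimal sketch of enabling this, assuming the dataloader itself is created with `pin_memory=True`:

```python
from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

# Enable non-blocking host-to-device copies for the prepared dataloaders.
dataloader_config = DataLoaderConfiguration(non_blocking=True)
accelerator = Accelerator(dataloader_config=dataloader_config)
```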
The `accelerator` object will handle placing these objects on the right device for you.
If you choose to leave those `.to(device)` calls, make sure to use the device provided by the `accelerator` object: `accelerator.device`.
```py
device = accelerator.device
```
<Tip warning={true}>
3. Pass all relevant PyTorch objects for training (optimizer, model, dataloader(s), learning rate scheduler) to the [`~Accelerator.prepare`] method as soon as they're created. This method wraps the model in a container optimized for your distributed setup, uses Accelerate's version of the optimizer and scheduler, and creates a sharded version of your dataloader for distribution across GPUs or TPUs.
You can fully deactivate the automatic device placement by passing along `device_placement=False` when
initializing the [`Accelerator`].
However, if you place your objects manually on the proper device, be careful to create your optimizer after putting your
model on `accelerator.device` or your training will fail on TPU.
</Tip>
3. Pass all PyTorch objects relevant to training (optimizer, model, dataloader(s), learning rate scheduler) to the
[`~Accelerator.prepare`] method as soon as these objects are created, before starting your actual
training loop:
```python
model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
@ -110,23 +64,55 @@ model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
)
```
4. Replace `loss.backward()` with [`~Accelerator.backward`] to use the correct `backward()` method for your training setup.
**Important notes**:
```py
accelerator.backward(loss)
```
* You should always pass the learning rate scheduler to [`~Accelerator.prepare`], however if the scheduler should *not* be stepped at each optimization step, pass `step_with_optimizer=False` to the [`Accelerator`] init.
* While you can send your dataloader to [`~Accelerator.prepare`] on its own (and there are cases for doing so, such as distributed inference), it's best to send it to [`~Accelerator.prepare`] together with the model and optimizer.
* If you wish to run distributed evaluation, send your validation dataloader to [`~Accelerator.prepare`] as well. There are some nuances to distributed validation, check the [Distributed evaluation](#add-distributed-evaluation) section of the guide.
* Any instruction using your training dataloader length (for instance if you want to log the number of total training
steps) should go after the call to [`~Accelerator.prepare`].
Read the [Accelerate internal mechanisms](concept_guides/internal_mechanism) guide to learn more about how Accelerate adapts your code.
Passing `DataLoader` objects to the [`~Accelerator.prepare`] method ensures that your dataloader will be sharded across
all GPUs/TPU cores available so that each one sees a different portion of the training dataset. In other words, if there are 8 processes and a dataset of 64 items, each process will see 8 of these items per iteration. Also, the random states
of all processes will be synchronized at the beginning of each iteration through your dataloader, to make sure the data
is shuffled the same way (if you decided to use `shuffle=True` or any kind of random sampler).
### Distributed evaluation
<Tip>
To perform distributed evaluation, pass your validation dataloader to the [`~Accelerator.prepare`] method:
The actual batch size for your training will be the number of devices used multiplied by the batch size you set in
your script. For instance, training on 4 GPUs with a batch size of 16 set when creating the training dataloader will
train at an actual batch size of 64 (4 * 16).
If you want the batch size to remain the same regardless of how many GPUs the script is run on, you can use the
option `split_batches=True` when creating and initializing [`Accelerator`].
Your training dataloader may change length when going through this method: if you run on X GPUs, it will have its
length divided by X (since your actual batch size will be multiplied by X), unless you set
`split_batches=True`.
</Tip>
4. Replace the `loss.backward()` line with `accelerator.backward(loss)`.
And you're all set! With all these changes, your script will run on your local machine as well as on multiple GPUs or a
TPU! You can either use your favorite tool to launch the distributed training, or you can use the 🤗 Accelerate
launcher.
### Add distributed evaluation
You can perform regular evaluation in your training script if you leave your validation dataloader out of the
[`~Accelerator.prepare`] method. In this case, you will need to put the input data on the
`accelerator.device` manually.
To perform distributed evaluation, send along your validation dataloader to the [`~Accelerator.prepare`]
method:
```python
validation_dataloader = accelerator.prepare(validation_dataloader)
```
Each device in your distributed setup only receives a part of the evaluation data, which means you should group your predictions together with the [`~Accelerator.gather_for_metrics`] method. This method requires all tensors to be the same size on each process, so if your tensors have different sizes on each process (for instance when dynamically padding to the maximum length in a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad your tensor to the largest size across processes. Note that the tensors need to be 1D and that we concatenate the tensors along the first dimension.
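For instance, a sketch assuming 2D `(batch, sequence)` prediction tensors (`predictions` is an illustrative name):

```python
# Pad along the sequence dimension so the tensors have the same size on every process,
# then gather them; gather_for_metrics drops any duplicated samples automatically.
predictions = accelerator.pad_across_processes(predictions, dim=1, pad_index=0)
all_predictions = accelerator.gather_for_metrics(predictions)
```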
Same as with your training dataloader, each device will only see part of the evaluation data should you run your script
on multiple devices. This means you will need to group your predictions together which you can do with
the [`~Accelerator.gather_for_metrics`] method.
```python
for inputs, targets in validation_dataloader:
@ -137,53 +123,319 @@ for inputs, targets in validation_dataloader:
metric.add_batch(all_predictions, all_targets)
```
For more complex cases (e.g. 2D tensors, tensors you don't want to concatenate, dicts of 3D tensors), you can pass `use_gather_object=True` to `gather_for_metrics`. This will return the list of objects after gathering. Note that using it with GPU tensors is not well supported and inefficient.
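As a rough sketch (`batch_outputs` is an illustrative name):

```python
# Gather arbitrary picklable objects (nested lists, dicts of tensors, ...) from all processes.
all_outputs = accelerator.gather_for_metrics(batch_outputs, use_gather_object=True)
```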
<Tip warning={true}>
> [!TIP]
> Data at the end of a dataset may be duplicated so the batch can be equally divided among all workers. The [`~Accelerator.gather_for_metrics`] method automatically removes the duplicated data to calculate a more accurate metric.
Similar to the training dataloader, passing your validation dataloader through
[`~Accelerator.prepare`] may change it: if you run on X GPUs, it will have its length divided by X
(since your actual batch size will be multiplied by X), unless you set `split_batches=True`.
## Big Model Inference
</Tip>
Accelerate's Big Model Inference has two main features, [`~accelerate.init_empty_weights`] and [`~accelerate.load_checkpoint_and_dispatch`], to load large models for inference that typically don't fit into memory.
Some data at the end of the dataset may be duplicated so the batch can be divided equally among all workers. As a result,
metrics should be calculated through the [`~Accelerator.gather_for_metrics`] method to automatically remove the duplicated
data while gathering and provide a more accurate metric.
> [!TIP]
> Take a look at the [Handling big models for inference](concept_guides/big_model_inference) guide for a better understanding of how Big Model Inference works under the hood.
<Tip>
### Empty weights initialization
If for some reason you don't wish to have this automatically done, [`~Accelerator.gather`] can be used instead to gather
the data across all processes, and the deduplication can then be handled manually.
The [`~accelerate.init_empty_weights`] context manager initializes models of any size by creating a *model skeleton* and moving and placing parameters each time they're created to PyTorch's [**meta**](https://pytorch.org/docs/main/meta.html) device. This way, not all weights are immediately loaded and only a small part of the model is loaded into memory at a time.
</Tip>
For example, loading an empty [Mixtral-8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) model takes significantly less memory than fully loading the models and weights on the CPU.
```py
from accelerate import init_empty_weights
from transformers import AutoConfig, AutoModelForCausalLM
<Tip warning={true}>
config = AutoConfig.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
with init_empty_weights():
model = AutoModelForCausalLM.from_config(config)
The [`~Accelerator.gather`] and [`~Accelerator.gather_for_metrics`] methods require the tensors to be all the same size on each process. If
you have tensors of different sizes on each process (for instance when dynamically padding to the maximum length in
a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad your tensor to the
biggest size across processes.
</Tip>
### Launch your distributed script
You can use the regular commands to launch your distributed training (like `torch.distributed.run` for
PyTorch) - they are fully compatible with 🤗 Accelerate.
Alternatively, 🤗 Accelerate provides a CLI tool that unifies all launchers, so you only have to remember one command. \
To use it, run a quick configuration setup first on your machine and answer the questions:
```bash
accelerate config
```
### Load and dispatch weights
At the end of the setup, a *default_config.yaml* file will be saved in your cache folder for 🤗 Accelerate. That cache
folder is (with decreasing order of priority):
The [`~accelerate.load_checkpoint_and_dispatch`] function loads full or sharded checkpoints into the empty model, and automatically distributes weights across all available devices.
- The content of your environment variable `HF_HOME` suffixed with *accelerate*.
- If it does not exist, the content of your environment variable `XDG_CACHE_HOME` suffixed with
*huggingface/accelerate*.
- If this does not exist either, the folder *~/.cache/huggingface/accelerate*.
The `device_map` parameter determines where to place each model layer, and specifying `"auto"` places them on the GPU first, then the CPU, and finally the hard drive as memory-mapped tensors if there's still not enough memory. Use the `no_split_module_classes` parameter to indicate which modules shouldn't be split across devices (typically those with a residual connection).
By specifying the `--config_file` flag you can specify an alternative location of the configuration file.
Once the configuration setup is complete, you can test your setup by running:
```py
from accelerate import load_checkpoint_and_dispatch
model_checkpoint = "your-local-model-folder"
model = load_checkpoint_and_dispatch(
model, checkpoint=model_checkpoint, device_map="auto", no_split_module_classes=['Block']
)
```bash
accelerate test
```
## Next steps
This will launch a short script that will test the distributed environment. If it runs without issues, you are ready for
the next step!
Now that you've been introduced to the main Accelerate features, your next steps could include:
Note that if you specified a location for the config file in the previous step, you need to pass it here as well:
* Check out the [tutorials](basic_tutorials/overview) for a gentle walkthrough of Accelerate. This is especially useful if you're new to distributed training and the library.
* Dive into the [guides](usage_guides/explore) to see how to use Accelerate for specific use-cases.
* Deepen your conceptual understanding of how Accelerate works internally by reading the [concept guides](concept_guides/internal_mechanism).
* Look up classes and commands in the [API reference](package_reference/accelerator) to see what parameters and options are available.
```bash
accelerate test --config_file path_to_config.yaml
```
Now that this is done, you can run your script with the following command:
```bash
accelerate launch path_to_script.py --args_for_the_script
```
If you stored the config file in a non-default location, you can indicate it to the launcher like this:
```bash
accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script
```
You can override any of the arguments determined by your config file. To see the complete list of parameters that you
can pass in, run `accelerate launch -h`. (And further niche argument help by passing in partial commands, such as `accelerate launch --multi_gpu -h` for all `multi_gpu` args)
Check out the [Launch tutorial](basic_tutorials/launch) for more information about launching your scripts.
## Common modifications of the base case
The previous section covers the minimal essential steps to move a training script into a distributed setup with 🤗 Accelerate.
Here we describe common modifications/deviations from the base case scenario and the adjustments you need to make to accommodate for them.
### Launch distributed training from a notebook
Accelerate has a [`notebook_launcher`] to help you launch your training function from a
notebook. This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training on several GPUs and machines
(if the machine on which you are running your notebook has them).
Define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a
cell with the following code:
```python
from accelerate import notebook_launcher
notebook_launcher(training_function)
```
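If your training function takes arguments, or you want to pin the number of processes, the call could look like this sketch (the argument values are illustrative):

```python
from accelerate import notebook_launcher

# Positional arguments are forwarded to training_function; two worker processes are launched.
notebook_launcher(training_function, args=(model, train_dataloader), num_processes=2)
```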
<Tip warning={true}>
Your [`Accelerator`] object should only be defined inside the training function. This is because the
initialization should be done inside the launcher only.
</Tip>
Check out the [Notebook Launcher tutorial](basic_tutorials/notebook) for more information about training on TPUs.
### Specifics of training on TPU
If you want to launch your script on TPUs, there are a few caveats you should be aware of. Behind the scenes, the TPUs
will create a graph of all the operations happening in your training step (forward pass, backward pass and optimizer
step). This is why your first step of training will always be very long as building and compiling this graph for
optimizations takes some time.
The good news is that this compilation will be cached so the second step and all the following will be much faster. The
bad news is that it only applies if all of your steps do exactly the same operations, which implies:
- having all tensors of the same length in all your batches
- having static code (i.e., not a for loop of length that could change from step to step)
Having any of the things above change between two steps will trigger a new compilation which will, once again, take a
lot of time. In practice, that means you must take special care to have all your tensors in your inputs of the same
shape (so no dynamic padding for instance if you are in an NLP problem) and should not use layers with for loops that
have different lengths depending on the inputs (such as an LSTM) or the training will be excruciatingly slow.
To introduce special behavior in your script for TPUs you can check the `distributed_type` of your
`accelerator`:
```python docstyle-ignore
from accelerate import DistributedType
if accelerator.distributed_type == DistributedType.TPU:
# do something of static shape
else:
# go crazy and be dynamic
```
The [NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py) shows an example in a
situation with dynamic padding.
One last thing to pay close attention to: if your model has tied weights (such as language models which tie the weights
of the embedding matrix with the weights of the decoder), moving this model to the TPU (either yourself or after you
passed your model to [`~Accelerator.prepare`]) will break the tying. You will need to retie the weights
after. You can find an example of this in the [run_clm_no_trainer](https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py) script in
the Transformers repository.
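For a 🤗 Transformers model, retying usually comes down to calling its `tie_weights` method once the model has been placed; a rough sketch (not taken from the linked script):

```python
model = accelerator.prepare(model)
# Moving the model to the TPU broke the embedding/decoder weight tying; restore it.
model.tie_weights()
```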
Check out the [TPU tutorial](concept_guides/training_tpu) for more information about training on TPUs.
### Execute a statement only on one process
Some of your instructions only need to run for one process on a given server: for instance a data download or a log
statement. To do this, wrap the statement in a test like this:
```python docstyle-ignore
if accelerator.is_local_main_process:
# Is executed once per server
```
Another example is progress bars: to avoid having multiple progress bars in your output, you should only display one on
the local main process:
```python
from tqdm.auto import tqdm
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
```
The *local* means per machine: if you are running your training on two servers with several GPUs, the instruction will
be executed once on each of those servers. If you need to execute something only once for all processes (and not per
machine) for instance, uploading the final model to the 🤗 model hub, wrap it in a test like this:
```python docstyle-ignore
if accelerator.is_main_process:
# Is executed once only
```
For printing statements you only want executed once per machine, you can just replace the `print` function by
`accelerator.print`.
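For example (the `epoch` and `loss` values are illustrative):

```python
# Printed once per machine instead of once per process.
accelerator.print(f"epoch {epoch}: loss {loss.item():.4f}")
```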
### Defer execution on multiple GPUs
When you run your usual script, instructions are executed in order. Using 🤗 Accelerate to deploy your script on several
GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be
faster than others.
You might need to wait for all processes to have reached a certain point before executing a given instruction. For
instance, you shouldn't save a model before making sure every process is done with training. To do this, add the
following line in your code:
```
accelerator.wait_for_everyone()
```
This instruction will block all the processes that arrive first until all the other processes have reached that
point (if you run your script on just one GPU or CPU, this won't do anything).
### Save/load a model in a distributed setup
Saving the model you trained might need a bit of adjustment: first you should wait for all processes to reach that
point in the script as shown above, and then, you should unwrap your model before saving it. This is because when going
through the [`~Accelerator.prepare`] method, your model may have been placed inside a bigger model,
which deals with the distributed training. This in turn means that saving your model state dictionary without taking
any precaution will take that potential extra layer into account, and you will end up with weights you can't load back
in your base model. The [`~Accelerator.save_model`] method will help you to achieve that. It will unwrap your model and save
the model state dictionary.
Here is an example:
```
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory)
```
The [`~Accelerator.save_model`] method can also save a model into sharded checkpoints or with safetensors format:
```python
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
```
If your script contains logic to load a checkpoint, we also recommend you load your weights in the unwrapped model
(this is only useful if you use the load function after making your model go through
[`~Accelerator.prepare`]). Here is an example:
```python
import os
import torch

unwrapped_model = accelerator.unwrap_model(model)
path_to_checkpoint = os.path.join(save_directory, "pytorch_model.bin")
unwrapped_model.load_state_dict(torch.load(path_to_checkpoint))
```
Note that since all the model parameters are references to tensors, this will load your weights inside `model`.
If you want to load a sharded checkpoint or a checkpoint with safetensors format into the model with a specific `device`,
we recommend you to load it with [`~utils.load_checkpoint_in_model`] function. Here's an example:
```python
from accelerate.utils import load_checkpoint_in_model
load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"": device})
```
### Save/load entire states
When training your model, you may want to save the current state of the model, optimizer, random generators, and potentially
learning rate schedulers to be restored in the _same script_.
You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so.
To further customize where and how states are saved through [`~Accelerator.save_state`], the [`~utils.ProjectConfiguration`] class can be used. For example,
if `automatic_checkpoint_naming` is enabled, each saved checkpoint will then be located at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.
If you have registered any other stateful items to be stored through [`~Accelerator.register_for_checkpointing`] they will also be saved and/or loaded.
<Tip>
Every object passed to [`~Accelerator.register_for_checkpointing`] must have a `load_state_dict` and `state_dict` function to be stored
</Tip>
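A minimal sketch of this workflow, with made-up directory names and a hypothetical `my_scheduler` object:

```python
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

project_config = ProjectConfiguration(project_dir="my_project", automatic_checkpoint_naming=True)
accelerator = Accelerator(project_config=project_config)

# Anything exposing state_dict/load_state_dict can be tracked alongside the model and optimizer.
accelerator.register_for_checkpointing(my_scheduler)

accelerator.save_state()                                       # -> my_project/checkpoints/checkpoint_0
accelerator.load_state("my_project/checkpoints/checkpoint_0")  # restore that state later
```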
### Use gradient clipping
If you are using gradient clipping in your script, you should replace the calls to
`torch.nn.utils.clip_grad_norm_` or `torch.nn.utils.clip_grad_value_` with [`~Accelerator.clip_grad_norm_`]
and [`~Accelerator.clip_grad_value_`] respectively.
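A sketch of what this looks like in the training loop (the max-norm value of 1.0 is just an example):

```python
accelerator.backward(loss)
# Replaces torch.nn.utils.clip_grad_norm_; Accelerate unscales the gradients first when needed.
accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
```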
### Train with mixed precision
If you are running your training in Mixed Precision with 🤗 Accelerate, you will get the best result with your loss being
computed inside your model (like in Transformer models for instance). Every computation outside of the model will be
executed in full precision (which is generally what you want for loss computation, especially if it involves a
softmax). However, you might want to put your loss computation inside the [`~Accelerator.autocast`] context manager:
```
with accelerator.autocast():
    loss = complex_loss_function(outputs, target)
```
Another caveat with Mixed Precision training is that the gradient will skip a few updates at the beginning and
sometimes during training: because of the dynamic loss scaling strategy, there are points during training where the
gradients have overflown, and the loss scaling factor is reduced to avoid this happening again at the next step.
This means that you may update your learning rate scheduler when there was no update, which is fine in general, but may
have an impact when you have very little training data, or if the first learning rate values of your scheduler are very
important. In this case, you can skip the learning rate scheduler updates when the optimizer step was not done like
this:
```
if not accelerator.optimizer_step_was_skipped:
lr_scheduler.step()
```
### Use gradient accumulation
To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a `gradient_accumulation_steps`.
This will also automatically ensure the gradients are synced or unsynced when on multi-device training, check if the step should
actually be performed, and auto-scale the loss:
```python
accelerator = Accelerator(gradient_accumulation_steps=2)
model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader)
for input, label in training_dataloader:
with accelerator.accumulate(model):
predictions = model(input)
loss = loss_function(predictions, label)
accelerator.backward(loss)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
```

View File

@ -13,15 +13,15 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Big Model Inference
# Handling big models for inference
One of the biggest advancements Accelerate provides is [Big Model Inference](../concept_guides/big_model_inference), which allows you to perform inference with models that don't fully fit on your graphics card.
One of the biggest advancements 🤗 Accelerate provides is the concept of [large model inference](../concept_guides/big_model_inference) wherein you can perform *inference* on models that cannot fully fit on your graphics card.
This tutorial will show you how to use Big Model Inference in Accelerate and the Hugging Face ecosystem.
This tutorial will be broken down into two parts showcasing how to use both 🤗 Accelerate and 🤗 Transformers (a higher API-level) to make use of this idea.
## Accelerate
## Using 🤗 Accelerate
A typical workflow for loading a PyTorch model is shown below. `ModelClass` is a model that exceeds the GPU memory of your device (mps or cuda or xpu).
For these tutorials, we'll assume a typical workflow for loading your model in such that:
```py
import torch
@ -31,7 +31,9 @@ state_dict = torch.load(checkpoint_file)
my_model.load_state_dict(state_dict)
```
With Big Model Inference, the first step is to init an empty skeleton of the model with the `init_empty_weights` context manager. This doesn't require any memory because `my_model` is "parameterless".
Note that here we assume that `ModelClass` is a model that takes up more video-card memory than what can fit on your device (be it `mps` or `cuda`).
The first step is to init an empty skeleton of the model which won't take up any RAM using the [`init_empty_weights`] context manager:
```py
from accelerate import init_empty_weights
@ -39,14 +41,22 @@ with init_empty_weights():
my_model = ModelClass(...)
```
Next, the weights are loaded into the model for inference.
With this, `my_model` is currently "parameterless", hence leaving a smaller footprint than what one would normally get by loading it onto the CPU directly.
The [`load_checkpoint_and_dispatch`] method loads a checkpoint inside your empty model and dispatches the weights for each layer across all available devices, starting with the fastest devices (GPU, MPS, XPU, NPU, MLU, SDAA, MUSA) before moving to the slower ones (CPU and hard drive).
Next we need to load in the weights to our model so we can perform inference.
Setting `device_map="auto"` automatically fills all available space on the GPU(s) first, then the CPU, and finally, the hard drive (the absolute slowest option) if there is still not enough memory.
For this we will use [`load_checkpoint_and_dispatch`], which as the name implies will load a checkpoint inside your empty model and dispatch the weights for each layer across all the devices you have available (GPU/MPS and CPU RAM).
> [!TIP]
> Refer to the [Designing a device map](../concept_guides/big_model_inference#designing-a-device-map) guide for more details on how to design your own device map.
To determine how this `dispatch` can be performed, generally specifying `device_map="auto"` will be good enough as 🤗 Accelerate
will attempt to fill all the space in your GPU(s), then loading them to the CPU, and finally if there is not enough RAM it will be loaded to the disk (the absolute slowest option).
<Tip>
For more details on designing your own device map, see this section of the [concept guide](../concept_guides/big_model_inference#designing-a-device-map)
</Tip>
See an example below:
```py
from accelerate import load_checkpoint_and_dispatch
@ -56,29 +66,42 @@ model = load_checkpoint_and_dispatch(
)
```
If there are certain “chunks” of layers that shouldn't be split, pass them to `no_split_module_classes` (see [here](../concept_guides/big_model_inference#loading-weights) for more details).
<Tip>
A model's weights can also be sharded into multiple checkpoints to save memory, such as when the `state_dict` doesn't fit in memory (see [here](../concept_guides/big_model_inference#sharded-checkpoints) for more details).
If there are certain "chunks" of layers that shouldn't be split, you can pass them in as `no_split_module_classes`. Read more about it [here](../concept_guides/big_model_inference#loading-weights)
Now that the model is fully dispatched, you can perform inference.
</Tip>
<Tip>
Also to save on memory (such as if the `state_dict` will not fit in RAM), a model's weights can be divided and split into multiple checkpoint files. Read more about it [here](../concept_guides/big_model_inference#sharded-checkpoints)
</Tip>
Now that the model is dispatched fully, you can perform inference as normal with the model:
```py
input = torch.randn(2,3)
device_type = next(iter(model.parameters())).device.type
input = input.to(device_type)
input = input.to("cuda")
output = model(input)
```
Each time an input is passed through a layer, it is sent from the CPU to the GPU (or disk to CPU to GPU), the output is calculated, and the layer is removed from the GPU going back down the line. While this adds some overhead to inference, it enables you to run any size model on your system, as long as the largest layer fits on your GPU.
What will happen now is each time the input gets passed through a layer, it will be sent from the CPU to the GPU (or disk to CPU to GPU), the output is calculated, and then the layer is pulled back off the GPU going back down the line. While this adds some overhead to the inference being performed, through this method it is possible to run **any size model** on your system, as long as the largest layer is capable of fitting on your GPU.
Multiple GPUs, or "model parallelism", can be utilized but only one GPU will be active at any given moment. This forces the GPU to wait for the previous GPU to send it the output. You should launch your script normally with Python instead of other tools like torchrun and accelerate launch.
<Tip>
> [!TIP]
> You may also be interested in *pipeline parallelism* which utilizes all available GPUs at once, instead of only having one GPU active at a time. This approach is less flexible though. For more details, refer to the [Memory-efficient pipeline parallelism](./distributed_inference#memory-efficient-pipeline-parallelism-experimental) guide.
Multiple GPUs can be utilized, however this is considered "model parallelism" and as a result only one GPU will be active at a given moment, waiting for the prior one to send it the output. You should launch your script normally with `python`
and not need `torchrun`, `accelerate launch`, etc.
<Youtube id="MWCSGj9jEAo"/>
</Tip>
Take a look at a full example of Big Model Inference below.
For a visual representation of this, check out the animation below:
<Youtube id="MWCSGj9jEAo" />
### Complete Example
Below is the full example showcasing what we performed above:
```py
import torch
@ -92,18 +115,17 @@ model = load_checkpoint_and_dispatch(
)
input = torch.randn(2,3)
device_type = next(iter(model.parameters())).device.type
input = input.to(device_type)
input = input.to("cuda")
output = model(input)
```
## Hugging Face ecosystem
## Using 🤗 Transformers, 🤗 Diffusers, and other 🤗 Open Source Libraries
Other libraries in the Hugging Face ecosystem, like Transformers or Diffusers, support Big Model Inference in their [`~transformers.PreTrainedModel.from_pretrained`] constructors.
Libraries that support 🤗 Accelerate big model inference include all of the earlier logic in their `from_pretrained` constructors.
You just need to add `device_map="auto"` in [`~transformers.PreTrainedModel.from_pretrained`] to enable Big Model Inference.
These operate by specifying a string representing the model to download from the [🤗 Hub](https://hf.co/models) and then denoting `device_map="auto"` along with a few extra parameters.
For example, load Big Science's T0pp 11 billion parameter model with Big Model Inference.
As a brief example, we will look at using `transformers` and loading in Big Science's T0pp model.
```py
from transformers import AutoModelForSeq2SeqLM
@ -111,7 +133,9 @@ from transformers import AutoModelForSeq2SeqLM
model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto")
```
After loading the model, the empty init and smart dispatch steps from before are executed and the model is fully ready to make use of all the resources in your machine. Through these constructors, you can also save more memory by specifying the `torch_dtype` parameter to load a model in a lower precision.
After loading the model in, the initial steps from before to prepare a model have all been done and the model is fully
ready to make use of all the resources in your machine. Through these constructors, you can also save *more* memory by
specifying the precision the model is loaded into as well, through the `torch_dtype` parameter, such as:
```py
from transformers import AutoModelForSeq2SeqLM
@ -119,6 +143,8 @@ from transformers import AutoModelForSeq2SeqLM
model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto", torch_dtype=torch.float16)
```
## Next steps
To learn more about this, check out the 🤗 Transformers documentation available [here](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading).
For a more detailed explanation of Big Model Inference, make sure to check out the [conceptual guide](../concept_guides/big_model_inference)!
## Where to go from here
For a much more detailed look at big model inference, be sure to check out the [Conceptual Guide on it](../concept_guides/big_model_inference)

View File

@ -15,8 +15,8 @@ rendered properly in your Markdown viewer.
# Checkpointing
When training a PyTorch model with Accelerate, you may often want to save and continue a state of training. Doing so requires
saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside Accelerate are two convenience functions to achieve this quickly:
When training a PyTorch model with 🤗 Accelerate, you may often want to save and continue a state of training. Doing so requires
saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside 🤗 Accelerate are two convenience functions to achieve this quickly:
- Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location
- Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state`
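Below is a minimal sketch of that round trip, assuming the model, optimizer, dataloader, and scheduler already exist (the folder name is made up):

```python
from accelerate import Accelerator

accelerator = Accelerator()
model, optimizer, dataloader, scheduler = accelerator.prepare(model, optimizer, dataloader, scheduler)

# ... train for a while ...
accelerator.save_state("my_checkpoint")

# Later, in the same script/setup, restore everything that was saved.
accelerator.load_state("my_checkpoint")
```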

View File

@ -1,76 +0,0 @@
# Compilation
## Overview
PyTorch 2.0 introduced `torch.compile`, a powerful feature that makes PyTorch code run faster by JIT-compiling it into optimized kernels. Key features of `torch.compile` include:
- **Performance Improvement**: Significantly speeds up model execution by optimizing the computation graph.
- **Ease of Use**: Requires minimal code changes to implement, making it highly accessible.
- **Compatibility**: Works seamlessly with existing PyTorch code and models.
When used with Accelerate, `torch.compile` integrates smoothly into distributed training workflows, allowing you to benefit from both distributed execution and compilation optimizations simultaneously.
The first execution of compiled code typically takes longer as it includes the compilation time, but subsequent runs are significantly faster. For optimal performance in different scenarios, `torch.compile` offers various modes like `"default"`, `"reduce-overhead"` (which uses CUDA graphs to further reduce overhead), and `"max-autotune"` (which performs extensive autotuning to find the best kernels for your model).
## Using `torch.compile` with Accelerate
Accelerate provides `TorchDynamoPlugin` for easy and seamless integration of `torch.compile` into your training scripts.
```python
from accelerate import Accelerator
from accelerate.utils import TorchDynamoPlugin
# Configure the compilation backend
dynamo_plugin = TorchDynamoPlugin(
backend="inductor", # Options: "inductor", "aot_eager", "aot_nvfuser", etc.
mode="default", # Options: "default", "reduce-overhead", "max-autotune"
fullgraph=True,
dynamic=False
)
# Initialize accelerator with the plugin
accelerator = Accelerator(dynamo_plugin=dynamo_plugin)
# This will apply torch.compile to your model
model = accelerator.prepare(model)
```
It is compatible with all other features and plugins of Accelerate, including mixed precision, distributed training (DDP, FSDP, Deepspeed), etc.
## Regional Compilation
Instead of trying to compile the whole model, which usually has a big problem space for optimization, regional compilation targets repeated blocks of the same class and compiles them sequentially to hit the compiler's cache. For example, in `GPT2LMHeadModel`, the repeated block/class is `GPT2Block`, which can be accessed as `model.transformer.h[0]`. The rest of the model (e.g. `model.lm_head`) is compiled separately.
This speeds up the compilation overhead / cold start of models like LLMs and Transformers in general.
See <https://pytorch.org/tutorials/recipes/regional_compilation.html> for more details.
### How to Use Regional Compilation
It can be enabled by setting `use_regional_compilation=True` in the `TorchDynamoPlugin` configuration:
```python
# Configure the compilation backend
dynamo_plugin = TorchDynamoPlugin(
use_regional_compilation=True,
... # other parameters
)
# Initialize accelerator with the plugin
accelerator = Accelerator(dynamo_plugin=dynamo_plugin)
# This will apply compile_regions to your model
model = accelerator.prepare(model)
```
You could also use the `accelerate.utils.compile_regions` utility directly, in the same way you would use `torch.compile`.
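For instance, a rough sketch of direct use (assuming the default settings are acceptable):

```python
from accelerate.utils import compile_regions

# Compile repeated blocks (e.g. transformer layers) individually instead of the whole model.
model = compile_regions(model)
```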
### Benefits of Regional Compilation
We have conducted extensive benchmarks comparing full compilation and regional compilation using the `torch.compile` feature in PyTorch. The full results are available in the [accelerate repository](https://github.com/huggingface/accelerate/tree/main/benchmarks/torch.compile/regional_compilation). The key findings from our benchmarks are:
1. **Comparable Performance**: Regional compilation delivers performance speedups similar to full compilation, especially for larger models.
2. **Faster Compilation**: Regional compilation significantly reduces the time taken to compile models, making it a more efficient choice for deployment.
3. **Batch Size Impact**: The performance difference between compilation strategies diminishes with larger batch sizes, indicating that the overhead of compilation is less impactful in those scenarios.
4. **Model Size Consideration**: The benefits of regional compilation are more pronounced in larger models, where the compilation time savings can be substantial.
5. **Practical Application**: For real-world applications, regional compilation is a practical choice for optimizing training cold start times, especially when working with large models.
## Conclusion
Both full and regional compilation can significantly speed up your models. Regional compilation offers a practical balance between compilation time and runtime performance, especially for training large models with substantial batch sizes.

View File

@ -1,337 +0,0 @@
<!--
Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# DDP Communication Hooks
Distributed Data Parallel (DDP) communication hooks provide a generic interface to control how gradients are communicated across workers by overriding the vanilla allreduce in `DistributedDataParallel`. A few built-in communication hooks are provided, and users can easily apply any of these hooks to optimize communication.
- **FP16 Compression Hook**: Compresses gradients by casting them to half-precision floating-point format (`torch.float16`), reducing communication overhead.
- **BF16 Compression Hook**: Similar to FP16, but uses the Brain Floating Point format (`torch.bfloat16`), which can be more efficient on certain hardware.
- **PowerSGD Hook**: An advanced gradient compression algorithm that provides high compression rates and can accelerate bandwidth-bound distributed training.
In this tutorial, you will see how to quickly set up DDP communication hooks and perform training with the utilities provided in Accelerate, which can be as simple as adding just one new line of code! The examples below show how to use DDP communication hooks to optimize gradient communication in distributed training with Accelerate.
## FP16 Compression Hook
<hfoptions id="fp16">
<hfoption id="PyTorch">
```python
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks
from accelerate.test_utils.testing import get_backend
device_type, _, _ = get_backend()
device_id = getattr(torch, device_type, torch.cuda).current_device()
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
model = MyModel()
model = DDP(model, device_ids=[device_id])
model.register_comm_hook(state=None, hook=default_hooks.fp16_compress_hook)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
optimizer.zero_grad()
```
</hfoption>
<hfoption id="Accelerate">
```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.FP16)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
```
</hfoption>
</hfoptions>
### BF16 Compression Hook
<Tip warning={true}>
BF16 Compression Hook API is experimental, and it requires NCCL version later than 2.9.6.
</Tip>
<hfoptions id="bf16">
<hfoption id="PyTorch">
```python
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks
from accelerate.test_utils.testing import get_backend
device_type, _, _ = get_backend()
device_id = getattr(torch, device_type, torch.cuda).current_device()
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
model = MyModel()
model = DDP(model, device_ids=[device_id])
model.register_comm_hook(state=None, hook=default_hooks.bf16_compress_hook)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
optimizer.zero_grad()
```
</hfoption>
<hfoption id="Accelerate">
```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.BF16)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
```
</hfoption>
</hfoptions>
### PowerSGD Hook
<Tip warning={true}>
PowerSGD typically requires extra memory of the same size as the model's gradients to enable error feedback, which can compensate for biased compressed communication and improve accuracy.
</Tip>
<hfoptions id="powerSGD">
<hfoption id="PyTorch">
```python
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook
from accelerate.test_utils.testing import get_backend
device_type, _, _ = get_backend()
device_id = getattr(torch, device_type, torch.cuda).current_device()
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
model = MyModel()
model = DDP(model, device_ids=[device_id])
state = powerSGD_hook.PowerSGDState(process_group=None)
model.register_comm_hook(state=state, hook=powerSGD_hook.powerSGD_hook)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
optimizer.zero_grad()
```
</hfoption>
<hfoption id="Accelerate">
```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.POWER_SGD)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
```
</hfoption>
</hfoptions>
## DDP Communication Hooks utilities
There are two additional utilities for supporting optional functionalities with the communication hooks.
### comm_wrapper
`comm_wrapper` is an option to wrap a communication hook with additional functionality. For example, it can be used to combine FP16 compression with other communication strategies. Currently supported wrappers are `no`, `fp16`, and `bf16`.
```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(
comm_hook=DDPCommunicationHookType.POWER_SGD,
comm_wrapper=DDPCommunicationHookType.FP16
)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
```
### comm_state_option
`comm_state_option` allows you to pass additional state information required by certain communication hooks. This is particularly useful for stateful hooks like `PowerSGD`, which require maintaining hyperparameters and internal states across training steps. Below is an example showcasing the use of `comm_state_option` with the `PowerSGD` hook.
```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(
comm_hook=DDPCommunicationHookType.POWER_SGD,
comm_state_option={"matrix_approximation_rank": 2}
)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
# Training loop
for data, targets in data_loader:
outputs = model(data)
loss = criterion(outputs, targets)
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
```
For more advanced usage and additional hooks, refer to the [PyTorch DDP Communication Hooks documentation](https://pytorch.org/docs/stable/ddp_comm_hooks.html).

Some files were not shown because too many files have changed in this diff.