Compare commits

..

11 Commits

173 changed files with 2963 additions and 9228 deletions

View File

@@ -37,7 +37,7 @@ jobs:
with:
file: docker/accelerate-cpu/Dockerfile
push: true
tags: huggingface/accelerate:cpu-release-${{ needs.get-version.outputs.version }}
tags: huggingface/accelerate-cpu:${{needs.get-version.outputs.version}}
version-cuda:
name: "Latest Accelerate GPU [version]"
@@ -57,25 +57,4 @@ jobs:
with:
file: docker/accelerate-gpu/Dockerfile
push: true
tags: huggingface/accelerate:gpu-release-${{needs.get-version.outputs.version}}
version-cuda-deepspeed:
name: "Latest Accelerate GPU DeepSpeed [version]"
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
needs: get-version
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-gpu-deepspeed/Dockerfile
push: true
tags: huggingface/accelerate:gpu-deepspeed-release-${{needs.get-version.outputs.version}}
tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}

View File

@@ -22,18 +22,12 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Get current date
id: date
run: |
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
- name: Build and Push CPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-cpu/Dockerfile
push: true
tags: |
huggingface/accelerate:cpu-nightly
huggingface/accelerate:cpu-nightly-${{ env.date }}
tags: huggingface/accelerate-cpu
latest-cuda:
name: "Latest Accelerate GPU [dev]"
@@ -46,40 +40,10 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Get current date
id: date
run: |
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-gpu/Dockerfile
push: true
tags: |
huggingface/accelerate:gpu-nightly
huggingface/accelerate:gpu-nightly-${{ env.date }}
latest-cuda-deepspeed:
name: "Latest Accelerate GPU DeepSpeed [dev]"
runs-on: [self-hosted, nvidia-gpu, t4, ci]
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Get current date
id: date
run: |
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-gpu-deepspeed/Dockerfile
push: true
tags: |
huggingface/accelerate:gpu-deepspeed-nightly
huggingface/accelerate:gpu-deepspeed-nightly-${{ env.date }}
tags: huggingface/accelerate-gpu

View File

@@ -13,6 +13,5 @@ jobs:
with:
commit_sha: ${{ github.sha }}
package: accelerate
custom_container: huggingface/transformers-doc-builder
secrets:
hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}

View File

@@ -14,4 +14,3 @@ jobs:
commit_sha: ${{ github.event.pull_request.head.sha }}
pr_number: ${{ github.event.number }}
package: accelerate
custom_container: huggingface/transformers-doc-builder

View File

@@ -12,13 +12,13 @@ env:
jobs:
run_core_tests_single_gpu:
run_all_tests_single_gpu:
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
env:
CUDA_VISIBLE_DEVICES: "0"
TEST_TYPE: "single_gpu"
container:
image: huggingface/accelerate:gpu-nightly
image: huggingface/accelerate-gpu:latest
options: --gpus all --shm-size "16gb"
defaults:
run:
@@ -33,197 +33,79 @@ jobs:
pip install -e . --no-deps
pip install pytest-reportlog tabulate
- name: Show installed libraries
run: |
source activate accelerate;
pip freeze
- name: Run test on GPUs
working-directory: accelerate
run: |
source activate accelerate
make test
# - name: Run examples on GPUs
# working-directory: accelerate
# if: always()
# run: |
# source activate accelerate
# pip uninstall comet_ml -y
# make test_examples
- name: Run examples on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
make test_examples
# - name: Generate Report
# working-directory: accelerate
# if: always()
# run: |
# pip install slack_sdk tabulate
# python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install slack_sdk tabulate
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
# run_deepspeed_tests_single_gpu:
# runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
# env:
# CUDA_VISIBLE_DEVICES: "0"
# TEST_TYPE: "single_gpu_deepspeed"
# container:
# image: huggingface/accelerate:gpu-deepspeed-nightly
# options: --gpus all --shm-size "16gb"
# defaults:
# run:
# shell: bash
# steps:
# - name: Update clone & pip install
# run: |
# source activate accelerate
# git clone https://github.com/huggingface/accelerate;
# cd accelerate;
# git checkout ${{ github.sha }};
# pip install -e . --no-deps
# pip install pytest-reportlog tabulate
run_all_tests_multi_gpu:
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
env:
CUDA_VISIBLE_DEVICES: "0,1"
TEST_TYPE: "multi_gpu"
container:
image: huggingface/accelerate-gpu:latest
options: --gpus all --shm-size "16gb"
defaults:
run:
shell: bash
steps:
- name: Update clone
run: |
source activate accelerate
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e . --no-deps
pip install pytest-reportlog tabulate
# - name: Show installed libraries
# run: |
# source activate accelerate;
# pip freeze
- name: Run core and big modeling tests on GPUs
working-directory: accelerate
run: |
source activate accelerate
make test_core
make test_big_modeling
make test_cli
# - name: Run test on GPUs
# working-directory: accelerate
# run: |
# source activate accelerate
# make test_deepspeed
- name: Run Integration tests on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
make test_integrations
# - name: Run Integration tests on GPUs
# working-directory: accelerate
# if: always()
# run: |
# source activate accelerate
# make test_integrations
- name: Run examples on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
make test_examples
# - name: Run examples on GPUs
# working-directory: accelerate
# if: always()
# run: |
# source activate accelerate
# pip uninstall comet_ml -y
# make test_examples
# - name: Generate Report
# working-directory: accelerate
# if: always()
# run: |
# pip install slack_sdk tabulate
# python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
# run_core_tests_multi_gpu:
# runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
# env:
# CUDA_VISIBLE_DEVICES: "0,1"
# TEST_TYPE: "multi_gpu"
# container:
# image: huggingface/accelerate:gpu-nightly
# options: --gpus all --shm-size "16gb"
# defaults:
# run:
# shell: bash
# steps:
# - name: Update clone
# run: |
# source activate accelerate
# git clone https://github.com/huggingface/accelerate;
# cd accelerate;
# git checkout ${{ github.sha }};
# pip install -e . --no-deps
# pip install pytest-reportlog tabulate
# - name: Show installed libraries
# run: |
# source activate accelerate;
# pip freeze
# - name: Run core and big modeling tests on GPUs
# working-directory: accelerate
# run: |
# source activate accelerate
# make test_core
# make test_big_modeling
# make test_cli
# - name: Run Integration tests on GPUs
# working-directory: accelerate
# if: always()
# run: |
# source activate accelerate
# make test_integrations
# - name: Run examples on GPUs
# working-directory: accelerate
# if: always()
# run: |
# source activate accelerate
# pip uninstall comet_ml -y
# make test_examples
# - name: Generate Report
# working-directory: accelerate
# if: always()
# run: |
# pip install slack_sdk tabulate
# python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
# run_deepspeed_tests_multi_gpu:
# runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
# env:
# CUDA_VISIBLE_DEVICES: "0,1"
# TEST_TYPE: "multi_gpu_deepspeed"
# container:
# image: huggingface/accelerate:gpu-deepspeed-nightly
# options: --gpus all --shm-size "16gb"
# defaults:
# run:
# shell: bash
# steps:
# - name: Update clone
# run: |
# source activate accelerate
# git clone https://github.com/huggingface/accelerate;
# cd accelerate;
# git checkout ${{ github.sha }};
# pip install -e . --no-deps
# pip install pytest-reportlog tabulate
# - name: Show installed libraries
# run: |
# source activate accelerate;
# pip freeze
# - name: Run DeepSpeed tests
# working-directory: accelerate
# run: |
# source activate accelerate
# make test_deepspeed
# - name: Run Integration tests on GPUs
# working-directory: accelerate
# if: always()
# run: |
# source activate accelerate
# make test_integrations
# - name: Run examples on GPUs
# working-directory: accelerate
# if: always()
# run: |
# source activate accelerate
# pip uninstall comet_ml -y
# make test_examples
# - name: Generate Report
# working-directory: accelerate
# if: always()
# run: |
# pip install slack_sdk tabulate
# python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install slack_sdk tabulate
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
# run-integration-tests:
# if: always()
# uses: ./.github/workflows/self_hosted_integration_tests.yml
run-integration-tests:
if: always()
uses: ./.github/workflows/self_hosted_integration_tests.yml

View File

@@ -9,12 +9,12 @@ env:
IS_GITHUB_CI: "1"
jobs:
run_core_tests_single_gpu:
run_all_tests_single_gpu:
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
env:
CUDA_VISIBLE_DEVICES: "0"
container:
image: huggingface/accelerate:gpu-nightly
image: huggingface/accelerate-gpu:latest
options: --gpus all --shm-size "16gb"
defaults:
run:
@@ -29,11 +29,6 @@ jobs:
pip install -e .[testing,test_trackers] -U;
pip install pytest-reportlog tabulate ;
- name: Show installed libraries
run: |
source activate accelerate;
pip freeze
- name: Run CLI tests (use make cli)
working-directory: accelerate
run: |
@@ -61,51 +56,12 @@ jobs:
pip install tabulate;
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
run_deepspeed_tests_single_gpu:
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
env:
CUDA_VISIBLE_DEVICES: "0"
container:
image: huggingface/accelerate:gpu-deepspeed-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
shell: bash
steps:
- name: Install accelerate
run: |
source activate accelerate;
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e .[testing,test_trackers] -U;
pip install pytest-reportlog tabulate ;
- name: Show installed libraries
run: |
source activate accelerate;
pip freeze
- name: Run test on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate;
make test_deepspeed
- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install tabulate;
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
run_core_tests_multi_gpu:
run_all_tests_multi_gpu:
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
env:
CUDA_VISIBLE_DEVICES: 0,1
container:
image: huggingface/accelerate:gpu-nightly
image: huggingface/accelerate-gpu:latest
options: --gpus all --shm-size "16gb"
defaults:
run:
@@ -120,11 +76,6 @@ jobs:
pip install -e .[testing,test_trackers] -U;
pip install pytest-reportlog tabulate
- name: Show installed libraries
run: |
source activate accelerate;
pip freeze
- name: Run test on GPUs
working-directory: accelerate
run: |
@@ -145,40 +96,3 @@ jobs:
run: |
source activate accelerate;
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
run_deepspeed_tests_multi_gpu:
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
container:
image: huggingface/accelerate:gpu-deepspeed-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
shell: bash
steps:
- name: Install accelerate
run: |
source activate accelerate;
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e .[testing,test_trackers] -U;
pip install pytest-reportlog tabulate ;
- name: Show installed libraries
run: |
source activate accelerate;
pip freeze
- name: Run test on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate;
make test_deepspeed
- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install tabulate;
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

View File

@@ -23,7 +23,7 @@ defaults:
jobs:
run-trainer-tests:
container:
image: huggingface/accelerate:gpu-deepspeed-nightly
image: huggingface/accelerate-gpu:latest
options: --gpus all --shm-size "16gb"
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
strategy:
@@ -88,7 +88,7 @@ jobs:
run-skorch-tests:
container:
image: huggingface/accelerate:gpu-nightly
image: huggingface/accelerate-gpu:latest
options: --gpus all --shm-size "16gb"
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
strategy:

View File

@@ -44,17 +44,22 @@ jobs:
with:
python-version: 3.8
- name: Activate python cache
uses: actions/cache@v3
with:
path: |
${{ env.pythonLocation }}
${{ env.HF_HOME }}
key: ${{ env.pythonLocation }}-${{ matrix.pytorch-version }}-${{ matrix.test-kind }}-${{ hashFiles('setup.py') }}
- name: Install the library
run: |
pip install --upgrade pip
if [[ ${{ matrix.test-kind }} = test_prod ]]; then pip install -e .[test_prod]; fi
if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi
if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi
if [[ ${{ matrix.test-kind }} = minimum ]]; then pip install torch==1.10.0; fi
pip install pytest-reportlog tabulate setuptools
- name: Show installed libraries
run: |
pip freeze
pip install pytest-reportlog tabulate
- name: Run Tests
env:

View File

@@ -1,13 +0,0 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.2.1
hooks:
- id: ruff
args:
- --fix
- id: ruff-format
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: check-merge-conflict
- id: check-yaml

View File

@@ -152,7 +152,7 @@ Follow these steps to start contributing:
$ make test
```
`accelerate` relies on `ruff` to format its source code
`accelerate` relies on `black` and `ruff` to format its source code
consistently. After you make changes, apply automatic style corrections and code verifications
that can't be automated in one go with:
@@ -172,14 +172,6 @@ Follow these steps to start contributing:
$ make quality
```
You can also set up [`pre-commit`](https://pre-commit.com/) to run these checks
automatically as Git commit hooks.
```bash
$ pip install pre-commit
$ pre-commit install
```
Once you're happy with your changes, add changed files using `git add` and
make a commit with `git commit` to record your changes locally:
@@ -243,4 +235,4 @@ $ python -m pytest -sv ./tests
In fact, that's how `make test` is implemented (sans the `pip install` line)!
You can specify a smaller set of tests in order to test only the feature
you're working on.
you're working on.
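For instance, a minimal example (choosing the big modeling tests as an arbitrary target) could be:

```bash
$ python -m pytest -sv ./tests/test_big_modeling.py
```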

View File

@@ -1,6 +1,6 @@
.PHONY: quality style test docs utils
check_dirs := .
check_dirs := tests src examples benchmarks utils
# Check that source code meets quality standards
@@ -12,17 +12,20 @@ extra_quality_checks:
# this target runs checks on all files
quality:
ruff check $(check_dirs)
ruff format --check $(check_dirs)
black --required-version 23 --check $(check_dirs)
ruff $(check_dirs)
doc-builder style src/accelerate docs/source --max_len 119 --check_only
# Format source code automatically and check is there are any problems left that need manual fixing
style:
ruff check $(check_dirs) --fix
ruff format $(check_dirs)
black --required-version 23 $(check_dirs)
ruff $(check_dirs) --fix
doc-builder style src/accelerate docs/source --max_len 119
# Run tests for the library
test:
python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_all.log",)
test_big_modeling:
python -m pytest -s -v ./tests/test_big_modeling.py ./tests/test_modeling_utils.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_big_modeling.log",)
@@ -39,11 +42,6 @@ test_deepspeed:
test_fsdp:
python -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_fsdp.log",)
# Since the new version of pytest will *change* how things are collected, we need `deepspeed` to
# run after test_core and test_cli
test:
$(MAKE) test_big_modeling
test_examples:
python -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_examples.log",)

View File

@@ -171,15 +171,7 @@ To learn more, check the CLI documentation available [here](https://huggingface.
🤗 Here is another way to launch multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well.
Once you have MPI setup on your cluster, just run:
```bash
accelerate config
```
Answer the questions that are asked, selecting to run using multi-CPU, and answer "yes" when asked if you want accelerate to launch mpirun.
Then, use `accelerate launch` with your script like:
```bash
accelerate launch examples/nlp_example.py
```
Alternatively, you can use mpirun directly, without using the CLI like:
```bash
mpirun -np 2 python examples/nlp_example.py
```

View File

@@ -1,16 +1,3 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import threading
import time

View File

@@ -1,73 +0,0 @@
<!---
Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Official Hugging Face Accelerate Docker Images
Accelerate publishes a variety of docker versions as part of our CI that users can also use. These are stable images that Accelerate can run off of, which come with a variety of different setup configurations, all of which are officially hosted on [Docker Hub](https://hub.docker.com/r/huggingface/accelerate).
A breakdown of each is given below
## Naming Conventions
Accelerate docker images follow a tagging convention of:
```bash
huggingface/accelerate:{accelerator}-{nightly,release}
```
`accelerator` in this instance is one of many applicable pre-configured backend supports:
* `gpu`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes`. Runs off python 3.9.
* `cpu`: Comes compiled off of `python:3.9-slim` and is designed for non-CUDA based workloads.
* More to come soon
* `gpu-deepspeed`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes` as well as the latest `deepspeed` version. Runs off python 3.10.
## Nightlies vs Releases
Each release a new build is pushed with a version number included in the name. For a GPU-supported image of version 0.28.0 for instance, it would look like the following:
```bash
huggingface/accelerate:gpu-release-0.28.0
```
Nightlies contain two different image tags. There is a general `nightly` tag which is built each night, and a `nightly-YYYY-MM-DD` which corresponds to a build from a particular date.
For instance, here is an example nightly CPU image from 3/14/2024
```bash
huggingface/accelerate:cpu-nightly-2024-03-14
```
## Running the images
Each image comes compiled with `conda` and an `accelerate` environment that contains all of the installed dependencies.
To pull down the latest nightly run:
```bash
docker pull huggingface/accelerate:gpu-nightly
```
To then run it in interactive mode with GPU-memory available, run:
```bash
docker container run --gpus all -it huggingface/accelerate:gpu-nightly
```
## DEPRECATED IMAGES
CPU and GPU docker images were hosted at `huggingface/accelerate-gpu` and `huggingface/accelerate-cpu`. These builds are now outdated and will not receive updates.
The builds at the corresponding `huggingface/accelerate:{gpu,cpu}` contain the same `Dockerfile`, so it's as simple as changing the docker image to the desired ones from above. We will not be deleting these images for posterity, but they will not be receiving updates going forward.

View File

@@ -1,46 +0,0 @@
# Builds GPU docker image of PyTorch specifically
# Uses multi-staged approach to reduce size
# Stage 1
# Use base conda image to reduce time
FROM continuumio/miniconda3:latest AS compile-image
# Specify py version
# Note: DeepSpeed beyond v0.12.6 requires py 3.10
ENV PYTHON_VERSION=3.10
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
# Create our conda env
RUN conda create --name accelerate python=${PYTHON_VERSION} ipython jupyter pip
# We don't install pytorch here yet since CUDA isn't available
# instead we use the direct torch wheel
ENV PATH /opt/conda/envs/accelerate/bin:$PATH
# Activate our bash shell
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
# Activate the conda env, install mpy4pi, and install torch + accelerate
RUN source activate accelerate && conda install -c conda-forge mpi4py
RUN source activate accelerate && \
python3 -m pip install --no-cache-dir \
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers,deepspeed] \
--extra-index-url https://download.pytorch.org/whl/cu117
RUN python3 -m pip install --no-cache-dir bitsandbytes
# Stage 2
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 AS build-image
COPY --from=compile-image /opt/conda /opt/conda
ENV PATH /opt/conda/bin:$PATH
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
RUN echo "source activate accelerate" >> ~/.profile
# Activate the virtualenv
CMD ["/bin/bash"]

View File

@@ -1,10 +1,10 @@
# Builds GPU docker image of PyTorch specifically
# Builds GPU docker image of PyTorch
# Uses multi-staged approach to reduce size
# Stage 1
# Use base conda image to reduce time
FROM continuumio/miniconda3:latest AS compile-image
# Specify py version
ENV PYTHON_VERSION=3.9
ENV PYTHON_VERSION=3.8
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
@@ -19,8 +19,7 @@ ENV PATH /opt/conda/envs/accelerate/bin:$PATH
# Activate our bash shell
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
# Activate the conda env, install mpy4pi, and install torch + accelerate
RUN source activate accelerate && conda install -c conda-forge mpi4py
# Activate the conda env and install torch + accelerate
RUN source activate accelerate && \
python3 -m pip install --no-cache-dir \
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \

View File

@@ -10,63 +10,50 @@
- local: basic_tutorials/overview
title: Overview
- local: basic_tutorials/migration
title: Add Accelerate to your code
- local: basic_tutorials/execution
title: Execution process
- local: basic_tutorials/tpu
title: TPU training
title: Migrating to 🤗 Accelerate
- local: basic_tutorials/launch
title: Launching distributed code
- local: basic_tutorials/notebook
title: Launching distributed training from Jupyter Notebooks
- local: basic_tutorials/troubleshooting
title: Troubleshooting guide
title: Tutorials
- sections:
- isExpanded: true
sections:
- local: usage_guides/explore
title: Start Here!
- local: usage_guides/model_size_estimator
title: Model memory estimator
- local: usage_guides/quantization
title: Model quantization
- local: usage_guides/tracking
title: Experiment trackers
- local: usage_guides/checkpoint
title: Save and load training states
- local: basic_tutorials/troubleshooting
title: Troubleshoot
- local: usage_guides/training_zoo
title: Example Zoo
title: Accelerate
- isExpanded: true
sections:
- local: usage_guides/gradient_accumulation
title: Gradient accumulation
- local: usage_guides/local_sgd
title: Local SGD
- local: usage_guides/low_precision_training
title: Low precision (FP8) training
- local: usage_guides/deepspeed
title: DeepSpeed
- local: usage_guides/fsdp
title: Fully Sharded Data Parallelism
- local: usage_guides/megatron_lm
title: Megatron-LM
- local: usage_guides/sagemaker
title: Amazon SageMaker
- local: usage_guides/mps
title: Apple M1 GPUs
- local: usage_guides/ipex
title: IPEX training with CPU
title: Training
- isExpanded: true
sections:
- local: usage_guides/big_modeling
title: Big Model Inference
- local: usage_guides/distributed_inference
title: Distributed inference
title: Inference
title: How to guides
- local: usage_guides/explore
title: Start Here!
- local: usage_guides/training_zoo
title: Example Zoo
- local: usage_guides/big_modeling
title: How to perform inference on large models with small resources
- local: usage_guides/model_size_estimator
title: Knowing how big of a model you can fit into memory
- local: usage_guides/quantization
title: How to quantize model
- local: usage_guides/distributed_inference
title: How to perform distributed inference with normal resources
- local: usage_guides/gradient_accumulation
title: Performing gradient accumulation
- local: usage_guides/local_sgd
title: Accelerating training with local SGD
- local: usage_guides/checkpoint
title: Saving and loading training states
- local: usage_guides/tracking
title: Using experiment trackers
- local: usage_guides/mps
title: How to use Apple Silicon M1 GPUs
- local: usage_guides/low_precision_training
title: How to train in low precision (FP8)
- local: usage_guides/deepspeed
title: How to use DeepSpeed
- local: usage_guides/fsdp
title: How to use Fully Sharded Data Parallelism
- local: usage_guides/megatron_lm
title: How to use Megatron-LM
- local: usage_guides/sagemaker
title: How to use 🤗 Accelerate with SageMaker
- local: usage_guides/ipex
title: How to use 🤗 Accelerate with Intel® Extension for PyTorch for cpu
title: How-To Guides
- sections:
- local: concept_guides/internal_mechanism
title: 🤗 Accelerate's internal mechanism
@@ -78,8 +65,6 @@
title: Executing and deferring jobs
- local: concept_guides/gradient_synchronization
title: Gradient synchronization
- local: concept_guides/fsdp_and_deepspeed
title: FSDP vs DeepSpeed
- local: concept_guides/low_precision_training
title: How training in low-precision environments is possible (FP8)
- local: concept_guides/training_tpu
@@ -87,7 +72,7 @@
title: Concepts and fundamentals
- sections:
- local: package_reference/accelerator
title: Accelerator
title: Main Accelerator class
- local: package_reference/state
title: Stateful configuration classes
- local: package_reference/cli
@@ -104,8 +89,6 @@
title: Logging
- local: package_reference/big_modeling
title: Working with large models
- local: package_reference/inference
title: Distributed inference with big models
- local: package_reference/kwargs
title: Kwargs handlers
- local: package_reference/utilities

View File

@@ -1,128 +0,0 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Execution process
When working with distributed training systems, it is important to manage how and when processes are executed across GPUs. Some processes are completed faster than others, and some processes shouldn't begin if others haven't finished yet. Accelerate provides tools for orchestrating when processes are executed to ensure everything remains synchronized across all devices.
This tutorial will teach you how to execute a process on only one machine and how to delay execution until all processes have reached a certain point.
## Execute on one process
Certain code only needs to be run once on a given machine, such as printing a log statement or only displaying one progress bar on the local main process.
<hfoptions id="local-execution">
<hfoption id="statements">
You should use `accelerator.is_local_main_process` to indicate code that should only be executed once.
```py
from tqdm.auto import tqdm
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
```
You could also wrap a statement with `accelerator.is_local_main_process`.
> [!TIP]
> For standalone `print` statements that aren't wrapped in `accelerator.is_local_main_process`, replace `print` with Accelerate's [`~Accelerator.print`] method to only print once per process.
```py
if accelerator.is_local_main_process:
print("Accelerate is the best")
```
</hfoption>
<hfoption id="function">
For a function that should only be executed once, use [`~Accelerator.on_local_main_process`].
```py
@accelerator.on_local_main_process
def do_my_thing():
"Something done once per server"
do_thing_once_per_server()
```
</hfoption>
</hfoptions>
You could also direct Accelerate to execute code once across *all processes* regardless of the number of machines. This is useful if you're uploading a final model to the Hub.
<hfoptions id="main-execution">
<hfoption id="statement">
You should use `accelerator.is_main_process` to indicate code that should only be executed once across all processes.
```py
if accelerator.is_main_process:
repo.push_to_hub()
```
</hfoption>
<hfoption id="function">
For a function that should only be executed once across all processes, use [`~Accelerator.on_main_process`].
```py
@accelerator.on_main_process
def do_my_thing():
"Something done once per server"
do_thing_once()
```
</hfoption>
</hfoptions>
## Execute on a specific process
Accelerate can also help you execute functions that should only be executed on a specific process or a local process index.
<hfoptions id="specific-execution">
<hfoption id="specific process">
Use the [`~Accelerator.on_process`] method and specify the process index to execute a function on.
```py
@accelerator.on_process(process_index=0)
def do_my_thing():
"Something done on process index 0"
do_thing_on_index_zero()
```
</hfoption>
<hfoption id="local process">
Use the [`~Accelerator.on_local_process`] method and specify the local process index to execute a function on.
```py
@accelerator.on_local_process(local_process_idx=0)
def do_my_thing():
"Something done on process index 0 on each server"
do_thing_on_index_zero_on_each_server()
```
</hfoption>
</hfoptions>
## Defer execution
When you run your script on several GPUs at the same time, some code may be executed faster than others. You might need to wait for all processes to reach a certain point before executing the next set of instructions. For instance, you shouldn't save a model before making sure every process is done with training.
To do this, add [`~Accelerator.wait_for_everyone`] in your code. This blocks all processes that have finished first from continuing until all remaining processes have reached the same point (this has no effect if you're running on a single GPU or CPU).
```py
accelerator.wait_for_everyone()
```

View File

@@ -13,11 +13,21 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Add Accelerate to your code
# Migrating your code to 🤗 Accelerate
Each distributed training framework has their own way of doing things which can require writing a lot of custom code to adapt it to your PyTorch training code and training environment. Accelerate offers a friendly way to interface with these distributed training frameworks without having to learn the specific details of each one. Accelerate takes care of those details for you, so you can focus on the training code and scale it to any distributed training environment.
This tutorial will detail how to easily convert existing PyTorch code to use 🤗 Accelerate!
You'll see that by just changing a few lines of code, 🤗 Accelerate can perform its magic and get you on
your way toward running your code on distributed systems with ease!
In this tutorial, you'll learn how to adapt your existing PyTorch code with Accelerate and get you on your way toward training on distributed systems with ease! You'll start with a basic PyTorch training loop (it assumes all the training objects like `model` and `optimizer` have been setup already) and progressively integrate Accelerate into it.
## The base training loop
To begin, write out a very basic PyTorch training loop.
<Tip>
We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand.
</Tip>
```python
device = "cuda"
@@ -35,44 +45,50 @@ for batch in training_dataloader:
scheduler.step()
```
## Accelerator
The [`Accelerator`] is the main class for adapting your code to work with Accelerate. It knows about the distributed setup you're using such as the number of different processes and your hardware type. This class also provides access to many of the necessary methods for enabling your PyTorch code to work in any distributed training environment and for managing and executing processes across devices.
That's why you should always start by importing and creating an [`Accelerator`] instance in your script.
## Add in 🤗 Accelerate
To start using 🤗 Accelerate, first import and create an [`Accelerator`] instance:
```python
from accelerate import Accelerator
accelerator = Accelerator()
```
[`Accelerator`] is the main force behind utilizing all the possible options for distributed training!
The [`Accelerator`] also knows which device to move your PyTorch objects to, so it is recommended to let Accelerate handle this for you.
### Setting the right device
The [`Accelerator`] class knows the right device to move any PyTorch object to at any time, so you should
change the definition of `device` to come from [`Accelerator`]:
```diff
- device = "cuda"
- device = 'cuda'
+ device = accelerator.device
model.to(device)
```
## Prepare PyTorch objects
### Preparing your objects
Next, you need to prepare your PyTorch objects (model, optimizer, scheduler, etc.) for distributed training. The [`~Accelerator.prepare`] method takes care of placing your model in the appropriate container (like single GPU or multi-GPU) for your training setup, adapting the optimizer and scheduler to use Accelerate's [`~optimizer.AcceleratedOptimizer`] and [`~scheduler.AcceleratedScheduler`], and creating a new dataloader that can be sharded across processes.
Next, you need to pass all of the important objects related to training into [`~Accelerator.prepare`]. 🤗 Accelerate will
make sure everything is setup in the current environment for you to start training:
> [!TIP]
> Accelerate only prepares objects that inherit from their respective PyTorch classes such as `torch.optim.Optimizer`.
The PyTorch objects are returned in the same order they're sent.
```py
```
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
model, optimizer, training_dataloader, scheduler
)
```
These objects are returned in the same order they were sent in. By default when using `device_placement=True`, all of the objects that can be sent to the right device will be.
If you need to work with data that isn't passed to [~Accelerator.prepare] but should be on the active device, you should pass in the `device` you made earlier.
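A minimal sketch of that manual placement, where `extra_data` stands in for any tensor that never goes through [`~Accelerator.prepare`]:

```py
# Tensors that bypass `prepare` must be moved to the active device yourself.
extra_data = extra_data.to(accelerator.device)
```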
## Training loop
<Tip warning={true}>
Finally, remove the `to(device)` calls to the inputs and targets in the training loop because Accelerate's DataLoader classes automatically places them on the right device. You should also replace the usual `backward()` pass with Accelerate's [`~Accelerator.backward`] method which scales the gradients for you and uses the appropriate `backward()` method depending on your distributed setup (for example, DeepSpeed or Megatron).
Accelerate will only prepare objects that inherit from their respective PyTorch classes (such as `torch.optim.Optimizer`).
</Tip>
### Modifying the training loop
Finally, three lines of code need to be changed in the training loop. 🤗 Accelerate's DataLoader classes will automatically handle the device placement by default,
and [`~Accelerator.backward`] should be used for performing the backward pass:
```diff
- inputs = inputs.to(device)
@@ -83,13 +99,17 @@ Finally, remove the `to(device)` calls to the inputs and targets in the training
+ accelerator.backward(loss)
```
Put everything together and your new Accelerate training loop should now look like this!
With that, your training loop is now ready to use 🤗 Accelerate!
## The finished code
Below is the final version of the converted code:
```python
from accelerate import Accelerator
accelerator = Accelerator()
device = accelerator.device
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
model, optimizer, training_dataloader, scheduler
)
@@ -104,118 +124,6 @@ for batch in training_dataloader:
scheduler.step()
```
## Training features
## More Resources
Accelerate offers additional features - like gradient accumulation, gradient clipping, mixed precision training and more - you can add to your script to improve your training run. Let's explore these three features.
### Gradient accumulation
Gradient accumulation enables you to train on larger batch sizes by accumulating the gradients over multiple batches before updating the weights. This can be useful for getting around memory limitations. To enable this feature in Accelerate, specify the `gradient_accumulation_steps` parameter in the [`Accelerator`] class and add the [`~Accelerator.accumulate`] context manager to your script.
```diff
+ accelerator = Accelerator(gradient_accumulation_steps=2)
model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader)
for input, label in training_dataloader:
+ with accelerator.accumulate(model):
predictions = model(input)
loss = loss_function(predictions, label)
accelerator.backward(loss)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
```
### Gradient clipping
Gradient clipping is a technique to prevent "exploding gradients", and Accelerate offers:
* [`~Accelerator.clip_grad_value_`] to clip gradients to a minimum and maximum value
* [`~Accelerator.clip_grad_norm_`] for normalizing gradients to a certain value
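A minimal sketch of clipping by norm, assuming `model`, `optimizer`, `scheduler`, `training_dataloader`, and `loss_function` are the prepared objects from earlier and `1.0` is an arbitrary threshold:

```py
for input, label in training_dataloader:
    predictions = model(input)
    loss = loss_function(predictions, label)
    accelerator.backward(loss)
    # Only clip when gradients are synchronized and about to be applied.
    if accelerator.sync_gradients:
        accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()
```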
### Mixed precision
Mixed precision accelerates training by using a lower precision data type like fp16 (half-precision) to calculate the gradients. For the best performance with Accelerate, the loss should be computed inside your model (like in Transformers models) because computations outside of the model are computed in full precision.
Set the mixed precision type to use in the [`Accelerator`], and then use the [`~Accelerator.autocast`] context manager to automatically cast the values to the specified data type.
> [!WARNING]
> Accelerate enables automatic mixed precision, so [`~Accelerator.autocast`] is only needed if there are other mixed precision operations besides those performed on loss by [`~Accelerator.backward`] which already handles the scaling.
```diff
+ accelerator = Accelerator(mixed_precision="fp16")
+ with accelerator.autocast():
loss = complex_loss_function(outputs, target)
```
## Save and load
Accelerate can also save and load a *model* once training is complete or you can also save the model and optimizer *state* which could be useful for resuming training.
### Model
Once all processes are complete, unwrap the model with the [`~Accelerator.unwrap_model`] method before saving it because the [`~Accelerator.prepare`] method wrapped your model into the proper interface for distributed training. If you don't unwrap the model, saving the model state dictionary also saves any potential extra layers from the larger model and you won't be able to load the weights back into your base model.
You should use the [`~Accelerator.save_model`] method to unwrap and save the model state dictionary. This method can also save a model into sharded checkpoints or into the [safetensors](https://hf.co/docs/safetensors/index) format.
<hfoptions id="save">
<hfoption id="single checkpoint">
```py
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory)
```
<Tip>
For models from the [Transformers](https://hf.co/docs/transformers/index) library, save the model with the [`~transformers.PreTrainedModel.save_pretrained`] method so that it can be reloaded with the [`~transformers.PreTrainedModel.from_pretrained`] method.
```py
from transformers import AutoModel
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
"path/to/my_model_directory",
is_main_process=accelerator.is_main_process,
save_function=accelerator.save,
)
model = AutoModel.from_pretrained("path/to/my_model_directory")
```
</Tip>
To load your weights, use the [`~Accelerator.unwrap_model`] method to unwrap the model first before loading the weights. All model parameters are references to tensors, so this loads your weights inside `model`.
```py
unwrapped_model = accelerator.unwrap_model(model)
path_to_checkpoint = os.path.join(save_directory,"pytorch_model.bin")
unwrapped_model.load_state_dict(torch.load(path_to_checkpoint))
```
</hfoption>
<hfoption id="sharded checkpoint">
Set `safe_serialization=True` to save the model in the safetensor format.
```py
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
```
To load a sharded checkpoint or a safetensor formatted checkpoint, use the [`~accelerate.load_checkpoint_in_model`] method. This method allows you to load a checkpoint onto a specific device.
```py
load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"":device})
```
</hfoption>
</hfoptions>
### State
During training, you may want to save the current state of the model, optimizer, random generators, and potentially learning rate schedulers so they can be restored in the *same script*. You should add the [`~Accelerator.save_state`] and [`~Accelerator.load_state`] methods to your script to save and load states.
To further customize where and how states are saved through [`~Accelerator.save_state`], use the [`~utils.ProjectConfiguration`] class. For example, if `automatic_checkpoint_naming` is enabled, each saved checkpoint is stored at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.
Any other stateful items to be stored should be registered with the [`~Accelerator.register_for_checkpointing`] method so they can be saved and loaded. Every object passed to this method to be stored must have a `load_state_dict` and `state_dict` function.
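A rough sketch, where the checkpoint directory name and the registered `scheduler` are illustrative:

```py
# Register extra stateful objects (each must expose `state_dict`/`load_state_dict`).
accelerator.register_for_checkpointing(scheduler)

# Save the model, optimizer, RNG states, and registered objects...
accelerator.save_state(output_dir="my_checkpoint")

# ...and restore them later in the same script.
accelerator.load_state("my_checkpoint")
```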
To check out more ways on how to migrate to 🤗 Accelerate, check out our [interactive migration tutorial](https://huggingface.co/docs/accelerate/usage_guides/explore) which showcases other items that need to be watched for when using Accelerate and how to do so quickly.

View File

@@ -186,7 +186,7 @@ Here is a basic training loop for the animal classification problem:
<Tip>
The code has been split up to allow for explanations on each section. A full version that can be copy and pasted will be available at the end
The code has been split up to allow for explainations on each section. A full version that can be copy and pasted will be available at the end
</Tip>
@@ -344,7 +344,7 @@ def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64):
mean = mean.to(accelerator.device)
std = std.to(accelerator.device)
# Instantiate the optimizer
# Intantiate the optimizer
optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25)
# Instantiate the learning rate scheduler
@@ -443,12 +443,6 @@ epoch 4: 94.71
And that's it!
Please note that [`notebook_launcher`] ignores the 🤗 Accelerate config file, to launch based on the config use:
```bash
accelerate launch
```
## Debugging
A common issue when running the `notebook_launcher` is receiving a CUDA has already been initialized issue. This usually stems

View File

@@ -1,38 +0,0 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# TPU training
A [TPU (Tensor Processing Unit)](https://cloud.google.com/tpu/docs/intro-to-tpu) is a type of hardware specifically designed for training models efficiently. Accelerate supports TPU training, but there are a few things you should be aware of, namely graph compilation. This tutorial briefly discusses compilation, and for more details, take a look at the [Training on TPUs with Accelerate](../concept_guides/training_tpu) guide.
## Compilation
A TPU creates a graph of all the operations in the training step such as the forward pass, backward pass and optimizer step. This is why the first training step always takes a while because building and compiling this graph takes time. But once compilation is complete, it is cached and all subsequent steps are much faster.
The key is to avoid compiling your code again or else training is super slow. This means all your operations must be exactly the same:
* all tensors in your batches must have the same length (for example, no dynamic padding for NLP tasks)
* your code must be static (for example, no layers with for loops that have different lengths depending on the input such as a LSTM)
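For the first point, a minimal sketch of static padding with a hypothetical 🤗 Transformers `tokenizer`:

```py
def tokenize_function(examples):
    # Pad every example to one fixed length so all batches share the same shape
    # and the compiled graph can be reused.
    return tokenizer(examples["text"], padding="max_length", max_length=128, truncation=True)
```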
## Weight tying
A common language model design is to tie the weights of the embedding and softmax layers. However, moving the model to a TPU (either yourself or passing it to the [`~Accelerator.prepare`] method) breaks the weight tying and you'll need to retie the weights.
To add special behavior (like weight tying) in your script for TPUs, set [`~Accelerator.distributed_type`] to `DistributedType.TPU` first. Then you can use the [`~transformers.PreTrainedModel.tie_weights`] method to tie the weights.
```py
if accelerator.distributed_type == DistributedType.TPU:
model.tie_weights()
```

View File

@@ -13,82 +13,77 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->
# Troubleshoot
# Troubleshooting guide
This guide provides solutions to some issues you might encounter when using Accelerate. Not all errors are covered because Accelerate is an active library that is continuously evolving and there are many different use cases and distributed training setups. If the solutions described here don't help with your specific error, please take a look at the [Ask for help](#ask-for-help) section to learn where and how to get help.
This guide aims to provide you the tools and knowledge required to navigate some common issues. However,
as 🤗 Accelerate continuously evolves and the use cases and setups are diverse, you might encounter an issue not covered in this
guide. If the suggestions listed in this guide do not cover your such situation, please refer to the final section of
the guide, [Asking for Help](#ask-for-help), to learn where to find help with your specific issue.
## Logging
Logging can help you identify where an error is coming from. In a distributed setup with multiple processes, logging can be a challenge, but Accelerate provides the [`~accelerate.logging`] utility to ensure logs are synchronized.
When facing an error, logging can help narrow down where it is coming from. In a distributed setup with multiple processes,
logging can be a challenge, but 🤗 Accelerate provides a utility that streamlines the logging process and ensures that
logs are synchronized and managed effectively across the distributed setup.
To troubleshoot an issue, use [`~accelerate.logging`] instead of the standard Python [`logging`](https://docs.python.org/3/library/logging.html#module-logging) module. Set the verbosity level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`) with the `log_level` parameter, and then you can either:
To troubleshoot an issue, use `accelerate.logging` instead of the standard Python `logging` module:
1. Export the `log_level` as the `ACCELERATE_LOG_LEVEL` environment variable.
2. Pass the `log_level` directly to `get_logger`.
```diff
- import logging
+ from accelerate.logging import get_logger
- logger = logging.getLogger(__name__)
+ logger = get_logger(__name__)
```
For example, to set `log_level="INFO"`:
To set the log level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`), export it as the `ACCELERATE_LOG_LEVEL` environment,
or pass as `log_level` to `get_logger`:
```py
```python
from accelerate.logging import get_logger
logger = get_logger(__name__, log_level="DEBUG")
logger = get_logger(__name__, log_level="INFO")
```
By default, the log is called on main processes only. To call it on all processes, pass `main_process_only=False`.
If a log should be called on all processes and in order, also pass `in_order=True`.
```py
from accelerate.logging import get_logger
logger = get_logger(__name__, log_level="DEBUG")
# log all processes
logger.debug("thing_to_log", main_process_only=False)
# log all processes in order
logger.debug("thing_to_log", main_process_only=False, in_order=True)
```
## Hanging code and timeout errors
There can be many reasons why your code is hanging. Let's take a look at how to solve some of the most common issues that can cause your code to hang.
### Mismatched tensor shapes
### Mismatched tensor shapes
If your code seems to be hanging for a significant amount time on a distributed setup, a common cause is mismatched shapes of tensors on different
devices.
Mismatched tensor shapes is a common issue that can cause your code to hang for a significant amount of time on a distributed setup.
When running scripts in a distributed fashion, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are
necessary to grab tensors across devices to perform operations on them collectively. These (and other) functions rely on
`torch.distributed` performing a `gather` operation, which requires that tensors have the **exact same shape** across all processes.
When the tensor shapes don't match, you will experience handing code, and eventually hit a timeout exception.
When running scripts in a distributed setup, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are necessary to grab tensors across devices to collectively perform operations on them. These (and other) functions rely on `torch.distributed` to perform a `gather` operation, which requires tensors to have the **exact same shape** across all processes. When the tensor shapes don't match, your code hangs and you'll eventually hit a timeout exception.
If you suspect this to be the case, use Accelerate's operational debug mode to immediately catch the issue.
You can use Accelerate's operational debug mode to immediately catch this issue. We recommend enabling this mode during the `accelerate config` setup, but you can also enable it from the CLI, as an environment variable, or by manually editing the `config.yaml` file.
The recommended way to enable Accelerate's operational debug mode is during `accelerate config` setup.
Alternative ways to enable debug mode are:
<hfoptions id="mismatch">
<hfoption id="CLI">
* From the CLI:
```bash
accelerate launch --debug {my_script.py} --arg1 --arg2
```
</hfoption>
<hfoption id="environment variable">
If enabling debug mode as an environment variable, you don't need to call `accelerate launch`.
* As an environmental variable (which avoids the need for `accelerate launch`):
```bash
ACCELERATE_DEBUG_MODE="1" torchrun {my_script.py} --arg1 --arg2
```
</hfoption>
<hfoption id="config.yaml">
* Manually changing the `config.yaml` file:
Add `debug: true` to your `config.yaml` file.
```yaml
compute_environment: LOCAL_MACHINE
debug: true
```diff
compute_environment: LOCAL_MACHINE
+debug: true
```
</hfoption>
</hfoptions>
Once you enable debug mode, you should get a traceback that points to the tensor shape mismatch issue.
Once you enable the debug mode, you should get a similar traceback that points to the tensor shape mismatch issue:
```py
Traceback (most recent call last):
@@ -105,14 +100,16 @@ Operation: `accelerate.utils.operations.broadcast`
Input shapes:
- Process 0: [1, 5]
- Process 1: [1, 2, 5]
```
```
### Early stopping
### Early stopping leads to hanging
For early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss), it may not be synchronized across all processes. As a result, a break can happen on process 0 but not on process 1 which will cause your code to hang indefinitely until a timeout occurs.
When doing early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss),
it may not be synchronized across all of them. As a result, a break can happen on process 0 but not on process 1.
This will cause the code to hang indefinitely until a timeout occurs.
If you have early stopping conditionals, use the `set_breakpoint` and `check_breakpoint` methods to make sure all the processes
are ended correctly.
If you have early stopping conditionals, use `set_breakpoint` and `check_breakpoint` methods to make sure all the processes
are ended correctly:
```py
# Assume `should_do_breakpoint` is a custom defined function that returns a conditional,
@@ -125,38 +122,35 @@ if accelerator.check_breakpoint():
break
```
### Low kernel versions on Linux
### Hanging on low kernel versions on Linux
On Linux with kernel version < 5.5, hanging processes have been reported. To avoid this problem, upgrade your system to a later kernel version.
This is a known issue. On Linux with kernel version < 5.5, hanging processes have been reported. To avoid
encountering this problem, we recommend upgrading your system to a later kernel version.
### MPI

If your distributed CPU training job using MPI is hanging, ensure that you have
[passwordless SSH](https://www.open-mpi.org/faq/?category=rsh#ssh-keys) set up (using keys) between the nodes. This means
that for all nodes in your hostfile, you should be able to SSH from one node to another without being prompted for a password.

Next, try to run the `mpirun` command as a sanity check. For example, the command below should print out the
hostnames for each of the nodes.

```bash
mpirun -f hostfile -n {number of nodes} -ppn 1 hostname
```

## CUDA Out-of-Memory

One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory". The entire script needs to be restarted and any progress is lost.

To address this problem, Accelerate provides the [`find_executable_batch_size`] utility that is heavily based on [toma](https://github.com/BlackHC/toma).
This utility retries code that fails due to OOM (out-of-memory) conditions and automatically lowers batch sizes. For each OOM condition, the algorithm decreases the batch size by half and retries the code until it succeeds.
To use [`find_executable_batch_size`], restructure your training function to include an inner function with `find_executable_batch_size` and build your dataloaders inside it. At a minimum, this only takes 4 new lines of code.

<Tip warning={true}>

The inner function **must** take batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for you. Any object (models, optimizers) that consumes CUDA memory and is passed to the [`Accelerator`] also **must** be declared inside the inner function.

</Tip>
```diff
def training_function(args):
    accelerator = Accelerator()

+   @find_executable_batch_size(starting_batch_size=args.batch_size)
+   def inner_training_loop(batch_size):
+       nonlocal accelerator # Ensure they can be used in our context
+       accelerator.free_memory() # Free all lingering references
        # ... build the dataloaders, model, optimizer and scheduler here, using `batch_size` ...
+   inner_training_loop()
```
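For reference, here is a rough, self-contained sketch of the same pattern. The `get_model`, `get_optimizer`, `get_dataloader`, and `loss_function` helpers are placeholders for whatever your script already defines, not part of the Accelerate API:

```python
from accelerate import Accelerator
from accelerate.utils import find_executable_batch_size


def training_function(args):
    accelerator = Accelerator()

    @find_executable_batch_size(starting_batch_size=args.batch_size)
    def inner_training_loop(batch_size):
        nonlocal accelerator  # reuse the accelerator defined in the outer function
        accelerator.free_memory()  # free lingering references before retrying with a smaller batch size

        # Everything that allocates CUDA memory is (re)built inside the inner function
        model, optimizer = get_model(), get_optimizer()          # placeholder helpers
        train_dataloader = get_dataloader(batch_size)            # placeholder helper
        model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)

        for inputs, targets in train_dataloader:
            optimizer.zero_grad()
            loss = loss_function(model(inputs), targets)         # placeholder loss computation
            accelerator.backward(loss)
            optimizer.step()

    # Called without arguments: the decorator supplies the batch size and halves it on every OOM
    inner_training_loop()
```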
To find out more, check the documentation [here](../package_reference/utilities#accelerate.find_executable_batch_size).
## Non-reproducible results between device setups
If you changed the device setup and observe different model performance, it is likely you didn't update your script when moving from one setup to another. Even if you're using the same script with the same batch size, the results will still be different on a TPU, multi-GPU, and single GPU.
For example, if you were training on a single GPU with a batch size of 16 and you move to a dual GPU setup, you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**.
To make sure you can reproduce the results between the setups, make sure to use the same seed, adjust the batch size accordingly, and consider scaling the learning rate.
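As a rough sketch of what that looks like in practice (the dataset, the effective batch size of 16, and the seed are illustrative assumptions, and the arithmetic assumes `split_batches` is not enabled):

```python
from torch.utils.data import DataLoader

from accelerate import Accelerator
from accelerate.utils import set_seed

accelerator = Accelerator()
set_seed(42)  # use the same seed on every setup

# The batch size given to the dataloader is *per device*, so divide the desired
# effective batch size by the number of processes to keep runs comparable.
effective_batch_size = 16
per_device_batch_size = effective_batch_size // accelerator.num_processes
# `train_dataset` is assumed to be your existing dataset
train_dataloader = DataLoader(train_dataset, batch_size=per_device_batch_size, shuffle=True)
```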
For more details and a quick reference for batch sizes, check out the [Comparing performance between different device setups](../concept_guides/performance) guide.
## Performance issues on different GPUs
If your multi-GPU setup consists of different GPUs, you may encounter some performance issues:
- There may be an imbalance in GPU memory between the GPUs. In this case, the GPU with the smaller memory will limit the batch size or the size of the model that can be loaded onto the GPUs.
- If you are using GPUs with different performance profiles, the performance will be driven by the slowest GPU you are using because the other GPUs will have to wait for it to complete its workload.
Vastly different GPUs within the same setup can lead to performance bottlenecks.
## Ask for help
If none of the solutions and advice here helped resolve your issue, you can always reach out to the community and Accelerate team for help.
- Ask for help on the Hugging Face forums by posting your question in the [🤗 Accelerate category](https://discuss.huggingface.co/c/accelerate/18). Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved!
- Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you.
- Create an Issue on the 🤗 Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you think you've found a bug related to the library. Include context regarding the bug and details about your distributed setup to help us better figure out what's wrong and how we can fix it.

View File

@ -1,192 +0,0 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Moving between FSDP And DeepSpeed
🤗 Accelerate offers flexibility of training frameworks, by integrating two extremely powerful tools for distributed training, namely [PyTorch FSDP](../usage_guides/fsdp.md) and [Microsoft DeepSpeed](../usage_guides/deepspeed.md). The aim of this tutorial is to draw parallels, as well as to outline potential differences, to empower the user to switch seamlessly between these two frameworks.
<Tip>
To switch between the frameworks, we recommend launching code with 🤗 `accelerate launch`, passing in the correct config file with `--config_file`, or passing in the respective arguments directly for [FSDP and DeepSpeed](../package_reference/cli#accelerate-launch).

Example 🤗 Accelerate configurations can be found here for [DeepSpeed](../usage_guides/deepspeed#accelerate-deepspeed-plugin) and [FSDP](../usage_guides/fsdp#how-it-works-out-of-the-box), or in the [example zoo under "Launch Configurations"](../usage_guides/explore).
</Tip>
<Tip warning={true}>
This tutorial is for single-node, multi-GPU, scenarios only.
</Tip>
## Configuring Functionalities
Model tensors are split into different GPUs in an attempt to scale up model sizes; this is termed *sharding* in FSDP, and *partitioning* in DeepSpeed. FSDP sharding and DeepSpeed ZeRO (partitioning) stages are configured by `--fsdp_sharding_strategy`, and `--zero_stage`, respectively. In particular, FSDP `FULL_SHARD` maps to DeepSpeed ZeRO stage `3`; see this [comprehensive mapping between FSDP sharding and DeepSpeed ZeRO settings](../usage_guides/fsdp#mapping-between-fsdp-sharding-strategies-and-deepspeed-zero-stages). The below table summarizes and groups similar settings:
Group | Framework | Configuration | Example | Restrictions (if any)
--|--|--|--|--
sharding / partitioning | FSDP<br>DeepSpeed | `--fsdp_sharding_strategy`<br>`--zero_stage` | `1` (`FULL_SHARD`) <br>`3` |
offload | FSDP<br>DeepSpeed | `--fsdp_offload_params`<br>`--offload_param_device`<br>`--offload_optimizer_device` | `true`<br>`cpu`<br>`cpu` | all or nothing <br><br>
model loading | FSDP<br>DeepSpeed | <span style="white-space:nowrap;">`--fsdp_cpu_ram_efficient_loading`</span><br>`--zero3_init_flag` | `true`<br>`true` | <br>only ZeRO 3
efficient checkpointing | FSDP<br>DeepSpeed | `--fsdp_state_dict_type`<br>`--zero3_save_16bit_model` | `SHARDED_STATE_DICT`<br>`true` | <br>only ZeRO 3
weights prefetching | FSDP<br><br>DeepSpeed | `--fsdp_forward_prefetch`<br>`--fsdp_backward_prefetch`<br>None | `true`<br>`BACKWARD_PRE` | <br><br>
model | FSDP<br><br>DeepSpeed | `--fsdp_auto_wrap_policy`<br><span style="white-space:nowrap;">`--fsdp_transformer_layer_cls_to_wrap`</span><br>None | `TRANSFORMER_BASED_WRAP`<br><Layer Class> |<br>Usually not needed <br>Transparent to user.
parameters summoning | FSDP<br>DeepSpeed | `--fsdp_use_orig_params`<br>None | `true` | required for `torch.compile`<br>Transparent to user
parameters syncing | FSDP<br>DeepSpeed | `--fsdp_sync_module_states`<br>None | `true` |
training | FSDP<br>DeepSpeed | None<br>`--gradient_accumulation_steps`<br>`--gradient_clipping` | <br>`auto`<br>`auto` | Transparent to user
For detailed descriptions of the above, refer to [🤗 `Accelerate` launch documentation](../package_reference/cli#accelerate-launch).
<Tip>
To access other DeepSpeed configurations, such as mixed precision settings,
you need to pass in a `--deepspeed_config_file`, see the [documentation](../usage_guides/deepspeed#deepspeed-config-file).
DeepSpeed can also be configured via [`DeepSpeedPlugin`], e.g., `DeepSpeedPlugin.zero_stage` is equivalent to `--zero_stage`, and `DeepSpeedPlugin.hf_ds_config` can be used to pass `--deepspeed_config_file`.
</Tip>
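For example, a minimal sketch of the plugin route (the stage and clipping values here are illustrative, not recommendations):

```python
from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin

# Roughly equivalent to passing --zero_stage 3 --gradient_clipping 1.0 to `accelerate launch`
deepspeed_plugin = DeepSpeedPlugin(zero_stage=3, gradient_clipping=1.0)
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)
```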
<Tip>
FSDP can also be configured via [`FullyShardedDataParallelPlugin`], e.g., `FullyShardedDataParallelPlugin.sharding_strategy` is equivalent to `--fsdp_sharding_strategy`.
</Tip>
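Similarly, a minimal sketch for FSDP (assuming the script is started with `accelerate launch` so that a distributed environment exists):

```python
from torch.distributed.fsdp import ShardingStrategy

from accelerate import Accelerator, FullyShardedDataParallelPlugin

# Roughly equivalent to --fsdp_sharding_strategy FULL_SHARD on the command line
fsdp_plugin = FullyShardedDataParallelPlugin(sharding_strategy=ShardingStrategy.FULL_SHARD)
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```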
### Checkpointing
Do note that FSDP can be configured via `--fsdp_state_dict_type` to save either full or sharded checkpoints.
<Tip>
For DeepSpeed Zero3, one could pass a `--zero3_save_16bit_model true`, which conveniently consolidates the model to a single rank and saves; this is the FSDP equivalent of `fsdp_state_dict_type: FULL_STATE_DICT`.
</Tip>
<Tip warning={true}>
For large models, consolidating the model to a single rank can be very slow.
</Tip>
<Tip>
For quicker checkpointing, for FSDP use `fsdp_state_dict_type: SHARDED_STATE_DICT`, and for DeepSpeed Zero3 [use the `zero_to_fp32.py` script to post-convert sharded checkpoints](https://www.deepspeed.ai/tutorials/zero/#extracting-weights).
</Tip>
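For reference, a typical post-conversion call looks roughly like the following; DeepSpeed writes the `zero_to_fp32.py` script into the checkpoint directory it saves, and the paths below are placeholders:

```bash
cd /path/to/checkpoint_dir
python zero_to_fp32.py . pytorch_model.bin
```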
### Offloading
FSDP only allows *all-or-nothing* offload (i.e., either offload parameters, gradients, and optimizer, or keep them all in GPU), but DeepSpeed can offload parameters and optimizer differently. Furthermore, DeepSpeed also supports [offloading to NVME](https://www.deepspeed.ai/docs/config-json/#parameter-offloading).
### Prefetching
FSDP allows two prefetching configurations `--fsdp_forward_prefetch` and `--fsdp_backward_prefetch` to improve overlap of comms / computation at a cost of extra memory, see [FSDP documentation](https://pytorch.org/docs/stable/fsdp.html).
For DeepSpeed, the prefetching will be turned on when needed, and it turns on depending on certain hyper-params like `stage3_param_persistence_threshold`, `stage3_max_reuse_distance`, etc, [that can be configured for Zero3](https://www.deepspeed.ai/docs/config-json/#parameter-offloading); 🤗 `accelerate` may set these hyper-params automatically if you don't set those explicitly in the deepspeed config file.
<Tip>
For FSDP set `fsdp_backward_prefetch: BACKWARD_PRE` for improved throughputs if memory allows.
</Tip>
### Model Loading
While FSDP requires an explicit `--fsdp_cpu_ram_efficient_loading true` to activate efficient model loading, 🤗 `transformers` will activate a similar feature whenever DeepSpeed Zero3 is used.
<Tip>
For FSDP, whenever setting `--fsdp_cpu_ram_efficient_loading true`, 🤗 `accelerate` will automatically set `sync_module_states` to true.
For RAM efficient loading, the weights will be loaded only on a single rank, and thus require `sync_module_states` to broadcast the weights to the other ranks.
</Tip>
### Model
FSDP requires an explicit `--fsdp_auto_wrap_policy` for the algorithm to decide how to schedule the all-gather and reduce-scatter operations. But for DeepSpeed this is transparent to the user.
<Tip>
For FSDP, simply set `fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP`. With the latest [`transformers`] versions, we try our best to figure out the suitable `fsdp_transformer_layer_cls_to_wrap` for HF transformers models. However, if you get an error regarding it, please specify this.
</Tip>
### Parameters Summoning
FSDP requires an explicit `--fsdp_use_orig_params` flag if using `torch.compile`, see [the PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html#module-torch.distributed.fsdp). For DeepSpeed this is transparent to the user.
<Tip>
For FSDP, when using `torch.compile` please set `fsdp_use_orig_params: True`.
</Tip>
## Training
Deepspeed requires explicit `--gradient_accumulation_steps` and `--gradient_clipping` flags. For FSDP this is transparent to the user.
<Tip>
When using DeepSpeed, set `gradient_accumulation_steps: "auto"` and `gradient_clipping: "auto"` to automatically pick up values set in the [`Accelerator`] or [`TrainingArguments`] (if using `transformers`).
</Tip>
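As a sketch, when driving DeepSpeed through a config dict (or the equivalent JSON file passed with `--deepspeed_config_file`), the relevant fragment looks like this; other required fields are omitted:

```python
# Hypothetical fragment of a DeepSpeed config passed via `DeepSpeedPlugin(hf_ds_config=...)`
ds_config = {
    # "auto" lets Accelerate fill these in from the values already set on the Accelerator/TrainingArguments
    "gradient_accumulation_steps": "auto",
    "gradient_clipping": "auto",
}
```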
## On Differences in Data Precision Handling
To discuss how data precision is handled in both FSDP and DeepSpeed, it is instructive to first give an overview of how model parameters are handled in these frameworks. Before the model / optimizer parameters are distributed across GPUs, parameter preparation is involved to first "flatten" them to one-dimensional [`torch.Tensor`](https://pytorch.org/docs/stable/tensors.html#torch-tensor). The implementation of FSDP / DeepSpeed varies with respect to the `dtype` in which these "flattened" parameters are stored, and there are ramifications with regard to how [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) allocates its `dtype`s. The table below outlines the processes for both frameworks; the "Local" column indicates the process occurring at a per-GPU level, therefore any memory overheads from upcasting should be understood to be amortized by the number of GPUs used.
<Tip>
As a rule of thumb, for stable training with automatic mixed precision, all the trainable parameters have to be in `torch.float32`.
</Tip>
Process | Local | Framework | Details
--|--|--|--
Loading, i.e., [`AutoModel.from_pretrained(..., torch_dtype=torch_dtype)`] |
Preparation, i.e., creation of "flat params" | ✅ | FSDP<br>DeepSpeed | created in `torch_dtype`.<br> disregards `torch_dtype`, created in `float32`.
Optimizer initialization | ✅ | FSDP<br>DeepSpeed | creates parameters in `torch_dtype`<br> creates parameters in `float32`
Training Step, i.e, forward, backward, reduction | | FSDP<br>DeepSpeed | follows [`MixedPrecision`](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.MixedPrecision)<br> follows `deepspeed_config_file` mixed precision settings.
Optimizer (Pre-Step) | ✅ | FSDP<br>DeepSpeed | upcasting (if any) to `torch_dtype`<br>upcasted to `float32`
Optimizer (Actual Step) | ✅ | FSDP<br>DeepSpeed | occurs in `torch_dtype` <br> occurs in `float32`.
<Tip warning={true}>
Therefore when using DeepSpeed with a small number of GPUs, be aware of potentially significant memory overheads due to the upcasting during preparation.
</Tip>
<Tip>
With FSDP, in the absence of mixed precision, it is possible to operate the [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) in low precision `torch_dtype`, which may be helpful when using a small number of GPUs.
</Tip>
<Tip warning={true}>
With mixed precision, FSDP and DeepSpeed will upcast in the model preparation step (c.f. table above). But do note that FSDP will then save checkpoints in the upcasted precision; Deepspeed may still save low precision checkpoints if `--zero3_save_16bit_model` is specified.
</Tip>
To clarify the above table consider the concrete examples below; the optimizer pre- and actual step combined for brevity. With FSDP it is possible to operate in the two modes shown below, but DeepSpeed can only operate in one.
Framework | Model Loading (`torch_dtype`) | Mixed Precision | Preparation (Local) | Training | Optimizer (Local)
--|--|--|--|--|--
FSDP | bf16 | default (none) | bf16 | bf16 | bf16
FSDP | bf16 | bf16 | fp32 | bf16 | fp32
DeepSpeed | bf16 | bf16 | fp32 | bf16 | fp32

View File

@ -167,18 +167,3 @@ As you can see, if you are not careful about how you set up your gradient synchr
If you are worried about making sure everything is done properly, we highly recommend utilizing the [`~Accelerator.accumulate`] function and passing in
`gradient_accumulation_steps` or `gradient_accumulation_plugin` to the [`Accelerator`] object so Accelerate can handle this for you.
### `no_sync` requires additional GPU memory when using FSDP
Be aware that not syncing gradients can have adverse effects while performing FSDP training. As it has been warned in `torch`, the [`no_sync` context manager for FSDP](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.no_sync) will require additional memory.
Therefore in memory intensive situations while using FSDP, we recommend setting `sync_each_batch` to `True` in the [`~utils.GradientAccumulationPlugin`] to disable `no_sync`.
See the example below where we fine-tune Mixtral (47B parameters) on 8 A100-80GB GPUs. We see that even for a modest `gradient_accumulation_steps=2` we quickly go out-of-memory (OOM) if `no_sync` is enabled. Again, this is due to additional memory overheads due to FSDP's `no_sync`. However, if `no_sync` is disabled via `sync_each_batch=True`, then the memory consumption for `gradient_accumulation_steps=16` reverts to that of `gradient_accumulation_steps=1`.
| Model | `no_sync` (accum=1) | `no_sync` (accum=2) | `no_sync` disabled (accum=16)
| :-------------: | :-----------------: | :-----------------: | :-----------------:
mixtral 8x7B | 69G | OOM | 69G
> [!WARNING]
> Disabling `no_sync` means there _will be a slowdown_ due to the extra data syncs, as explained by the earlier sections of this guide.
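A sketch of that configuration (the accumulation factor here mirrors the table above):

```python
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# Disable `no_sync` to avoid FSDP's extra memory overhead, at the cost of syncing gradients every batch
plugin = GradientAccumulationPlugin(num_steps=16, sync_each_batch=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
```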

View File

@ -16,7 +16,7 @@ rendered properly in your Markdown viewer.
# Low Precision Training Methods
The release of new kinds of hardware led to the emergence of new training paradigms that better utilize them. Currently, this is in the form of training
in 8-bit precision using packages such as [TransformersEngine](https://github.com/NVIDIA/TransformerEngine) (TE) or [MS-AMP](https://github.com/Azure/MS-AMP/tree/main).
For an introduction to the topics discussed today, we recommend reviewing the [low-precision usage guide](../usage_guides/low_precision_training.md) as this documentation will reference it regularly.
@ -34,9 +34,9 @@ MS-AMP O3 | FP8 | FP8 | FP8 | FP16 | FP8 | FP8+FP16
## `TransformersEngine`
`TransformersEngine` is the first solution to trying to train in 8-bit floating point. It works by using drop-in replacement layers for certain ones in a model that utilize their FP8-engine to reduce the number of bits (such as 32 to 8) without degrading the final accuracy of the model.

Specifically, 🤗 Accelerate will find and replace the following layers with `TransformersEngine` versions:
* `nn.LayerNorm` for `te.LayerNorm`
* `nn.Linear` for `te.Linear`
@ -65,10 +65,10 @@ MS-AMP takes a different approach to `TransformersEngine` by providing three dif
* The base optimization level (`O1`), passes communications of the weights (such as in DDP) in FP8, stores the weights of the model in FP16, and leaves the optimizer states in FP32. The main benefit of this optimization level is that we can reduce the communication bandwidth by essentially half. Additionally, more GPU memory is saved due to 1/2 of everything being cast in FP8, and the weights being cast to FP16. Notably, both the optimizer states remain in FP32.
* The second optimization level (`O2`) improves upon this by also reducing the precision of the optimizer states. One is in FP8 while the other is in FP16. Generally it's been shown that this will only provide a net-gain of no degraded end accuracy, increased training speed, and reduced memory as now every state is either in FP16 or FP8.
* Finally, MS-AMP has a third optimization level (`O3`) which helps during DDP scenarios such as DeepSpeed. The weights of the model in memory are fully cast to FP8, and the master weights are now stored in FP16. This fully reduces memory by the highest factor as now not only is almost everything in FP8, only two states are left in FP16. Currently, only DeepSpeed versions up through 0.9.2 are supported, so this capability is not included in the 🤗 Accelerate integration
## Combining the two
More experiments need to be performed but it's been noted that combining both MS-AMP and TransformersEngine can lead to the highest throughput by relying on NVIDIA's optimized FP8 operators and utilizing how MS-AMP reduces the memory overhead.

View File

@ -45,7 +45,7 @@ Why is this important? Under the hood this will set **5** different seed setting
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
if is_torch_xla_available():
    xm.set_rng_state(seed)
```

View File

@ -15,12 +15,197 @@ rendered properly in your Markdown viewer.
# Accelerator
The [`Accelerator`] is the main class provided by 🤗 Accelerate.
It serves as the main entry point for the API.

## Quick adaptation of your code
To quickly adapt your script to work on any kind of setup with 🤗 Accelerate just:
1. Initialize an [`Accelerator`] object (that we will call `accelerator` throughout this page) as early as possible in your script.
2. Pass your dataloader(s), model(s), optimizer(s), and scheduler(s) to the [`~Accelerator.prepare`] method.
3. Remove all the `.cuda()` or `.to(device)` from your code and let the `accelerator` handle the device placement for you.
<Tip>
Step three is optional, but considered a best practice.
</Tip>
4. Replace `loss.backward()` in your code with `accelerator.backward(loss)`
5. Gather your predictions and labels before storing them or using them for metric computation using [`~Accelerator.gather`]
<Tip warning={true}>
Step five is mandatory when using distributed evaluation
</Tip>
In most cases this is all that is needed. The next section lists a few more advanced use cases and nice features
you should search for and replace by the corresponding methods of your `accelerator`:
## Advanced recommendations
### Printing
`print` statements should be replaced by [`~Accelerator.print`] to be printed once per process:
```diff
- print("My thing I want to print!")
+ accelerator.print("My thing I want to print!")
```
### Executing processes
#### Once on a single server
For statements that should be executed once per server, use [`~Accelerator.is_local_main_process`]:
```python
if accelerator.is_local_main_process:
    do_thing_once_per_server()
```
A function can be wrapped using the [`~Accelerator.on_local_main_process`] function to achieve the same
behavior on a function's execution:
```python
@accelerator.on_local_main_process
def do_my_thing():
"Something done once per server"
do_thing_once_per_server()
```
#### Only ever once across all servers
For statements that should only ever be executed once, use [`~Accelerator.is_main_process`]:
```python
if accelerator.is_main_process:
    do_thing_once()
```
A function can be wrapped using the [`~Accelerator.on_main_process`] function to achieve the same
behavior on a function's execution:
```python
@accelerator.on_main_process
def do_my_thing():
"Something done once per server"
do_thing_once()
```
#### On specific processes
If a function should be run on a specific overall or local process index, there are similar decorators
to achieve this:
```python
@accelerator.on_local_process(local_process_index=0)
def do_my_thing():
    "Something done on process index 0 on each server"
    do_thing_on_index_zero_on_each_server()
```
```python
@accelerator.on_process(process_index=0)
def do_my_thing():
"Something done on process index 0"
do_thing_on_index_zero()
```
### Synchronicity control
Use [`~Accelerator.wait_for_everyone`] to make sure all processes join that point before continuing. (Useful before a model save for instance).
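For example, a common pattern before saving from the main process only (a sketch; `"model.pt"` is a placeholder path):

```python
# Make sure every process has finished its work before touching the checkpoint
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
accelerator.save(unwrapped_model.state_dict(), "model.pt")
```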
### Saving and loading
```python
model = MyModel()
model = accelerator.prepare(model)
```
Use [`~Accelerator.save_model`] instead of `torch.save` to save a model. It will remove all model wrappers added during the distributed process, get the state_dict of the model and save it. The state_dict will be in the same precision as the model being trained.
```diff
- torch.save(state_dict, "my_state.pkl")
+ accelerator.save_model(model, save_directory)
```
[`~Accelerator.save_model`] can also save a model into sharded checkpoints or with safetensors format.
Here is an example:
```python
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
```
#### 🤗 Transformers models
If you are using models from the [🤗 Transformers](https://huggingface.co/docs/transformers/) library, you can use the `.save_pretrained()` method.
```python
from transformers import AutoModel
model = AutoModel.from_pretrained("bert-base-cased")
model = accelerator.prepare(model)
# ...fine-tune with PyTorch...
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
"path/to/my_model_directory",
is_main_process=accelerator.is_main_process,
save_function=accelerator.save,
)
```
This will ensure your model stays compatible with other 🤗 Transformers functionality like the `.from_pretrained()` method.
```python
from transformers import AutoModel
model = AutoModel.from_pretrained("path/to/my_model_directory")
```
### Operations
Use [`~Accelerator.clip_grad_norm_`] instead of ``torch.nn.utils.clip_grad_norm_`` and [`~Accelerator.clip_grad_value_`] instead of ``torch.nn.utils.clip_grad_value_``.
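For example (a sketch; `max_grad_norm` stands in for whatever clipping value your script already uses):

```diff
- torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
+ accelerator.clip_grad_norm_(model.parameters(), max_grad_norm)
```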
### Gradient Accumulation
To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a gradient_accumulation_steps.
This will also automatically ensure the gradients are synced or unsynced when on
multi-device training, check if the step should actually be performed, and auto-scale the loss:
```diff
- accelerator = Accelerator()
+ accelerator = Accelerator(gradient_accumulation_steps=2)
  for (input, label) in training_dataloader:
+     with accelerator.accumulate(model):
          predictions = model(input)
          loss = loss_function(predictions, label)
          accelerator.backward(loss)
          optimizer.step()
          scheduler.step()
          optimizer.zero_grad()
```
#### GradientAccumulationPlugin
[[autodoc]] utils.GradientAccumulationPlugin
Instead of passing `gradient_accumulation_steps` you can instantiate a `GradientAccumulationPlugin` and pass it to the [`Accelerator`]'s `__init__`
as `gradient_accumulation_plugin`. You can only pass one of `gradient_accumulation_plugin` or `gradient_accumulation_steps`; passing both will raise an error.
```diff
from accelerate.utils import GradientAccumulationPlugin
gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2)
- accelerator = Accelerator()
+ accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin)
```
In addition to the number of steps, this also lets you configure whether or not you adjust your learning rate scheduler to account for the change in steps due to accumulation.
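For instance, if you step your scheduler manually and do not want it adjusted (a sketch):

```python
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# num_steps is the accumulation factor; adjust_scheduler=False leaves the LR scheduler untouched
gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2, adjust_scheduler=False)
accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin)
```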
## Overall API documentation:
[[autodoc]] Accelerator
## Utilities
[[autodoc]] accelerate.utils.gather_object

View File

@ -208,10 +208,6 @@ The following arguments are only useful when `use_fsdp` is passed or Fully Shard
* `--fsdp_transformer_layer_cls_to_wrap` (`str`) -- Transformer layer class name (case-sensitive) to wrap, e.g, `BertLayer`, `GPTJBlock`, `T5Block` ...
* `--fsdp_backward_prefetch_policy` (`str`) -- FSDP's backward prefetch policy.
* `--fsdp_state_dict_type` (`str`) -- FSDP's state dict type.
* `--fsdp_forward_prefetch` (`str`) -- FSDP forward prefetch.
* `--fsdp_use_orig_params` (`str`) -- If True, allows non-uniform `requires_grad` mixed in a FSDP unit.
* `--fsdp_cpu_ram_efficient_loading` (`str`) - If true, only the first process loads the pretrained model checkpoint while all other processes have empty weights. When using this, `--fsdp_sync_module_states` needs to be `True`.
* `--fsdp_sync_module_states` (`str`) - If true, each individually wrapped FSDP unit will broadcast module parameters from rank 0.
**Megatron-LM Arguments**:
@ -222,7 +218,7 @@ The following arguments are only useful when `use_megatron_lm` is passed or Mega
* `--megatron_lm_num_micro_batches` (``) -- Megatron-LM's number of micro batches when PP degree > 1.
* `--megatron_lm_sequence_parallelism` (``) -- Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1.
* `--megatron_lm_recompute_activations` (``) -- Decides Whether (true|false) to enable Selective Activation Recomputation.
* `--megatron_lm_use_distributed_optimizer` (``) -- Decides Whether (true|false) to use distributed optimizer which shards optimizer state and gradients across Data Parallel (DP) ranks.
* `--megatron_lm_gradient_clipping` (``) -- Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable).
**AWS SageMaker Arguments**:

View File

@ -17,12 +17,12 @@ rendered properly in your Markdown viewer.
[[autodoc]] utils.DeepSpeedPlugin
[[autodoc]] utils.deepspeed.DummyOptim

[[autodoc]] utils.deepspeed.DummyScheduler

[[autodoc]] utils.deepspeed.DeepSpeedEngineWrapper

[[autodoc]] utils.deepspeed.DeepSpeedOptimizerWrapper

[[autodoc]] utils.deepspeed.DeepSpeedSchedulerWrapper

View File

@ -1,20 +0,0 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# The inference API
These docs refer to the [PiPPy](https://github.com/PyTorch/PiPPy) integration.
[[autodoc]] inference.prepare_pippy

View File

@ -37,7 +37,3 @@ related to distributed training or mixed precision are created.
## InitProcessGroupKwargs
[[autodoc]] InitProcessGroupKwargs
## KwargsHandler
[[autodoc]] utils.KwargsHandler

View File

@ -60,10 +60,12 @@ These are standalone dataclasses used for checks, such as the type of distribute
### Kwargs
These are configurable arguments for specific interactions throughout the PyTorch ecosystem that Accelerate handles under the hood.
[[autodoc]] utils.AutocastKwargs
[[autodoc]] utils.DistributedDataParallelKwargs
[[autodoc]] utils.FP8RecipeKwargs
@ -72,12 +74,10 @@ These are configurable arguments for specific interactions throughout the PyTorc
[[autodoc]] utils.InitProcessGroupKwargs
[[autodoc]] utils.KwargsHandler
## Plugins
These are plugins that can be passed to the [`Accelerator`] object. While they are defined elsewhere in the documentation,
for convenience all of them are available to see here:
[[autodoc]] utils.DeepSpeedPlugin
@ -95,8 +95,6 @@ These are classes which can be configured and passed through to the appropriate
[[autodoc]] utils.BnbQuantizationConfig
[[autodoc]] utils.DataLoaderConfiguration
[[autodoc]] utils.ProjectConfiguration
## Environmental Variables
@ -152,7 +150,7 @@ These functionalities check the state of the current working environment includi
[[autodoc]] utils.is_torch_version
[[autodoc]] utils.is_torch_xla_available
[[autodoc]] utils.is_xpu_available
@ -166,10 +164,6 @@ These functionalities check the state of the current working environment includi
When setting up 🤗 Accelerate for the first time, rather than running `accelerate config`, [`~utils.write_basic_config`] can be used as an alternative for quick configuration.
[[autodoc]] utils.set_numa_affinity
[[autodoc]] utils.environment.override_numa_affinity
## Memory
[[autodoc]] utils.find_executable_batch_size

View File

@ -9,78 +9,26 @@ Unless required by applicable law or agreed to in writing, software distributed
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Quicktour
There are many ways to launch and run your code depending on your training environment ([torchrun](https://pytorch.org/docs/stable/elastic/run.html), [DeepSpeed](https://www.deepspeed.ai/), etc.) and available hardware. Accelerate offers a unified interface for launching and training on different distributed setups, allowing you to focus on your PyTorch training code instead of the intricacies of adapting your code to these different setups. This allows you to easily scale your PyTorch code for training and inference on distributed setups with hardware like GPUs and TPUs. Accelerate also provides Big Model Inference to make loading and running inference with really large models that usually don't fit in memory more accessible.
This quicktour introduces the three main features of Accelerate:

* a unified command line launching interface for distributed training scripts
* a training library for adapting PyTorch training code to run on different distributed setups
* Big Model Inference

## Unified launch interface

Accelerate automatically selects the appropriate configuration values for any given distributed training framework (DeepSpeed, FSDP, etc.) through a unified configuration file generated from the [`accelerate config`](package_reference/cli#accelerate-config) command. You could also pass the configuration values explicitly to the command line which is helpful in certain situations like if you're using SLURM.

But in most cases, you should always run [`accelerate config`](package_reference/cli#accelerate-config) first to help Accelerate learn about your training setup.
```bash
accelerate config
```
The [`accelerate config`](package_reference/cli#accelerate-config) command creates and saves a default_config.yaml file in Accelerate's cache folder. This file stores the configuration for your training environment, which helps Accelerate correctly launch your training script based on your machine.
After you've configured your environment, you can test your setup with [`accelerate test`](package_reference/cli#accelerate-test), which launches a short script to test the distributed environment.
```bash
accelerate test
```
> [!TIP]
> Add `--config_file` to the `accelerate test` or `accelerate launch` command to specify the location of the configuration file if it is saved in a non-default location like the cache.
Once your environment is set up, launch your training script with [`accelerate launch`](package_reference/cli#accelerate-launch)!
```bash
accelerate launch path_to_script.py --args_for_the_script
```
To learn more, check out the [Launch distributed code](basic_tutorials/launch) tutorial for more information about launching your scripts.
## Adapt training code
The next main feature of Accelerate is the [`Accelerator`] class which adapts your PyTorch code to run on different distributed setups.
You only need to add a few lines of code to your training script to enable it to run on multiple GPUs or TPUs.
```diff
+ from accelerate import Accelerator
+ accelerator = Accelerator()
+ device = accelerator.device

+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(
+     model, optimizer, training_dataloader, scheduler
+ )

  for batch in training_dataloader:
      optimizer.zero_grad()
      inputs, targets = batch
-     inputs = inputs.to(device)
-     targets = targets.to(device)
      outputs = model(inputs)
      loss = loss_function(outputs, targets)
+     accelerator.backward(loss)
      optimizer.step()
      scheduler.step()
```
1. Import and instantiate the [`Accelerator`] class at the beginning of your training script. The [`Accelerator`] class initializes everything necessary for distributed training, and it automatically detects your training environment (a single machine with a GPU, a machine with several GPUs, several machines with multiple GPUs or a TPU, etc.) based on how the code was launched.
```python
from accelerate import Accelerator
accelerator = Accelerator()
```
2. Remove calls like `.cuda()` on your model and input data. The [`Accelerator`] class automatically places these objects on the appropriate device for you.

> [!WARNING]
> This step is *optional* but it is considered best practice to allow Accelerate to handle device placement. You could also deactivate automatic device placement by passing `device_placement=False` when initializing the [`Accelerator`]. If you want to explicitly place objects on a device with `.to(device)`, make sure you use `accelerator.device` instead. For example, if you create an optimizer before placing a model on `accelerator.device`, training fails on a TPU.

> [!WARNING]
> Accelerate does not use non-blocking transfers by default for its automatic device placement, which can result in potentially unwanted CUDA synchronizations. You can enable non-blocking transfers by passing a [`~utils.dataclasses.DataLoaderConfiguration`] with `non_blocking=True` set as the `dataloader_config` when initializing the [`Accelerator`]. As usual, non-blocking transfers will only work if the dataloader also has `pin_memory=True` set. Be wary that using non-blocking transfers from GPU to CPU may cause incorrect results if it results in CPU operations being performed on non-ready tensors.

3. Pass all relevant PyTorch objects for training (optimizer, model, dataloader(s), learning rate scheduler) to the [`~Accelerator.prepare`] method as soon as they're created. This method wraps the model in a container optimized for your distributed setup, uses Accelerate's version of the optimizer and scheduler, and creates a sharded version of your dataloader for distribution across GPUs or TPUs.
```python
model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
    model, optimizer, train_dataloader, lr_scheduler
)
```
4. Replace `loss.backward()` with [`~Accelerator.backward`] to use the correct `backward()` method for your training setup.

```py
accelerator.backward(loss)
```

**Important notes**:

* You should always pass the learning rate scheduler to [`~Accelerator.prepare`], however if the scheduler should *not* be stepped at each optimization step, pass `step_with_optimizer=False` to the [`Accelerator`] init.
* While you can send your dataloader to [`~Accelerator.prepare`] on its own (and there are cases for doing so, such as distributed inference), it's best to send it to [`~Accelerator.prepare`] together with the model and optimizer.
* If you wish to run distributed evaluation, send your validation dataloader to [`~Accelerator.prepare`] as well. There are some nuances to distributed validation, check the [Distributed evaluation](#distributed-evaluation) section of the guide.
* Any instruction using your training dataloader length (for instance if you want to log the number of total training
steps) should go after the call to [`~Accelerator.prepare`].

Passing `DataLoader` objects to the [`~Accelerator.prepare`] method ensures that your dataloader will be sharded across
all GPUs/TPU cores available so that each one sees a different portion of the training dataset. In other words, if there are 8 processes and a dataset of 64 items, each process will see 8 of these items per iteration. Also, the random states
of all processes will be synchronized at the beginning of each iteration through your dataloader, to make sure the data
is shuffled the same way (if you decided to use `shuffle=True` or any kind of random sampler).

<Tip>

The actual batch size for your training will be the number of devices used multiplied by the batch size you set in
your script. For instance, training on 4 GPUs with a batch size of 16 set when creating the training dataloader will
train at an actual batch size of 64 (4 * 16). If you want the batch size to remain the same regardless of how many GPUs
the script is run on, you can use the option `split_batches=True` when creating and initializing the [`Accelerator`].

Your training dataloader may change length when going through this method: if you run on X GPUs, it will have its
length divided by X (since your actual batch size will be multiplied by X), unless you set `split_batches=True`.

</Tip>

Read the [Accelerate internal mechanisms](concept_guides/internal_mechanism) guide to learn more details about how Accelerate adapts your code.

And you're all set! With all these changes, your script will run on your local machine as well as on multiple GPUs or a
TPU! You can either use your favorite tool to launch the distributed training, or you can use the 🤗 Accelerate
launcher.

### Distributed evaluation

You can perform regular evaluation in your training script if you leave your validation dataloader out of the
[`~Accelerator.prepare`] method. In this case, you will need to put the input data on the
`accelerator.device` manually.

To perform distributed evaluation, pass your validation dataloader to the [`~Accelerator.prepare`] method:
```python
validation_dataloader = accelerator.prepare(validation_dataloader)
```
Each device in your distributed setup only receives a part of the evaluation data, which means you should group your predictions together with the [`~Accelerator.gather_for_metrics`] method. This method requires all tensors to be the same size on each process, so if your tensors have different sizes on each process (for instance when dynamically padding to the maximum length in a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad your tensor to the largest size across processes. Note that the tensors need to be 1D and that we concatenate the tensors along the first dimension.
```python
for inputs, targets in validation_dataloader:
    predictions = model(inputs)
    # Gather all predictions and targets
    all_predictions, all_targets = accelerator.gather_for_metrics((predictions, targets))
    # Example of use with a *Datasets.Metric*
    metric.add_batch(all_predictions, all_targets)
```
For more complex cases (e.g. 2D tensors, don't want to concatenate tensors, dict of 3D tensors), you can pass `use_gather_object=True` in `gather_for_metrics`. This will return the list of objects after gathering. Note that using it with GPU tensors is not well supported and inefficient.

> [!TIP]
> Data at the end of a dataset may be duplicated so the batch can be equally divided among all workers. The [`~Accelerator.gather_for_metrics`] method automatically removes the duplicated data to calculate a more accurate metric. If for some reason you don't wish to have this done automatically, [`~Accelerator.gather`] can be used instead to gather the data across all processes and remove the duplicates manually.

<Tip warning={true}>

Similar to the training dataloader, passing your validation dataloader through
[`~Accelerator.prepare`] may change it: if you run on X GPUs, it will have its length divided by X
(since your actual batch size will be multiplied by X), unless you set `split_batches=True`.

</Tip>

### Launch your distributed script

You can use the regular commands to launch your distributed training (like `torch.distributed.run` for
PyTorch) - they are fully compatible with 🤗 Accelerate.

Alternatively, 🤗 Accelerate provides a CLI tool that unifies all launchers, so you only have to remember one command. \
To use it, run a quick configuration setup first on your machine and answer the questions:

```bash
accelerate config
```

At the end of the setup, a *default_config.yaml* file will be saved in your cache folder for 🤗 Accelerate. That cache
folder is (with decreasing order of priority):

- The content of your environment variable `HF_HOME` suffixed with *accelerate*.
- If it does not exist, the content of your environment variable `XDG_CACHE_HOME` suffixed with
*huggingface/accelerate*.
- If this does not exist either, the folder *~/.cache/huggingface/accelerate*.

By specifying the `--config_file` flag you can specify an alternative location of the configuration file.

Once the configuration setup is complete, you can test your setup by running:

```bash
accelerate test
```

This will launch a short script that will test the distributed environment. If it runs without issues, you are ready for
the next step!

Note that if you specified a location for the config file in the previous step, you need to pass it here as well:

```bash
accelerate test --config_file path_to_config.yaml
```

Now that this is done, you can run your script with the following command:

```bash
accelerate launch path_to_script.py --args_for_the_script
```

If you stored the config file in a non-default location, you can indicate it to the launcher like this:

```bash
accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script
```

You can override any of the arguments determined by your config file. To see the complete list of parameters that you
can pass in, run `accelerate launch -h`. (And further niche argument help by passing in partial commands, such as `accelerate launch --multi_gpu -h` for all `multi_gpu` args)

## Big Model Inference

Accelerate's Big Model Inference has two main features, [`~accelerate.init_empty_weights`] and [`~accelerate.load_checkpoint_and_dispatch`], to load large models for inference that typically don't fit into memory.

> [!TIP]
> Take a look at the [Handling big models for inference](concept_guides/big_model_inference) guide for a better understanding of how Big Model Inference works under the hood.

### Empty weights initialization

The [`~accelerate.init_empty_weights`] context manager initializes models of any size by creating a *model skeleton* and moving and placing parameters each time they're created to PyTorch's [**meta**](https://pytorch.org/docs/main/meta.html) device. This way, not all weights are immediately loaded and only a small part of the model is loaded into memory at a time.

For example, loading an empty [Mixtral-8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) model takes significantly less memory than fully loading the models and weights on the CPU.

```py
from accelerate import init_empty_weights
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)
```

### Load and dispatch weights

The [`~accelerate.load_checkpoint_and_dispatch`] function loads full or sharded checkpoints into the empty model, and automatically distributes weights across all available devices.

The `device_map` parameter determines where to place each model layer, and specifying `"auto"` places them on the GPU first, then the CPU, and finally the hard drive as memory-mapped tensors if there's still not enough memory. Use the `no_split_module_classes` parameter to indicate which modules shouldn't be split across devices (typically those with a residual connection).

```py
from accelerate import load_checkpoint_and_dispatch

model = load_checkpoint_and_dispatch(
    model, checkpoint="mistralai/Mixtral-8x7B-Instruct-v0.1", device_map="auto", no_split_module_classes=['Block']
)
```

## Next steps

Now that you've been introduced to the main Accelerate features, your next steps could include:

* Check out the [tutorials](basic_tutorials/overview) for a gentle walkthrough of Accelerate. This is especially useful if you're new to distributed training and the library.
* Dive into the [guides](usage_guides/explore) to see how to use Accelerate for specific use-cases.
* Deepen your conceptual understanding of how Accelerate works internally by reading the [concept guides](concept_guides/internal_mechanism).
* Look up classes and commands in the [API reference](package_reference/accelerator) to see what parameters and options are available.
## Common modifications of the base case
The previous sections cover the minimal essential steps to move a training script into a distributed setup with 🤗 Accelerate.
Here we describe common modifications/deviations from the base case scenario and the adjustments you need to make to accommodate them.
### Launch distributed training from a notebook
Accelerate has a [`notebook_launcher`] to help you launch your training function from a
notebook. This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training on several GPUs and machines
(if the machine on which you are running your notebook has them).
Define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a
cell with the following code:
```python
from accelerate import notebook_launcher
notebook_launcher(training_function)
```
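For example, to use more than one GPU on the machine you can pass `num_processes` (a sketch; two processes are assumed here):

```python
from accelerate import notebook_launcher

# Launch the training function on 2 processes (one per GPU)
notebook_launcher(training_function, num_processes=2)
```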
<Tip warning={true}>
Your [`Accelerator`] object should only be defined inside the training function. This is because the
initialization should be done inside the launcher only.
</Tip>
Check out the [Notebook Launcher tutorial](basic_tutorials/notebook) for more information about training on TPUs.
### Specifics of training on TPU
If you want to launch your script on TPUs, there are a few caveats you should be aware of. Behind the scenes, the TPUs
will create a graph of all the operations happening in your training step (forward pass, backward pass and optimizer
step). This is why your first step of training will always be very long as building and compiling this graph for
optimizations takes some time.
The good news is that this compilation will be cached so the second step and all the following will be much faster. The
bad news is that it only applies if all of your steps do exactly the same operations, which implies:
- having all tensors of the same length in all your batches
- having static code (i.e., not a for loop of length that could change from step to step)
Having any of the things above change between two steps will trigger a new compilation which will, once again, take a
lot of time. In practice, that means you must take special care to have all your tensors in your inputs of the same
shape (so no dynamic padding for instance if you are in an NLP problem) and should not use layers with for loops that
have different lengths depending on the inputs (such as an LSTM) or the training will be excruciatingly slow.
To introduce special behavior in your script for TPUs you can check the `distributed_type` of your
`accelerator`:
```python docstyle-ignore
from accelerate import DistributedType

if accelerator.distributed_type == DistributedType.TPU:
    # do something of static shape
else:
    # go crazy and be dynamic
```
The [NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py) shows an example in a
situation with dynamic padding.
One last thing to pay close attention to: if your model has tied weights (such as language models which tie the weights
of the embedding matrix with the weights of the decoder), moving this model to the TPU (either yourself or after you
passed your model to [`~Accelerator.prepare`]) will break the tying. You will need to retie the weights
after. You can find an example of this in the [run_clm_no_trainer](https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py) script in
the Transformers repository.
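For a 🤗 Transformers model, re-tying usually amounts to calling `tie_weights` again once the model is on the TPU; a minimal sketch, assuming `model` is a Transformers model with tied embeddings:

```python
model = accelerator.prepare(model)
# Moving the model to the TPU disconnected the tied embedding/decoder weights, so restore the ties
model.tie_weights()
```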
Check out the [TPU tutorial](concept_guides/training_tpu) for more information about training on TPUs.
### Execute a statement only on one process
Some of your instructions only need to run for one process on a given server: for instance a data download or a log
statement. To do this, wrap the statement in a test like this:
```python docstyle-ignore
if accelerator.is_local_main_process:
    # Is executed once per server
```
Another example is progress bars: to avoid having multiple progress bars in your output, you should only display one on
the local main process:
```python
from tqdm.auto import tqdm
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
```
*Local* means per machine: if you are running your training on two servers with several GPUs each, the instruction will be executed once on each of those servers. If you need to execute something only once across all processes (and not once per machine), for instance uploading the final model to the 🤗 model hub, wrap it in a test like this:
```python docstyle-ignore
if accelerator.is_main_process:
    # Is executed once only
```
For print statements that you only want executed once per machine, you can simply replace the `print` function with
`accelerator.print`.
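For example (the printed names and values are placeholders):

```python
# Printed once per machine, regardless of how many processes run on it
accelerator.print(f"epoch {epoch}: average loss = {avg_loss:.4f}")
```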
### Defer execution on multiple GPUs
When you run your usual script, instructions are executed in order. Using 🤗 Accelerate to deploy your script on several
GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be
faster than others.
You might need to wait for all processes to have reached a certain point before executing a given instruction. For
instance, you shouldn't save a model before making sure every process is done with training. To do this, add the
following line in your code:
```python
accelerator.wait_for_everyone()
```
This instruction will block all the processes that arrive first until all the other processes have reached that
point (if you run your script on just one GPU or CPU, this won't do anything).
### Save/load a model in a distributed setup
Saving the model you trained might need a bit of adjustment: first you should wait for all processes to reach that
point in the script as shown above, and then, you should unwrap your model before saving it. This is because when going
through the [`~Accelerator.prepare`] method, your model may have been placed inside a bigger model,
which deals with the distributed training. This in turn means that saving your model state dictionary without taking
any precaution will take that potential extra layer into account, and you will end up with weights you can't load back
in your base model. The [`~Accelerator.save_model`] method will help you to achieve that. It will unwrap your model and save
the model state dictionary.
Here is an example:
```python
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory)
```
The [`~Accelerator.save_model`] method can also save a model into sharded checkpoints or with safetensors format:
```python
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
```
If your script contains logic to load a checkpoint, we also recommend you load your weights in the unwrapped model
(this is only useful if you use the load function after making your model go through
[`~Accelerator.prepare`]). Here is an example:
```python
import os

import torch

unwrapped_model = accelerator.unwrap_model(model)
path_to_checkpoint = os.path.join(save_directory, "pytorch_model.bin")
unwrapped_model.load_state_dict(torch.load(path_to_checkpoint))
```
Note that since all the model parameters are references to tensors, this will load your weights inside `model`.
If you want to load a sharded checkpoint or a checkpoint with safetensors format into the model with a specific `device`,
we recommend loading it with the [`~utils.load_checkpoint_in_model`] function. Here's an example:
```python
from accelerate.utils import load_checkpoint_in_model

load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"": device})
```
### Save/load entire states
When training your model, you may want to save the current state of the model, optimizer, random generators, and potentially
learning rate schedulers to be restored in the _same script_.
You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so.
To further customize where and how states are saved through [`~Accelerator.save_state`], the [`~utils.ProjectConfiguration`] class can be used. For example,
if `automatic_checkpoint_naming` is enabled, each saved checkpoint will be located at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.
If you have registered any other stateful items to be stored through [`~Accelerator.register_for_checkpointing`] they will also be saved and/or loaded.
<Tip>
Every object passed to [`~Accelerator.register_for_checkpointing`] must have a `load_state_dict` and a `state_dict` function to be stored.
</Tip>
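A minimal sketch of the full flow (the tiny model, directory names, and the extra scheduler object are illustrative):

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator(project_dir="my_project")
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

model, optimizer = accelerator.prepare(model, optimizer)

# Any extra object exposing `state_dict`/`load_state_dict` can be tracked as well
accelerator.register_for_checkpointing(scheduler)

accelerator.save_state("my_project/checkpoint_0")  # saves model, optimizer, RNG states and registered objects
accelerator.load_state("my_project/checkpoint_0")  # restores them, in the same script
```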
### Use gradient clipping
If you are using gradient clipping in your script, you should replace the calls to
`torch.nn.utils.clip_grad_norm_` or `torch.nn.utils.clip_grad_value_` with [`~Accelerator.clip_grad_norm_`]
and [`~Accelerator.clip_grad_value_`] respectively.
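For example, a norm-clipping step in the training loop might look like this (the `max_norm` value is illustrative):

```python
accelerator.backward(loss)
# Replaces torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
optimizer.zero_grad()
```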
### Train with mixed precision
If you are running your training in Mixed Precision with 🤗 Accelerate, you will get the best result with your loss being
computed inside your model (like in Transformer models for instance). Every computation outside of the model will be
executed in full precision (which is generally what you want for loss computation, especially if it involves a
softmax). However, you might want to put your loss computation inside the [`~Accelerator.autocast`] context manager:
```python
with accelerator.autocast():
    loss = complex_loss_function(outputs, target)
```
Another caveat with mixed precision training is that the optimizer will skip a few gradient updates at the beginning and
sometimes during training: because of the dynamic loss scaling strategy, there are points during training where the
gradients have overflowed, and the loss scaling factor is reduced to avoid this happening again at the next step.
This means that you may update your learning rate scheduler when there was no update, which is fine in general, but may
have an impact when you have very little training data, or if the first learning rate values of your scheduler are very
important. In this case, you can skip the learning rate scheduler updates when the optimizer step was not done like
this:
```python
if not accelerator.optimizer_step_was_skipped:
    lr_scheduler.step()
```
### Use gradient accumulation
To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a `gradient_accumulation_steps`.
This will also automatically ensure the gradients are synced or unsynced when on multi-device training, check if the step should
actually be performed, and auto-scale the loss:
```python
accelerator = Accelerator(gradient_accumulation_steps=2)
model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader)

for input, label in training_dataloader:
    with accelerator.accumulate(model):
        predictions = model(input)
        loss = loss_function(predictions, label)
        accelerator.backward(loss)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
```

View File

@ -52,7 +52,7 @@ will attempt to fill all the space in your GPU(s), then loading them to the CPU,
<Tip>
For more details on designing your own device map, see this section of the [concept guide](../concept_guides/big_model_inference#designing-a-device-map)
For more details on desigining your own device map, see this section of the [concept guide](../concept_guide/big_model_inference#designing-a-device-map)
</Tip>
@ -90,7 +90,7 @@ What will happen now is each time the input gets passed through a layer, it will
<Tip>
Multiple GPUs can be utilized, however this is considered "model parallelism" and as a result only one GPU will be active at a given moment, waiting for the prior one to send it the output. You should launch your script normally with `python`
Multiple GPUs can be utilized, however this is considered "model parallism" and as a result only one GPU will be active at a given moment, waiting for the prior one to send it the output. You should launch your script normally with `python`
and not need `torchrun`, `accelerate launch`, etc.
</Tip>

View File

@ -9,7 +9,7 @@ Unless required by applicable law or agreed to in writing, software distributed
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
@ -23,7 +23,7 @@ rendered properly in your Markdown viewer.
4. Custom mixed precision training handling
5. A range of fast CUDA-extension-based optimizers
6. ZeRO-Offload to CPU and Disk/NVMe
7. Hierarchical partitioning of model parameters (ZeRO++)
7. Heirarchical partitioning of model parameters (ZeRO++)
ZeRO-Offload has its own dedicated paper: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840). And NVMe-support is described in the paper [ZeRO-Infinity: Breaking the GPU
Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857).
@ -61,7 +61,7 @@ Below is a short description of Data Parallelism using ZeRO - Zero Redundancy Op
e. **Param Offload**: Offloads the model parameters to CPU/Disk building on top of ZERO Stage 3
f. **Hierarchical Partitioning**: Enables efficient multi-node training with data-parallel training across nodes and ZeRO-3 sharding within a node, built on top of ZeRO Stage 3.
f. **Heirarchical Paritioning**: Enables efficient multi-node training with data-parallel training across nodes and ZeRO-3 sharding within a node, built on top of ZeRO Stage 3.
<u>Note</u>: With respect to Disk Offload, the disk should be an NVME for decent speed but it technically works on any Disk
@ -157,18 +157,10 @@ Currently, `Accelerate` supports following config through the CLI:
`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them.
`gradient_clipping`: Enable gradient clipping with value.
`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2.
`offload_optimizer_nvme_path`: Decides Nvme Path to offload optimizer states. If unspecified, will default to 'none'.
`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.
`offload_param_nvme_path`: Decides Nvme Path to offload parameters. If unspecified, will default to 'none'.
`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3.
`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.
`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training.
`deepspeed_moe_layer_cls_names`: Comma-separated list of transformer Mixture-of-Experts (MoE) layer class names (case-sensitive) to wrap, e.g., `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ...
`deepspeed_hostfile`: DeepSpeed hostfile for configuring multi-node compute resources.
`deepspeed_exclusion_filter`: DeepSpeed exclusion filter string when using multi-node setup.
`deepspeed_inclusion_filter`: DeepSpeed inclusion filter string when using multi-node setup.
`deepspeed_multinode_launcher`: DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.
`deepspeed_config_file`: path to the DeepSpeed config file in `json` format. See the next section for more details on this.
```
To be able to tweak more options, you will need to use a DeepSpeed config file.
@ -361,7 +353,7 @@ accelerate launch examples/by_feature/deepspeed_with_config_support.py \
```
**ZeRO++ Config Example**
You can use the features of ZeRO++ by using the appropriate config parameters. Note that ZeRO++ is an extension for ZeRO Stage 3. Here is how the config file can be modified, from [DeepSpeed's ZeRO++ tutorial](https://www.deepspeed.ai/tutorials/zeropp/):
You can use the the features of ZeRO++ by using the appropriate config parameters. Note that ZeRO++ is an extension for ZeRO Stage 3. Here is how the config file can be modified, from [DeepSpeed's ZeRO++ tutorial](https://www.deepspeed.ai/tutorials/zeropp/):
```json
{
@ -379,7 +371,7 @@ You can use the features of ZeRO++ by using the appropriate config parameters. N
}
```
For hierarchical partitioning, the partition size `zero_hpz_partition_size` should ideally be set to the number of GPUs per node. (For example, the above config file assumes 8 GPUs per node)
For heirarchical partitioning, the partition size `zero_hpz_partition_size` should ideally be set to the number of GPUs per node. (For example, the above config file assumes 8 GPUs per node)
**Important code changes when using DeepSpeed Config File**
@ -391,7 +383,7 @@ We will look at the changes needed in the code when using these.
In this situation, those will be used and the user has to use `accelerate.utils.DummyOptim` and `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom optimizers and schedulers in their code.
Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
```python
# Creates Dummy Optimizer if `optimizer` was specified in the config file else creates Adam Optimizer
# Creates Dummy Optimizer if `optimizer` was spcified in the config file else creates Adam Optimizer
optimizer_cls = (
torch.optim.AdamW
if accelerator.state.deepspeed_plugin is None
@ -400,7 +392,7 @@ We will look at the changes needed in the code when using these.
)
optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate)
# Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler
# Creates Dummy Scheduler if `scheduler` was spcified in the config file else creates `args.lr_scheduler_type` Scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
@ -527,7 +519,7 @@ ValueError: When using `deepspeed_config_file`, the following accelerate config
['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device',
'zero3_save_16bit_model', 'mixed_precision'].
Please specify them appropriately in the DeepSpeed config file.
If you are using an accelerate config file, remove other config variables mentioned in the above specified list.
If you are using an accelerate config file, remove others config variables mentioned in the above specified list.
The easiest method is to create a new config following the questionnaire via `accelerate config`.
It will only ask for the necessary config variables when using `deepspeed_config_file`.
```
@ -664,7 +656,7 @@ ZeRO Stage-3 has 2 options:
Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
```python
success = model.save_checkpoint(PATH, ckpt_id, checkpoint_state_dict)
status_msg = f"checkpointing: PATH={PATH}, ckpt_id={ckpt_id}"
status_msg = "checkpointing: PATH={}, ckpt_id={}".format(PATH, ckpt_id)
if success:
logging.info(f"Success {status_msg}")
else:
@ -729,10 +721,3 @@ Papers:
Finally, please, remember that 🤗 `Accelerate` only integrates DeepSpeed, therefore if you
have any problems or questions with regards to DeepSpeed usage, please, file an issue with [DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues).
<Tip>
For those interested in the similarities and differences between FSDP and DeepSpeed, please check out the [concept guide here](../concept_guides/fsdp_and_deepspeed.md)!
</Tip>

View File

@ -15,18 +15,12 @@ rendered properly in your Markdown viewer.
# Distributed Inference with 🤗 Accelerate
Distributed inference can fall into three brackets:
Distributed inference is a common use case, especially with natural language processing (NLP) models. Users often want to
send a number of different prompts, each to a different GPU, and then get the results back. This also has other cases
outside of just NLP, however for this tutorial we will focus on just this idea of each GPU receiving a different prompt,
and then returning the results.
1. Loading an entire model onto each GPU and sending chunks of a batch through each GPU's model copy at a time
2. Loading parts of a model onto each GPU and processing a single input at one time
3. Loading parts of a model onto each GPU and using what is called scheduled Pipeline Parallelism to combine the two prior techniques.
We're going to go through the first and the last bracket, showcasing how to do each as they are more realistic scenarios.
## Sending chunks of a batch automatically to each loaded model
This is the most memory-intensive solution, as it requires each GPU to keep a full copy of the model in memory at a given time.
## The Problem
Normally when doing this, users send the model to a specific device to load it from the CPU, and then move each prompt to a different device.
@ -61,6 +55,7 @@ a simple way to manage this. (To learn more, check out the relevant section in t
Can it manage it? Yes. Does it add unneeded extra code however: also yes.
## The Solution
With 🤗 Accelerate, we can simplify this process by using the [`Accelerator.split_between_processes`] context manager (which also exists in `PartialState` and `AcceleratorState`).
This function will automatically split whatever data you pass to it (be it a prompt, a set of tensors, a dictionary of the prior data, etc.) across all the processes (with a potential
@ -139,99 +134,3 @@ with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"],
On the first GPU, the prompts will be `["a dog", "a cat"]`, and on the second GPU it will be `["a chicken", "a chicken"]`.
Make sure to drop the final sample, as it will be a duplicate of the previous one.
You can find more complex examples [here](https://github.com/huggingface/accelerate/tree/main/examples/inference/distributed) such as how to use it with LLMs.
## Memory-efficient pipeline parallelism (experimental)
This next part will discuss using *pipeline parallelism*. This is an **experimental** API utilizing the [PiPPy library by PyTorch](https://github.com/pytorch/PiPPy/) as a native solution.
The general idea with pipeline parallelism is: say you have 4 GPUs and a model big enough it can be *split* on four GPUs using `device_map="auto"`. With this method you can send in 4 inputs at a time (for example here, any amount works) and each model chunk will work on an input, then receive the next input once the prior chunk finished, making it *much* more efficient **and faster** than the method described earlier. Here's a visual taken from the PyTorch repository:
![PiPPy example](https://camo.githubusercontent.com/681d7f415d6142face9dd1b837bdb2e340e5e01a58c3a4b119dea6c0d99e2ce0/68747470733a2f2f692e696d6775722e636f6d2f657955633934372e706e67)
To illustrate how you can use this with Accelerate, we have created an [example zoo](https://github.com/huggingface/accelerate/tree/main/examples/inference) showcasing a number of different models and situations. In this tutorial, we'll show this method for GPT2 across two GPUs.
Before you proceed, please make sure you have the latest pippy installed by running the following:
```bash
pip install torchpippy
```
We require at least version 0.2.0. To confirm that you have the correct version, run `pip show torchpippy`.
Start by creating the model on the CPU:
```{python}
from transformers import GPT2ForSequenceClassification, GPT2Config
config = GPT2Config()
model = GPT2ForSequenceClassification(config)
model.eval()
```
Next you'll need to create some example inputs to use. These help PiPPy trace the model.
<Tip warning={true}>
How you construct this example will determine the relative batch size that is used/passed
through the model at a given time, so make sure to remember how many items there are!
</Tip>
```{python}
import torch

input = torch.randint(
    low=0,
    high=config.vocab_size,
    size=(2, 1024),  # bs x seq_len
    device="cpu",
    dtype=torch.int64,
    requires_grad=False,
)
```
Next we need to actually perform the tracing and get the model ready. To do so, use the [`inference.prepare_pippy`] function and it will fully wrap the model for pipeline parallelism automatically:
```{python}
from accelerate.inference import prepare_pippy
example_inputs = {"input_ids": input}
model = prepare_pippy(model, example_args=(input,))
```
<Tip>
There are a variety of parameters you can pass through to `prepare_pippy`:
* `split_points` lets you determine what layers to split the model at. By default we use wherever `device_map="auto"` declares, such as `fc` or `conv1`.
* `num_chunks` determines how the batch will be split and sent to the model itself (so `num_chunks=1` with four split points/four GPUs will have a naive MP where a single input gets passed between the four layer split points)
</Tip>
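For instance, a hedged sketch that splits the batch into four micro-batches (only `num_chunks` is shown here; the other arguments mirror the call above):

```{python}
model = prepare_pippy(model, example_args=(input,), num_chunks=4)
```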
From here, all that's left is to actually perform the distributed inference!
<Tip warning={true}>
When passing inputs, we highly recommend passing them in as a tuple of arguments. Using `kwargs` is supported; however, this approach is experimental.
</Tip>
```{python}
args = some_more_arguments
with torch.no_grad():
    output = model(*args)
```
When finished all the data will be on the last process only:
```{python}
from accelerate import PartialState

if PartialState().is_last_process:
    print(output)
```
<Tip>
If you pass in `gather_output=True` to [`inference.prepare_pippy`], the output will be sent
across to all the GPUs afterwards without needing the `is_last_process` check. This is
`False` by default as it incurs a communication call.
</Tip>
And that's it! To explore more, please check out the inference examples in the [Accelerate repo](https://github.com/huggingface/accelerate/tree/main/examples/inference/pippy) and our [documentation](../package_reference/inference) as we work on improving this integration.

View File

@ -73,7 +73,7 @@ accelerate launch examples/nlp_example.py
Currently, `Accelerate` supports the following config through the CLI:
`fsdp_sharding_strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has full copy). For more information, please refer the official [PyTorch docs](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.ShardingStrategy).
`fsdp_sharding_strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has full copy)
`fsdp_offload_params` : Decides Whether to offload parameters and gradients to CPU
@ -85,13 +85,13 @@ Currently, `Accelerate` supports the following config through the CLI:
`fsdp_backward_prefetch_policy`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH
`fsdp_forward_prefetch`: if True, then FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass. Should only be used for static-graph models since the prefetching follows the first iterations execution order. i.e., if the sub-modules' order changes dynamically during the model's execution do not enable this feature.
`fsdp_forward_prefetch`: if True, then FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass. Should only be used for static-graph models since the prefetching follows the first iterations execution order. i.e., if the sub-modules' order changes dynamically during the model's executation do not enable this feature.
`fsdp_state_dict_type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT
`fsdp_use_orig_params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. This setting is useful in cases such as parameter-efficient fine-tuning as discussed in [this post](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). This option also allows one to have multiple optimizer param groups. This should be `True` when creating an optimizer before preparing/wrapping the model with FSDP.
`fsdp_use_orig_params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres. This setting is useful in cases such as parameter-efficient fine-tuning as discussed in [this post](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). This option also allows one to have multiple optimizer param groups. This should be `True` when creating an optimizer before preparing/wrapping the model with FSDP.
`fsdp_cpu_ram_efficient_loading`: Only applicable for 🤗 Transformers models. If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. This should be set to False if you experience errors when loading the pretrained 🤗 Transformers model via `from_pretrained` method. When this setting is True `fsdp_sync_module_states` also must to be True, otherwise all the processes except the main process would have random weights leading to unexpected behaviour during training. For this to work, make sure the distributed process group is initialized before calling Transformers `from_pretrained` method. When using 🤗 Trainer API, the distributed process group is initialized when you create an instance of `TrainingArguments` class.
`fsdp_cpu_ram_efficient_loading`: Only applicable for 🤗 Transformers models. If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. This should be set to False if you experience errors when loading the pretrained 🤗 Transformers model via `from_pretrained` method. When this setting is True `fsdp_sync_module_states` also must to be True, otherwise all the processes except the main process would have random weights leading to unexpected behaviour during training.
`fsdp_sync_module_states`: If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0.
@ -123,7 +123,7 @@ Below is the code snippet to save using `save_state` utility of accelerate.
accelerator.save_state("ckpt")
```
Inspect the checkpoint folder to see model and optimizer as shards per process:
Inspect the ckeckpoint folder to see model and optimizer as shards per process:
```
ls ckpt
# optimizer_0 pytorch_model_0 random_states_0.pkl random_states_1.pkl scheduler.bin
@ -161,13 +161,6 @@ When using transformers `save_pretrained`, pass `state_dict=accelerator.get_stat
You can then pass `state` into the `save_pretrained` method. There are several modes for `StateDictType` and `FullStateDictConfig` that you can use to control the behavior of `state_dict`. For more information, see the [PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html).
## Mapping between FSDP sharding strategies and DeepSpeed ZeRO Stages
* `FULL_SHARD` maps to the DeepSpeed `ZeRO Stage-3`. Shards optimizer states, gradients and parameters.
* `SHARD_GRAD_OP` maps to the DeepSpeed `ZeRO Stage-2`. Shards optimizer states and gradients.
* `NO_SHARD` maps to `ZeRO Stage-0`. No sharding wherein each GPU has full copy of model, optimizer states and gradients.
* `HYBRID_SHARD` maps to `ZeRO++ Stage-3` wherein `zero_hpz_partition_size=<num_gpus_per_node>`. Here, this will shard optimizer states, gradients and parameters within each node while each node has full copy.
## A few caveats to be aware of
- In case of multiple models, pass the optimizers to the prepare call in the same order as corresponding models else `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour.
@ -175,10 +168,3 @@ You can then pass `state` into the `save_pretrained` method. There are several
For more control, users can leverage the `FullyShardedDataParallelPlugin`. After creating an instance of this class, users can pass it to the Accelerator class instantiation.
For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.
<Tip>
For those interested in the similarities and differences between FSDP and DeepSpeed, please check out the [concept guide here](../concept_guides/fsdp_and_deepspeed.md)!
</Tip>

View File

@ -115,11 +115,8 @@ What is the IP address of the machine that will host the main process? 36.112.23
What is the port you will use to communicate with the main process? 29500
Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: yes
Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:yes
Do you want accelerate to launch mpirun? [yes/NO]: yes
Please enter the path to the hostfile to use with mpirun [~/hostfile]: ~/hostfile
Enter the number of oneCCL worker threads [1]: 1
Do you wish to optimize your script with torch dynamo?[yes/NO]:NO
How many processes should be used for distributed training? [1]:16
How many CPU(s) should be used for distributed training? [1]:16
-----------------------------------------------------------------------------------------------------------------------------------------------------------
Do you wish to use FP16 or BF16 (mixed precision)?
bf16
@ -138,9 +135,6 @@ main_process_ip: 36.112.23.24
main_process_port: 29500
main_training_function: main
mixed_precision: bf16
mpirun_config:
mpirun_ccl: '1'
mpirun_hostfile: /home/user/hostfile
num_machines: 4
num_processes: 16
rdzv_backend: static
@ -154,7 +148,6 @@ use_cpu: true
Set following env and using intel MPI to launch the training
In node0, you need to create a configuration file which contains the IP addresses of each node (for example hostfile) and pass that configuration file path as an argument.
If you selected to have Accelerate launch `mpirun`, ensure that the location of your hostfile matches the path in the config.
```bash
$ cat hostfile
xxx.xxx.xxx.xxx #node0 ip
@ -162,18 +155,7 @@ xxx.xxx.xxx.xxx #node1 ip
xxx.xxx.xxx.xxx #node2 ip
xxx.xxx.xxx.xxx #node3 ip
```
When Accelerate is launching `mpirun`, source the oneCCL bindings setvars.sh to get your Intel MPI environment, and then
run your script using `accelerate launch`. Note that the python script and environment needs to exist on all of the
machines being used for multi-CPU training.
```bash
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
accelerate launch examples/nlp_example.py
```
Otherwise, if you selected not to have Accelerate launch `mpirun`, run the following command in node0 and **16DDP** will
be enabled in node0,node1,node2,node3 with BF16 mixed precision. When using this method, the python script, python
environment, and accelerate config file need to be present on all of the machines used for multi-CPU training.
Now, run the following command in node0 and **16DDP** will be enabled in node0,node1,node2,node3 with BF16 mixed precision:
```bash
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
source $oneccl_bindings_for_pytorch_path/env/setvars.sh

View File

@ -88,7 +88,7 @@ achieved by adding one `with LocalSGD` statement and one call `local_sgd.step()`
+ local_sgd.step()
```
Under the hood, the Local SGD code **disables** automatic gradient synchronization (but accumulation still works as expected!). Instead it averages model parameters every `local_sgd_steps` steps (as well as at the end of the training loop).
Under the hood, the Local SGD code **disables** automatic gradient synchornization (but accumulation still works as expected!). Instead it averages model parameters every `local_sgd_steps` steps (as well as in the end of the training loop).
## Limitations

View File

@ -19,7 +19,7 @@ rendered properly in your Markdown viewer.
## What training on FP8 means
To explore more of the nitty-gritty in training in FP8 with PyTorch and 🤗 Accelerate, check out the [concept_guide](../concept_guides/low_precision_training.md) on why this can be difficult. But essentially rather than training in BF16, some (or all) aspects of training a model can be performed using 8 bits instead of 16. The challenge is doing so without degrading final performance.
To explore more of the nitty-gritty in traninig in FP8 with PyTorch and 🤗 Accelerate, check out the [concept_guide](../concept_guides/low_precision_training.md) on why this can be difficult. But essentially rather than training in BF16, some (or all) aspects of training a model can be performed using 8 bits instead of 16. The challenge is doing so without degrading final performance.
This is only enabled on specific NVIDIA hardware, namely:
@ -57,7 +57,7 @@ Of the two, `MS-AMP` is traditionally the easier one to configure as there is on
Currently two levels of optimization are supported in the 🤗 Accelerate integration, `"O1"` and `"O2"` (using the letter 'o', not zero).
* `"O1"` will cast the weight gradients and `all_reduce` communications to happen in 8-bit, while the rest are done in 16 bit. This reduces the general GPU memory usage and speeds up communication bandwidths.
* `"O2"` will also cast first-order optimizer states into 8 bit, while the second order states are in FP16. (Currently just the `Adam` optimizer is supported). This tries its best to minimize final accuracy degradation and will save the highest potential memory.
* `"O2"` will also cast first-order optimizer states into 8 bit, while the second order states are in FP16. (Currently just the `Adam` optimizer is supported). This tries it's best to minimize final accuracy degredation and will save the highest potential memory.
To specify an optimization level, pass it to the `FP8KwargsHandler` by setting the `optimization_level` argument:
@ -70,7 +70,7 @@ accelerator = Accelerator(mixed_precision="fp8", kwarg_handlers=kwargs)
## Configuring TransformersEngine
TransformersEngine has much more available for customizing how and what FP8 calculations are performed. A full list of supported arguments and what they mean are available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html), however they are restated as part of [`FP8KwargsHandler`]'s docstring for your convenience.
TransformersEngine has much more available for customizing how and what FP8 calculations are performed. A full list of supported arguments and what they mean are available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html), however they are restated as part of [`FP8KwargsHandler`]'s docstring for your convience.
🤗 Accelerate tries to set sensible defaults, but exploring and tweaking the various parameters yourself can lead to better performance potentially.
@ -83,10 +83,10 @@ kwargs = [FP8RecipeKwargs(backend="te", ...)]
accelerator = Accelerator(mixed_precision="fp8", kwarg_handlers=kwargs)
```
## Further Reading
## Futher Reading
To learn more about training in FP8 please check out the following resources:
* [Our concept guide](../concept_guides/low_precision_training.md) detailing into more about both TransformersEngine and MS-AMP
* [The `transformers-engine` documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html)
* [The `MS-AMP` documentation](https://azure.github.io/MS-AMP/docs/)
* [The `MS-AMP` documentation](https://azure.github.io/MS-AMP/docs/)

View File

@ -9,7 +9,7 @@ Unless required by applicable law or agreed to in writing, software distributed
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
@ -113,7 +113,7 @@ pip install git+https://github.com/huggingface/Megatron-LM.git
## Accelerate Megatron-LM Plugin
Important features are directly supported via the `accelerate config` command.
An example of the corresponding questions for using Megatron-LM features is shown below:
An example of thr corresponding questions for using Megatron-LM features is shown below:
```bash
:~$ accelerate config --config_file "megatron_gpt_config.yaml"
@ -542,7 +542,7 @@ megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args)
This covers Decoder only, Encode only and Encoder-Decoder model classes.
2. Only loss is returned from model forward pass as
there is quite complex interplay of pipeline, tensor and data parallelism behind the scenes.
there is quite complex interplay of pipeline, tensor and data parallelsim behind the scenes.
The `model(**batch_data)` call return loss(es) averaged across the data parallel ranks.
This is fine for most cases wherein pre-training jobs are run using Megatron-LM features and
you can easily compute the `perplexity` using the loss.
@ -580,4 +580,4 @@ b. Megatron-LM [GPTModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatr
c. Megatron-LM [T5Model](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/t5_model.py) :
🤗 transformers models with `t5` in config's model type, e.g.,
[T5](https://huggingface.co/docs/transformers/model_doc/t5) and
[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)
[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)

View File

@ -51,7 +51,7 @@ Below are a few gradio demos related to what was described above. The first is t
></iframe>
</div>
A community member has taken the idea and expanded it further, allowing you to filter models directly and see if you can run a particular LLM given GPU constraints and LoRA configurations. To play with it, see [here](https://huggingface.co/spaces/Vokturz/can-it-run-llm) for more details.
A community member has taken the idea and expended it further, allowing you to filter models directly and see if you can run a particular LLM given GPU constraints and LoRA configurations. To play with it, see [here](https://huggingface.co/spaces/Vokturz/can-it-run-llm) for more details.
## The Command
@ -134,4 +134,4 @@ This calculator will tell you how much memory is needed to purely load the model
This calculation is accurate within a few % of the actual value, so it is a very good view of just how much memory it will take. For instance loading `bert-base-cased` actually takes `413.68 MB` when loaded on CUDA in full precision, and the calculator estimates `413.18 MB`.
When performing inference you can expect to add up to an additional 20% as found by [EleutherAI](https://blog.eleuther.ai/transformer-math/). We'll be conducting research into finding a more accurate estimate to these values, and will update
this calculator once done.
this calculator once done.

View File

@ -198,7 +198,7 @@ achieve the same outcome with:
```python
wandb_tracker = accelerator.get_tracker("wandb", unwrap=True)
if accelerator.is_main_process:
with accelerator.on_main_process:
wandb_tracker.log_artifact(some_artifact_to_log)
```

View File

@ -28,7 +28,6 @@ pip install datasets evaluate transformers
The same script can be run in any of the following configurations:
- single CPU or single GPU
- multi CPUs
- multi GPUs (using PyTorch distributed mode)
- (multi) TPUs
- fp16 (mixed-precision) or fp32 (normal precision)
@ -59,18 +58,6 @@ To run it in each of these various modes, use the following commands:
* from any server with Accelerate launcher
```bash
accelerate launch --mixed_precision fp16 ./nlp_example.py
- multi CPUs (requires Open MPI, Intel MPI, or MVAPICH)
* With Accelerate config and launcher, execute the following from node 0:
```bash
accelerate config # Select to have accelerate launch mpirun
accelerate launch ./nlp_example.py # This will run the script on each server
```
* With Intel MPI:
```bash
export CCL_WORKER_COUNT=1
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
mpirun -f hostfile -n 16 -ppn 4 python ./nlp_example.py
```
- multi GPUs (using PyTorch distributed mode)
* With Accelerate config and launcher
```bash
@ -113,7 +100,6 @@ The [cv_example.py](./cv_example.py) script is a simple example to fine-tune a R
The same script can be run in any of the following configurations:
- single CPU or single GPU
- multi CPUs
- multi GPUs (using PyTorch distributed mode)
- (multi) TPUs
- fp16 (mixed-precision) or fp32 (normal precision)
@ -157,18 +143,6 @@ To run it in each of these various modes, use the following commands:
* from any server with Accelerate launcher
```bash
accelerate launch --mixed_precison fp16 ./cv_example.py --data_dir path_to_data
- multi CPUs (requires Open MPI, Intel MPI, or MVAPICH)
* With Accelerate config and launcher, run the following from node 0:
```bash
accelerate config --config_file config.yaml # Select to have accelerate launch mpirun
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server
```
* With Intel MPI, execute mpirun from node 0:
```bash
export CCL_WORKER_COUNT=1
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
mpirun -f hostfile -n 16 -ppn 4 python ./cv_example.py --data_dir path_to_data
```
- multi GPUs (using PyTorch distributed mode)
* With Accelerate config and launcher
```bash
@ -233,22 +207,6 @@ In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) the only parameter in
In [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many GPUs we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip` which will be the address the master node and the `--main_process_port`.
In both scripts, we run `activateEnviroment.sh` at the beginning. This script should contain the necessary instructions to initialize the environment for execution. Below, we show an example that loads the necessary libraries ([Environment modules](https://github.com/cea-hpc/modules)), activates the Python environment, and sets up various environment variables, most of them to run the scripts in offline mode in case we don't have internet connection from the cluster.
```bash
# activateEnvironment.sh
module purge
module load anaconda3/2020.02 cuda/10.2 cudnn/8.0.5 nccl/2.9.9 arrow/7.0.0 openmpi
source activate /home/nct01/nct01328/pytorch_antoni_local
export HF_HOME=/gpfs/projects/nct01/nct01328/
export HF_LOCAL_HOME=/gpfs/projects/nct01/nct01328/HF_LOCAL
export HF_DATASETS_OFFLINE=1
export TRANSFORMERS_OFFLINE=1
export PYTHONPATH=/home/nct01/nct01328/transformers-in-supercomputers:$PYTHONPATH
export GPUS_PER_NODE=4
```
## Finer Examples
While the first two scripts are extremely barebones when it comes to what you can do with accelerate, more advanced features are documented in two other locations.

View File

@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -85,7 +86,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
@ -153,7 +154,7 @@ def training_function(config, args):
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE

View File

@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -105,7 +106,7 @@ def get_fold_dataloaders(
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
@ -156,7 +157,7 @@ def training_function(config, args):
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
@ -248,7 +249,7 @@ def training_function(config, args):
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(fold_predictions, dim=0))
# We now need to release all our memory and get rid of the current model, optimizer, etc
model, optimizer = accelerator.free_memory(model, optimizer)
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
test_references = torch.cat(test_references, dim=0)

View File

@ -1,4 +1,5 @@
#!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -34,7 +35,7 @@ import datasets
import torch
import transformers
from datasets import load_dataset
from huggingface_hub import HfApi
from huggingface_hub import Repository
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import (
@ -47,6 +48,7 @@ from transformers import (
default_data_collator,
get_scheduler,
)
from transformers.utils import get_full_repo_name
from transformers.utils.versions import require_version
from accelerate import Accelerator, DistributedType
@ -302,13 +304,11 @@ def main():
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
api = HfApi(token=args.hub_token)
# Create repo (repo_name from args or inferred)
repo_name = args.hub_model_id
if repo_name is None:
repo_name = Path(args.output_dir).absolute().name
repo_id = api.create_repo(repo_name, exist_ok=True).repo_id
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
if "step_*" not in gitignore:
@ -512,7 +512,7 @@ def main():
optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate)
# On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
if accelerator.distributed_type == DistributedType.XLA:
if accelerator.distributed_type == DistributedType.TPU:
model.tie_weights()
# Scheduler and math around the number of training steps.
@ -708,11 +708,7 @@ def main():
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
api.upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
)
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
json.dump({"perplexity": perplexity, "eval_loss": eval_loss.item()}, f)

View File

@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -80,7 +81,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
@ -150,7 +151,7 @@ def training_function(config, args):
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE

View File

@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -208,13 +209,13 @@ def training_function(config, args):
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
@ -333,11 +334,13 @@ def training_function(config, args):
accelerator.save_state(output_dir)
# New Code #
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}")
accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}")
accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}")
accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
accelerator.print(
f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + b2mb(tracemalloc.begin)
)
)
# Logging the peak memory usage of the GPU to the tracker
if args.with_tracking:
@ -384,11 +387,11 @@ def training_function(config, args):
accelerator.save_state(output_dir)
# New Code #
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print(f"Memory before entering the eval : {b2mb(tracemalloc.begin)}")
accelerator.print(f"Memory consumed at the end of the eval (end-begin): {tracemalloc.used}")
accelerator.print(f"Peak Memory consumed during the eval (max-begin): {tracemalloc.peaked}")
accelerator.print("Memory before entering the eval : {}".format(b2mb(tracemalloc.begin)))
accelerator.print("Memory consumed at the end of the eval (end-begin): {}".format(tracemalloc.used))
accelerator.print("Peak Memory consumed during the eval (max-begin): {}".format(tracemalloc.peaked))
accelerator.print(
f"Total Peak Memory consumed during the eval (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
"Total Peak Memory consumed during the eval (max): {}".format(tracemalloc.peaked + b2mb(tracemalloc.begin))
)
# Logging the peak memory usage of the GPU to the tracker
if args.with_tracking:

View File

@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -80,7 +81,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
@ -125,7 +126,7 @@ def training_function(config, args):
accelerator = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
)
if accelerator.distributed_type == DistributedType.XLA and gradient_accumulation_steps > 1:
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
)

View File

@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -83,7 +84,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
@ -129,6 +130,8 @@ def training_function(config, args):
accelerator = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
)
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
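For context on the guard added above: LocalSGD wraps the inner training loop and only averages model parameters across workers every few optimizer steps, which is why it is limited to plain CPU/GPU setups. A minimal sketch of how it is used (the `accelerator`, `model`, `optimizer`, and `train_dataloader` objects come from the surrounding script; `local_sgd_steps=8` is an illustrative value):

```python
from accelerate.local_sgd import LocalSGD

with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for batch in train_dataloader:
        with accelerator.accumulate(model):
            outputs = model(**batch)
            accelerator.backward(outputs.loss)
            optimizer.step()
            optimizer.zero_grad()
            # Parameters are synchronized across workers every `local_sgd_steps` calls.
            local_sgd.step()
```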

View File

@ -1,4 +1,5 @@
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -34,7 +35,7 @@ import datasets
import torch
import transformers
from datasets import load_dataset
from huggingface_hub import HfApi
from huggingface_hub import Repository
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import (
@ -47,7 +48,7 @@ from transformers import (
default_data_collator,
get_scheduler,
)
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
from transformers.utils.versions import require_version
from accelerate import Accelerator, DistributedType
@ -277,13 +278,11 @@ def main():
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
api = HfApi(token=args.hub_token)
# Create repo (repo_name from args or inferred)
repo_name = args.hub_model_id
if repo_name is None:
repo_name = Path(args.output_dir).absolute().name
repo_id = api.create_repo(repo_name, exist_ok=True).repo_id
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
if "step_*" not in gitignore:
@ -406,7 +405,7 @@ def main():
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
block_size = 1024
else:
if args.block_size > tokenizer.model_max_length:
logger.warning(
@ -507,7 +506,7 @@ def main():
)
# On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
if accelerator.distributed_type == DistributedType.XLA:
if accelerator.distributed_type == DistributedType.TPU:
model.tie_weights()
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
@ -663,11 +662,8 @@ def main():
)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
api.upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message=f"Training in progress epoch {epoch}",
run_as_future=True,
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
if args.checkpointing_steps == "epoch":
@ -695,11 +691,7 @@ def main():
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
api.upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
)
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
json.dump({"perplexity": perplexity}, f)
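The two sides of this hunk push checkpoints with different huggingface_hub APIs: `HfApi.upload_folder` uploads the output folder directly, while `Repository` clones the repo locally and pushes through git. A minimal sketch of the `HfApi` variant on its own (the repo name, folder path, and token are placeholders):

```python
from huggingface_hub import HfApi

api = HfApi(token="hf_...")  # or rely on the cached login token

# Create the repo once (no error if it already exists), then upload checkpoints as they land.
repo_id = api.create_repo("my-clm-run", exist_ok=True).repo_id
api.upload_folder(
    repo_id=repo_id,
    folder_path="outputs/",
    commit_message="Training in progress",
    run_as_future=True,  # non-blocking, similar to Repository.push_to_hub(blocking=False)
)
```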

View File

@ -86,7 +86,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16

View File

@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -87,7 +88,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
@ -138,7 +139,7 @@ def training_function(config, args):
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE

View File

@ -1,225 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import is_schedulefree_available
if is_schedulefree_available():
import schedulefree
else:
raise ImportError(
"This example requires the `schedulefree` library. Please install it with `pip install schedulefree`"
)
########################################################################
# This is a fully working simple example to use Accelerate and Facebook's
# scheduler-free optimizer: https://github.com/facebookresearch/schedule_free/
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""
Creates a set of `DataLoader`s for the `glue` dataset,
using "bert-base-cased" as the tokenizer.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
"""
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# For Torchxla, it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples,
padding="longest",
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"],
shuffle=False,
collate_fn=collate_fn,
batch_size=EVAL_BATCH_SIZE,
drop_last=(accelerator.mixed_precision == "fp8"),
)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
metric = evaluate.load("glue", "mrpc")
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer with warmup steps
optimizer = schedulefree.AdamWScheduleFree(
model.parameters(),
lr=lr,
warmup_steps=100,
)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
optimizer.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
model.eval()
optimizer.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
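The main API detail in the deleted example above is that schedule-free optimizers replace the LR scheduler entirely but must be switched between train and eval modes together with the model. A self-contained sketch of that pattern on a toy model (the hyper-parameters are illustrative):

```python
import torch
import schedulefree  # pip install schedulefree

model = torch.nn.Linear(4, 2)
optimizer = schedulefree.AdamWScheduleFree(model.parameters(), lr=1e-3, warmup_steps=10)

model.train()
optimizer.train()  # the optimizer mode must be toggled alongside the model
for _ in range(5):
    loss = model(torch.randn(8, 4)).pow(2).mean()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

model.eval()
optimizer.eval()  # required before evaluation or saving a checkpoint
```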

View File

@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -85,7 +86,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
@ -148,7 +149,7 @@ def training_function(config, args):
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE

View File

@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");

View File

@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -102,13 +103,13 @@ def training_function(config, args):
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16

View File

@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");

View File

@ -1,25 +0,0 @@
# Distributed inference examples
This folder contains a variety of tutorials for running distributed inference with the following strategy:
Load an entire model onto each GPU and send chunks of a batch through each GPU's model copy at a time
## Installation
```bash
pip install accelerate torch
```
## Running code
You can run each script either with `torchrun` or, preferably, with `accelerate launch` (no need to run `accelerate config` first):
```bash
accelerate launch --num_processes {NUM_GPUS} phi2.py
```
Or:
```bash
torchrun --nproc-per-node {NUM_GPUS} phi2.py
```
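Note that this strategy keeps a full copy of the model on every GPU, so it improves throughput only for models that already fit in a single GPU's memory; for larger models, see the PiPPy pipeline-parallelism examples below.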

View File

@ -1,86 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from accelerate import PartialState
from accelerate.utils import gather_object
# Start up the distributed environment without needing the Accelerator.
distributed_state = PartialState()
# You can change the model to any LLM such as mistralai/Mistral-7B-v0.1 or meta-llama/Llama-2-7b-chat-hf
model_name = "microsoft/phi-2"
model = AutoModelForCausalLM.from_pretrained(
model_name, device_map=distributed_state.device, torch_dtype=torch.float16
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Need to set the padding token to the eos token for generation
tokenizer.pad_token = tokenizer.eos_token
prompts = [
"I would like to",
"hello how are you",
"what is going on",
"roses are red and",
"welcome to the hotel",
]
# You can change the batch size depending on your GPU RAM
batch_size = 2
# We set it to 8 since it is better for some hardware. More information here https://github.com/huggingface/tokenizers/issues/991
pad_to_multiple_of = 8
# Split into batches
# We will get the following results:
# [ ["I would like to", "hello how are you"], [ "what is going on", "roses are red and"], [ "welcome to the hotel"] ]
formatted_prompts = [prompts[i : i + batch_size] for i in range(0, len(prompts), batch_size)]
# Apply padding on the left since we are doing generation
padding_side_default = tokenizer.padding_side
tokenizer.padding_side = "left"
# Tokenize each batch
tokenized_prompts = [
tokenizer(formatted_prompt, padding=True, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt")
for formatted_prompt in formatted_prompts
]
# Put back the original padding behavior
tokenizer.padding_side = padding_side_default
completions_per_process = []
# We automatically split the batched data we passed to it across all the processes. We also set apply_padding=True
# so that the GPUs will have the same number of prompts, and you can then gather the results.
# For example, if we have 2 gpus, the distribution will be:
# GPU 0: ["I would like to", "hello how are you"], "what is going on", "roses are red and"]
# GPU 1: ["welcome to the hotel"], ["welcome to the hotel"] -> this prompt is duplicated to ensure that all gpus have the same number of prompts
with distributed_state.split_between_processes(tokenized_prompts, apply_padding=True) as batched_prompts:
for batch in batched_prompts:
# Move the batch to the device
batch = batch.to(distributed_state.device)
# We generate the text, decode it and add it to the list completions_per_process
outputs = model.generate(**batch, max_new_tokens=20)
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
completions_per_process.extend(generated_text)
# We are gathering strings, so we need to use gather_object.
# If you need to gather tensors, you can use gather from accelerate.utils
completions_gather = gather_object(completions_per_process)
# Drop duplicates produced by apply_padding in split_between_processes
completions = completions_gather[: len(prompts)]
distributed_state.print(completions)

View File

@ -1,30 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from diffusers import DiffusionPipeline
from accelerate import PartialState # Can also be Accelerator or AcceleratorState
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
distributed_state = PartialState()
pipe.to(distributed_state.device)
# Assume two processes
# On the first GPU, the prompts will be ["a dog", "a cat"],
# and on the second GPU it will be ["a chicken", "a chicken"].
# Make sure to drop the final sample, as it will be a duplicate of the previous one.
with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"], apply_padding=True) as prompt:
result = pipe(prompt).images
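A possible continuation of the snippet above (not part of the original file): each process can write its own images to disk, using the process index to keep filenames unique.

```python
# Hypothetical follow-up to the example above: save this process's images.
for idx, image in enumerate(result):
    image.save(f"sd_result_rank{distributed_state.process_index}_{idx}.png")
```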

View File

@ -1,62 +0,0 @@
# Distributed inference examples with PiPPy
This repo contains a variety of tutorials for using the [PiPPy](https://github.com/PyTorch/PiPPy) pipeline parallelism library with accelerate. You will find examples covering:
1. How to trace the model using `accelerate.prepare_pippy`
2. How to specify inputs based on what the model expects (when to use `kwargs`, `args`, and such)
3. How to gather the results at the end.
## Installation
This requires the `main` branch of accelerate (or version 0.27.0 or newer), `pippy` 0.2.0 or greater, and Python 3.9 or newer. Install with `pip install .` to pull the dependencies from the `setup.py` in this repo, or install them manually:
```bash
pip install 'accelerate>=0.27.0' 'torchpippy>=0.2.0'
```
## Running code
You can run each script either with `torchrun` or, preferably, with `accelerate launch` (no need to run `accelerate config` first):
```bash
accelerate launch bert.py
```
Or:
```bash
accelerate launch --num_processes {NUM_GPUS} bert.py
```
Or:
```bash
torchrun --nproc-per-node {NUM_GPUS} bert.py
```
## General speedups
One can expect PiPPy to outperform native model parallelism by a multiplicative factor, since all GPUs are fed inputs at all times rather than one GPU processing an input while the others wait for it to finish.
Below are some benchmarks we have found when using the accelerate-pippy integration for a few models, running on 2x 4090s:
### Bert
| | Accelerate/Sequential | PiPPy + Accelerate |
|---|---|---|
| First batch | 0.2137s | 0.3119s |
| Average of 5 batches | 0.0099s | **0.0062s** |
### GPT2
| | Accelerate/Sequential | PiPPy + Accelerate |
|---|---|---|
| First batch | 0.1959s | 0.4189s |
| Average of 5 batches | 0.0205s | **0.0126s** |
### T5
| | Accelerate/Sequential | PiPPy + Accelerate |
|---|---|---|
| First batch | 0.2789s | 0.3809s |
| Average of 5 batches | 0.0198s | **0.0166s** |
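Reading these tables: the first batch is consistently slower under PiPPy, likely from one-time pipeline warm-up on top of CUDA initialization, while steady-state batches run roughly 1.2-1.6x faster than the sequential baseline on this two-GPU setup.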

View File

@ -1,78 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
from transformers import AutoModelForMaskedLM
from accelerate import PartialState, prepare_pippy
from accelerate.utils import set_seed
# Set the random seed to have reproducible outputs
set_seed(42)
# Create an example model
model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()
# Input configs
# Create example inputs for the model
input = torch.randint(
low=0,
high=model.config.vocab_size,
size=(2, 512), # bs x seq_len
device="cpu",
dtype=torch.int64,
requires_grad=False,
)
# Create a pipeline stage from the model
# Using `auto` is equivalent to letting `device_map="auto"` figure
# out device mapping and will also split the model according to the
# number of total GPUs available if it fits on one GPU
model = prepare_pippy(model, split_points="auto", example_args=(input,))
# You can pass `gather_output=True` to have the output from the model
# available on all GPUs
# model = prepare_pippy(model, split_points="auto", example_args=(input,), gather_output=True)
# Move the inputs to the first device
input = input.to("cuda:0")
# Take an average of 5 times
# Measure first batch
torch.cuda.synchronize()
start_time = time.time()
with torch.no_grad():
output = model(input)
torch.cuda.synchronize()
end_time = time.time()
first_batch = end_time - start_time
# Now that CUDA is init, measure after
torch.cuda.synchronize()
start_time = time.time()
for i in range(5):
with torch.no_grad():
output = model(input)
torch.cuda.synchronize()
end_time = time.time()
# The outputs are only on the final process by default
if PartialState().is_last_process:
output = torch.stack(tuple(output[0]))
print(f"Time of first pass: {first_batch}")
print(f"Average time per batch: {(end_time - start_time) / 5}")

View File

@ -1,77 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
from transformers import AutoModelForSequenceClassification
from accelerate import PartialState, prepare_pippy
from accelerate.utils import set_seed
# Set the random seed to have reproducible outputs
set_seed(42)
# Create an example model
model = AutoModelForSequenceClassification.from_pretrained("gpt2")
model.eval()
# Input configs
# Create example inputs for the model
input = torch.randint(
low=0,
high=model.config.vocab_size,
size=(2, 1024), # bs x seq_len
device="cpu",
dtype=torch.int64,
requires_grad=False,
)
# Create a pipeline stage from the model
# Using `auto` is equivalent to letting `device_map="auto"` figure
# out device mapping and will also split the model according to the
# number of total GPUs available if it fits on one GPU
model = prepare_pippy(model, split_points="auto", example_args=(input,))
# You can pass `gather_output=True` to have the output from the model
# available on all GPUs
# model = prepare_pippy(model, split_points="auto", example_args=(input,), gather_output=True)
# Move the inputs to the first device
input = input.to("cuda:0")
# Take an average of 5 times
# Measure first batch
torch.cuda.synchronize()
start_time = time.time()
with torch.no_grad():
output = model(input)
torch.cuda.synchronize()
end_time = time.time()
first_batch = end_time - start_time
# Now that CUDA is init, measure after
torch.cuda.synchronize()
start_time = time.time()
for i in range(5):
with torch.no_grad():
output = model(input)
torch.cuda.synchronize()
end_time = time.time()
# The outputs are only on the final process by default
if PartialState().is_last_process:
output = torch.stack(tuple(output[0]))
print(f"Time of first pass: {first_batch}")
print(f"Average time per batch: {(end_time - start_time) / 5}")

View File

@ -1,54 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from accelerate import PartialState, prepare_pippy
# The sdpa implementation, which is the default for torch>2.1.2, fails with tracing + the attention mask kwarg;
# with attn_implementation="eager" mode, the forward pass is very slow for some reason
model = AutoModelForCausalLM.from_pretrained(
"meta-llama/Llama-2-7b-chat-hf", low_cpu_mem_usage=True, attn_implementation="sdpa"
)
model.eval()
# Input configs
# Create example inputs for the model
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
prompts = ("I would like to", "I really like to", "The weather is pretty") # bs = 3
tokenizer.pad_token = tokenizer.eos_token
inputs = tokenizer(prompts, return_tensors="pt", padding=True)
# Create a pipeline stage from the model
# Using `auto` is equivalent to letting `device_map="auto"` figure
# out device mapping and will also split the model according to the
# number of total GPUs available if it fits on one GPU
model = prepare_pippy(model, split_points="auto", example_kwargs=inputs)
# You can pass `gather_output=True` to have the output from the model
# available on all GPUs
# model = prepare_pippy(model, split_points="auto", example_args=(input,), gather_output=True)
# currently we don't support `model.generate`
# output = model.generate(**inputs, max_new_tokens=1)
inputs = inputs.to(0)
with torch.no_grad():
output = model(**inputs)
# The outputs are only on the final process by default
if PartialState().is_last_process:
next_token_logits = output[0][:, -1, :]
next_token = torch.argmax(next_token_logits, dim=-1)
print(tokenizer.batch_decode(next_token))

View File

@ -1,2 +0,0 @@
accelerate
pippy>=0.2.0

View File

@ -1,89 +0,0 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
from transformers import AutoModelForSeq2SeqLM
from accelerate import PartialState, prepare_pippy
from accelerate.utils import set_seed
# Set the random seed to have reproducible outputs
set_seed(42)
# Create an example model
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
model.eval()
# Input configs
# Create example inputs for the model
input = torch.randint(
low=0,
high=model.config.vocab_size,
size=(2, 1024), # bs x seq_len
device="cpu",
dtype=torch.int64,
requires_grad=False,
)
example_inputs = {"input_ids": input, "decoder_input_ids": input}
# Create a pipeline stage from the model
# Using `auto` is equivalent to letting `device_map="auto"` figure
# out device mapping and will also split the model according to the
# number of total GPUs available if it fits on one GPU
model = prepare_pippy(
model,
no_split_module_classes=["T5Block"],
example_kwargs=example_inputs,
)
# You can pass `gather_output=True` to have the output from the model
# available on all GPUs
# model = prepare_pippy(
# model,
# no_split_module_classes=["T5Block"],
# example_kwargs=example_inputs,
# gather_outputs=True
# )
# The model expects a tuple during real inference
# with the data on the first device
args = (example_inputs["input_ids"].to("cuda:0"), example_inputs["decoder_input_ids"].to("cuda:0"))
# Take an average of 5 times
# Measure first batch
torch.cuda.synchronize()
start_time = time.time()
with torch.no_grad():
output = model(*args)
torch.cuda.synchronize()
end_time = time.time()
first_batch = end_time - start_time
# Now that CUDA is init, measure after
torch.cuda.synchronize()
start_time = time.time()
for i in range(5):
with torch.no_grad():
output = model(*args)
torch.cuda.synchronize()
end_time = time.time()
# The outputs are only on the final process by default
if PartialState().is_last_process:
output = torch.stack(tuple(output[0]))
print(f"Time of first pass: {first_batch}")
print(f"Average time per batch: {(end_time - start_time) / 5}")

View File

@ -1,16 +1,3 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import runhouse as rh

View File

@ -1,3 +1,4 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -77,8 +78,8 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# For Torchxla, it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
@ -123,7 +124,7 @@ def training_function(config, args):
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE

View File

@ -1,5 +1,3 @@
accelerate # used to be installed in Amazon SageMaker environment
evaluate
datasets==2.3.2
schedulefree
huggingface_hub>=0.20.0
datasets==2.3.2

View File

@ -1,32 +0,0 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from manim import *
class Stage0(Scene):
def construct(self):
mascot = ImageMobject("mascot_bookie.png")
mascot.scale(.35)
mascot.move_to([-3.75,-1,0])
text = Paragraph(
"Distributed Training,\nHugging Face Accelerate,\nand PyTorch DataLoaders\n\nHow do they all interact?",
font_size=36,
line_spacing=1,
alignment="center",
weight=BOLD,
)
text.move_to([1.75,.5,0])
self.add(mascot)
self.add(text)

View File

@ -1,31 +0,0 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from manim import *
class Stage01(Scene):
def construct(self):
mascot = ImageMobject("mascot_bookie.png")
mascot.scale(.35)
mascot.move_to([-3.75,-1,0])
text = Paragraph(
"Distributed Training,\nHugging Face Accelerate,\nand PyTorch DataLoaders\n\nHow do they all interact?",
font_size=36,
line_spacing=1,
alignment="center",
weight=BOLD,
)
text.move_to([1.75,.5,0])
self.add(mascot)
self.add(text)

View File

@ -1,176 +0,0 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from manim import *
class Stage2(Scene):
def construct(self):
# The dataset items
fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)
columns = [
VGroup(*[Rectangle(height=0.25,width=0.25,color="green") for i in range(8)]).arrange(RIGHT,buff=0)
for j in range(4)
]
dataset_recs = VGroup(*columns).arrange(UP, buff=0)
dataset_text = Text("Dataset", font_size=24)
dataset = Group(dataset_recs,dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
dataset.move_to([-2,0,0])
self.add(dataset)
code = Code(
code="dataloader = DataLoader(...)\nfor batch in dataloader():\n\t...",
tab_width=4,
background="window",
language="Python",
font="Monospace",
font_size=14,
corner_radius=.2,
insert_line_no=False,
line_spacing=.75,
style=Code.styles_list[1],
)
code.move_to([-3.5, 2.5, 0])
self.add(code)
# The dataloader itself
dataloader = Group(
Rectangle(color="red", height=2, width=2),
Text("DataLoader", font_size=24)
).arrange(DOWN, buff=.5, aligned_edge=DOWN)
sampler = Group(
Rectangle(color="blue", height=1, width=1),
Text("Sampler", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
dataloader.move_to([1, 0, 0])
sampler.move_to([.75,.25,0])
self.add(dataloader)
self.add(sampler)
gpu_1 = Group(
Rectangle(color="white", height=1, width=1),
Text("GPU 1", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4, 2, 0])
gpu_2 = Group(
Rectangle(color="white", height=1, width=1),
Text("GPU 2", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4, .5, 0])
gpu_3 = Group(
Rectangle(color="white", height=1, width=1),
Text("GPU 3", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4, -1, 0])
gpu_4 = Group(
Rectangle(color="white", height=1, width=1),
Text("GPU 4", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4, -2.5, 0])
gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]]
self.add(gpu_1, gpu_2, gpu_3, gpu_4)
# Animate their existence
self.play(
Create(gpu_1[0], run_time=0.5),
Create(gpu_2[0], run_time=0.5),
Create(gpu_3[0], run_time=0.5),
Create(gpu_4[0], run_time=0.5),
Create(dataset_recs, run_time=1),
Create(sampler[0], run_time=1),
Create(dataloader[0], run_time=1)
)
step_1 = MarkupText(
f"Without any special care, \nthe same data is sent though each sampler, \nand the same samples are spit out on each GPU",
font_size=18
)
step_1.move_to([0, -2.5, 0])
self.play(
Write(step_1, run_time=4),
)
first_animations = []
second_animations = []
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
current_color = colors[0]
buff = 0
lr_buff = .25
old_target = None
new_datasets = []
for i,data in enumerate(dataset_recs[-1]):
if i % 2 == 0:
# current_color = colors[i//2]
current_color = "BLUE_E"
dataset_target = Rectangle(height=0.46/2,width=0.46/2).set_stroke(width=0.).set_fill(current_color, opacity=0.7)
dataset_target.move_to(data)
dataset_target.generate_target()
aligned_edge = ORIGIN
if i % 2 == 0:
old_target = dataset_target.target
buff -= .25
aligned_edge = LEFT
dataset_target.target.next_to(
sampler, buff=buff, direction=UP,
aligned_edge=LEFT
)
else:
dataset_target.target.next_to(
old_target, direction=RIGHT, buff=0.01,
)
new_datasets.append(dataset_target)
first_animations.append(data.animate(run_time=0.5).set_stroke(current_color))
second_animations.append(MoveToTarget(dataset_target, run_time=1.5))
self.play(*first_animations)
self.play(*second_animations)
self.wait()
move_animation = []
for j,gpu in enumerate(gpus):
buff = 0
for i,data in enumerate(new_datasets):
if i % 2 == 0:
current_color = colors[i//2]
if j != 3:
data = data.copy()
data.generate_target()
aligned_edge = ORIGIN
if i % 2 == 0:
old_target = data.target
buff -= .25
aligned_edge = LEFT
data.target.next_to(
gpu, buff=buff, direction=UP,
aligned_edge=LEFT
)
else:
data.target.next_to(
old_target, direction=RIGHT, buff=0.01,
)
move_animation.append(MoveToTarget(data, run_time=1.5))
self.play(*move_animation)
self.remove(step_1)
step_2 = MarkupText(
f"This behavior is undesireable, because we want\neach GPU to see different data for efficient training.",
font_size=18
)
step_2.move_to([0, -2.5, 0])
self.play(
Write(step_2, run_time=2.5),
)
self.wait()

View File

@ -1,34 +0,0 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from manim import *
class Stage3(Scene):
def construct(self):
step_1 = MarkupText(
f"To combat this, Accelerate employs one of two different\nSampler wrapper methods depending on the scenario:",
font_size=24
)
step_1.move_to([0, 1.5, 0])
self.add(step_1)
step_2 = MarkupText(
f"1. Sharding the dataset before drawing:\n\t● <span fgcolor='{RED}'>IterableDatasetShard</span>\n\t● <span fgcolor='{RED}'>BatchSamplerShard</span>",
font_size=24,
).next_to(step_1, direction=DOWN, aligned_edge=LEFT)
self.add(step_2)
step_3 = MarkupText(
f"\n\n2. Splitting the batch after drawing:\n\t● <span fgcolor='{BLUE}'>DataLoaderDispatcher</span>",
font_size=24,
).next_to(step_2, direction=DOWN, aligned_edge=LEFT)
self.add(step_3)
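A minimal sketch of how the two approaches above are selected in user code (the `dispatch_batches` flag matches the Stage7 animation later in this diff; the dataset and batch size are illustrative):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

dataset = TensorDataset(torch.arange(64).float().unsqueeze(1))
dataloader = DataLoader(dataset, batch_size=8, shuffle=True)

# 1. Shard before drawing (the default): every process samples only its own slice of the data.
accelerator = Accelerator()
# 2. Split after drawing: process 0 draws the full batch and dispatches slices to the others.
# accelerator = Accelerator(dispatch_batches=True)

dataloader = accelerator.prepare(dataloader)
for (batch,) in dataloader:
    pass  # each process now iterates over different samples
```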

View File

@ -1,52 +0,0 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from manim import *
class Stage4(Scene):
def construct(self):
step_1 = MarkupText(
f"To understand the next part fully, let's define two terms,\n<span fgcolor='{RED}'>`batch_size`</span> and <span fgcolor='{BLUE}'>`global_batch_size`</span>:",
font_size=18
)
step_1.move_to([0, 1.5, 0])
# <span fgcolor='{YELLOW}'>●</span>
step_2 = MarkupText(
f"\n\n● <span fgcolor='{RED}'>`batch_size`</span>: \n\tThis will be defined as the batch size seen on a given\n\t*individual* GPU",
font_size=18,
).next_to(step_1, direction=DOWN, aligned_edge=LEFT)
step_3 = MarkupText(
f"\n\n● <span fgcolor='{BLUE}'>`global_batch_size`</span>:\n\tThis will be defined as the *total* number of\n\tdifferent items seen in the dataset, across all GPUs",
font_size=18,
).next_to(step_2, direction=DOWN, aligned_edge=LEFT)
step_4 = MarkupText(
f"\n\nSo if we have a dataset of 64 items, 8 GPUs, \nand a `batch_size` of 8, each *step* will go through\nthe entire dataset one time as 8*8=64",
font_size=18,
).next_to(step_3, direction=DOWN, aligned_edge=LEFT)
self.play(
Write(step_1, run_time=4),
)
self.play(
Write(step_2, run_time=4)
)
self.play(
Write(step_3, run_time=4)
)
self.play(
Write(step_4, run_time=6)
)
self.wait()
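The same arithmetic as the narration above, written out (plain Python, nothing accelerate-specific):

```python
num_gpus = 8
batch_size = 8                              # per-GPU batch size
global_batch_size = num_gpus * batch_size   # total items seen per step = 64
dataset_size = 64

steps_per_epoch = dataset_size // global_batch_size
print(global_batch_size, steps_per_epoch)   # 64 1 -> one step covers the whole dataset
```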

View File

@ -1,203 +0,0 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from manim import *
class Stage5(Scene):
def construct(self):
# The dataset items
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)
columns = [
VGroup(*[Rectangle(height=0.25,width=0.25,color=colors[j]) for i in range(8)]).arrange(RIGHT,buff=0)
for j in range(4)
]
dataset_recs = VGroup(*columns).arrange(UP, buff=0)
dataset_text = Text("Dataset", font_size=24)
dataset = Group(dataset_recs,dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
dataset.move_to([-2,0,0])
self.add(dataset)
code = Code(
code="# We enable this by default\naccelerator = Accelerator()\ndataloader = DataLoader(...)\ndataloader = accelerator.prepare(dataloader)\nfor batch in dataloader:\n\t...",
tab_width=4,
background="window",
language="Python",
font="Monospace",
font_size=14,
corner_radius=.2,
insert_line_no=False,
line_spacing=.75,
style=Code.styles_list[1],
)
code.move_to([-3.5, 2.5, 0])
self.add(code)
# The dataloader itself
sampler_1 = Group(
Rectangle(color="blue", height=1, width=1),
Text("Sampler GPU 1", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
sampler_2 = Group(
Rectangle(color="blue", height=1, width=1),
Text("Sampler GPU 2", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
sampler_3 = Group(
Rectangle(color="blue", height=1, width=1),
Text("Sampler GPU 3", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
sampler_4 = Group(
Rectangle(color="blue", height=1, width=1),
Text("Sampler GPU 4", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
sampler_1.move_to([2,2,0])
sampler_2.move_to([2,.5,0])
sampler_3.move_to([2,-1.,0])
sampler_4.move_to([2,-2.5,0])
self.add(sampler_1, sampler_2, sampler_3, sampler_4)
samplers = [sampler_1[0], sampler_2[0], sampler_3[0], sampler_4[0]]
gpu_1 = Group(
Rectangle(color="white", height=1, width=1),
Text("Output GPU 1", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, 2, 0])
gpu_2 = Group(
Rectangle(color="white", height=1, width=1),
Text("Output GPU 2", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, .5, 0])
gpu_3 = Group(
Rectangle(color="white", height=1, width=1),
Text("Output GPU 3", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -1, 0])
gpu_4 = Group(
Rectangle(color="white", height=1, width=1),
Text("Output GPU 4", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -2.5, 0])
gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]]
self.add(gpu_1, gpu_2, gpu_3, gpu_4)
# Animate their existence
self.play(
Create(gpu_1[0], run_time=1),
Create(gpu_2[0], run_time=1),
Create(gpu_3[0], run_time=1),
Create(gpu_4[0], run_time=1),
Create(dataset_recs, run_time=1),
Create(sampler_1[0], run_time=1),
Create(sampler_2[0], run_time=1),
Create(sampler_3[0], run_time=1),
Create(sampler_4[0], run_time=1),
)
first_animations = []
second_animations = []
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
current_color = colors[0]
buff = 0
lr_buff = .25
old_target = None
new_datasets = []
for i,row_data in enumerate(dataset_recs):
new_row = []
current_color = colors[i]
if i == 0:
idx = -3
elif i == 1:
idx = -2
elif i == 2:
idx = -1
elif i == 3:
idx = 0
for j,indiv_data in enumerate(row_data):
dataset_target = Rectangle(height=0.46/2,width=0.46/2).set_stroke(width=0.).set_fill(current_color, opacity=0.7)
dataset_target.move_to(indiv_data)
dataset_target.generate_target()
aligned_edge = ORIGIN
if j % 8 == 0:
aligned_edge = LEFT
dataset_target.target.next_to(
samplers[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
)
dataset_target.target.set_x(dataset_target.target.get_x())
elif j % 4 == 0:
old_target = dataset_target.target
dataset_target.target.next_to(
samplers[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
)
dataset_target.target.set_x(dataset_target.target.get_x())
dataset_target.target.set_y(dataset_target.target.get_y()-.25)
else:
dataset_target.target.next_to(
old_target, direction=RIGHT, buff=0.02,
)
old_target = dataset_target.target
new_row.append(dataset_target)
first_animations.append(indiv_data.animate(run_time=0.5).set_stroke(current_color))
second_animations.append(MoveToTarget(dataset_target, run_time=1.5))
new_datasets.append(new_row)
step_1 = MarkupText(
f"Since we splice the dataset between each GPU,\nthe models weights can be averaged during `backward()`\nActing as though we did one giant epoch\nvery quickly.",
font_size=18
)
step_1.move_to([-2.5, -2, 0])
self.play(
Write(step_1, run_time=3),
)
self.play(
*first_animations,
)
self.play(*second_animations)
self.wait(duration=.5)
move_animation = []
import random
for i,row in enumerate(new_datasets):
# row = [row[k] for k in random.sample(range(8), 8)]
current_color = colors[i]
if i == 0:
idx = -3
elif i == 1:
idx = -2
elif i == 2:
idx = -1
elif i == 3:
idx = 0
for j,indiv_data in enumerate(row):
indiv_data.generate_target()
aligned_edge = ORIGIN
if j % 8 == 0:
aligned_edge = LEFT
indiv_data.target.next_to(
gpus[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
)
indiv_data.target.set_x(indiv_data.target.get_x())
elif j % 4 == 0:
indiv_data.target.next_to(
gpus[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
)
indiv_data.target.set_x(indiv_data.target.get_x())
indiv_data.target.set_y(indiv_data.target.get_y()-.25)
else:
indiv_data.target.next_to(
old_target, direction=RIGHT, buff=0.02,
)
old_target = indiv_data.target
move_animation.append(MoveToTarget(indiv_data, run_time=1.5))
self.play(*move_animation)
self.wait()

View File

@ -1,193 +0,0 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from manim import *
class Stage6(Scene):
def construct(self):
# The dataset items
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)
columns = [
VGroup(*[Rectangle(height=0.25,width=0.25,color=colors[j]) for i in range(8)]).arrange(RIGHT,buff=0)
for j in range(4)
]
dataset_recs = VGroup(*columns).arrange(UP, buff=0)
dataset_text = Text("Dataset", font_size=24)
dataset = Group(dataset_recs,dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
dataset.move_to([-2,0,0])
self.add(dataset)
code = Code(
code="# We enable this by default\naccelerator = Accelerator()\ndataloader = DataLoader(..., shuffle=True)\ndataloader = accelerator.prepare(dataloader)\nfor batch in dataloader:\n\t...",
tab_width=4,
background="window",
language="Python",
font="Monospace",
font_size=14,
corner_radius=.2,
insert_line_no=False,
line_spacing=.75,
style=Code.styles_list[1],
)
code.move_to([-3.5, 2.5, 0])
self.add(code)
# The dataloader itself
sampler_1 = Group(
Rectangle(color="blue", height=1, width=1),
Text("Sampler GPU 1", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
sampler_2 = Group(
Rectangle(color="blue", height=1, width=1),
Text("Sampler GPU 2", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
sampler_3 = Group(
Rectangle(color="blue", height=1, width=1),
Text("Sampler GPU 3", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
sampler_4 = Group(
Rectangle(color="blue", height=1, width=1),
Text("Sampler GPU 4", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
sampler_1.move_to([2,2,0])
sampler_2.move_to([2,.5,0])
sampler_3.move_to([2,-1.,0])
sampler_4.move_to([2,-2.5,0])
self.add(sampler_1, sampler_2, sampler_3, sampler_4)
samplers = [sampler_1[0], sampler_2[0], sampler_3[0], sampler_4[0]]
gpu_1 = Group(
Rectangle(color="white", height=1, width=1),
Text("Output GPU 1", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, 2, 0])
gpu_2 = Group(
Rectangle(color="white", height=1, width=1),
Text("Output GPU 2", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, .5, 0])
gpu_3 = Group(
Rectangle(color="white", height=1, width=1),
Text("Output GPU 3", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -1, 0])
gpu_4 = Group(
Rectangle(color="white", height=1, width=1),
Text("Output GPU 4", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -2.5, 0])
gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]]
self.add(gpu_1, gpu_2, gpu_3, gpu_4)
first_animations = []
second_animations = []
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
current_color = colors[0]
buff = 0
lr_buff = .25
old_target = None
new_datasets = []
for i,row_data in enumerate(dataset_recs):
new_row = []
current_color = colors[i]
if i == 0:
idx = -3
elif i == 1:
idx = -2
elif i == 2:
idx = -1
elif i == 3:
idx = 0
for j,indiv_data in enumerate(row_data):
dataset_target = Rectangle(height=0.46/2,width=0.46/2).set_stroke(width=0.).set_fill(current_color, opacity=0.7)
dataset_target.move_to(indiv_data)
dataset_target.generate_target()
aligned_edge = ORIGIN
if j % 8 == 0:
aligned_edge = LEFT
old_target = dataset_target.target
dataset_target.target.next_to(
samplers[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
)
dataset_target.target.set_x(dataset_target.target.get_x())
elif j % 4 == 0:
old_target = dataset_target.target
dataset_target.target.next_to(
samplers[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
)
dataset_target.target.set_x(dataset_target.target.get_x())
dataset_target.target.set_y(dataset_target.target.get_y()-.25)
else:
dataset_target.target.next_to(
old_target, direction=RIGHT, buff=0.02,
)
old_target = dataset_target.target
new_row.append(dataset_target)
first_animations.append(indiv_data.animate(run_time=0.5).set_stroke(current_color))
second_animations.append(MoveToTarget(dataset_target, run_time=1.5))
new_datasets.append(new_row)
step_1 = MarkupText(
f"During shuffling, each mini-batch's\noutput order will be modified",
font_size=18
)
step_1.move_to([-1.5, -2, 0])
self.play(
Write(step_1, run_time=3),
)
self.play(
*first_animations,
)
self.play(*second_animations)
self.wait(duration=.5)
move_animation = []
import random
for i,row in enumerate(new_datasets):
row = [row[k] for k in random.sample(range(8), 8)]
current_color = colors[i]
if i == 0:
idx = -3
elif i == 1:
idx = -2
elif i == 2:
idx = -1
elif i == 3:
idx = 0
for j,indiv_data in enumerate(row):
indiv_data.generate_target()
aligned_edge = ORIGIN
if j % 8 == 0:
aligned_edge = LEFT
indiv_data.target.next_to(
gpus[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
)
indiv_data.target.set_x(indiv_data.target.get_x())
elif j % 4 == 0:
indiv_data.target.next_to(
gpus[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
)
indiv_data.target.set_x(indiv_data.target.get_x())
indiv_data.target.set_y(indiv_data.target.get_y()-.25)
else:
indiv_data.target.next_to(
old_target, direction=RIGHT, buff=0.02,
)
old_target = indiv_data.target
move_animation.append(MoveToTarget(indiv_data, run_time=1.5))
self.play(*move_animation)
self.wait()

View File

@ -1,182 +0,0 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from manim import *
class Stage7(Scene):
def construct(self):
# The dataset items
code = Code(
code="accelerator = Accelerator(dispatch_batches=True)\ndataloader = DataLoader(...)\ndataloader = accelerator.prepare(dataloader)\nfor batch in dataloader:\n\t...",
tab_width=4,
background="window",
language="Python",
font="Monospace",
font_size=14,
corner_radius=.2,
insert_line_no=False,
line_spacing=.75,
style=Code.styles_list[1],
)
code.move_to([-3.5, 2.5, 0])
self.add(code)
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)
columns = [
VGroup(*[Rectangle(height=0.25,width=0.25,color=colors[j]) for i in range(8)]).arrange(RIGHT,buff=0)
for j in range(4)
]
dataset_recs = VGroup(*columns).arrange(UP, buff=0)
dataset_text = Text("Dataset", font_size=24)
dataset = Group(dataset_recs,dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
dataset.move_to([-2,0,0])
self.add(dataset)
# The dataloader itself
sampler_1 = Group(
Rectangle(color="blue", height=1.02, width=1.02),
Text("Sampler GPU 1", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
sampler_2 = Group(
Rectangle(color="blue", height=1.02, width=1.02),
Text("Sampler GPU 2", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
sampler_3 = Group(
Rectangle(color="blue", height=1.02, width=1.02),
Text("Sampler GPU 3", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
sampler_4 = Group(
Rectangle(color="blue", height=1.02, width=1.02),
Text("Sampler GPU 4", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
sampler_1.move_to([2,2,0])
sampler_2.move_to([2,.5,0])
sampler_3.move_to([2,-1.,0])
sampler_4.move_to([2,-2.5,0])
self.add(sampler_1, sampler_2, sampler_3, sampler_4)
samplers = [sampler_1[0], sampler_2[0], sampler_3[0], sampler_4[0]]
gpu_1 = Group(
Rectangle(color="white", height=1.02, width=.98),
Text("Output GPU 1", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, 2, 0])
gpu_2 = Group(
Rectangle(color="white", height=1.02, width=.98),
Text("Output GPU 2", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, .5, 0])
gpu_3 = Group(
Rectangle(color="white", height=1.02, width=.98),
Text("Output GPU 3", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -1, 0])
gpu_4 = Group(
Rectangle(color="white", height=1.02, width=.98),
Text("Output GPU 4", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -2.5, 0])
gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]]
self.add(gpu_1, gpu_2, gpu_3, gpu_4)
step_1 = MarkupText(
f"When using a `DataLoaderDispatcher`, all\nof the samples are collected from GPU 0's dataset,\nthen divided and sent to each GPU.\nAs a result, this will be slower.",
font_size=18
)
step_1.move_to([-2.5, -2, 0])
self.play(
Write(step_1, run_time=3.5),
)
first_animations = []
second_animations = []
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
current_color = colors[0]
ud_buff = 0.01
lr_buff = 0.01
old_target = None
new_datasets = []
for i,row_data in enumerate(dataset_recs):
new_row = []
current_color = colors[i]
for j,indiv_data in enumerate(row_data):
dataset_target = Rectangle(height=0.46/4,width=0.46/2).set_stroke(width=0.).set_fill(current_color, opacity=0.7)
dataset_target.move_to(indiv_data)
dataset_target.generate_target()
aligned_edge = ORIGIN
if j % 8 == 0:
aligned_edge = LEFT
dataset_target.target.next_to(
samplers[0].get_corner(DOWN+LEFT), buff=0.0125, direction=RIGHT+UP,
)
dataset_target.target.set_x(dataset_target.target.get_x())
dataset_target.target.set_y(dataset_target.target.get_y() + (.25 * i))
elif j % 4 == 0:
old_target = dataset_target.target
dataset_target.target.next_to(
samplers[0].get_corner(DOWN+LEFT), buff=0.0125, direction=RIGHT+UP,
)
dataset_target.target.set_x(dataset_target.target.get_x())
dataset_target.target.set_y(dataset_target.target.get_y()+.125 + (.25 * i))
else:
dataset_target.target.next_to(
old_target, direction=RIGHT, buff=0.0125,
)
old_target = dataset_target.target
new_row.append(dataset_target)
first_animations.append(indiv_data.animate(run_time=0.5).set_stroke(current_color))
second_animations.append(MoveToTarget(dataset_target, run_time=1.5))
new_datasets.append(new_row)
self.play(
*first_animations,
)
self.play(*second_animations)
move_animation = []
for i,row in enumerate(new_datasets):
current_color = colors[i]
if i == 0:
idx = -3
elif i == 1:
idx = -2
elif i == 2:
idx = -1
elif i == 3:
idx = 0
for j,indiv_data in enumerate(row):
indiv_data.generate_target()
indiv_data.animate.stretch_to_fit_height(0.46/2)
aligned_edge = ORIGIN
if j % 8 == 0:
aligned_edge = LEFT
indiv_data.target.next_to(
gpus[abs(idx)].get_corner(UP+LEFT), buff=.01, direction=RIGHT+DOWN,
)
indiv_data.target.set_x(indiv_data.target.get_x())
indiv_data.target.set_y(indiv_data.target.get_y()-.25)
elif j % 4 == 0:
indiv_data.target.next_to(
gpus[abs(idx)].get_corner(UP+LEFT), buff=.01, direction=RIGHT+DOWN,
)
indiv_data.target.set_x(indiv_data.target.get_x())
else:
indiv_data.target.next_to(
old_target, direction=RIGHT, buff=0.01,
)
old_target = indiv_data.target
move_animation.append(MoveToTarget(indiv_data, run_time=1.5))
self.play(*move_animation)
self.wait()
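The code snippet in the scene above still shows the older `Accelerator(dispatch_batches=True)` form; elsewhere in this change set that argument moves onto `DataLoaderConfiguration`. A hedged sketch of the newer spelling (the dataset and batch size are placeholders, not from the diff):

```python
# Sketch: with dispatch_batches=True, batches are built on the main process and
# broadcast to the other processes, which is simpler but slower (see the scene above).
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator, DataLoaderConfiguration

dataloader_config = DataLoaderConfiguration(dispatch_batches=True)
accelerator = Accelerator(dataloader_config=dataloader_config)

dataset = TensorDataset(torch.arange(32).float().unsqueeze(1))  # placeholder data
dataloader = accelerator.prepare(DataLoader(dataset, batch_size=4))
for batch in dataloader:
    ...
```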

View File

@ -1,44 +1,17 @@
[tool.ruff]
[tool.black]
line-length = 119
target-version = "py38"
target-version = ['py37']
[tool.ruff.lint]
preview = true
ignore-init-module-imports = true
extend-select = [
"B009", # static getattr
"B010", # static setattr
"CPY", # Copyright
"E", # PEP8 errors
"F", # PEP8 formatting
"I", # Import sorting
"TID251", # Banned API
"UP", # Pyupgrade
"W", # PEP8 warnings
]
ignore = [
"E501", # Line length (handled by ruff-format)
"E741", # Ambiguous variable name
"W605", # Invalid escape sequence
"UP007", # X | Y type annotations
]
[tool.ruff]
# Never enforce `E501` (line length violations).
ignore = ["E501", "E741", "W605"]
select = ["E", "F", "I", "W"]
line-length = 119
[tool.ruff.lint.per-file-ignores]
"__init__.py" = [
"F401", # Ignore seemingly unused imports (they're meant for re-export)
]
"manim_animations/*" = ["ALL"]
# Ignore import violations in all `__init__.py` files.
[tool.ruff.per-file-ignores]
"__init__.py" = ["E402", "F401", "F403", "F811"]
[tool.ruff.lint.isort]
[tool.ruff.isort]
lines-after-imports = 2
known-first-party = ["accelerate"]
[tool.ruff.format]
exclude = [
"manim_animations/*"
]
[tool.ruff.lint.flake8-tidy-imports.banned-api]
"os.getenv".msg = "Use os.environ instead"
"os.putenv".msg = "Use os.environ instead"
"os.unsetenv".msg = "Use os.environ instead"

setup.cfg Normal file
View File

@ -0,0 +1,14 @@
[isort]
default_section = FIRSTPARTY
ensure_newline_before_comments = True
force_grid_wrap = 0
include_trailing_comma = True
known_first_party = accelerate
line_length = 119
lines_after_imports = 2
multi_line_output = 3
use_parentheses = True
[flake8]
ignore = E203, E722, E501, E741, W503, W605
max-line-length = 119

View File

@ -12,31 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
from setuptools import setup
from setuptools import find_packages
extras = {}
extras["quality"] = [
"black ~= 23.1", # hf-doc-builder has a hidden dependency on `black`
"hf-doc-builder >= 0.3.0",
"ruff ~= 0.2.1",
]
extras["quality"] = ["black ~= 23.1", "ruff >= 0.0.241", "hf-doc-builder >= 0.3.0", "urllib3 < 2.0.0"]
extras["docs"] = []
extras["test_prod"] = ["pytest>=7.2.0,<=8.0.0", "pytest-xdist", "pytest-subtests", "parameterized"]
extras["test_prod"] = ["pytest", "pytest-xdist", "pytest-subtests", "parameterized"]
extras["test_dev"] = [
"datasets",
"diffusers",
"evaluate",
"torchpippy>=0.2.0",
"transformers",
"scipy",
"scikit-learn",
"tqdm",
"bitsandbytes",
"timm",
"datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed", "tqdm", "bitsandbytes", "timm"
]
extras["testing"] = extras["test_prod"] + extras["test_dev"]
extras["deepspeed"] = ["deepspeed<=0.14.0"]
extras["rich"] = ["rich"]
extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard", "dvclive"]
@ -48,14 +34,14 @@ extras["sagemaker"] = [
setup(
name="accelerate",
version="0.31.0.dev0",
version="0.25.0.dev0",
description="Accelerate",
long_description=open("README.md", encoding="utf-8").read(),
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="deep learning",
license="Apache",
author="The HuggingFace team",
author_email="zach.mueller@huggingface.co",
author_email="sylvain@huggingface.co",
url="https://github.com/huggingface/accelerate",
package_dir={"": "src"},
packages=find_packages("src"),
@ -68,15 +54,7 @@ setup(
]
},
python_requires=">=3.8.0",
install_requires=[
"numpy>=1.17",
"packaging>=20.0",
"psutil",
"pyyaml",
"torch>=1.10.0",
"huggingface_hub",
"safetensors>=0.3.1",
],
install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.10.0", "huggingface_hub", "safetensors>=0.3.1"],
extras_require=extras,
classifiers=[
"Development Status :: 5 - Production/Stable",

View File

@ -1,17 +1,4 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.30.1.dev0"
__version__ = "0.25.0.dev0"
from .accelerator import Accelerator
from .big_modeling import (
@ -24,12 +11,10 @@ from .big_modeling import (
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .inference import prepare_pippy
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
AutocastKwargs,
DataLoaderConfiguration,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,

View File

@ -47,7 +47,6 @@ from .utils import (
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
AutocastKwargs,
DataLoaderConfiguration,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
@ -79,13 +78,11 @@ from .utils import (
is_deepspeed_available,
is_fp8_available,
is_ipex_available,
is_lomo_available,
is_megatron_lm_available,
is_mlu_available,
is_msamp_available,
is_npu_available,
is_torch_version,
is_torch_xla_available,
is_tpu_available,
is_xpu_available,
load_fsdp_model,
load_fsdp_optimizer,
@ -136,8 +133,7 @@ if is_megatron_lm_available():
from torch.distributed.algorithms.join import Join
if is_torch_xla_available():
import torch_xla.amp as xamp
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
@ -153,12 +149,6 @@ except ImportError:
logger = get_logger(__name__)
# Sentinel values for defaults
_split_batches = object()
_dispatch_batches = object()
_even_batches = object()
_use_seedable_sampler = object()
class Accelerator:
"""
@ -168,6 +158,11 @@ class Accelerator:
device_placement (`bool`, *optional*, defaults to `True`):
Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model,
etc...).
split_batches (`bool`, *optional*, defaults to `False`):
Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If
`True` the actual batch size used will be the same on any kind of distributed processes, but it must be a
round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set
in your script multiplied by the number of processes.
mixed_precision (`str`, *optional*):
Whether or not to use mixed precision training. Choose from 'no', 'fp16', 'bf16' or 'fp8'. Will default to the
value in the environment variable `ACCELERATE_MIXED_PRECISION`, which will use the default value in the
@ -180,15 +175,13 @@ class Accelerator:
cpu (`bool`, *optional*):
Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force
the execution on one process only.
dataloader_config (`DataLoaderConfiguration`, *optional*):
A configuration for how the dataloaders should be handled in distributed scenarios.
deepspeed_plugin ([`~utils.DeepSpeedPlugin`], *optional*):
deepspeed_plugin (`DeepSpeedPlugin`, *optional*):
Tweak your DeepSpeed related args using this argument. This argument is optional and can be configured
directly using *accelerate config*
fsdp_plugin ([`~utils.FullyShardedDataParallelPlugin`], *optional*):
fsdp_plugin (`FullyShardedDataParallelPlugin`, *optional*):
Tweak your FSDP related args using this argument. This argument is optional and can be configured directly
using *accelerate config*
megatron_lm_plugin ([`~utils.MegatronLMPlugin`], *optional*):
megatron_lm_plugin (`MegatronLMPlugin`, *optional*):
Tweak your MegatronLM related args using this argument. This argument is optional and can be configured
directly using *accelerate config*
rng_types (list of `str` or [`~utils.RNGType`]):
@ -211,20 +204,32 @@ class Accelerator:
- `"comet_ml"`
If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can
also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`.
project_config ([`~utils.ProjectConfiguration`], *optional*):
project_config (`ProjectConfiguration`, *optional*):
A configuration for how saving the state can be handled.
project_dir (`str`, `os.PathLike`, *optional*):
A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved
checkpoints.
step_scheduler_with_optimizer (`bool`, *optional*, defaults to `True`):
dispatch_batches (`bool`, *optional*):
If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process
and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose
underlying dataset is an `IterableDataset`, `False` otherwise.
even_batches (`bool`, *optional*, defaults to `True`):
If set to `True`, in cases where the total batch size across all processes does not exactly divide the
dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among
all workers.
use_seedable_sampler (`bool`, *optional*, defaults to `False`):
Whether or not to use a fully seedable random sampler ([`~data_loader.SeedableRandomSampler`]). Comes at a
cost of potentially different performance due to different shuffling algorithms, but will ensure the
training results are fully reproducible.
step_scheduler_with_optimizer (`bool`, *optional*, defaults to `True`):
Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only
done under certain circumstances (at the end of each epoch, for instance).
kwargs_handlers (list of [`~utils.KwargsHandler`], *optional*)
A list of [`~utils.KwargsHandler`] to customize how the objects related to distributed training or mixed
precision are created. See [kwargs](kwargs) for more information.
dynamo_backend (`str` or [`~utils.DynamoBackend`], *optional*, defaults to `"no"`):
kwargs_handlers (`list[KwargHandler]`, *optional*)
A list of `KwargHandler` to customize how the objects related to distributed training or mixed precision
are created. See [kwargs](kwargs) for more information.
dynamo_backend (`str` or `DynamoBackend`, *optional*, defaults to `"no"`):
Set to one of the possible dynamo backends to optimize your training with torch dynamo.
gradient_accumulation_plugin ([`~utils.GradientAccumulationPlugin`], *optional*):
gradient_accumulation_plugin (`GradientAccumulationPlugin`, *optional*):
A configuration for how gradient accumulation should be handled, if more tweaking than just the
`gradient_accumulation_steps` is needed.
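The dataloader-related arguments documented above (`split_batches`, `dispatch_batches`, `even_batches`, `use_seedable_sampler`) are being consolidated into `DataLoaderConfiguration` in this change set; a hedged migration sketch, with values chosen purely for illustration:

```python
# Before (deprecated in this change set):
#   accelerator = Accelerator(split_batches=True, even_batches=True, use_seedable_sampler=True)
# After: group the options in a DataLoaderConfiguration (values are illustrative).
from accelerate import Accelerator, DataLoaderConfiguration

dataloader_config = DataLoaderConfiguration(
    split_batches=True,         # one global batch is split across processes
    dispatch_batches=None,      # decide per dataloader (True for IterableDataset)
    even_batches=True,          # duplicate samples so every process gets a full batch
    use_seedable_sampler=True,  # fully reproducible shuffling
)
accelerator = Accelerator(dataloader_config=dataloader_config)
```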
@ -247,11 +252,10 @@ class Accelerator:
def __init__(
self,
device_placement: bool = True,
split_batches: bool = _split_batches,
split_batches: bool = False,
mixed_precision: PrecisionType | str | None = None,
gradient_accumulation_steps: int = 1,
cpu: bool = False,
dataloader_config: DataLoaderConfiguration | None = None,
deepspeed_plugin: DeepSpeedPlugin | None = None,
fsdp_plugin: FullyShardedDataParallelPlugin | None = None,
megatron_lm_plugin: MegatronLMPlugin | None = None,
@ -260,9 +264,9 @@ class Accelerator:
project_dir: str | os.PathLike | None = None,
project_config: ProjectConfiguration | None = None,
gradient_accumulation_plugin: GradientAccumulationPlugin | None = None,
dispatch_batches: bool | None = _dispatch_batches,
even_batches: bool = _even_batches,
use_seedable_sampler: bool = _use_seedable_sampler,
dispatch_batches: bool | None = None,
even_batches: bool = True,
use_seedable_sampler: bool = False,
step_scheduler_with_optimizer: bool = True,
kwargs_handlers: list[KwargsHandler] | None = None,
dynamo_backend: DynamoBackend | str | None = None,
@ -295,10 +299,7 @@ class Accelerator:
if deepspeed_plugin:
if not is_deepspeed_available():
raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.")
if is_mlu_available():
if compare_versions("deepspeed-mlu", "<", "0.10.1"):
raise ImportError("DeepSpeed MLU version must be >= 0.10.1. Please update DeepSpeed MLU.")
elif compare_versions("deepspeed", "<", "0.9.3"):
if compare_versions("deepspeed", "<", "0.9.3"):
raise ImportError("DeepSpeed version must be >= 0.9.3. Please update DeepSpeed.")
mixed_precision = (
@ -341,8 +342,6 @@ class Accelerator:
self.init_handler = None
self.fp8_recipe_handler = None
self.autocast_handler = None
self.has_lomo_optimizer = False
if kwargs_handlers is not None:
for handler in kwargs_handlers:
assert isinstance(
@ -373,6 +372,8 @@ class Accelerator:
raise ValueError("You can only pass one `AutocastKwargs` in `kwargs_handler`.")
else:
self.autocast_handler = handler
if self.fp8_recipe_handler is None and mixed_precision == "fp8":
self.fp8_recipe_handler = FP8RecipeKwargs()
kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}
self.state = AcceleratorState(
@ -386,16 +387,6 @@ class Accelerator:
**kwargs,
)
self.delayed_fp8_autocast = False
if self.fp8_recipe_handler is not None:
# We already check if FP8 is available during `self.state`
if self.state.mixed_precision != "fp8":
raise ValueError("Passing in a `FP8RecipeKwargs` object requires setting `mixed_precision='fp8'`.")
self.delayed_fp8_autocast = self.fp8_recipe_handler.backend == "TE" and self.distributed_type in (
DistributedType.MULTI_GPU,
DistributedType.FSDP,
)
trackers = filter_trackers(log_with, self.logging_dir)
if len(trackers) < 1 and log_with is not None:
warnings.warn(f"`log_with={log_with}` was passed but no supported trackers are currently installed.")
@ -404,7 +395,7 @@ class Accelerator:
if (
(mixed_precision != "bf16")
and getattr(self.state, "downcast_bfloat", False)
and (self.state.distributed_type != DistributedType.XLA)
and (self.state.distributed_type != DistributedType.TPU)
):
raise ValueError("Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU")
@ -421,58 +412,36 @@ class Accelerator:
self.gradient_state = GradientState(
gradient_accumulation_plugin=gradient_accumulation_plugin,
)
if self.state.distributed_type == DistributedType.TPU:
if self.gradient_state.num_steps != 1:
raise ValueError(
"Gradient accumulation is not supported on TPU. Please set `gradient_accumulation_steps` to 1 and don't pass in a `GradientAccumulationPlugin` object."
)
self.device_placement = device_placement
if dataloader_config is None:
dataloader_config = DataLoaderConfiguration()
self.dataloader_config = dataloader_config
# Deal with deprecated args
# TODO: Remove in v1.0.0
deprecated_dl_args = {}
if dispatch_batches is not _dispatch_batches:
deprecated_dl_args["dispatch_batches"] = dispatch_batches
self.dataloader_config.dispatch_batches = dispatch_batches
if split_batches is not _split_batches:
deprecated_dl_args["split_batches"] = split_batches
self.dataloader_config.split_batches = split_batches
if even_batches is not _even_batches:
deprecated_dl_args["even_batches"] = even_batches
self.dataloader_config.even_batches = even_batches
if use_seedable_sampler is not _use_seedable_sampler:
deprecated_dl_args["use_seedable_sampler"] = use_seedable_sampler
self.dataloader_config.use_seedable_sampler = use_seedable_sampler
if len(deprecated_dl_args) > 0:
values = ", ".join([f"{k}={v}" for k, v in deprecated_dl_args.items()])
warnings.warn(
f"Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: {deprecated_dl_args.keys()}. "
"Please pass an `accelerate.DataLoaderConfiguration` instead: \n"
f"dataloader_config = DataLoaderConfiguration({values})",
FutureWarning,
)
self.split_batches = split_batches
self.dispatch_batches = dispatch_batches
self.even_batches = even_batches
self.use_seedable_sampler = use_seedable_sampler
self.step_scheduler_with_optimizer = step_scheduler_with_optimizer
# Mixed precision attributes
self.scaler = None
self.native_amp = False
err = "{mode} mixed precision requires {requirement}"
if (
self.state.mixed_precision == "fp16"
and self.device.type != "cpu"
and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM)
):
self.native_amp = True
if self.device.type not in ("xpu", "cuda", "npu", "xla", "mlu") or is_torch_xla_available(
check_is_tpu=True
):
raise ValueError(f"fp16 mixed precision requires a GPU (not {self.device.type!r}).")
if self.device.type not in ("xpu", "cuda", "mps", "npu"):
raise ValueError(err.format(mode="fp16", requirement="a GPU"))
kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}
if self.distributed_type == DistributedType.FSDP:
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
self.scaler = ShardedGradScaler(**kwargs)
elif is_torch_xla_available(check_is_gpu=True):
self.scaler = xamp.GradScaler(**kwargs)
elif is_mlu_available():
self.scaler = torch.mlu.amp.GradScaler(**kwargs)
elif is_npu_available():
self.scaler = torch.npu.amp.GradScaler(**kwargs)
else:
@ -486,12 +455,8 @@ class Accelerator:
self.native_amp = True
else:
self.native_amp = is_bf16_available(True)
if mixed_precision == "bf16" and not self.native_amp and not is_torch_xla_available():
raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")
elif self.state.mixed_precision == "fp8":
# We always enable `native_amp` for FP8
self.native_amp = True
if mixed_precision == "bf16" and not self.native_amp and not is_tpu_available():
raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device."))
# Start of internal step tracking
self.step = 0
@ -544,30 +509,6 @@ class Accelerator:
def device(self):
return self.state.device
@property
def split_batches(self):
return self.dataloader_config.split_batches
@property
def dispatch_batches(self):
return self.dataloader_config.dispatch_batches
@property
def even_batches(self):
return self.dataloader_config.even_batches
@even_batches.setter
def even_batches(self, value: bool):
self.dataloader_config.even_batches = value
@property
def use_seedable_sampler(self):
return self.dataloader_config.use_seedable_sampler
@property
def non_blocking(self):
return self.dataloader_config.non_blocking
@property
def project_dir(self):
return self.project_configuration.project_dir
@ -996,14 +937,14 @@ class Accelerator:
model.require_backward_grad_sync = old_require_backward_grad_sync
model.require_forward_param_sync = old_require_forward_param_sync
def _do_sync(self, force: bool = False):
def _do_sync(self):
"Sets the right `sync_gradients` context and either resets or increases `self.step`"
if self.gradient_state.sync_with_dataloader and self.gradient_state.end_of_dataloader:
self.step = 0
self.gradient_state._set_sync_gradients(True)
else:
self.step += 1
self.gradient_state._set_sync_gradients(force or ((self.step % self.gradient_state.num_steps) == 0))
self.gradient_state._set_sync_gradients((self.step % self.gradient_state.num_steps) == 0)
@property
def sync_gradients(self):
@ -1049,9 +990,7 @@ class Accelerator:
... optimizer.zero_grad()
```
"""
# sync_each_batch=True will guarantee below that self.sync_gradients=True, therefore
# resulting in the nullcontext always being selected.
self._do_sync(force=self.gradient_state.plugin_kwargs.get("sync_each_batch", False))
self._do_sync()
with contextlib.ExitStack() as cm_stack:
for m in models:
cm_stack.enter_context(contextlib.nullcontext() if self.sync_gradients else self.no_sync(m))
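The `force` argument above is fed by the `sync_each_batch` plugin kwarg. A hedged sketch of how that option would be supplied; the option name comes from the diff, while the model, data and step count are illustrative:

```python
# Sketch: accumulate gradients over 4 steps, but force a gradient sync on every batch.
import torch
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

plugin = GradientAccumulationPlugin(num_steps=4, sync_each_batch=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
dataset = torch.utils.data.TensorDataset(torch.randn(64, 4), torch.randn(64, 1))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```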
@ -1102,12 +1041,7 @@ class Accelerator:
... optimizer.zero_grad()
```
"""
if self.distributed_type in (
DistributedType.MULTI_GPU,
DistributedType.MULTI_NPU,
DistributedType.MULTI_MLU,
DistributedType.MULTI_XPU,
):
if self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU):
dl_even_batches_values = []
if even_batches is not None:
@ -1258,7 +1192,7 @@ class Accelerator:
# On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will
# have parameters disconnected from the model (so no training :-( ).
# If the model and optimizer have parameters on different devices we raise an error.
if self.distributed_type == DistributedType.XLA:
if self.distributed_type == DistributedType.TPU:
model_device, optimizer_device = self._get_devices()
if model_device is not None and optimizer_device is not None and model_device != optimizer_device:
raise ValueError(
@ -1270,7 +1204,7 @@ class Accelerator:
)
# If we're dealing with device placement, this deals with that by...
tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.XLA
tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.TPU
if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"):
# 1. grabbing old model parameters
old_named_params = self._get_named_parameters(*args)
@ -1309,7 +1243,7 @@ class Accelerator:
item in container
for container in (self._dataloaders, self._models, self._optimizers, self._schedulers)
):
item._is_accelerate_prepared = True
setattr(item, "_is_accelerate_prepared", True)
return result if len(result) > 1 else result[0]
@ -1363,22 +1297,18 @@ class Accelerator:
model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model)
else:
model.forward = convert_outputs_to_fp32(new_forward)
# We prepare fp8 after, allowing for bf16 autocast to happen first
if getattr(self.fp8_recipe_handler, "backend", None) == "TE":
elif self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE":
if not has_transformer_engine_layers(model):
with torch.no_grad():
convert_model(model)
model._converted_to_transformer_engine = True
model._original_forward = model.forward
kwargs = self.fp8_recipe_handler.to_kwargs() if self.fp8_recipe_handler is not None else {}
if "fp8_format" in kwargs:
kwargs["fp8_format"] = getattr(te_recipe.Format, kwargs["fp8_format"])
fp8_recipe = te_recipe.DelayedScaling(**kwargs)
# If we are in DDP or FSDP, we delay `autocast` until after FSDP/DDP has been initialized
# to make use of the process group
if not self.delayed_fp8_autocast:
model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe)(model.forward)
model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe)(model.forward)
if (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)) and getattr(
model, "hf_device_map", False
@ -1390,19 +1320,16 @@ class Accelerator:
" In order to use 8-bit models that have been loaded across multiple GPUs the solution is to use Naive Pipeline Parallelism."
" Therefore you should not specify that you are under any distributed regime in your accelerate config."
)
elif len(model_devices) == 1:
current_device = list(model_devices)[0]
current_device_index = (
current_device.index if isinstance(current_device, torch.device) else current_device
)
current_device = list(model_devices)[0]
current_device_index = current_device.index if isinstance(current_device, torch.device) else current_device
if torch.device(current_device_index) != self.device:
# if on the first device (GPU 0) we don't care
if (self.device.index is not None) or (current_device_index != 0):
raise ValueError(
"You can't train a model that has been loaded in 8-bit precision on a different device than the one "
"you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device() or device_map={'':torch.xpu.current_device()}"
)
if torch.device(current_device_index) != self.device:
# if on the first device (GPU 0) we don't care
if (self.device.index is not None) or (current_device_index != 0):
raise ValueError(
"You can't train a model that has been loaded in 8-bit precision on a different device than the one "
"you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device() or device_map={'':torch.xpu.current_device()}"
)
if "cpu" in model_devices or "disk" in model_devices:
raise ValueError(
@ -1413,7 +1340,6 @@ class Accelerator:
if not evaluation_mode:
if self.distributed_type in (
DistributedType.MULTI_GPU,
DistributedType.MULTI_MLU,
DistributedType.MULTI_NPU,
DistributedType.MULTI_XPU,
):
@ -1472,73 +1398,6 @@ class Accelerator:
),
auto_wrap_policy=fsdp_plugin.auto_wrap_policy,
)
# In the event the model had been loaded in low precision, but
# mixed precision had also been activated, then we follow DeepSpeed's
# strategy to hold the parameters in full precision.
# - assume that trainer.args.bf16 and trainer.args.fp16 are already checked against
# fsdp_plugin.mixed_precision_policy.
# - NOTE: we do not check the mixed_precision attribute on the FSDP root wrapper.
# * this attribute will always be set by init_utils.init_core_state so it's always not None.
# * mixed_precision.param_dtype only regards _fwd_bwd_param_dtype
# * if model is loaded in 16bit, and even if mixed_precision.param_dtype is None,
# we still want to upcast the flat_param.
if self.mixed_precision != "no": # if mixed precision is set
upcasted_log = []
for module in FSDP.fsdp_modules(model):
# Referencing DeepSpeed Zero3
# - in Init, params are converted to 16bit while partitioning.
# - in accelerator.prepare, deepspeed.initialize is called to:
# * creates the DeepSpeedEngine.
# * since zero_optimization() is True, calls engine._configure_zero_optimizer.
#
# Inside the DeepSpeed Zero3 optimizer configuration, which initializes
# DeepSpeedZeroOptimizer_Stage3, during which:
# * trainable_param_groups are obtained from the attached optimizer
# (already partitioned in 16bit).
# * then _setup_for_real_optimizer -> _create_fp32_partitions
# which performs the fp32 upcasting.
# To mimic DeepSpeed's casting in FSDP, we look at the (single) FlatParameter held
# within an FSDP wrapper. This FlatParameter will be seen by the optimizer.
# - even though there is a torch.device('meta') guard below, we
# expect _init_utils._init_param_handle_from_module to already
# sync the parameter.
if not module._has_params:
continue # skip if FSDP module not managing parameters
param = module._flat_param
if (
param.dtype != torch.float32
and param.device != torch.device("meta")
and param.requires_grad
):
# keep log of names_params that was upcasted
# NOTE: resorted to this because warnings.simplefilter("once") is somehow not working
name_param_log = (module.module.__class__.__name__, ", ".join(module._flat_param._fqns))
if name_param_log not in upcasted_log:
upcasted_log.append(name_param_log)
# this works because of FSDP's _runtime_utils.lazy_init.
# Have to be careful not to call anything before this that
# triggers lazy_init (e.g., _is_fsdp_root).
param.data = param.data.to(torch.float32) # upcasting
module._handle._orig_param_dtype = torch.float32 # update
# report the warnings
# some messages can be quite repetitive, especially when reporting about layers that have identical architecture.
if self.is_main_process:
for name_log, param_log in upcasted_log:
warnings.warn(
f"Upcasted low precision parameters in {name_log} because mixed precision turned on in FSDP. "
f"Affects: {param_log}."
)
if len(upcasted_log) > 0:
warnings.warn(
"FSDP upcast of low precision parameters may affect the precision of model checkpoints."
)
# if the previous and current models are same, delete the previous one
if len(self._models) > 1 and (self._models[-2] is self._models[-1]):
del self._models[-2]
@ -1546,13 +1405,8 @@ class Accelerator:
elif self.distributed_type == DistributedType.MULTI_CPU:
kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)
elif self.distributed_type == DistributedType.XLA and self.state.fork_launched:
elif self.distributed_type == DistributedType.TPU and self.state.fork_launched:
model = xmp.MpModelWrapper(model).to(self.device)
# Now we can apply the FP8 autocast
if self.delayed_fp8_autocast:
model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe, fp8_group=model.process_group)(
model.forward
)
# torch.compile should be called last and only if the model isn't already compiled.
if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model):
if not is_torch_version(">=", "2.0"):
@ -1571,7 +1425,7 @@ class Accelerator:
for obj in args
]
if deepspeed_plugin.is_auto("train_micro_batch_size_per_gpu"):
if deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] == "auto":
if is_dataloader_present:
batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")]
if any(bs is None for bs in batch_sizes):
@ -1597,7 +1451,7 @@ class Accelerator:
"or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`."
)
else:
batch_size_per_device = deepspeed_plugin.get_value("train_micro_batch_size_per_gpu")
batch_size_per_device = deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"]
# handle `gradient_accumulation_steps` when the value is `auto`
deepspeed_plugin.fill_match(
@ -1609,7 +1463,7 @@ class Accelerator:
config_kwargs = {
"train_micro_batch_size_per_gpu": batch_size_per_device,
"train_batch_size": batch_size_per_device
* deepspeed_plugin.get_value("gradient_accumulation_steps")
* deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]
* self.num_processes,
"gradient_clipping": 1.0,
"zero_optimization.stage3_gather_16bit_weights_on_model_save": False,
@ -1668,15 +1522,20 @@ class Accelerator:
)
if model is not None:
# if the model is an MOE, set the appropriate MOE layers as leaf Z3 modules
deepspeed_plugin.set_moe_leaf_modules(model)
ds_config = deepspeed_plugin.deepspeed_config
# deal with config keys that use `auto` value and rely on model's hidden_size
hidden_size_based_keys = [
"zero_optimization.reduce_bucket_size",
"zero_optimization.stage3_prefetch_bucket_size",
"zero_optimization.stage3_param_persistence_threshold",
]
hidden_size_auto_keys = [x for x in hidden_size_based_keys if deepspeed_plugin.is_auto(x)]
def is_auto(ds_config, ds_key_long):
nodes = ds_key_long.split(".")
val = ds_config.get(nodes[0], {}).get(nodes[1], None)
return False if val is None else val == "auto"
hidden_size_auto_keys = [x for x in hidden_size_based_keys if is_auto(ds_config, x)]
if len(hidden_size_auto_keys) > 0:
reasoning = (
"therefore it's not possible to automatically fill out the following `auto` entries "
@ -1693,7 +1552,7 @@ class Accelerator:
hidden_size = max(model.config.hidden_sizes)
else:
raise ValueError(
"Can find neither `model.config.hidden_size` nor `model.config.hidden_sizes`, " + reasoning
"Can't find neither `model.config.hidden_size` nor `model.config.hidden_sizes`, " + reasoning
)
config_kwargs.update(
@ -1745,7 +1604,10 @@ class Accelerator:
optimizer = DeepSpeedCPUAdam(optimizer.param_groups, **defaults)
kwargs["optimizer"] = optimizer
if scheduler is not None:
if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES:
if (
isinstance(scheduler, LRScheduler)
or type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES
):
kwargs["lr_scheduler"] = scheduler
engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs)
@ -1900,11 +1762,10 @@ class Accelerator:
for obj in result:
if isinstance(obj, torch.nn.Module):
model = obj
model.train()
elif isinstance(obj, (torch.optim.Optimizer)):
optimizer = obj
if optimizer is not None and model is not None:
dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else None
dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else torch.float32
if self.device.type == "xpu" and is_xpu_available():
model = model.to(self.device)
model, optimizer = torch.xpu.optimize(
@ -1956,7 +1817,10 @@ class Accelerator:
return tuple(result)
def prepare_data_loader(
self, data_loader: torch.utils.data.DataLoader, device_placement=None, slice_fn_for_dispatch=None
self,
data_loader: torch.utils.data.DataLoader,
device_placement=None,
slice_fn_for_dispatch=None,
):
"""
Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use
@ -1973,6 +1837,7 @@ class Accelerator:
[`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will
be ignored otherwise.
Example:
```python
@ -1990,7 +1855,7 @@ class Accelerator:
self._dataloaders.append(data_loader)
return data_loader
if device_placement is None:
device_placement = self.device_placement if self.distributed_type != DistributedType.XLA else False
device_placement = self.device_placement if self.distributed_type != DistributedType.TPU else False
prepared_data_loader = prepare_data_loader(
data_loader,
self.device,
@ -2003,7 +1868,6 @@ class Accelerator:
even_batches=self.even_batches,
slice_fn_for_dispatch=slice_fn_for_dispatch,
use_seedable_sampler=self.use_seedable_sampler,
non_blocking=self.non_blocking,
)
self._dataloaders.append(prepared_data_loader)
return prepared_data_loader
@ -2030,14 +1894,6 @@ class Accelerator:
>>> optimizer = accelerator.prepare_optimizer(optimizer, device_placement=True)
```
"""
if is_lomo_available():
# We need to import locally to avoid circular imports since lomo imports stuff from
# transformers & accelerate
from lomo_optim import AdaLomo, Lomo
# Support multiple optimizers: https://github.com/huggingface/accelerate/pull/2695#discussion_r1589164607
self.has_lomo_optimizer |= isinstance(optimizer, (Lomo, AdaLomo))
# Ensure we can't double wrap an optimizer due to `find_batch_size`
if getattr(optimizer, "_is_accelerate_prepared", False):
if optimizer not in self._optimizers:
@ -2108,8 +1964,6 @@ class Accelerator:
>>> accelerator.backward(loss)
```
"""
learning_rate = kwargs.get("learning_rate")
if self.distributed_type != DistributedType.DEEPSPEED:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.gradient_accumulation_steps
@ -2119,8 +1973,6 @@ class Accelerator:
return
elif self.scaler is not None:
self.scaler.scale(loss).backward(**kwargs)
elif learning_rate is not None and self.has_lomo_optimizer:
self.lomo_backward(loss, learning_rate)
else:
loss.backward(**kwargs)
@ -2216,6 +2068,10 @@ class Accelerator:
for opt in optimizer:
while isinstance(opt, AcceleratedOptimizer):
opt = opt.optimizer
# Reduce gradients first for XLA
if self.distributed_type == DistributedType.TPU:
gradients = xm._fetch_gradients(opt)
self.reduce(gradients, scale=1.0 / self.num_processes)
self.scaler.unscale_(opt)
def clip_grad_norm_(self, parameters, max_norm, norm_type=2):
@ -2253,19 +2109,6 @@ class Accelerator:
# `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
# We cannot return the gradient norm because DeepSpeed does it.
return None
elif self.distributed_type == DistributedType.XLA:
# Reduce gradients first for XLA
for acc_opt in self._optimizers:
if not acc_opt.gradient_state.is_xla_gradients_synced:
opt = acc_opt
while isinstance(opt, AcceleratedOptimizer):
opt = opt.optimizer
gradients = xm._fetch_gradients(opt)
# Use xm.all_reduce to perform an in-place all-reduce. Recursively all-reducing each tensor
# one by one in self.reduce is not in-place.
xm.all_reduce("sum", gradients, scale=1.0 / self.num_processes)
# Set is_xla_gradients_synced to True to avoid all-reduce twice in the AcceleratedOptimizer step.
acc_opt.gradient_state.is_xla_gradients_synced = True
self.unscale_gradients()
return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)
@ -2328,7 +2171,7 @@ class Accelerator:
"""
return gather(tensor)
def gather_for_metrics(self, input_data, use_gather_object=False):
def gather_for_metrics(self, input_data):
"""
Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be
used for gathering the inputs and targets for metric calculation.
@ -2336,11 +2179,6 @@ class Accelerator:
Args:
input (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`):
The tensors or objects for calculating metrics across all processes
use_gather_object(`bool`):
Whether to forcibly use gather_object instead of gather (which is already done if all objects passed do
not contain tensors). This flag can be useful for gathering tensors with different sizes that we don't
want to pad and concatenate along the first dimension. Using it with GPU tensors is not well supported
and inefficient as it incurs GPU -> CPU transfer since tensors would be pickled.
Example:
@ -2365,9 +2203,7 @@ class Accelerator:
except TypeError:
all_tensors = False
use_gather_object = use_gather_object or not all_tensors
if use_gather_object:
if not all_tensors:
data = gather_object(input_data)
else:
data = self.gather(input_data)
@ -2386,11 +2222,7 @@ class Accelerator:
def _adjust_samples(tensor):
return tensor[: self.gradient_state.remainder]
if use_gather_object:
# gather_object put the objects in a list
return _adjust_samples(data)
else:
return recursively_apply(_adjust_samples, data)
return recursively_apply(_adjust_samples, data)
else: # remainder is 0
# no remainder even though at end of dataloader, so nothing to do.
return data
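A hedged sketch of the new `use_gather_object` flag described above, for gathering per-process tensors whose shapes differ and should not be padded and concatenated (the shapes here are illustrative):

```python
# Sketch: gather variable-length results as objects. Note the docstring's caveat that
# this pickles tensors (GPU -> CPU transfer), so prefer it for small, CPU-side results.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
# Each process produces a tensor with a different first dimension (illustrative).
local_preds = torch.arange(3 + accelerator.process_index)
gathered = accelerator.gather_for_metrics(local_preds, use_gather_object=True)
```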
@ -2565,7 +2397,7 @@ class Accelerator:
self.trackers.append(tracker)
else:
tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)]
if tracker_init.requires_logging_directory:
if getattr(tracker_init, "requires_logging_directory"):
# We can skip this check since it was done in `__init__`
self.trackers.append(
tracker_init(project_name, self.logging_dir, **init_kwargs.get(str(tracker), {}))
@ -2894,7 +2726,7 @@ class Accelerator:
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving current state to {output_dir}")
if self.distributed_type == DistributedType.XLA:
if self.distributed_type == DistributedType.TPU:
# Finish running the previous step before checkpointing
xm.mark_step()
@ -3092,7 +2924,6 @@ class Accelerator:
if map_location is None:
if self.num_processes > 1 and self.distributed_type in (
DistributedType.MULTI_GPU,
DistributedType.MULTI_MLU,
DistributedType.MULTI_NPU,
):
map_location = "on_device"
@ -3125,7 +2956,7 @@ class Accelerator:
for index, obj in enumerate(self._custom_objects):
load_custom_state(obj, input_dir, index)
def free_memory(self, *objects):
def free_memory(self):
"""
Will release all references to the internal objects stored and call the garbage collector. You should call this
method between two trainings with different models/optimizers. Also will reset `Accelerator.step` to 0.
@ -3138,23 +2969,19 @@ class Accelerator:
>>> accelerator = Accelerator()
>>> model, optimizer, scheduler = ...
>>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
>>> model, optimizer, scheduler = accelerator.free_memory(model, optimizer, scheduler)
>>> accelerator.free_memory()
>>> del model, optimizer, scheduler
```
"""
# Deepspeed needs a bit more prep that should be done first
if hasattr(self, "deepspeed_engine_wrapped"):
if self.deepspeed_engine_wrapped is not None:
self.deepspeed_engine_wrapped.engine.destroy()
self.deepspeed_engine_wrapped = None
objects = release_memory(*objects)
self._schedulers = []
self._optimizers = []
self._models = []
self._dataloaders = []
self.deepspeed_engine_wrapped = None
self.step = 0
return objects
release_memory()
def clear(self, *objects):
def clear(self):
"""
Alias for [`Accelerator.free_memory`], releases all references to the internal objects stored and calls the
garbage collector. You should call this method between two trainings with different models/optimizers.
@ -3167,10 +2994,11 @@ class Accelerator:
>>> accelerator = Accelerator()
>>> model, optimizer, scheduler = ...
>>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
>>> model, optimizer, scheduler = accelerator.clear(model, optimizer, scheduler)
>>> accelerator.free_memory()
>>> del model, optimizer, scheduler
```
"""
return self.free_memory(*objects)
self.free_memory()
def _get_named_parameters(self, *args):
named_parameters = {}
@ -3322,7 +3150,6 @@ class Accelerator:
autocast_handler = self.autocast_handler
autocast_context = get_mixed_precision_context_manager(self.native_amp, autocast_handler)
autocast_context.__enter__()
# TODO: should the `yield` be in a try/finally block?
yield
autocast_context.__exit__(*sys.exc_info())
@ -3383,27 +3210,3 @@ class Accelerator:
return True
return False
def lomo_backward(self, loss: torch.Tensor, learning_rate: float) -> None:
"""
Runs backward pass on LOMO optimizers.
"""
if is_lomo_available():
# We need to import locally to avoid circular imports since lomo imports stuff from
# transformers & accelerate
from lomo_optim import AdaLomo, Lomo
if learning_rate is None:
raise ValueError("A learning rate must be passed in order to call backward pass with LOMO optimizers.")
_backward_called = False
for optimizer in self._optimizers:
if isinstance(optimizer.optimizer, (Lomo, AdaLomo)):
optimizer.optimizer.fused_backward(loss, learning_rate)
_backward_called = True
if not _backward_called:
raise ValueError(
"Backward pass not properly called on LOMO optimizers. Are you sure you passed a LOMO optimizer in accelerator.prepare()?"
)
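A hedged sketch of the calling convention implied by `lomo_backward`, assuming the optional `lomo_optim` package is installed; the `Lomo` constructor arguments and the toy model/data are assumptions, not taken from the diff:

```python
# Sketch only: LOMO fuses the optimizer step into the backward pass, so the learning
# rate is passed to `accelerator.backward` and routed to `lomo_backward` above.
import torch
from accelerate import Accelerator
from lomo_optim import Lomo  # optional dependency; constructor signature assumed

accelerator = Accelerator()
model = torch.nn.Linear(4, 1)
optimizer = Lomo(model, lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

inputs, targets = torch.randn(8, 4), torch.randn(8, 1)
loss = torch.nn.functional.mse_loss(model(inputs), targets)
accelerator.backward(loss, learning_rate=1e-3)
```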

View File

@ -31,22 +31,18 @@ from .hooks import (
)
from .utils import (
OffloadedWeightsLoader,
check_cuda_p2p_ib_support,
check_device_map,
extract_submodules_state_dict,
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
is_mlu_available,
is_npu_available,
is_torch_version,
is_xpu_available,
load_checkpoint_in_model,
offload_state_dict,
parse_flag_from_env,
retie_parameters,
)
from .utils.other import recursive_getattr
logger = logging.getLogger(__name__)
@ -127,7 +123,6 @@ def init_on_device(device: torch.device, include_buffers: bool = None):
if param is not None:
param_cls = type(module._parameters[name])
kwargs = module._parameters[name].__dict__
kwargs["requires_grad"] = param.requires_grad
module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
def register_empty_buffer(module, name, buffer, persistent=True):
@ -397,25 +392,10 @@ def dispatch_model(
weights_map = OffloadedWeightsLoader(
state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device
)
print(weights_map)
else:
weights_map = None
# When dispatching the model's parameters to the devices specified in device_map, we want to avoid allocating memory several times for the
# tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its
# original pointer) on each devices.
tied_params = find_tied_parameters(model)
tied_params_map = {}
for group in tied_params:
for param_name in group:
# data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need
# to care about views of tensors through storage_offset.
data_ptr = recursive_getattr(model, param_name).data_ptr()
tied_params_map[data_ptr] = {}
# Note: To handle the disk offloading case, we can not simply use weights_map[param_name].data_ptr() as the reference pointer,
# as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
attach_align_device_hook_on_blocks(
model,
execution_device=execution_device,
@ -424,7 +404,6 @@ def dispatch_model(
weights_map=weights_map,
skip_keys=skip_keys,
preload_module_classes=preload_module_classes,
tied_params_map=tied_params_map,
)
# warn if there is any params on the meta device
@ -443,13 +422,7 @@ def dispatch_model(
def add_warning(fn, model):
@wraps(fn)
def wrapper(*args, **kwargs):
warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks."
if str(fn.__name__) == "to":
to_device = torch._C._nn._parse_to(*args, **kwargs)[0]
if to_device is not None:
logger.warning(warning_msg)
else:
logger.warning(warning_msg)
logger.warning("You shouldn't move a model when it is dispatched on multiple devices.")
for param in model.parameters():
if param.device == torch.device("meta"):
raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
@ -460,38 +433,21 @@ def dispatch_model(
model.to = add_warning(model.to, model)
if is_npu_available():
model.npu = add_warning(model.npu, model)
elif is_mlu_available():
model.mlu = add_warning(model.mlu, model)
elif is_xpu_available():
model.xpu = add_warning(model.xpu, model)
else:
model.cuda = add_warning(model.cuda, model)
# Check if we are using multi-gpus with RTX 4000 series
use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1
if use_multi_gpu and not check_cuda_p2p_ib_support():
logger.warning(
"We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. "
"This can affect the multi-gpu inference when using accelerate device_map."
"Please make sure to update your driver to the latest version which resolves this."
)
else:
device = list(device_map.values())[0]
# `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
if is_npu_available() and isinstance(device, int):
device = f"npu:{device}"
elif is_mlu_available() and isinstance(device, int):
device = f"mlu:{device}"
elif is_xpu_available() and isinstance(device, int):
device = f"xpu:{device}"
if device != "disk":
model.to(device)
else:
raise ValueError(
"You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead."
)
# Convert OrderedDict back to dict for easier usage
model.hf_device_map = dict(device_map)
model.hf_device_map = device_map
return model
@ -508,7 +464,6 @@ def load_checkpoint_and_dispatch(
skip_keys: Optional[Union[str, List[str]]] = None,
preload_module_classes: Optional[List[str]] = None,
force_hooks: bool = False,
strict: bool = False,
):
"""
Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
@ -555,9 +510,6 @@ def load_checkpoint_and_dispatch(
force_hooks (`bool`, *optional*, defaults to `False`):
Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
single device.
strict (`bool`, *optional*, defaults to `False`):
Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's
state_dict.
Example:
@ -596,11 +548,7 @@ def load_checkpoint_and_dispatch(
low_zero=(device_map == "balanced_low_0"),
)
device_map = infer_auto_device_map(
model,
max_memory=max_memory,
no_split_module_classes=no_split_module_classes,
dtype=dtype,
offload_buffers=offload_buffers,
model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype
)
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
offload_state_dict = True
@ -612,7 +560,6 @@ def load_checkpoint_and_dispatch(
dtype=dtype,
offload_state_dict=offload_state_dict,
offload_buffers=offload_buffers,
strict=strict,
)
if device_map is None:
return model
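A hedged usage sketch of the `strict` and `offload_buffers` arguments wired through above; the model and checkpoint path are placeholders:

```python
# Sketch: the checkpoint path is a placeholder, not a real artifact.
import torch
from accelerate import init_empty_weights, load_checkpoint_and_dispatch

with init_empty_weights():
    model = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.Linear(16, 4))

model = load_checkpoint_and_dispatch(
    model,
    checkpoint="path/to/checkpoint.safetensors",  # placeholder path
    device_map="auto",
    offload_buffers=True,  # now also forwarded to infer_auto_device_map
    strict=True,           # fail if checkpoint keys don't match the model's state_dict
)
```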

View File

@ -32,13 +32,13 @@ from .utils import (
SCHEDULER_NAME,
WEIGHTS_NAME,
get_pretty_name,
is_torch_xla_available,
is_tpu_available,
is_xpu_available,
save,
)
if is_torch_xla_available():
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
from .logging import get_logger
@ -120,7 +120,8 @@ def save_accelerator_state(
from .data_loader import IterableDatasetShard, SeedableRandomSampler
if isinstance(dataloader.dataset, IterableDatasetShard):
sampler = dataloader.get_sampler()
sampler = dataloader.sampler.sampler
if isinstance(sampler, SeedableRandomSampler):
save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}")
@ -141,7 +142,7 @@ def save_accelerator_state(
states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all()
else:
states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all()
if is_torch_xla_available():
if is_tpu_available():
states["xm_seed"] = xm.get_rng_state()
output_states_file = output_dir.joinpath(states_name)
torch.save(states, output_states_file)
@ -226,9 +227,10 @@ def load_accelerator_state(
from .data_loader import IterableDatasetShard, SeedableRandomSampler
if isinstance(dataloader.dataset, IterableDatasetShard):
sampler = dataloader.get_sampler()
sampler = dataloader.sampler.sampler
if isinstance(sampler, SeedableRandomSampler):
sampler = dataloader.set_sampler(torch.load(input_sampler_file))
dataloader.sampler.sampler = torch.load(input_sampler_file)
logger.info("All dataloader sampler states loaded successfully")
# GradScaler state
@ -247,7 +249,7 @@ def load_accelerator_state(
torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"])
else:
torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"])
if is_torch_xla_available():
if is_tpu_available():
xm.set_rng_state(states["xm_seed"])
logger.info("All random states loaded successfully")
except Exception:

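For context, a minimal sketch of the checkpointing entry points that exercise the sampler-state handling above; the checkpoint directory is a placeholder:

```python
# Sketch: save_state() writes model/optimizer/sampler/RNG states to a directory and
# load_state() restores them, including the SeedableRandomSampler state when
# use_seedable_sampler=True is set on the DataLoaderConfiguration.
from accelerate import Accelerator, DataLoaderConfiguration

accelerator = Accelerator(
    dataloader_config=DataLoaderConfiguration(use_seedable_sampler=True)
)
# ... prepare model/optimizer/dataloaders and train for a while ...
accelerator.save_state("checkpoint_dir")  # placeholder directory
# later, to resume:
accelerator.load_state("checkpoint_dir")
```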
View File

@ -1,13 +0,0 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -14,17 +14,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.estimate import estimate_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
from accelerate.commands.utils import CustomArgumentParser
def main():
parser = CustomArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
subparsers = parser.add_subparsers(help="accelerate command helpers")
# Register commands

View File

@ -20,7 +20,6 @@ from ...utils import (
ComputeEnvironment,
DistributedType,
is_deepspeed_available,
is_mlu_available,
is_mps_available,
is_npu_available,
is_transformers_available,
@ -49,7 +48,7 @@ from .config_utils import (
def get_cluster_input():
distributed_type = _ask_options(
"Which type of machine are you using?",
["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "multi-MLU", "TPU"],
["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "TPU"],
_convert_distributed_mode,
)
@ -65,7 +64,6 @@ def get_cluster_input():
if distributed_type in [
DistributedType.MULTI_GPU,
DistributedType.MULTI_MLU,
DistributedType.MULTI_NPU,
DistributedType.MULTI_XPU,
DistributedType.MULTI_CPU,
@ -118,7 +116,6 @@ def get_cluster_input():
use_cpu = False
ipex_config = {}
mpirun_config = {}
if use_cpu:
ipex_config["ipex"] = _ask_field(
"Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:",
@ -126,26 +123,10 @@ def get_cluster_input():
default=False,
error_message="Please enter yes or no.",
)
if distributed_type == DistributedType.MULTI_CPU:
use_mpirun = _ask_field(
"Do you want accelerate to launch mpirun? [yes/NO]: ",
_convert_yes_no_to_bool,
default=False,
error_message="Please enter yes or no.",
)
if use_mpirun:
mpirun_hostfile = _ask_field(
"Please enter the path to the hostfile to use with mpirun [~/hostfile]: ",
str,
default="~/hostfile",
)
mpirun_config["mpirun_hostfile"] = os.path.expanduser(mpirun_hostfile.strip())
mpirun_config["mpirun_ccl"] = _ask_field("Enter the number of oneCCL worker threads [1]: ", default=1)
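For orientation, the mpirun questions above follow the same prompt-and-normalise pattern as the rest of the questionnaire. A rough standalone equivalent using plain input(); the _ask_field helper itself is not reproduced here:

import os

# Ask a yes/no question, then collect and expand a hostfile path if needed.
def ask_yes_no(question, default=False):
    answer = input(question).strip().lower()
    return default if answer == "" else answer in ("y", "yes")

mpirun_config = {}
if ask_yes_no("Do you want accelerate to launch mpirun? [yes/NO]: "):
    hostfile = input("Please enter the path to the hostfile [~/hostfile]: ").strip() or "~/hostfile"
    mpirun_config["mpirun_hostfile"] = os.path.expanduser(hostfile)
    mpirun_config["mpirun_ccl"] = int(input("Enter the number of oneCCL worker threads [1]: ") or 1)
print(mpirun_config)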
if (
not use_cpu
and is_xpu_available()
and distributed_type
not in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.XLA]
and distributed_type not in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.TPU]
):
ipex_config["use_xpu"] = _ask_field(
"Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:",
@ -198,17 +179,7 @@ def get_cluster_input():
use_mps = not use_cpu and is_mps_available()
deepspeed_config = {}
if (
distributed_type
in [
DistributedType.MULTI_GPU,
DistributedType.MULTI_XPU,
DistributedType.MULTI_NPU,
DistributedType.MULTI_MLU,
DistributedType.NO,
]
and not use_mps
):
if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.NO] and not use_mps:
use_deepspeed = _ask_field(
"Do you want to use DeepSpeed? [yes/NO]: ",
_convert_yes_no_to_bool,
@ -298,18 +269,6 @@ def get_cluster_input():
"When `zero3_init_flag` is set, it requires Transformers to be installed. "
"Please run `pip3 install transformers`."
)
use_moe = _ask_field(
"Do you want to enable Mixture-of-Experts training (MoE)? [yes/NO]: ",
_convert_yes_no_to_bool,
default=False,
error_message="Please enter yes or no.",
)
if use_moe:
deepspeed_config["deepspeed_moe_layer_cls_names"] = _ask_field(
"Specify the comma-separated list of transformers MoE layer class names (case-sensitive), e.g : "
" `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ... : ",
str,
)
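The MoE prompt above collects a plain comma-separated string; splitting it into usable class names is straightforward (the class names below are just the examples quoted in the prompt):

# Turn the comma-separated answer into a clean list of layer class names.
raw_answer = "MixtralSparseMoeBlock, Qwen2MoeSparseMoeBlock"
moe_layer_cls_names = [name.strip() for name in raw_answer.split(",") if name.strip()]
print(moe_layer_cls_names)  # ['MixtralSparseMoeBlock', 'Qwen2MoeSparseMoeBlock']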
if num_machines > 1:
launcher_query = "Which Type of launcher do you want to use?"
@ -354,12 +313,7 @@ def get_cluster_input():
)
fsdp_config = {}
if distributed_type in [
DistributedType.MULTI_GPU,
DistributedType.MULTI_NPU,
DistributedType.MULTI_MLU,
DistributedType.MULTI_XPU,
]:
if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU]:
use_fsdp = _ask_field(
"Do you want to use FullyShardedDataParallel? [yes/NO]: ",
_convert_yes_no_to_bool,
@ -446,12 +400,6 @@ def get_cluster_input():
default=True,
error_message="Please enter yes or no.",
)
fsdp_config["fsdp_activation_checkpointing"] = _ask_field(
"Do you want to enable FSDP activation checkpointing? [yes/NO]: ",
_convert_yes_no_to_bool,
default=False,
error_message="Please enter yes or no.",
)
megatron_lm_config = {}
if distributed_type in [DistributedType.MULTI_GPU]:
@ -528,15 +476,12 @@ def get_cluster_input():
DistributedType.MULTI_CPU,
DistributedType.MULTI_XPU,
DistributedType.MULTI_GPU,
DistributedType.MULTI_MLU,
DistributedType.MULTI_NPU,
DistributedType.XLA,
DistributedType.TPU,
]:
machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "")
if machine_type == "TPU":
machine_type += " cores"
elif machine_type == "CPU":
machine_type = "processes"
else:
machine_type += "(s)"
num_processes = _ask_field(
@ -564,7 +509,6 @@ def get_cluster_input():
distributed_type
in [
DistributedType.MULTI_GPU,
DistributedType.MULTI_MLU,
DistributedType.MULTI_NPU,
DistributedType.MULTI_XPU,
DistributedType.NO,
@ -574,8 +518,6 @@ def get_cluster_input():
):
if is_npu_available():
machine_type = "NPU(s)"
elif is_mlu_available():
machine_type = "MLU(s)"
else:
machine_type = "GPU(s)"
gpu_ids = _ask_field(
@ -583,17 +525,7 @@ def get_cluster_input():
default="all",
)
# CPU affinity is only supported on NVIDIA hardware for now
enable_cpu_affinity = False
if distributed_type in (DistributedType.NO, DistributedType.MULTI_GPU) and not use_cpu and not use_mps:
enable_cpu_affinity = _ask_field(
"Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: ",
_convert_yes_no_to_bool,
default=False,
error_message="Please enter yes or no.",
)
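As background for the NUMA-efficiency question above, "CPU affinity" simply means pinning a process to a subset of cores. A minimal, Linux-only illustration, unrelated to Accelerate's own implementation:

import os

# Pin the current process to the first half of its allowed CPUs (Linux only).
if hasattr(os, "sched_setaffinity"):
    allowed = sorted(os.sched_getaffinity(0))
    subset = set(allowed[: max(1, len(allowed) // 2)])
    os.sched_setaffinity(0, subset)
    print(f"Pinned to CPUs: {sorted(subset)}")
else:
    print("sched_setaffinity is not available on this platform")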
if distributed_type == DistributedType.XLA:
if distributed_type == DistributedType.TPU:
mixed_precision = "no"
main_training_function = _ask_field(
"What is the name of the function in your script that should be launched in all parallel scripts? [main]: ",
@ -684,7 +616,7 @@ def get_cluster_input():
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
)
if distributed_type == DistributedType.XLA and mixed_precision == "bf16":
if distributed_type == DistributedType.TPU and mixed_precision == "bf16":
tpu_downcast_bf16 = _ask_field(
"Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no"
)
@ -705,7 +637,6 @@ def get_cluster_input():
fsdp_config=fsdp_config,
megatron_lm_config=megatron_lm_config,
ipex_config=ipex_config,
mpirun_config=mpirun_config,
use_cpu=use_cpu,
rdzv_backend=rdzv_backend,
same_network=same_network,
@ -719,5 +650,4 @@ def get_cluster_input():
tpu_use_cluster=tpu_use_cluster,
dynamo_config=dynamo_config,
debug=debug,
enable_cpu_affinity=enable_cpu_affinity,
)

View File

@ -27,7 +27,7 @@ from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSI
hf_cache_home = os.path.expanduser(
os.environ.get("HF_HOME", os.path.join(os.environ.get("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
cache_dir = os.path.join(hf_cache_home, "accelerate")
default_json_config_file = os.path.join(cache_dir, "default_config.yaml")
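Both spellings above (os.environ.get vs os.getenv) resolve the same fallback chain; run standalone it looks like this:

import os

# HF_HOME wins if set, otherwise XDG_CACHE_HOME/huggingface, otherwise
# ~/.cache/huggingface; Accelerate then appends its own subfolder.
hf_cache_home = os.path.expanduser(
    os.environ.get("HF_HOME", os.path.join(os.environ.get("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
print(os.path.join(hf_cache_home, "accelerate"))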
@ -45,13 +45,13 @@ def load_config_from_file(config_file):
if not os.path.isfile(config_file):
raise FileNotFoundError(
f"The passed configuration file `{config_file}` does not exist. "
"Please pass an existing file to `accelerate launch`, or use the default one "
"Please pass an existing file to `accelerate launch`, or use the the default one "
"created through `accelerate config` and run `accelerate launch` "
"without the `--config_file` argument."
)
else:
config_file = default_config_file
with open(config_file, encoding="utf-8") as f:
with open(config_file, "r", encoding="utf-8") as f:
if config_file.endswith(".json"):
if (
json.load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
@ -94,7 +94,7 @@ class BaseConfig:
@classmethod
def from_json_file(cls, json_file=None):
json_file = default_json_config_file if json_file is None else json_file
with open(json_file, encoding="utf-8") as f:
with open(json_file, "r", encoding="utf-8") as f:
config_dict = json.load(f)
if "compute_environment" not in config_dict:
config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
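The from_json_file logic above is a load-then-backfill pattern. A compact sketch with keys taken from the snippet; the helper name is made up:

import json

# Read a JSON config and fill in defaults for keys older files may lack.
def load_config(path):
    with open(path, encoding="utf-8") as f:
        config = json.load(f)
    config.setdefault("compute_environment", "LOCAL_MACHINE")
    config.setdefault("use_cpu", False)
    config.setdefault("debug", False)
    return config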
@ -109,8 +109,6 @@ class BaseConfig:
config_dict["use_cpu"] = False
if "debug" not in config_dict:
config_dict["debug"] = False
if "enable_cpu_affinity" not in config_dict:
config_dict["enable_cpu_affinity"] = False
extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
if len(extra_keys) > 0:
raise ValueError(
@ -128,7 +126,7 @@ class BaseConfig:
@classmethod
def from_yaml_file(cls, yaml_file=None):
yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
with open(yaml_file, encoding="utf-8") as f:
with open(yaml_file, "r", encoding="utf-8") as f:
config_dict = yaml.safe_load(f)
if "compute_environment" not in config_dict:
config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
@ -145,8 +143,6 @@ class BaseConfig:
config_dict["use_cpu"] = False
if "debug" not in config_dict:
config_dict["debug"] = False
if "enable_cpu_affinity" not in config_dict:
config_dict["enable_cpu_affinity"] = False
extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
if len(extra_keys) > 0:
raise ValueError(
@ -167,7 +163,7 @@ class BaseConfig:
self.distributed_type = SageMakerDistributedType(self.distributed_type)
else:
self.distributed_type = DistributedType(self.distributed_type)
if getattr(self, "dynamo_config", None) is None:
if self.dynamo_config is None:
self.dynamo_config = {}
@ -182,7 +178,6 @@ class ClusterConfig(BaseConfig):
rdzv_backend: Optional[str] = "static"
same_network: Optional[bool] = False
main_training_function: str = "main"
enable_cpu_affinity: bool = False
# args for deepspeed_plugin
deepspeed_config: dict = None
@ -192,8 +187,6 @@ class ClusterConfig(BaseConfig):
megatron_lm_config: dict = None
# args for ipex
ipex_config: dict = None
# args for mpirun
mpirun_config: dict = None
# args for TPU
downcast_bf16: bool = False
@ -219,8 +212,6 @@ class ClusterConfig(BaseConfig):
self.megatron_lm_config = {}
if self.ipex_config is None:
self.ipex_config = {}
if self.mpirun_config is None:
self.mpirun_config = {}
return super().__post_init__()
@ -241,4 +232,3 @@ class SageMakerConfig(BaseConfig):
sagemaker_metrics_file: str = None
additional_args: dict = None
dynamo_config: dict = None
enable_cpu_affinity: bool = False

View File

@ -68,7 +68,7 @@ def _convert_compute_environment(value):
def _convert_distributed_mode(value):
value = int(value)
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "MULTI_MLU", "XLA"][value])
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _convert_dynamo_backend(value):

View File

@ -18,7 +18,7 @@ from pathlib import Path
import torch
from ...utils import is_mlu_available, is_npu_available, is_xpu_available
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
@ -57,15 +57,7 @@ def write_basic_config(mixed_precision="no", save_location: str = default_json_c
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if is_mlu_available():
num_mlus = torch.mlu.device_count()
config["num_processes"] = num_mlus
config["use_cpu"] = False
if num_mlus > 1:
config["distributed_type"] = "MULTI_MLU"
else:
config["distributed_type"] = "NO"
elif torch.cuda.is_available():
if torch.cuda.is_available():
num_gpus = torch.cuda.device_count()
config["num_processes"] = num_gpus
config["use_cpu"] = False
@ -95,7 +87,6 @@ def write_basic_config(mixed_precision="no", save_location: str = default_json_c
config["num_processes"] = 1
config["distributed_type"] = "NO"
config["debug"] = False
config["enable_cpu_affinity"] = False
config = ClusterConfig(**config)
config.to_json_file(path)
return path
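The surviving branch above keys everything off torch.cuda. A condensed stand-in that writes a similar JSON file, with the schema trimmed to the fields visible in the hunk:

import json

import torch

# Detect CUDA devices and write a minimal single-machine config.
config = {"compute_environment": "LOCAL_MACHINE", "mixed_precision": "no"}
if torch.cuda.is_available():
    num_gpus = torch.cuda.device_count()
    config.update(num_processes=num_gpus, use_cpu=False,
                  distributed_type="MULTI_GPU" if num_gpus > 1 else "NO")
else:
    config.update(num_processes=1, use_cpu=True, distributed_type="NO")
config["debug"] = False

with open("default_config_demo.json", "w") as f:
    json.dump(config, f, indent=2)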

View File

@ -17,7 +17,6 @@
import argparse
import os
import platform
import subprocess
import numpy as np
import psutil
@ -26,7 +25,7 @@ import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_mlu_available, is_npu_available, is_xpu_available
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
@ -48,7 +47,6 @@ def env_command(args):
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
pt_xpu_available = is_xpu_available()
pt_mlu_available = is_mlu_available()
pt_npu_available = is_npu_available()
accelerate_config = "Not found"
@ -56,31 +54,18 @@ def env_command(args):
if args.config_file is not None or os.path.isfile(default_config_file):
accelerate_config = load_config_from_file(args.config_file).to_dict()
# if we can run which, get it
command = None
bash_location = "Not found"
if os.name == "nt":
command = ["where", "accelerate"]
elif os.name == "posix":
command = ["which", "accelerate"]
if command is not None:
bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip()
info = {
"`Accelerate` version": version,
"Platform": platform.platform(),
"`accelerate` bash location": bash_location,
"Python version": platform.python_version(),
"Numpy version": np.__version__,
"PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
"PyTorch XPU available": str(pt_xpu_available),
"PyTorch NPU available": str(pt_npu_available),
"PyTorch MLU available": str(pt_mlu_available),
"System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
}
if pt_cuda_available:
info["GPU type"] = torch.cuda.get_device_name()
if pt_npu_available:
info["CANN version"] = torch.version.cann
print("\nCopy-and-paste the text below in your GitHub issue\n")
print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))
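Stripped of the removed fields, the report assembled above reduces to a dict that is printed line by line. A runnable subset:

import platform

import numpy as np
import psutil
import torch

# Collect a few environment facts and print them as a bullet list.
info = {
    "Platform": platform.platform(),
    "Python version": platform.python_version(),
    "Numpy version": np.__version__,
    "PyTorch version (GPU?)": f"{torch.__version__} ({torch.cuda.is_available()})",
    "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
}
print("\n".join(f"- {prop}: {val}" for prop, val in info.items()))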

View File

@ -13,11 +13,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from huggingface_hub import model_info
from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
from accelerate import init_empty_weights
from accelerate.commands.utils import CustomArgumentParser
from accelerate.utils import (
calculate_maximum_sizes,
convert_bytes,
@ -104,11 +105,10 @@ def create_empty_model(model_name: str, library_name: str, trust_remote_code: bo
f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`"
)
print(f"Loading pretrained config for `{model_name}` from `transformers`...")
if model_info.config is None:
raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.")
auto_map = model_info.config.get("auto_map", False)
config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token)
config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code)
with init_empty_weights():
# remote code could specify a specific `AutoModel` class in the `auto_map`
constructor = AutoModel
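For reference, the surrounding code instantiates a weightless model skeleton from its config. A short sketch assuming transformers is installed; the model name is only an example:

from accelerate import init_empty_weights
from transformers import AutoConfig, AutoModel

# Build the architecture on the "meta" device so no real weights are allocated.
config = AutoConfig.from_pretrained("bert-base-cased")
with init_empty_weights():
    model = AutoModel.from_config(config)
print(sum(p.numel() for p in model.parameters()))  # parameter count, no memory cost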
@ -181,7 +181,7 @@ def estimate_command_parser(subparsers=None):
if subparsers is not None:
parser = subparsers.add_parser("estimate-memory")
else:
parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
parser = argparse.ArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
parser.add_argument(
@ -204,7 +204,6 @@ def estimate_command_parser(subparsers=None):
help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag
should only be used for repositories you trust and in which you have read the code, as it will execute
code present on the Hub on your local machine.""",
default=False,
)
if subparsers is not None:
@ -212,41 +211,6 @@ def estimate_command_parser(subparsers=None):
return parser
def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict:
"""
Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of
1.
Args:
bytes (`int`):
The size of the model being trained.
mixed_precision (`str`):
The mixed precision that would be ran.
msamp_config (`str`):
The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`.
"""
memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1}
fp32_size = bytes
fp16_size = bytes // 2
if mixed_precision == "float32":
memory_sizes["model"] = fp32_size
memory_sizes["gradients"] = fp32_size
memory_sizes["optimizer"] = fp32_size * 2
memory_sizes["step"] = fp32_size * 4
elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None):
# With native `TransformersEngine`, there is no memory savings with FP8
# With mixed precision training, the model has weights stored
# in FP16 and FP32
memory_sizes["model"] = fp32_size
# 1.5 from weight gradient + computation (GEMM)
memory_sizes["gradients"] = fp32_size + fp16_size
# 2x from optimizer states
memory_sizes["optimizer"] = fp32_size * 2 # Optimizer states
memory_sizes["step"] = memory_sizes["optimizer"]
return memory_sizes
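To make the removed arithmetic concrete, here are the numbers it would produce for a hypothetical 7B-parameter model trained in float16/bfloat16 mixed precision (4 bytes per fp32 parameter):

# fp32 weights: 7e9 params * 4 bytes ≈ 28 GB
fp32_size = 7_000_000_000 * 4
fp16_size = fp32_size // 2
memory_sizes = {
    "model": fp32_size,                  # master weights kept in fp32
    "gradients": fp32_size + fp16_size,  # fp32 grads plus the fp16 compute copy
    "optimizer": fp32_size * 2,          # two Adam moment buffers
}
memory_sizes["step"] = memory_sizes["optimizer"]
print({k: f"{v / 1024 ** 3:.1f} GB" for k, v in memory_sizes.items()})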
def gather_data(args):
"Creates an empty model and gathers the data for the sizes"
try:
@ -268,7 +232,6 @@ def gather_data(args):
for dtype in args.dtypes:
dtype_total_size = total_size
dtype_largest_layer = largest_layer[0]
dtype_training_size = estimate_training_usage(dtype_total_size, dtype)
if dtype == "float16":
dtype_total_size /= 2
dtype_largest_layer /= 2
@ -278,6 +241,7 @@ def gather_data(args):
elif dtype == "int4":
dtype_total_size /= 8
dtype_largest_layer /= 8
dtype_training_size = dtype_total_size * 4
data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size])
return data
@ -288,9 +252,6 @@ def estimate_command(args):
for i, item in enumerate(row):
if isinstance(item, (int, float)):
row[i] = convert_bytes(item)
elif isinstance(item, dict):
training_usage = max(item.values())
row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A"
headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]

View File

@ -28,7 +28,6 @@ import torch
from accelerate.commands.config import default_config_file, load_config_from_file
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
from accelerate.commands.utils import CustomArgumentParser
from accelerate.state import get_int_from_env
from accelerate.utils import (
ComputeEnvironment,
@ -36,15 +35,13 @@ from accelerate.utils import (
PrepareForLaunch,
_filter_args,
check_cuda_p2p_ib_support,
convert_dict_to_env_variables,
is_bf16_available,
is_deepspeed_available,
is_mlu_available,
is_npu_available,
is_rich_available,
is_sagemaker_available,
is_torch_version,
is_torch_xla_available,
is_tpu_available,
is_xpu_available,
patch_environment,
prepare_deepspeed_cmd_env,
@ -66,93 +63,80 @@ if is_rich_available():
logger = logging.getLogger(__name__)
options_to_group = {
"multi_gpu": "Distributed GPUs",
"tpu": "TPU",
"use_deepspeed": "DeepSpeed Arguments",
"use_fsdp": "FSDP Arguments",
"use_megatron_lm": "Megatron-LM Arguments",
"--multi-gpu": "Distributed GPUs",
"--tpu": "TPU",
"--use_deepspeed": "DeepSpeed Arguments",
"--use_fsdp": "FSDP Arguments",
"--use_megatron_lm": "Megatron-LM Arguments",
}
def clean_option(option):
"Finds all cases of - after the first two characters and changes them to _"
if option.startswith("--"):
return option[2:].replace("-", "_")
return option[:3] + option[3:].replace("-", "_")
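The two variants above differ only in whether the leading dashes survive; taking the newer form, the normalisation behaves like this:

# Hyphens after the leading "--" become underscores so either spelling of an
# option name ends up identical once parsed.
def clean_option(option):
    if option.startswith("--"):
        return option[2:].replace("-", "_")
    return option

print(clean_option("--num-processes=2"))  # num_processes=2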
class CustomHelpFormatter(argparse.HelpFormatter):
class _CustomHelpAction(argparse._HelpAction):
"""
This is a custom help formatter that will hide all arguments that are not used in the command line when the help is
This is a custom help action that will hide all arguments that are not used in the command line when the help is
called. This is useful for the case where the user is using a specific platform and only wants to see the arguments
for that platform.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.titles = [
def __call__(self, parser, namespace, values, option_string=None):
if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
args = sys.argv[2:]
else:
args = sys.argv[1:]
opts = parser._actions
titles = [
"Hardware Selection Arguments",
"Resource Selection Arguments",
"Training Paradigm Arguments",
"positional arguments",
"optional arguments",
]
def add_argument(self, action: argparse.Action):
if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
args = sys.argv[2:]
else:
args = sys.argv[1:]
if len(args) > 1:
args = list(map(clean_option, args))
used_platforms = [arg for arg in args if arg in options_to_group.keys()]
args = list(map(clean_option, args))
used_titles = [options_to_group[o] for o in used_platforms]
if action.container.title not in self.titles + used_titles:
action.help = argparse.SUPPRESS
elif action.container.title == "Hardware Selection Arguments":
if set(action.option_strings).isdisjoint(set(args)):
action.help = argparse.SUPPRESS
else:
action.help = action.help + " (currently selected)"
elif action.container.title == "Training Paradigm Arguments":
if set(action.option_strings).isdisjoint(set(args)):
action.help = argparse.SUPPRESS
else:
action.help = action.help + " (currently selected)"
for i, arg in enumerate(opts):
# If the argument's container is outside of the used titles, hide it
if arg.container.title not in titles + used_titles:
setattr(opts[i], "help", argparse.SUPPRESS)
# If the argument is hardware selection, but not being passed, hide it
elif arg.container.title == "Hardware Selection Arguments":
if set(arg.option_strings).isdisjoint(set(args)):
setattr(opts[i], "help", argparse.SUPPRESS)
else:
setattr(opts[i], "help", arg.help + " (currently selected)")
# If the argument is a training paradigm, but not being passed, hide it
elif arg.container.title == "Training Paradigm Arguments":
if set(arg.option_strings).isdisjoint(set(used_platforms)):
setattr(opts[i], "help", argparse.SUPPRESS)
else:
setattr(opts[i], "help", arg.help + " (currently selected)")
for i, group in enumerate(list(parser._action_groups)):
# If all arguments in the group are hidden, hide the group
if all([arg.help == argparse.SUPPRESS for arg in group._group_actions]):
parser._action_groups.remove(group)
action.option_strings = [s for s in action.option_strings if "-" not in s[2:]]
super().add_argument(action)
def end_section(self):
if len(self._current_section.items) < 2:
self._current_section.items = []
self._current_section.heading = ""
super().end_section()
super().__call__(parser, namespace, values, option_string)
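Both the formatter and the help-action approach rely on the same argparse mechanism: an action whose help is argparse.SUPPRESS is dropped from the generated help text. A tiny demonstration:

import argparse

# Hide one option from --help while leaving it parseable.
parser = argparse.ArgumentParser("demo")
hidden = parser.add_argument("--internal_flag", help="normally visible")
hidden.help = argparse.SUPPRESS
parser.add_argument("--visible_flag", help="stays in the help text")
parser.print_help()  # only -h and --visible_flag are listed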
def launch_command_parser(subparsers=None):
description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)"
if subparsers is not None:
parser = subparsers.add_parser(
"launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter
)
parser = subparsers.add_parser("launch", add_help=False, allow_abbrev=False)
else:
parser = CustomArgumentParser(
"Accelerate launch command",
description=description,
add_help=False,
allow_abbrev=False,
formatter_class=CustomHelpFormatter,
)
parser = argparse.ArgumentParser("Accelerate launch command", add_help=False, allow_abbrev=False)
parser.register("action", "help", _CustomHelpAction)
parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
parser.add_argument(
"--config_file",
default=None,
help="The config file to use for the default values in the launching script.",
"--config_file", default=None, help="The config file to use for the default values in the launching script."
)
parser.add_argument(
"--quiet",
@ -207,12 +191,6 @@ def launch_command_parser(subparsers=None):
default=None,
help="The number of CPU threads per process. Can be tuned for optimal performance.",
)
resource_args.add_argument(
"--enable_cpu_affinity",
default=False,
action="store_true",
help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.",
)
# Dynamo arguments
resource_args.add_argument(
@ -303,15 +281,6 @@ def launch_command_parser(subparsers=None):
type=str,
help="Tee std streams into a log file and also to console.",
)
distributed_args.add_argument(
"--log_dir",
type=str,
default=None,
help=(
"Base directory to use for log files when using torchrun/torch.distributed.run as launcher. "
"Use with --tee to redirect std streams info log files."
),
)
distributed_args.add_argument(
"--role",
type=str,
@ -496,13 +465,6 @@ def launch_command_parser(subparsers=None):
type=str,
help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.",
)
deepspeed_args.add_argument(
"--deepspeed_moe_layer_cls_names",
default=None,
type=str,
help="comma-separated list of transformer MoE layer class names (case-sensitive) to wrap ,e.g, `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ..."
" (useful only when `use_deepspeed` flag is passed).",
)
# fsdp arguments
fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Shared Data Parallelism.")
@ -663,22 +625,6 @@ def launch_command_parser(subparsers=None):
),
)
# MPI arguments
mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU")
mpirun_args.add_argument(
"--mpirun_hostfile",
type=str,
default=None,
help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will "
"get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.",
)
mpirun_args.add_argument(
"--mpirun_ccl",
type=int,
default=1,
help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.",
)
# Other arguments of the training scripts
parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")
@ -704,7 +650,7 @@ def multi_gpu_launcher(args):
current_env = prepare_multi_gpu_env(args)
if not check_cuda_p2p_ib_support():
message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
message = "Using RTX 3090 or 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
warn = False
if "NCCL_P2P_DISABLE" not in current_env:
current_env["NCCL_P2P_DISABLE"] = "1"
@ -721,7 +667,6 @@ def multi_gpu_launcher(args):
distrib_run.get_args_parser(),
["--training_script", args.training_script, "--training_script_args", args.training_script_args],
)
with patch_environment(**current_env):
try:
distrib_run.run(args)
@ -739,12 +684,10 @@ def deepspeed_launcher(args):
if not is_deepspeed_available():
raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")
else:
from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME
cmd, current_env = prepare_deepspeed_cmd_env(args)
if not check_cuda_p2p_ib_support():
message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
message = "Using RTX 3090 or 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
warn = False
if "NCCL_P2P_DISABLE" not in current_env:
current_env["NCCL_P2P_DISABLE"] = "1"
@ -756,10 +699,11 @@ def deepspeed_launcher(args):
logger.warning(message)
if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f:
valid_env_items = convert_dict_to_env_variables(current_env)
if len(valid_env_items) > 1:
f.writelines(valid_env_items)
with open(".deepspeed_env", "a") as f:
for key, value in current_env.items():
if ";" in value or " " in value:
continue
f.write(f"{key}={value}\n")
process = subprocess.Popen(cmd, env=current_env)
process.wait()
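Both versions of the multi-node branch end up writing key=value pairs to an environment file while skipping values that would break that format. A standalone sketch using a demo file name, not DeepSpeed's real one:

# Append shell-safe environment variables to a demo env file.
current_env = {"NCCL_P2P_DISABLE": "1", "PATH": "/usr/bin:/bin", "BAD": "a b; c"}
with open("deepspeed_env_demo.txt", "a") as f:
    for key, value in current_env.items():
        if ";" in value or " " in value:   # values like BAD would corrupt the file
            continue
        f.write(f"{key}={value}\n")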
@ -924,15 +868,10 @@ def _validate_launch_command(args):
args.multi_gpu = (
True
if defaults.distributed_type
in (
DistributedType.MULTI_GPU,
DistributedType.MULTI_NPU,
DistributedType.MULTI_MLU,
DistributedType.MULTI_XPU,
)
in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU)
else False
)
args.tpu = defaults.distributed_type == DistributedType.XLA
args.tpu = defaults.distributed_type == DistributedType.TPU
args.use_fsdp = defaults.distributed_type == DistributedType.FSDP
args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM
args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False
@ -967,8 +906,6 @@ def _validate_launch_command(args):
setattr(args, k, defaults.dynamo_config[k])
for k in defaults.ipex_config:
setattr(args, k, defaults.ipex_config[k])
for k in defaults.mpirun_config:
setattr(args, k, defaults.mpirun_config[k])
continue
# Those args are handled separately
@ -987,16 +924,14 @@ def _validate_launch_command(args):
args.mixed_precision = defaults.mixed_precision
mp_from_config_flag = True
else:
native_amp = False
err = "{mode} mixed precision requires {requirement}"
if args.use_cpu or (args.use_xpu and torch.xpu.is_available()):
native_amp = is_torch_version(">=", "1.10")
else:
native_amp = is_bf16_available(True)
if (
args.mixed_precision == "bf16"
and not native_amp
and not (args.tpu and is_torch_xla_available(check_is_tpu=True))
):
raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")
if args.mixed_precision == "bf16" and not native_amp and not (args.tpu and is_tpu_available()):
raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device."))
# Silently set the default here
if args.dynamo_backend is None:
@ -1005,8 +940,6 @@ def _validate_launch_command(args):
if args.num_processes is None:
if args.use_xpu and is_xpu_available():
args.num_processes = torch.xpu.device_count()
elif is_mlu_available():
args.num_processes = torch.mlu.device_count()
elif is_npu_available():
args.num_processes = torch.npu.device_count()
else:
@ -1016,7 +949,6 @@ def _validate_launch_command(args):
args.debug = False
if not args.multi_gpu and (
(args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1)
or (is_mlu_available() and torch.mlu.device_count() > 1)
or (is_npu_available() and torch.npu.device_count() > 1)
or (torch.cuda.device_count() > 1)
):
@ -1043,8 +975,8 @@ def _validate_launch_command(args):
defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER
)
if is_aws_env_disabled and args.num_cpu_threads_per_process is None:
args.num_cpu_threads_per_process = get_int_from_env(["OMP_NUM_THREADS"], 1)
if args.use_cpu and args.num_processes >= 1 and get_int_from_env(["OMP_NUM_THREADS"], 0) == 0:
args.num_cpu_threads_per_process = 1
if args.use_cpu and args.num_processes >= 1:
local_size = get_int_from_env(
["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1
)
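The thread-count fallback above leans on an env-lookup helper. A local stand-in for accelerate.state.get_int_from_env shows the idea:

import os

# Return the first environment variable from `names` that is set, else `default`.
def get_int_from_env(names, default):
    for name in names:
        if name in os.environ:
            return int(os.environ[name])
    return default

num_cpu_threads_per_process = get_int_from_env(["OMP_NUM_THREADS"], 1)
print(num_cpu_threads_per_process)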

View File

@ -1,14 +1 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .selection_menu import BulletMenu

View File

@ -30,7 +30,7 @@ def mark(key: str):
def decorator(func):
handle = getattr(func, "handle_key", [])
handle += [key]
func.handle_key = handle
setattr(func, "handle_key", handle)
return func
return decorator
@ -44,7 +44,7 @@ def mark_multiple(*keys: List[str]):
def decorator(func):
handle = getattr(func, "handle_key", [])
handle += keys
func.handle_key = handle
setattr(func, "handle_key", handle)
return func
return decorator
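The change above is purely stylistic (plain attribute assignment instead of setattr); the decorator's behaviour is easiest to see in isolation:

# Stack key markers on a handler function via a mutable handle_key attribute.
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        func.handle_key = handle
        return func
    return decorator

@mark("up")
@mark("k")
def move_up():
    return "moved up"

print(move_up.handle_key)  # ['k', 'up'] — decorators apply bottom-up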
@ -58,8 +58,8 @@ class KeyHandler(type):
def __new__(cls, name, bases, attrs):
new_cls = super().__new__(cls, name, bases, attrs)
if not hasattr(new_cls, "key_handler"):
new_cls.key_handler = {}
new_cls.handle_input = KeyHandler.handle_input
setattr(new_cls, "key_handler", {})
setattr(new_cls, "handle_input", KeyHandler.handle_input)
for value in attrs.values():
handled_keys = getattr(value, "handle_key", [])

View File

@ -16,6 +16,7 @@
Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet
"""
import os
import string
import sys

View File

@ -15,7 +15,6 @@
"""
Main driver for the selection menu, based on https://github.com/bchao1/bullet
"""
import builtins
import sys

View File

@ -15,8 +15,9 @@
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async, path_in_accelerate_package
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
@ -42,15 +43,15 @@ def test_command_parser(subparsers=None):
def test_command(args):
script_name = path_in_accelerate_package("test_utils", "scripts", "test_script.py")
script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
if args.config_file is None:
test_args = [script_name]
test_args = script_name
else:
test_args = f"--config_file={args.config_file} {script_name}".split()
test_args = f"--config_file={args.config_file} {script_name}"
cmd = ["accelerate-launch"] + test_args
result = execute_subprocess_async(cmd)
cmd = ["accelerate-launch"] + test_args.split()
result = execute_subprocess_async(cmd, env=os.environ.copy())
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!")
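Both versions of test_command build an argv list for accelerate-launch. A reduced sketch with a placeholder script path; the real path resolution differs between the two versions shown above:

import os
import subprocess

# Assemble the launch command; prepend a --config_file flag only when one is given.
script_name = "test_script.py"          # placeholder for the packaged test script
config_file = None
if config_file is None:
    test_args = [script_name]
else:
    test_args = [f"--config_file={config_file}", script_name]
cmd = ["accelerate-launch"] + test_args
print("Would run:", " ".join(cmd))
# subprocess.run(cmd, env=os.environ.copy(), check=True)  # uncomment to actually run it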

Some files were not shown because too many files have changed in this diff.