Mirror of https://github.com/huggingface/accelerate.git (synced 2025-11-13 21:59:16 +08:00)

Compare commits: fix-warnin ... v0.29.0 (114 commits)
| SHA1 |
|---|
| ec88c8f54a |
| 7531e8c13e |
| 8e439de744 |
| d96a5aa730 |
| d7bcd85d4d |
| d927b8f3a2 |
| f579d9550d |
| bbecad4e8e |
| b82999a84b |
| 11568e562c |
| d9a1b8f975 |
| b634388ef1 |
| 4d415f2129 |
| 829171a9a4 |
| 5a232de2fa |
| 5f8048cd04 |
| 4378b560e8 |
| 8644e23b71 |
| b2fc3a3b0e |
| 290446d446 |
| 85a75d4c3d |
| f94f0ff912 |
| 1b2e634970 |
| dd62fc90ce |
| 10b418495e |
| c2f193a25c |
| 1812152392 |
| b8b353b7a7 |
| f2778d6502 |
| 2ad42e77c3 |
| e8aaee5d9b |
| 910c1b6a8f |
| 92d3240bb5 |
| 02a8a9a3a7 |
| ee163b66fb |
| 354db5b5f7 |
| 92b1ad01f3 |
| 60bfdaa934 |
| 16eb6d76bf |
| c8acfa700b |
| e70e3c87de |
| bc8dfe3caf |
| e3d324240f |
| 10882eeddd |
| 145a98fc12 |
| 64ae9ea3fe |
| 8aa72b9748 |
| 97d115a266 |
| 63cfd9efdc |
| 6cf8221a09 |
| 7a2feecad4 |
| ee004674b9 |
| 65544d8fe9 |
| 5fce525f90 |
| ca37b0e471 |
| 82a1258ffc |
| 21b225e8d5 |
| 25ee6ab3b7 |
| 2d3e822d11 |
| 811dc1e464 |
| c59c6c9bff |
| 422bd23f3f |
| c0b16b684f |
| 78b15561a1 |
| 8f9673f509 |
| 9c071103f0 |
| 1127e670ca |
| fa83efc33e |
| 4aa71049c3 |
| c0b441f6be |
| 34fdddd7df |
| 3fb9a3a231 |
| 065d88729b |
| 67e698cf4d |
| 46ac6c9bba |
| 9b24f56e42 |
| f20445d4ac |
| 97d2168e59 |
| 79016eb163 |
| 164193fa7e |
| 482a9f9fa4 |
| d7de8d1794 |
| b443be70fb |
| 613ad7089a |
| 13e79ccfab |
| aba3b8c72f |
| 70cdf5fe52 |
| b38590a28a |
| 5318bc7733 |
| ef68b4655c |
| ecebfa19c9 |
| 5a39359fb2 |
| b3d2111708 |
| f75c6245ba |
| 9c1d5bac15 |
| b0b867da85 |
| 433d693b70 |
| c3aec59b12 |
| 9467a62744 |
| 86228e321d |
| 06b138d845 |
| 0867c09318 |
| 0e1ee4b92d |
| d8a64cb79d |
| b703efdcc3 |
| 68f54720dc |
| 46f1391b79 |
| cd7ff5e137 |
| f4b411f84b |
| 7ba64e632c |
| 8b770a7dab |
| 3d8b998fbb |
| 03365a3d17 |
| 7aafa25673 |
@@ -37,7 +37,7 @@ jobs:
with:
file: docker/accelerate-cpu/Dockerfile
push: true
tags: huggingface/accelerate-cpu:${{needs.get-version.outputs.version}}
tags: huggingface/accelerate:cpu-release-${{ needs.get-version.outputs.version }}

version-cuda:
name: "Latest Accelerate GPU [version]"
@@ -57,4 +57,4 @@ jobs:
with:
file: docker/accelerate-gpu/Dockerfile
push: true
tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}
tags: huggingface/accelerate:gpu-release-${{needs.get-version.outputs.version}}
.github/workflows/build_docker_images.yml (17, vendored)
@@ -22,12 +22,18 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Get current date
id: date
run: |
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
- name: Build and Push CPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-cpu/Dockerfile
push: true
tags: huggingface/accelerate-cpu
tags: |
huggingface/accelerate:cpu-nightly
huggingface/accelerate:cpu-nightly-${{ env.date }}

latest-cuda:
name: "Latest Accelerate GPU [dev]"
@@ -40,10 +46,15 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}

- name: Get current date
id: date
run: |
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-gpu/Dockerfile
push: true
tags: huggingface/accelerate-gpu
tags: |
huggingface/accelerate:gpu-nightly
huggingface/accelerate:gpu-nightly-${{ env.date }}
.github/workflows/run_merge_tests.yml (4, vendored)
@@ -14,7 +14,7 @@ jobs:
env:
CUDA_VISIBLE_DEVICES: "0"
container:
image: huggingface/accelerate-gpu:latest
image: huggingface/accelerate:gpu-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
@@ -61,7 +61,7 @@ jobs:
env:
CUDA_VISIBLE_DEVICES: 0,1
container:
image: huggingface/accelerate-gpu:latest
image: huggingface/accelerate:gpu-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
@@ -23,7 +23,7 @@ defaults:
jobs:
run-trainer-tests:
container:
image: huggingface/accelerate-gpu:latest
image: huggingface/accelerate:gpu-nightly
options: --gpus all --shm-size "16gb"
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
strategy:
@@ -88,7 +88,7 @@ jobs:

run-skorch-tests:
container:
image: huggingface/accelerate-gpu:latest
image: huggingface/accelerate:gpu-nightly
options: --gpus all --shm-size "16gb"
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
strategy:
.github/workflows/test.yml (11, vendored)
@@ -44,22 +44,13 @@ jobs:
with:
python-version: 3.8

- name: Activate python cache
uses: actions/cache@v3
with:
path: |
${{ env.pythonLocation }}
${{ env.HF_HOME }}
key: ${{ env.pythonLocation }}-${{ matrix.pytorch-version }}-${{ matrix.test-kind }}-${{ hashFiles('setup.py') }}

- name: Install the library
run: |
pip install --upgrade pip
if [[ ${{ matrix.test-kind }} = test_prod ]]; then pip install -e .[test_prod]; fi
if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi
if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi
if [[ ${{ matrix.test-kind }} = minimum ]]; then pip install torch==1.10.0; fi
pip install pytest-reportlog tabulate
pip install pytest-reportlog tabulate setuptools

- name: Run Tests
env:
.pre-commit-config.yaml (13, Normal file)
@@ -0,0 +1,13 @@
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.2.1
    hooks:
      - id: ruff
        args:
          - --fix
      - id: ruff-format
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: check-merge-conflict
      - id: check-yaml
@@ -152,7 +152,7 @@ Follow these steps to start contributing:
$ make test
```

`accelerate` relies on `black` and `ruff` to format its source code
`accelerate` relies on `ruff` to format its source code
consistently. After you make changes, apply automatic style corrections and code verifications
that can't be automated in one go with:

@@ -172,6 +172,14 @@ Follow these steps to start contributing:
$ make quality
```

You can also set up [`pre-commit`](https://pre-commit.com/) to run these checks
automatically as Git commit hooks.

```bash
$ pip install pre-commit
$ pre-commit install
```

Once you're happy with your changes, add changed files using `git add` and
make a commit with `git commit` to record your changes locally:

@@ -235,4 +243,4 @@ $ python -m pytest -sv ./tests
In fact, that's how `make test` is implemented (sans the `pip install` line)!

You can specify a smaller set of tests in order to test only the feature
you're working on.
you're working on.
Makefile (22)
@@ -1,6 +1,6 @@
.PHONY: quality style test docs utils

check_dirs := tests src examples benchmarks utils
check_dirs := .

# Check that source code meets quality standards

@@ -12,20 +12,17 @@ extra_quality_checks:

# this target runs checks on all files
quality:
black --required-version 23 --check $(check_dirs)
ruff $(check_dirs)
ruff check $(check_dirs)
ruff format --check $(check_dirs)
doc-builder style src/accelerate docs/source --max_len 119 --check_only

# Format source code automatically and check if there are any problems left that need manual fixing
style:
black --required-version 23 $(check_dirs)
ruff $(check_dirs) --fix
ruff check $(check_dirs) --fix
ruff format $(check_dirs)
doc-builder style src/accelerate docs/source --max_len 119

# Run tests for the library
test:
python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_all.log",)

test_big_modeling:
python -m pytest -s -v ./tests/test_big_modeling.py ./tests/test_modeling_utils.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_big_modeling.log",)

@@ -42,6 +39,15 @@ test_deepspeed:

test_fsdp:
python -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_fsdp.log",)

# Since the new version of pytest will *change* how things are collected, we need `deepspeed` to
# run after test_core and test_cli
test:
$(MAKE) test_core
$(MAKE) test_cli
$(MAKE) test_big_modeling
$(MAKE) test_deepspeed
$(MAKE) test_fsdp

test_examples:
python -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_examples.log",)

README.md (10)
@@ -171,7 +171,15 @@ To learn more, check the CLI documentation available [here](https://huggingface.

🤗 Here is another way to launch a multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well.
Once you have MPI set up on your cluster, just run:

```bash
accelerate config
```
Answer the questions that are asked, selecting to run using multi-CPU, and answer "yes" when asked if you want accelerate to launch mpirun.
Then, use `accelerate launch` with your script like:
```bash
accelerate launch examples/nlp_example.py
```
Alternatively, you can use mpirun directly, without using the CLI, like:
```bash
mpirun -np 2 python examples/nlp_example.py
```
@@ -1,3 +1,16 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import threading
import time
docker/README.md (72, Normal file)
@@ -0,0 +1,72 @@
<!---
Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Official Hugging Face Accelerate Docker Images

Accelerate publishes a variety of docker versions as part of our CI that users can also use. These are stable images that Accelerate can run off of, which come with a variety of different setup configurations, all of which are officially hosted on [Docker Hub](https://hub.docker.com/r/huggingface/accelerate).

A breakdown of each is given below.

## Naming Conventions

Accelerate docker images follow a tagging convention of:

```bash
huggingface/accelerate:{accelerator}-{nightly,release}
```

`accelerator` in this instance is one of many applicable pre-configured backends:
* `gpu`: Comes compiled off of the `nvidia/cuda` image and includes everything such as `deepspeed`, `bitsandbytes`, etc.
* `cpu`: Comes compiled off of `python:3.8-slim` and is designed for non-CUDA based workloads.
* More to come soon

## Nightlies vs Releases

With each release, a new build is pushed with a version number included in the name. For a GPU-supported image of version 0.28.0, for instance, it would look like the following:

```bash
huggingface/accelerate:gpu-release-0.28.0
```

Nightlies contain two different image tags. There is a general `nightly` tag which is built each night, and a `nightly-YYYY-MM-DD` which corresponds to a build from a particular date.

For instance, here is an example nightly CPU image from 3/14/2024:

```bash
huggingface/accelerate:cpu-nightly-2024-03-14
```

## Running the images

Each image comes compiled with `conda` and an `accelerate` environment that contains all of the installed dependencies.

To pull down the latest nightly build, run:

```bash
docker pull huggingface/accelerate:gpu-nightly
```

To then run it in interactive mode with GPU-memory available, run:

```bash
docker container run --gpus all -it huggingface/accelerate:gpu-nightly
```

## DEPRECATED IMAGES

CPU and GPU docker images were hosted at `huggingface/accelerate-gpu` and `huggingface/accelerate-cpu`. These builds are now outdated and will not receive updates.

The builds at the corresponding `huggingface/accelerate:{gpu,cpu}` contain the same `Dockerfile`, so it's as simple as changing the docker image to the desired ones from above. For posterity, these images will not be deleted, but they will not receive updates going forward.
@@ -4,7 +4,7 @@
# Use base conda image to reduce time
FROM continuumio/miniconda3:latest AS compile-image
# Specify py version
ENV PYTHON_VERSION=3.8
ENV PYTHON_VERSION=3.9
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
@@ -10,50 +10,63 @@
- local: basic_tutorials/overview
title: Overview
- local: basic_tutorials/migration
title: Migrating to 🤗 Accelerate
title: Add Accelerate to your code
- local: basic_tutorials/execution
title: Execution process
- local: basic_tutorials/tpu
title: TPU training
- local: basic_tutorials/launch
title: Launching distributed code
- local: basic_tutorials/notebook
title: Launching distributed training from Jupyter Notebooks
- local: basic_tutorials/troubleshooting
title: Troubleshooting guide
title: Tutorials
- sections:
- local: usage_guides/explore
title: Start Here!
- local: usage_guides/training_zoo
title: Example Zoo
- local: usage_guides/big_modeling
title: How to perform inference on large models with small resources
- local: usage_guides/model_size_estimator
title: Knowing how big of a model you can fit into memory
- local: usage_guides/quantization
title: How to quantize model
- local: usage_guides/distributed_inference
title: How to perform distributed inference with normal resources
- local: usage_guides/gradient_accumulation
title: Performing gradient accumulation
- local: usage_guides/local_sgd
title: Accelerating training with local SGD
- local: usage_guides/checkpoint
title: Saving and loading training states
- local: usage_guides/tracking
title: Using experiment trackers
- local: usage_guides/mps
title: How to use Apple Silicon M1 GPUs
- local: usage_guides/low_precision_training
title: How to train in low precision (FP8)
- local: usage_guides/deepspeed
title: How to use DeepSpeed
- local: usage_guides/fsdp
title: How to use Fully Sharded Data Parallelism
- local: usage_guides/megatron_lm
title: How to use Megatron-LM
- local: usage_guides/sagemaker
title: How to use 🤗 Accelerate with SageMaker
- local: usage_guides/ipex
title: How to use 🤗 Accelerate with Intel® Extension for PyTorch for cpu
title: How-To Guides
- isExpanded: true
sections:
- local: usage_guides/explore
title: Start Here!
- local: usage_guides/model_size_estimator
title: Model memory estimator
- local: usage_guides/quantization
title: Model quantization
- local: usage_guides/tracking
title: Experiment trackers
- local: usage_guides/checkpoint
title: Save and load training states
- local: basic_tutorials/troubleshooting
title: Troubleshoot
- local: usage_guides/training_zoo
title: Example Zoo
title: Accelerate
- isExpanded: true
sections:
- local: usage_guides/gradient_accumulation
title: Gradient accumulation
- local: usage_guides/local_sgd
title: Local SGD
- local: usage_guides/low_precision_training
title: Low precision (FP8) training
- local: usage_guides/deepspeed
title: DeepSpeed
- local: usage_guides/fsdp
title: Fully Sharded Data Parallelism
- local: usage_guides/megatron_lm
title: Megatron-LM
- local: usage_guides/sagemaker
title: Amazon SageMaker
- local: usage_guides/mps
title: Apple M1 GPUs
- local: usage_guides/ipex
title: IPEX training with CPU
title: Training
- isExpanded: true
sections:
- local: usage_guides/big_modeling
title: Big Model Inference
- local: usage_guides/distributed_inference
title: Distributed inference
title: Inference
title: How to guides
- sections:
- local: concept_guides/internal_mechanism
title: 🤗 Accelerate's internal mechanism
@@ -72,7 +85,7 @@
title: Concepts and fundamentals
- sections:
- local: package_reference/accelerator
title: Main Accelerator class
title: Accelerator
- local: package_reference/state
title: Stateful configuration classes
- local: package_reference/cli
@@ -89,6 +102,8 @@
title: Logging
- local: package_reference/big_modeling
title: Working with large models
- local: package_reference/inference
title: Distributed inference with big models
- local: package_reference/kwargs
title: Kwargs handlers
- local: package_reference/utilities
docs/source/basic_tutorials/execution.md (128, Normal file)
@@ -0,0 +1,128 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Execution process

When working with distributed training systems, it is important to manage how and when processes are executed across GPUs. Some processes finish faster than others, and some processes shouldn't begin if others haven't finished yet. Accelerate provides tools for orchestrating when processes are executed to ensure everything remains synchronized across all devices.

This tutorial will teach you how to execute a process on only one machine and how to delay execution until all processes have reached a certain point.

## Execute on one process

Certain code only needs to be run once on a given machine, such as printing a log statement or only displaying one progress bar on the local main process.

<hfoptions id="local-execution">
<hfoption id="statements">

You should use `accelerator.is_local_main_process` to indicate code that should only be executed once.

```py
from tqdm.auto import tqdm

progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
```

You could also wrap a statement with `accelerator.is_local_main_process`.

> [!TIP]
> For standalone `print` statements that aren't wrapped in `accelerator.is_local_main_process`, replace `print` with Accelerate's [`~Accelerator.print`] method to only print once per process.

```py
if accelerator.is_local_main_process:
    print("Accelerate is the best")
```

</hfoption>
<hfoption id="function">

For a function that should only be executed once, use [`~Accelerator.on_local_main_process`].

```py
@accelerator.on_local_main_process
def do_my_thing():
    "Something done once per server"
    do_thing_once_per_server()
```

</hfoption>
</hfoptions>

You could also direct Accelerate to execute code once across *all processes* regardless of the number of machines. This is useful if you're uploading a final model to the Hub.

<hfoptions id="main-execution">
<hfoption id="statement">

You should use `accelerator.is_main_process` to indicate code that should only be executed once across all processes.

```py
if accelerator.is_main_process:
    repo.push_to_hub()
```

</hfoption>
<hfoption id="function">

For a function that should only be executed once across all processes, use [`~Accelerator.on_main_process`].

```py
@accelerator.on_main_process
def do_my_thing():
    "Something done once per server"
    do_thing_once()
```

</hfoption>
</hfoptions>

## Execute on a specific process

Accelerate can also help you execute functions that should only be executed on a specific process or a local process index.

<hfoptions id="specific-execution">
<hfoption id="specific process">

Use the [`~Accelerator.on_process`] method and specify the process index to execute a function on.

```py
@accelerator.on_process(process_index=0)
def do_my_thing():
    "Something done on process index 0"
    do_thing_on_index_zero()
```

</hfoption>
<hfoption id="local process">

Use the [`~Accelerator.on_local_process`] method and specify the local process index to execute a function on.

```py
@accelerator.on_local_process(local_process_idx=0)
def do_my_thing():
    "Something done on process index 0 on each server"
    do_thing_on_index_zero_on_each_server()
```

</hfoption>
</hfoptions>

## Defer execution

When you run your script on several GPUs at the same time, some code may be executed faster than others. You might need to wait for all processes to reach a certain point before executing the next set of instructions. For instance, you shouldn't save a model before making sure every process is done with training.

To do this, add [`~Accelerator.wait_for_everyone`] in your code. This blocks all processes that have finished first from continuing until all remaining processes have reached the same point (this has no effect if you're running on a single GPU or CPU).

```py
accelerator.wait_for_everyone()
```
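As an aside to the snippet above, here is a minimal sketch of the usual pattern around [`~Accelerator.wait_for_everyone`] (illustrative only, not part of the original tutorial; `model` and the `save_directory` path are assumed to exist in your script):

```py
# Block until every process has finished training before saving.
accelerator.wait_for_everyone()

# Unwrap the model prepared by Accelerate, then save it once with accelerator.save.
unwrapped_model = accelerator.unwrap_model(model)
accelerator.save(unwrapped_model.state_dict(), f"{save_directory}/model.pt")
```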
@@ -13,21 +13,11 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->

# Migrating your code to 🤗 Accelerate
# Add Accelerate to your code

This tutorial will detail how to easily convert existing PyTorch code to use 🤗 Accelerate!
You'll see that by just changing a few lines of code, 🤗 Accelerate can perform its magic and get you on
your way toward running your code on distributed systems with ease!
Each distributed training framework has their own way of doing things which can require writing a lot of custom code to adapt it to your PyTorch training code and training environment. Accelerate offers a friendly way to interface with these distributed training frameworks without having to learn the specific details of each one. Accelerate takes care of those details for you, so you can focus on the training code and scale it to any distributed training environment.

## The base training loop

To begin, write out a very basic PyTorch training loop.

<Tip>

We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand.

</Tip>
In this tutorial, you'll learn how to adapt your existing PyTorch code with Accelerate and get on your way toward training on distributed systems with ease! You'll start with a basic PyTorch training loop (it assumes all the training objects like `model` and `optimizer` have been set up already) and progressively integrate Accelerate into it.

```python
device = "cuda"
@@ -45,50 +35,44 @@ for batch in training_dataloader:
scheduler.step()
```

## Add in 🤗 Accelerate
## Accelerator

The [`Accelerator`] is the main class for adapting your code to work with Accelerate. It knows about the distributed setup you're using such as the number of different processes and your hardware type. This class also provides access to many of the necessary methods for enabling your PyTorch code to work in any distributed training environment and for managing and executing processes across devices.

That's why you should always start by importing and creating an [`Accelerator`] instance in your script.

To start using 🤗 Accelerate, first import and create an [`Accelerator`] instance:
```python
from accelerate import Accelerator

accelerator = Accelerator()
```
[`Accelerator`] is the main force behind utilizing all the possible options for distributed training!

### Setting the right device

The [`Accelerator`] class knows the right device to move any PyTorch object to at any time, so you should
change the definition of `device` to come from [`Accelerator`]:
The [`Accelerator`] also knows which device to move your PyTorch objects to, so it is recommended to let Accelerate handle this for you.

```diff
- device = 'cuda'
- device = "cuda"
+ device = accelerator.device
model.to(device)
```

### Preparing your objects
## Prepare PyTorch objects

Next, you need to pass all of the important objects related to training into [`~Accelerator.prepare`]. 🤗 Accelerate will
make sure everything is set up in the current environment for you to start training:
Next, you need to prepare your PyTorch objects (model, optimizer, scheduler, etc.) for distributed training. The [`~Accelerator.prepare`] method takes care of placing your model in the appropriate container (like single GPU or multi-GPU) for your training setup, adapting the optimizer and scheduler to use Accelerate's [`~optimizer.AcceleratedOptimizer`] and [`~scheduler.AcceleratedScheduler`], and creating a new dataloader that can be sharded across processes.

```
> [!TIP]
> Accelerate only prepares objects that inherit from their respective PyTorch classes such as `torch.optim.Optimizer`.

The PyTorch objects are returned in the same order they're sent.

```py
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
    model, optimizer, training_dataloader, scheduler
)
```
These objects are returned in the same order they were sent in. By default when using `device_placement=True`, all of the objects that can be sent to the right device will be.
If you need to work with data that isn't passed to [`~Accelerator.prepare`] but should be on the active device, you should pass in the `device` you made earlier.

<Tip warning={true}>
## Training loop

Accelerate will only prepare objects that inherit from their respective PyTorch classes (such as `torch.optim.Optimizer`).

</Tip>

### Modifying the training loop

Finally, three lines of code need to be changed in the training loop. 🤗 Accelerate's DataLoader classes will automatically handle the device placement by default,
and [`~Accelerator.backward`] should be used for performing the backward pass:
Finally, remove the `to(device)` calls to the inputs and targets in the training loop because Accelerate's DataLoader classes automatically place them on the right device. You should also replace the usual `backward()` pass with Accelerate's [`~Accelerator.backward`] method, which scales the gradients for you and uses the appropriate `backward()` method depending on your distributed setup (for example, DeepSpeed or Megatron).

```diff
- inputs = inputs.to(device)
@@ -99,17 +83,13 @@ and [`~Accelerator.backward`] should be used for performing the backward pass:
+ accelerator.backward(loss)
```

With that, your training loop is now ready to use 🤗 Accelerate!

## The finished code

Below is the final version of the converted code:
Put everything together and your new Accelerate training loop should now look like this!

```python
from accelerate import Accelerator

accelerator = Accelerator()

device = accelerator.device
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
    model, optimizer, training_dataloader, scheduler
)
@@ -124,6 +104,118 @@ for batch in training_dataloader:
scheduler.step()
```

## More Resources
## Training features

To learn more ways to migrate to 🤗 Accelerate, check out our [interactive migration tutorial](https://huggingface.co/docs/accelerate/usage_guides/explore) which showcases other items that need to be watched for when using Accelerate and how to do so quickly.
Accelerate offers additional features - like gradient accumulation, gradient clipping, mixed precision training and more - that you can add to your script to improve your training run. Let's explore these three features.

### Gradient accumulation

Gradient accumulation enables you to train on larger batch sizes by accumulating the gradients over multiple batches before updating the weights. This can be useful for getting around memory limitations. To enable this feature in Accelerate, specify the `gradient_accumulation_steps` parameter in the [`Accelerator`] class and add the [`~Accelerator.accumulate`] context manager to your script.

```diff
+ accelerator = Accelerator(gradient_accumulation_steps=2)
  model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader)

  for input, label in training_dataloader:
+     with accelerator.accumulate(model):
          predictions = model(input)
          loss = loss_function(predictions, label)
          accelerator.backward(loss)
          optimizer.step()
          scheduler.step()
          optimizer.zero_grad()
```

### Gradient clipping

Gradient clipping is a technique to prevent "exploding gradients", and Accelerate offers:

* [`~Accelerator.clip_grad_value_`] to clip gradients to a minimum and maximum value
* [`~Accelerator.clip_grad_norm_`] for normalizing gradients to a certain value (see the sketch after this list)

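For illustration, here is a minimal sketch of clipping by norm inside the training loop (not part of the original guide; it reuses the `model`, `optimizer`, `training_dataloader`, and `loss_function` names assumed above, and `max_norm=1.0` is an arbitrary choice):

```py
for input, label in training_dataloader:
    predictions = model(input)
    loss = loss_function(predictions, label)
    accelerator.backward(loss)
    # Clip only when gradients are actually synchronized across processes,
    # e.g. on the step that ends a gradient accumulation cycle.
    if accelerator.sync_gradients:
        accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
    optimizer.step()
    optimizer.zero_grad()
```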
### Mixed precision

Mixed precision accelerates training by using a lower precision data type like fp16 (half-precision) to calculate the gradients. For the best performance with Accelerate, the loss should be computed inside your model (like in Transformers models) because computations outside of the model are computed in full precision.

Set the mixed precision type to use in the [`Accelerator`], and then use the [`~Accelerator.autocast`] context manager to automatically cast the values to the specified data type.

> [!WARNING]
> Accelerate enables automatic mixed precision, so [`~Accelerator.autocast`] is only needed if there are other mixed precision operations besides those performed on loss by [`~Accelerator.backward`] which already handles the scaling.

```diff
+ accelerator = Accelerator(mixed_precision="fp16")
+ with accelerator.autocast():
      loss = complex_loss_function(outputs, target)
```

## Save and load

Accelerate can also save and load a *model* once training is complete, or you can save the model and optimizer *state*, which could be useful for resuming training.

### Model

Once all processes are complete, unwrap the model with the [`~Accelerator.unwrap_model`] method before saving it because the [`~Accelerator.prepare`] method wrapped your model into the proper interface for distributed training. If you don't unwrap the model, saving the model state dictionary also saves any potential extra layers from the larger model and you won't be able to load the weights back into your base model.

You should use the [`~Accelerator.save_model`] method to unwrap and save the model state dictionary. This method can also save a model into sharded checkpoints or into the [safetensors](https://hf.co/docs/safetensors/index) format.

<hfoptions id="save">
<hfoption id="single checkpoint">

```py
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory)
```

<Tip>

For models from the [Transformers](https://hf.co/docs/transformers/index) library, save the model with the [`~transformers.PreTrainedModel.save_pretrained`] method so that it can be reloaded with the [`~transformers.PreTrainedModel.from_pretrained`] method.

```py
from transformers import AutoModel

unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
    "path/to/my_model_directory",
    is_main_process=accelerator.is_main_process,
    save_function=accelerator.save,
)

model = AutoModel.from_pretrained("path/to/my_model_directory")
```

</Tip>

To load your weights, use the [`~Accelerator.unwrap_model`] method to unwrap the model first before loading the weights. All model parameters are references to tensors, so this loads your weights inside `model`.

```py
unwrapped_model = accelerator.unwrap_model(model)
path_to_checkpoint = os.path.join(save_directory, "pytorch_model.bin")
unwrapped_model.load_state_dict(torch.load(path_to_checkpoint))
```

</hfoption>
<hfoption id="sharded checkpoint">

Set `safe_serialization=True` to save the model in the safetensor format.

```py
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
```

To load a sharded checkpoint or a safetensor formatted checkpoint, use the [`~accelerate.load_checkpoint_in_model`] method. This method allows you to load a checkpoint onto a specific device.

```py
load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"":device})
```

</hfoption>
</hfoptions>

### State

During training, you may want to save the current state of the model, optimizer, random generators, and potentially learning rate schedulers so they can be restored in the *same script*. You should add the [`~Accelerator.save_state`] and [`~Accelerator.load_state`] methods to your script to save and load states.

To further customize where and how states are saved through [`~Accelerator.save_state`], use the [`~utils.ProjectConfiguration`] class. For example, if `automatic_checkpoint_naming` is enabled, each saved checkpoint is stored at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.

Any other stateful items to be stored should be registered with the [`~Accelerator.register_for_checkpointing`] method so they can be saved and loaded. Every object passed to this method to be stored must have a `load_state_dict` and `state_dict` function.

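To tie the pieces of this section together, here is a short illustrative sketch (not from the original page; the checkpoint directory name and the extra `ema_model` object are assumptions):

```py
from accelerate import Accelerator

accelerator = Accelerator()
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
    model, optimizer, training_dataloader, scheduler
)

# Hypothetical extra object with `state_dict`/`load_state_dict` (e.g. an EMA copy of the model).
accelerator.register_for_checkpointing(ema_model)

# Save the model, optimizer, RNG states, and registered objects to a directory...
accelerator.save_state("checkpoints/step_1000")

# ...and restore them later in the same script.
accelerator.load_state("checkpoints/step_1000")
```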
@@ -443,6 +443,12 @@ epoch 4: 94.71

And that's it!

Please note that [`notebook_launcher`] ignores the 🤗 Accelerate config file; to launch based on the config, use:

```bash
accelerate launch
```

## Debugging

A common issue when running the `notebook_launcher` is receiving a "CUDA has already been initialized" issue. This usually stems

docs/source/basic_tutorials/tpu.md (38, Normal file)
@@ -0,0 +1,38 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# TPU training

A [TPU (Tensor Processing Unit)](https://cloud.google.com/tpu/docs/intro-to-tpu) is a type of hardware specifically designed for training models efficiently. Accelerate supports TPU training, but there are a few things you should be aware of, namely graph compilation. This tutorial briefly discusses compilation, and for more details, take a look at the [Training on TPUs with Accelerate](../concept_guides/training_tpu) guide.

## Compilation

A TPU creates a graph of all the operations in the training step such as the forward pass, backward pass and optimizer step. This is why the first training step always takes a while because building and compiling this graph takes time. But once compilation is complete, it is cached and all subsequent steps are much faster.

The key is to avoid compiling your code again or else training will be very slow. This means all your operations must be exactly the same:

* all tensors in your batches must have the same length (for example, no dynamic padding for NLP tasks; see the sketch after this list)
* your code must be static (for example, no layers with for loops that have different lengths depending on the input such as an LSTM)

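As a concrete illustration of the first point, here is a sketch of fixed-length padding (not part of the original tutorial; it assumes a 🤗 Transformers tokenizer, a `texts` list, and an arbitrary `max_length` of 128):

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# Fixed-length padding keeps every batch the same shape, so the TPU graph is compiled once.
batch = tokenizer(
    texts,
    padding="max_length",  # instead of dynamic per-batch padding
    max_length=128,
    truncation=True,
    return_tensors="pt",
)
```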
## Weight tying

A common language model design is to tie the weights of the embedding and softmax layers. However, moving the model to a TPU (either yourself or passing it to the [`~Accelerator.prepare`] method) breaks the weight tying and you'll need to retie the weights.

To add special behavior (like weight tying) in your script for TPUs, check whether [`~Accelerator.distributed_type`] is `DistributedType.TPU` first. Then you can use the [`~transformers.PreTrainedModel.tie_weights`] method to tie the weights.

```py
if accelerator.distributed_type == DistributedType.TPU:
    model.tie_weights()
```
@@ -13,77 +13,82 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->

# Troubleshooting guide
# Troubleshoot

This guide aims to provide you with the tools and knowledge required to navigate some common issues. However,
as 🤗 Accelerate continuously evolves and the use cases and setups are diverse, you might encounter an issue not covered in this
guide. If the suggestions listed in this guide do not cover your situation, please refer to the final section of
the guide, [Asking for Help](#ask-for-help), to learn where to find help with your specific issue.
This guide provides solutions to some issues you might encounter when using Accelerate. Not all errors are covered because Accelerate is an active library that is continuously evolving and there are many different use cases and distributed training setups. If the solutions described here don't help with your specific error, please take a look at the [Ask for help](#ask-for-help) section to learn where and how to get help.

## Logging

When facing an error, logging can help narrow down where it is coming from. In a distributed setup with multiple processes,
logging can be a challenge, but 🤗 Accelerate provides a utility that streamlines the logging process and ensures that
logs are synchronized and managed effectively across the distributed setup.
Logging can help you identify where an error is coming from. In a distributed setup with multiple processes, logging can be a challenge, but Accelerate provides the [`~accelerate.logging`] utility to ensure logs are synchronized.

To troubleshoot an issue, use `accelerate.logging` instead of the standard Python `logging` module:
To troubleshoot an issue, use [`~accelerate.logging`] instead of the standard Python [`logging`](https://docs.python.org/3/library/logging.html#module-logging) module. Set the verbosity level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`) with the `log_level` parameter, and then you can either:

```diff
- import logging
+ from accelerate.logging import get_logger
- logger = logging.getLogger(__name__)
+ logger = get_logger(__name__)
```
1. Export the `log_level` as the `ACCELERATE_LOG_LEVEL` environment variable.
2. Pass the `log_level` directly to `get_logger`.

To set the log level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`), export it as the `ACCELERATE_LOG_LEVEL` environment variable,
or pass as `log_level` to `get_logger`:
For example, to set `log_level="INFO"`:

```python
```py
from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="INFO")
logger = get_logger(__name__, log_level="DEBUG")
```

By default, the log is called on main processes only. To call it on all processes, pass `main_process_only=False`.
If a log should be called on all processes and in order, also pass `in_order=True`.

```py
from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="DEBUG")
# log all processes
logger.debug("thing_to_log", main_process_only=False)
# log all processes in order
logger.debug("thing_to_log", main_process_only=False, in_order=True)
```

## Hanging code and timeout errors

### Mismatched tensor shapes
There can be many reasons why your code is hanging. Let's take a look at how to solve some of the most common issues that can cause your code to hang.

If your code seems to be hanging for a significant amount of time on a distributed setup, a common cause is mismatched shapes of tensors on different
devices.
### Mismatched tensor shapes

When running scripts in a distributed fashion, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are
necessary to grab tensors across devices to perform operations on them collectively. These (and other) functions rely on
`torch.distributed` performing a `gather` operation, which requires that tensors have the **exact same shape** across all processes.
When the tensor shapes don't match, you will experience hanging code, and eventually hit a timeout exception.
Mismatched tensor shapes is a common issue that can cause your code to hang for a significant amount of time on a distributed setup.

If you suspect this to be the case, use Accelerate's operational debug mode to immediately catch the issue.
When running scripts in a distributed setup, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are necessary to grab tensors across devices to collectively perform operations on them. These (and other) functions rely on `torch.distributed` to perform a `gather` operation, which requires tensors to have the **exact same shape** across all processes. When the tensor shapes don't match, your code hangs and you'll eventually hit a timeout exception.

The recommended way to enable Accelerate's operational debug mode is during `accelerate config` setup.
Alternative ways to enable debug mode are:
You can use Accelerate's operational debug mode to immediately catch this issue. We recommend enabling this mode during the `accelerate config` setup, but you can also enable it from the CLI, as an environment variable, or by manually editing the `config.yaml` file.

* From the CLI:
<hfoptions id="mismatch">
<hfoption id="CLI">

```bash
accelerate launch --debug {my_script.py} --arg1 --arg2
```

* As an environment variable (which avoids the need for `accelerate launch`):
</hfoption>
<hfoption id="environment variable">

If enabling debug mode as an environment variable, you don't need to call `accelerate launch`.

```bash
ACCELERATE_DEBUG_MODE="1" torchrun {my_script.py} --arg1 --arg2
```

* Manually changing the `config.yaml` file:
</hfoption>
<hfoption id="config.yaml">

```diff
compute_environment: LOCAL_MACHINE
+debug: true
Add `debug: true` to your `config.yaml` file.

```yaml
compute_environment: LOCAL_MACHINE
debug: true
```

Once you enable the debug mode, you should get a similar traceback that points to the tensor shape mismatch issue:
</hfoption>
</hfoptions>

Once you enable debug mode, you should get a traceback that points to the tensor shape mismatch issue.

```py
Traceback (most recent call last):
@@ -100,16 +105,14 @@ Operation: `accelerate.utils.operations.broadcast`
Input shapes:
  - Process 0: [1, 5]
  - Process 1: [1, 2, 5]
```

### Early stopping leads to hanging
### Early stopping

When doing early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss),
it may not be synchronized across all of them. As a result, a break can happen on process 0 but not on process 1.
This will cause the code to hang indefinitely until a timeout occurs.
For early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss), it may not be synchronized across all processes. As a result, a break can happen on process 0 but not on process 1 which will cause your code to hang indefinitely until a timeout occurs.

If you have early stopping conditionals, use `set_breakpoint` and `check_breakpoint` methods to make sure all the processes
are ended correctly:
If you have early stopping conditionals, use the `set_breakpoint` and `check_breakpoint` methods to make sure all the processes
are ended correctly.

```py
# Assume `should_do_breakpoint` is a custom defined function that returns a conditional,
@@ -122,35 +125,38 @@ if accelerator.check_breakpoint():
    break
```

### Hanging on low kernel versions on Linux
### Low kernel versions on Linux

This is a known issue. On Linux with kernel version < 5.5, hanging processes have been reported. To avoid
encountering this problem, we recommend upgrading your system to a later kernel version.
On Linux with kernel version < 5.5, hanging processes have been reported. To avoid this problem, upgrade your system to a later kernel version.

## CUDA out of memory
### MPI

One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory",
as the entire script needs to be restarted, progress is lost, and typically a developer would want to simply
start their script and let it run.
If your distributed CPU training job using MPI is hanging, ensure that you have
[passwordless SSH](https://www.open-mpi.org/faq/?category=rsh#ssh-keys) setup (using keys) between the nodes. This means
that for all nodes in your hostfile, you should be able to SSH from one node to another without being prompted for a password.

To address this problem, `Accelerate` offers a utility `find_executable_batch_size` that is heavily based on [toma](https://github.com/BlackHC/toma).
The utility retries code that fails due to OOM (out-of-memory) conditions and lowers batch sizes automatically.
Next, try to run the `mpirun` command as a sanity check. For example, the command below should print out the
hostnames for each of the nodes.

### find_executable_batch_size
```bash
mpirun -f hostfile -n {number of nodes} -ppn 1 hostname
```

This algorithm operates with exponential decay, decreasing the batch size in half after each failed run on some
training script. To use it, restructure your training function to include an inner function that includes this wrapper,
and build your dataloaders inside it. At a minimum, this could look like 4 new lines of code.
## CUDA Out-of-Memory

One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory". The entire script needs to be restarted and any progress is lost.

To address this problem, Accelerate provides the [`find_executable_batch_size`] utility that is heavily based on [toma](https://github.com/BlackHC/toma).
This utility retries code that fails due to OOM (out-of-memory) conditions and automatically lowers batch sizes. For each OOM condition, the algorithm decreases the batch size by half and retries the code until it succeeds.

To use [`find_executable_batch_size`], restructure your training function to include an inner function with `find_executable_batch_size` and build your dataloaders inside it. At a minimum, this only takes 4 new lines of code.

<Tip warning={true}>

The inner function *must* take in the batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for us.
The inner function **must** take batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for you. Any object (models, optimizers) that consumes CUDA memory and is passed to the [`Accelerator`] also **must** be declared inside the inner function.

</Tip>

It should also be noted that anything which will consume CUDA memory and passed to the `accelerator` **must** be declared inside the inner function,
such as models and optimizers.

```diff
def training_function(args):
    accelerator = Accelerator()
@@ -175,48 +181,31 @@ def training_function(args):
+ inner_training_loop()
```

To find out more, check the documentation [here](../package_reference/utilities#accelerate.find_executable_batch_size).

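Since the example in the hunk above is truncated, here is an illustrative end-to-end sketch of the pattern (the `get_model`, `get_optimizer`, `get_dataloader`, and `train` helpers are placeholders, not Accelerate APIs):

```py
from accelerate import Accelerator
from accelerate.utils import find_executable_batch_size

def training_function(args):
    accelerator = Accelerator()

    @find_executable_batch_size(starting_batch_size=args.batch_size)
    def inner_training_loop(batch_size):
        nonlocal accelerator  # reuse the Accelerator from the enclosing function
        # Everything that consumes CUDA memory must be (re)built inside this function.
        model = get_model()
        optimizer = get_optimizer(model)
        train_dataloader = get_dataloader(batch_size)
        model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)
        train(model, optimizer, train_dataloader, accelerator)

    inner_training_loop()
```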
## Non-reproducible results between device setups

If you have changed the device setup and are observing different model performance, this is likely due to the fact that
you have not updated your script when moving from one setup to another. The same script with the same batch size across TPU,
multi-GPU, and single-GPU with Accelerate will have different results.
If you changed the device setup and observe different model performance, it is likely you didn't update your script when moving from one setup to another. Even if you're using the same script with the same batch size, the results will still be different on a TPU, multi-GPU, and single GPU.

For example, if you were previously training on a single GPU with a batch size of 16, when moving to a two-GPU setup,
you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate,
the batch size passed to the dataloader is the **batch size per GPU**.
For example, if you were training on a single GPU with a batch size of 16 and you move to a dual GPU setup, you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**.

To make sure you can reproduce the results between the setups, make sure to use the same seed, adjust the batch size
accordingly, and consider scaling the learning rate.
To make sure you can reproduce the results between the setups, make sure to use the same seed, adjust the batch size accordingly, and consider scaling the learning rate.

For more details and a quick reference for batch sizes, check out the [Comparing performance between different device setups](../concept_guides/performance) guide.

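To make the batch-size arithmetic explicit, here is a small illustrative sketch (not from the original guide; `dataset` and the base values are assumptions):

```py
import torch
from accelerate import Accelerator

accelerator = Accelerator()

# Reproducing a single-GPU run that used a batch size of 16:
effective_batch_size = 16

# The batch size passed to the dataloader is per GPU, so divide by the number
# of processes to keep the same effective batch size.
per_device_batch_size = effective_batch_size // accelerator.num_processes
train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=per_device_batch_size, shuffle=True)

# If you instead keep the per-device batch size fixed (so the effective batch size
# grows), a common heuristic is to scale the learning rate linearly:
# learning_rate = base_learning_rate * accelerator.num_processes
```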
## Performance issues on different GPUs

If your multi-GPU setup consists of different GPUs, you may encounter some performance issues:

- There may be an imbalance in GPU memory between the GPUs. In this case, the GPU with the smaller memory will limit the batch size or the size of the model that can be loaded onto the GPUs.
- If you are using GPUs with different performance profiles, the performance will be driven by the slowest GPU you are using because the other GPUs will have to wait for it to complete its workload.

Vastly different GPUs within the same setup can lead to performance bottlenecks.

## Ask for help

If none of the solutions and advice here helped resolve your issue, you can always reach out to the community and Accelerate team for help.

- Ask for help on the Hugging Face forums by posting your question in the [🤗 Accelerate category](https://discuss.huggingface.co/c/accelerate/18). Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved!
- Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you.
- Create an Issue on the 🤗 Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you think you've found a bug related to the library. Include context regarding the bug and details about your distributed setup to help us better figure out what's wrong and how we can fix it.

@ -167,3 +167,18 @@ As you can see, if you are not careful about how you set up your gradient synchr

If you are worried about making sure everything is done properly, we highly recommend utilizing the [`~Accelerator.accumulate`] function and passing `gradient_accumulation_steps` or `gradient_accumulation_plugin` to the [`Accelerator`] object so Accelerate can handle this for you.

### `no_sync` requires additional GPU memory when using FSDP

Be aware that not syncing gradients can have adverse effects while performing FSDP training. As warned in `torch`, the [`no_sync` context manager for FSDP](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.no_sync) will require additional memory.

Therefore in memory-intensive situations while using FSDP, we recommend setting `sync_each_batch` to `True` in the [`~utils.GradientAccumulationPlugin`] to disable `no_sync`.

See the example below where we fine-tune Mixtral (47B parameters) on 8 A100-80GB GPUs. We see that even for a modest `gradient_accumulation_steps=2` we quickly go out-of-memory (OOM) if `no_sync` is enabled. Again, this is due to the additional memory overhead of FSDP's `no_sync`. However, if `no_sync` is disabled via `sync_each_batch=True`, then the memory consumption for `gradient_accumulation_steps=16` reverts to that of `gradient_accumulation_steps=1`.

| Model        | `no_sync` (accum=1) | `no_sync` (accum=2) | `no_sync` disabled (accum=16) |
| :----------: | :-----------------: | :-----------------: | :---------------------------: |
| mixtral 8x7B | 69G                 | OOM                 | 69G                           |

> [!WARNING]
> Disabling `no_sync` means there _will be a slowdown_ due to the extra data syncs, as explained in the earlier sections of this guide.

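As a minimal, hedged sketch of enabling `sync_each_batch` (the rest of the training loop is assumed unchanged):

```python
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# sync_each_batch=True disables FSDP's `no_sync` context manager, so gradients are
# synchronized on every batch instead of only on the final accumulation step.
plugin = GradientAccumulationPlugin(num_steps=16, sync_each_batch=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
```
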
@ -34,7 +34,7 @@ MS-AMP O3 | FP8 | FP8 | FP8 | FP16 | FP8 | FP8+FP16

## `TransformersEngine`

`TransformersEngine` is the first solution for training in 8-bit floating point. It works by using drop-in replacements for certain layers in a model that utilize its FP8 engine to reduce the number of bits (such as 32 to 8) without degrading the final accuracy of the model.

Specifically, 🤗 Accelerate will find and replace the following layers with `TransformersEngine` versions:

@ -71,4 +71,4 @@ MS-AMP takes a different approach to `TransformersEngine` by providing three dif

## Combining the two

More experiments need to be performed, but it's been noted that combining both MS-AMP and TransformersEngine can lead to the highest throughput by relying on NVIDIA's optimized FP8 operators and utilizing how MS-AMP reduces the memory overhead.

@ -45,7 +45,7 @@ Why is this important? Under the hood this will set **5** different seed setting
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
    if is_torch_xla_available():
        xm.set_rng_state(seed)
```

@ -15,197 +15,12 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# Accelerator
|
||||
|
||||
The [`Accelerator`] is the main class for enabling distributed training on any type of training setup. Read the [Add Accelerator to your code](../basic_tutorials/migration) tutorial to learn more about how to add the [`Accelerator`] to your script.
|
||||
|
||||
## Quick adaptation of your code
|
||||
|
||||
To quickly adapt your script to work on any kind of setup with 🤗 Accelerate just:
|
||||
|
||||
1. Initialize an [`Accelerator`] object (that we will call `accelerator` throughout this page) as early as possible in your script.
|
||||
2. Pass your dataloader(s), model(s), optimizer(s), and scheduler(s) to the [`~Accelerator.prepare`] method.
|
||||
3. Remove all the `.cuda()` or `.to(device)` from your code and let the `accelerator` handle the device placement for you.
|
||||
|
||||
<Tip>
|
||||
|
||||
Step three is optional, but considered a best practice.
|
||||
|
||||
</Tip>
|
||||
|
||||
4. Replace `loss.backward()` in your code with `accelerator.backward(loss)`
|
||||
5. Gather your predictions and labels before storing them or using them for metric computation using [`~Accelerator.gather`]
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Step five is mandatory when using distributed evaluation
|
||||
|
||||
</Tip>
|
||||
|
||||
In most cases this is all that is needed. The next section lists a few more advanced use cases and nice features
|
||||
you should search for and replace by the corresponding methods of your `accelerator`:
|
||||
|
||||
## Advanced recommendations
|
||||
|
||||
### Printing
|
||||
|
||||
`print` statements should be replaced by [`~Accelerator.print`] to be printed once per process:
|
||||
|
||||
```diff
|
||||
- print("My thing I want to print!")
|
||||
+ accelerator.print("My thing I want to print!")
|
||||
```
|
||||
|
||||
### Executing processes
|
||||
|
||||
#### Once on a single server
|
||||
|
||||
For statements that should be executed once per server, use [`~Accelerator.is_local_main_process`]:
|
||||
|
||||
```python
if accelerator.is_local_main_process:
    do_thing_once_per_server()
```
|
||||
|
||||
A function can be wrapped using the [`~Accelerator.on_local_main_process`] function to achieve the same
|
||||
behavior on a function's execution:
|
||||
|
||||
```python
@accelerator.on_local_main_process
def do_my_thing():
    "Something done once per server"
    do_thing_once_per_server()
```
|
||||
|
||||
#### Only ever once across all servers
|
||||
|
||||
For statements that should only ever be executed once, use [`~Accelerator.is_main_process`]:
|
||||
|
||||
```python
if accelerator.is_main_process:
    do_thing_once()
```
|
||||
|
||||
A function can be wrapped using the [`~Accelerator.on_main_process`] function to achieve the same
|
||||
behavior on a function's execution:
|
||||
|
||||
```python
@accelerator.on_main_process
def do_my_thing():
    "Something done only once across all servers"
    do_thing_once()
```
|
||||
|
||||
#### On specific processes
|
||||
|
||||
If a function should be run on a specific overall or local process index, there are similar decorators to achieve this:
|
||||
|
||||
```python
@accelerator.on_local_process(local_process_index=0)
def do_my_thing():
    "Something done on process index 0 on each server"
    do_thing_on_index_zero_on_each_server()
```
|
||||
|
||||
```python
@accelerator.on_process(process_index=0)
def do_my_thing():
    "Something done on process index 0"
    do_thing_on_index_zero()
```
|
||||
|
||||
### Synchronicity control
|
||||
|
||||
Use [`~Accelerator.wait_for_everyone`] to make sure all processes join that point before continuing. (Useful before a model save for instance).
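
For instance, a minimal sketch of guarding a save (assuming `model` was prepared earlier in the script):

```python
# Block until every process has finished its current work, then save once.
accelerator.wait_for_everyone()
if accelerator.is_main_process:
    accelerator.save_model(model, "my_checkpoint")
```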
|
||||
|
||||
### Saving and loading
|
||||
|
||||
```python
|
||||
model = MyModel()
|
||||
model = accelerator.prepare(model)
|
||||
```
|
||||
|
||||
Use [`~Accelerator.save_model`] instead of `torch.save` to save a model. It will remove all model wrappers added during the distributed process, get the state_dict of the model and save it. The state_dict will be in the same precision as the model being trained.
|
||||
|
||||
```diff
|
||||
- torch.save(state_dict, "my_state.pkl")
|
||||
+ accelerator.save_model(model, save_directory)
|
||||
```
|
||||
|
||||
[`~Accelerator.save_model`] can also save a model into sharded checkpoints or with safetensors format.
|
||||
Here is an example:
|
||||
|
||||
```python
|
||||
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
|
||||
```
|
||||
|
||||
#### 🤗 Transformers models
|
||||
|
||||
If you are using models from the [🤗 Transformers](https://huggingface.co/docs/transformers/) library, you can use the `.save_pretrained()` method.
|
||||
|
||||
```python
|
||||
from transformers import AutoModel
|
||||
|
||||
model = AutoModel.from_pretrained("bert-base-cased")
|
||||
model = accelerator.prepare(model)
|
||||
|
||||
# ...fine-tune with PyTorch...
|
||||
|
||||
unwrapped_model = accelerator.unwrap_model(model)
|
||||
unwrapped_model.save_pretrained(
|
||||
"path/to/my_model_directory",
|
||||
is_main_process=accelerator.is_main_process,
|
||||
save_function=accelerator.save,
|
||||
)
|
||||
```
|
||||
|
||||
This will ensure your model stays compatible with other 🤗 Transformers functionality like the `.from_pretrained()` method.
|
||||
|
||||
```python
|
||||
from transformers import AutoModel
|
||||
|
||||
model = AutoModel.from_pretrained("path/to/my_model_directory")
|
||||
```
|
||||
|
||||
### Operations
|
||||
|
||||
Use [`~Accelerator.clip_grad_norm_`] instead of `torch.nn.utils.clip_grad_norm_` and [`~Accelerator.clip_grad_value_`] instead of `torch.nn.utils.clip_grad_value_`.
|
||||
|
||||
### Gradient Accumulation
|
||||
|
||||
To perform gradient accumulation, use [`~Accelerator.accumulate`] and specify `gradient_accumulation_steps`. This will also automatically ensure the gradients are synced or unsynced during multi-device training, check if the step should actually be performed, and auto-scale the loss:

```diff
- accelerator = Accelerator()
+ accelerator = Accelerator(gradient_accumulation_steps=2)

  for (input, label) in training_dataloader:
+     with accelerator.accumulate(model):
          predictions = model(input)
          loss = loss_function(predictions, labels)
          accelerator.backward(loss)
          optimizer.step()
          scheduler.step()
          optimizer.zero_grad()
```
|
||||
#### GradientAccumulationPlugin
|
||||
[[autodoc]] utils.GradientAccumulationPlugin
|
||||
|
||||
|
||||
Instead of passing `gradient_accumulation_steps`, you can instantiate a `GradientAccumulationPlugin` and pass it to the [`Accelerator`]'s `__init__` as `gradient_accumulation_plugin`. You can only pass one of `gradient_accumulation_plugin` or `gradient_accumulation_steps`; passing both will raise an error.
|
||||
```diff
|
||||
from accelerate.utils import GradientAccumulationPlugin
|
||||
|
||||
gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2)
|
||||
- accelerator = Accelerator()
|
||||
+ accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin)
|
||||
```
|
||||
|
||||
In addition to the number of steps, this also lets you configure whether or not you adjust your learning rate scheduler to account for the change in steps due to accumulation.
|
||||
|
||||
## Accelerator[[api]]
|
||||
|
||||
[[autodoc]] Accelerator
|
||||
|
||||
## Utilities
|
||||
|
||||
[[autodoc]] accelerate.utils.gather_object
|
||||
|
||||
docs/source/package_reference/inference.md (new file, 20 lines)
@ -0,0 +1,20 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# The inference API
|
||||
|
||||
These docs refer to the [PiPPy](https://github.com/PyTorch/PiPPy) integration.
|
||||
|
||||
[[autodoc]] inference.prepare_pippy
|
||||
@ -37,3 +37,7 @@ related to distributed training or mixed precision are created.
|
||||
## InitProcessGroupKwargs
|
||||
|
||||
[[autodoc]] InitProcessGroupKwargs
|
||||
|
||||
## KwargsHandler
|
||||
|
||||
[[autodoc]] utils.KwargsHandler
|
||||
|
||||
@ -62,10 +62,8 @@ These are standalone dataclasses used for checks, such as the type of distribute
|
||||
|
||||
These are configurable arguments for specific interactions throughout the PyTorch ecosystem that Accelerate handles under the hood.
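
For instance, a handler is passed to the [`Accelerator`] through `kwargs_handlers`; a minimal sketch that raises the process-group timeout (useful when some ranks stall on long pre-processing) could look like:

```python
from datetime import timedelta

from accelerate import Accelerator
from accelerate.utils import InitProcessGroupKwargs

# Give slow ranks up to an hour before the process group times out.
kwargs = InitProcessGroupKwargs(timeout=timedelta(hours=1))
accelerator = Accelerator(kwargs_handlers=[kwargs])
```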
|
||||
|
||||
|
||||
[[autodoc]] utils.AutocastKwargs
|
||||
|
||||
|
||||
[[autodoc]] utils.DistributedDataParallelKwargs
|
||||
|
||||
[[autodoc]] utils.FP8RecipeKwargs
|
||||
@ -74,6 +72,8 @@ These are configurable arguments for specific interactions throughout the PyTorc
|
||||
|
||||
[[autodoc]] utils.InitProcessGroupKwargs
|
||||
|
||||
[[autodoc]] utils.KwargsHandler
|
||||
|
||||
## Plugins
|
||||
|
||||
These are plugins that can be passed to the [`Accelerator`] object. While they are defined elsewhere in the documentation,
|
||||
@ -95,6 +95,8 @@ These are classes which can be configured and passed through to the appropriate
|
||||
|
||||
[[autodoc]] utils.BnbQuantizationConfig
|
||||
|
||||
[[autodoc]] utils.DataLoaderConfiguration
|
||||
|
||||
[[autodoc]] utils.ProjectConfiguration
|
||||
|
||||
## Environmental Variables
|
||||
@ -150,7 +152,7 @@ These functionalities check the state of the current working environment includi
|
||||
|
||||
[[autodoc]] utils.is_torch_version
|
||||
|
||||
[[autodoc]] utils.is_torch_xla_available
|
||||
|
||||
[[autodoc]] utils.is_xpu_available
|
||||
|
||||
@ -164,6 +166,10 @@ These functionalities check the state of the current working environment includi
|
||||
|
||||
When setting up 🤗 Accelerate for the first time, rather than running `accelerate config`, [`~utils.write_basic_config`] can be used as an alternative for quick configuration.
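
A minimal sketch of that quick setup (the `mixed_precision` value is just an example):

```python
from accelerate.utils import write_basic_config

# Writes a default config file based on the detected hardware so that
# `accelerate launch` can be used without the interactive `accelerate config`.
write_basic_config(mixed_precision="fp16")
```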
|
||||
|
||||
[[autodoc]] utils.set_numa_affinity
|
||||
|
||||
[[autodoc]] utils.environment.override_numa_affinity
|
||||
|
||||
## Memory
|
||||
|
||||
[[autodoc]] utils.find_executable_batch_size
|
||||
|
||||
@ -9,26 +9,78 @@ Unless required by applicable law or agreed to in writing, software distributed
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Quicktour
|
||||
|
||||
There are many ways to launch and run your code depending on your training environment ([torchrun](https://pytorch.org/docs/stable/elastic/run.html), [DeepSpeed](https://www.deepspeed.ai/), etc.) and available hardware. Accelerate offers a unified interface for launching and training on different distributed setups, allowing you to focus on your PyTorch training code instead of the intricacies of adapting your code to these different setups. This allows you to easily scale your PyTorch code for training and inference on distributed setups with hardware like GPUs and TPUs. Accelerate also provides Big Model Inference to make loading and running inference with really large models that usually don't fit in memory more accessible.
|
||||
|
||||
This quicktour introduces the three main features of Accelerate:

* a unified command line launching interface for distributed training scripts
* a training library for adapting PyTorch training code to run on different distributed setups
* Big Model Inference

## Unified launch interface

Accelerate automatically selects the appropriate configuration values for any given distributed training framework (DeepSpeed, FSDP, etc.) through a unified configuration file generated from the [`accelerate config`](package_reference/cli#accelerate-config) command. You could also pass the configuration values explicitly to the command line, which is helpful in certain situations like if you're using SLURM.

But in most cases, you should always run [`accelerate config`](package_reference/cli#accelerate-config) first to help Accelerate learn about your training setup.

```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
The [`accelerate config`](package_reference/cli#accelerate-config) command creates and saves a `default_config.yaml` file in Accelerate's cache folder. This file stores the configuration for your training environment, which helps Accelerate correctly launch your training script based on your machine.
|
||||
|
||||
After you've configured your environment, you can test your setup with [`accelerate test`](package_reference/cli#accelerate-test), which launches a short script to test the distributed environment.
|
||||
|
||||
```bash
|
||||
accelerate test
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> Add `--config_file` to the `accelerate test` or `accelerate launch` command to specify the location of the configuration file if it is saved in a non-default location like the cache.
|
||||
|
||||
Once your environment is set up, launch your training script with [`accelerate launch`](package_reference/cli#accelerate-launch)!
|
||||
|
||||
```bash
|
||||
accelerate launch path_to_script.py --args_for_the_script
|
||||
```
|
||||
|
||||
Check out the [Launch distributed code](basic_tutorials/launch) tutorial for more information about launching your scripts.
|
||||
|
||||
## Adapt training code
|
||||
|
||||
The next main feature of Accelerate is the [`Accelerator`] class which adapts your PyTorch code to run on different distributed setups.
|
||||
|
||||
You only need to add a few lines of code to your training script to enable it to run on multiple GPUs or TPUs.
|
||||
|
||||
```diff
+ from accelerate import Accelerator
+ accelerator = Accelerator()

+ device = accelerator.device
+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(
+     model, optimizer, training_dataloader, scheduler
+ )

  for batch in training_dataloader:
      optimizer.zero_grad()
      inputs, targets = batch
-     inputs = inputs.to(device)
-     targets = targets.to(device)
      outputs = model(inputs)
      loss = loss_function(outputs, targets)
+     accelerator.backward(loss)
      optimizer.step()
      scheduler.step()
```
|
||||
|
||||
1. Import and instantiate the [`Accelerator`] class at the beginning of your training script. The [`Accelerator`] class initializes everything necessary for distributed training, and it automatically detects your training environment (a single machine with a GPU, a machine with several GPUs, several machines with multiple GPUs or a TPU, etc.) based on how the code was launched.
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
@ -36,27 +88,16 @@ from accelerate import Accelerator
|
||||
accelerator = Accelerator()
|
||||
```
|
||||
|
||||
2. Remove calls like `.cuda()` on your model and input data. The [`Accelerator`] class automatically places these objects on the appropriate device for you.

> [!WARNING]
> This step is *optional* but it is considered best practice to allow Accelerate to handle device placement. You could also deactivate automatic device placement by passing `device_placement=False` when initializing the [`Accelerator`]. If you want to explicitly place objects on a device with `.to(device)`, make sure you use `accelerator.device` instead. For example, if you create an optimizer before placing a model on `accelerator.device`, training fails on a TPU.

|
||||
```py
|
||||
device = accelerator.device
|
||||
```
|
||||
|
||||
3. Pass all relevant PyTorch objects for training (optimizer, model, dataloader(s), learning rate scheduler) to the [`~Accelerator.prepare`] method as soon as they're created. This method wraps the model in a container optimized for your distributed setup, uses Accelerate's version of the optimizer and scheduler, and creates a sharded version of your dataloader for distribution across GPUs or TPUs.
|
||||
|
||||
```python
|
||||
model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
||||
@ -64,55 +105,23 @@ model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
||||
)
|
||||
```
|
||||
|
||||
4. Replace `loss.backward()` with [`~Accelerator.backward`] to use the correct `backward()` method for your training setup.

```py
accelerator.backward(loss)
```

Read [Accelerate's internal mechanisms](concept_guides/internal_mechanism) guide to learn more details about how Accelerate adapts your code.

### Distributed evaluation

To perform distributed evaluation, pass your validation dataloader to the [`~Accelerator.prepare`] method:
|
||||
|
||||
```python
|
||||
validation_dataloader = accelerator.prepare(validation_dataloader)
|
||||
```
|
||||
|
||||
Each device in your distributed setup only receives a part of the evaluation data, which means you should group your predictions together with the [`~Accelerator.gather_for_metrics`] method. This method requires all tensors to be the same size on each process, so if your tensors have different sizes on each process (for instance when dynamically padding to the maximum length in a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad your tensor to the largest size across processes.
|
||||
|
||||
```python
|
||||
for inputs, targets in validation_dataloader:
|
||||
@ -123,319 +132,50 @@ for inputs, targets in validation_dataloader:
|
||||
metric.add_batch(all_predictions, all_targets)
|
||||
```
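
For instance, a sketch of padding variable-length predictions before gathering (here `predictions` is assumed to be a batch of token ids and `tokenizer` your tokenizer):

```python
# Pad along the sequence dimension to the largest length across processes so that
# `gather_for_metrics` receives same-sized tensors from every process.
predictions = accelerator.pad_across_processes(predictions, dim=1, pad_index=tokenizer.pad_token_id)
all_predictions = accelerator.gather_for_metrics(predictions)
```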
|
||||
|
||||
> [!TIP]
> Data at the end of a dataset may be duplicated so the batch can be equally divided among all workers. The [`~Accelerator.gather_for_metrics`] method automatically removes the duplicated data to calculate a more accurate metric.

## Big Model Inference

Accelerate's Big Model Inference has two main features, [`~accelerate.init_empty_weights`] and [`~accelerate.load_checkpoint_and_dispatch`], to load large models for inference that typically don't fit into memory.

> [!TIP]
> Take a look at the [Handling big models for inference](concept_guides/big_model_inference) guide for a better understanding of how Big Model Inference works under the hood.

### Empty weights initialization

The [`~accelerate.init_empty_weights`] context manager initializes models of any size by creating a *model skeleton* and moving and placing parameters each time they're created to PyTorch's [**meta**](https://pytorch.org/docs/main/meta.html) device. This way, not all weights are immediately loaded and only a small part of the model is loaded into memory at a time.

For example, loading an empty [Mixtral-8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) model takes significantly less memory than fully loading the models and weights on the CPU.
|
||||
|
||||
```py
from accelerate import init_empty_weights
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)
```
|
||||
|
||||
### Load and dispatch weights

The [`~accelerate.load_checkpoint_and_dispatch`] function loads full or sharded checkpoints into the empty model, and automatically distributes weights across all available devices.

The `device_map` parameter determines where to place each model layer, and specifying `"auto"` places them on the GPU first, then the CPU, and finally the hard drive as memory-mapped tensors if there's still not enough memory. Use the `no_split_module_classes` parameter to indicate which modules shouldn't be split across devices (typically those with a residual connection).

```py
from accelerate import load_checkpoint_and_dispatch

model = load_checkpoint_and_dispatch(
    model, checkpoint="mistralai/Mixtral-8x7B-Instruct-v0.1", device_map="auto", no_split_module_classes=['Block']
)
```
|
||||
|
||||
This will launch a short script that will test the distributed environment. If it runs without issues, you are ready for
|
||||
the next step!
|
||||
## Next steps
|
||||
|
||||
Note that if you specified a location for the config file in the previous step, you need to pass it here as well:
|
||||
Now that you've been introduced to the main Accelerate features, your next steps could include:
|
||||
|
||||
```bash
|
||||
accelerate test --config_file path_to_config.yaml
|
||||
```
|
||||
|
||||
Now that this is done, you can run your script with the following command:
|
||||
|
||||
```bash
|
||||
accelerate launch path_to_script.py --args_for_the_script
|
||||
```
|
||||
|
||||
If you stored the config file in a non-default location, you can indicate it to the launcher like this:
|
||||
|
||||
```bash
|
||||
accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script
|
||||
```
|
||||
|
||||
You can override any of the arguments determined by your config file. To see the complete list of parameters that you
|
||||
can pass in, run `accelerate launch -h`. (And further niche argument help by passing in partial commands, such as `accelerate launch --multi_gpu -h` for all `multi_gpu` args)
|
||||
|
||||
Check out the [Launch tutorial](basic_tutorials/launch) for more information about launching your scripts.
|
||||
|
||||
## Common modifications of the base case
|
||||
|
||||
The previous section covers the minimal essential steps to move a training script into a distributed setup with 🤗 Accelerate.
|
||||
Here we describe common modifications/deviations from the base case scenario and the adjustments you need to make to accommodate for them.
|
||||
|
||||
### Launch distributed training from a notebook
|
||||
|
||||
Accelerate has a [`notebook_launcher`] to help you launch your training function from a
|
||||
notebook. This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training on several GPUs and machines
|
||||
(if the machine on which you are running your notebook has them).
|
||||
|
||||
Define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a
|
||||
cell with the following code:
|
||||
|
||||
```python
|
||||
from accelerate import notebook_launcher
|
||||
|
||||
notebook_launcher(training_function)
|
||||
```
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Your [`Accelerator`] object should only be defined inside the training function. This is because the
|
||||
initialization should be done inside the launcher only.
|
||||
|
||||
</Tip>
|
||||
|
||||
Check out the [Notebook Launcher tutorial](basic_tutorials/notebook) for more information about training on TPUs.
|
||||
|
||||
### Specifics of training on TPU
|
||||
|
||||
If you want to launch your script on TPUs, there are a few caveats you should be aware of. Behind the scenes, the TPUs
|
||||
will create a graph of all the operations happening in your training step (forward pass, backward pass and optimizer
|
||||
step). This is why your first step of training will always be very long as building and compiling this graph for
|
||||
optimizations takes some time.
|
||||
|
||||
The good news is that this compilation will be cached so the second step and all the following will be much faster. The
|
||||
bad news is that it only applies if all of your steps do exactly the same operations, which implies:
|
||||
|
||||
- having all tensors of the same length in all your batches
|
||||
- having static code (i.e., not a for loop of length that could change from step to step)
|
||||
|
||||
Having any of the things above change between two steps will trigger a new compilation which will, once again, take a
|
||||
lot of time. In practice, that means you must take special care to have all your tensors in your inputs of the same
|
||||
shape (so no dynamic padding for instance if you are in an NLP problem) and should not use layers with for loops that
|
||||
have different lengths depending on the inputs (such as an LSTM) or the training will be excruciatingly slow.
|
||||
|
||||
To introduce special behavior in your script for TPUs you can check the `distributed_type` of your
|
||||
`accelerator`:
|
||||
|
||||
```python docstyle-ignore
|
||||
from accelerate import DistributedType
|
||||
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
# do something of static shape
|
||||
else:
|
||||
# go crazy and be dynamic
|
||||
```
|
||||
|
||||
The [NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py) shows an example in a
|
||||
situation with dynamic padding.
|
||||
|
||||
One last thing to pay close attention to: if your model has tied weights (such as language models which tie the weights
|
||||
of the embedding matrix with the weights of the decoder), moving this model to the TPU (either yourself or after you
|
||||
passed your model to [`~Accelerator.prepare`]) will break the tying. You will need to retie the weights
|
||||
after. You can find an example of this in the [run_clm_no_trainer](https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py) script in
|
||||
the Transformers repository.
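
A minimal sketch of the fix (assuming a 🤗 Transformers model, which exposes `tie_weights`):

```python
model = accelerator.prepare(model)
# Moving the model to the TPU broke the weight tying, so retie the weights afterwards.
model.tie_weights()
```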
|
||||
|
||||
Check out the [TPU tutorial](concept_guides/training_tpu) for more information about training on TPUs.
|
||||
|
||||
### Execute a statement only on one process
|
||||
|
||||
Some of your instructions only need to run for one process on a given server: for instance a data download or a log
|
||||
statement. To do this, wrap the statement in a test like this:
|
||||
|
||||
```python docstyle-ignore
|
||||
if accelerator.is_local_main_process:
|
||||
# Is executed once per server
|
||||
```
|
||||
|
||||
Another example is progress bars: to avoid having multiple progress bars in your output, you should only display one on
|
||||
the local main process:
|
||||
|
||||
```python
|
||||
from tqdm.auto import tqdm
|
||||
|
||||
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
|
||||
```
|
||||
|
||||
The *local* means per machine: if you are running your training on two servers with several GPUs, the instruction will
|
||||
be executed once on each of those servers. If you need to execute something only once for all processes (and not per
|
||||
machine) for instance, uploading the final model to the 🤗 model hub, wrap it in a test like this:
|
||||
|
||||
```python docstyle-ignore
|
||||
if accelerator.is_main_process:
|
||||
# Is executed once only
|
||||
```
|
||||
|
||||
For printing statements you only want executed once per machine, you can just replace the `print` function by
|
||||
`accelerator.print`.
|
||||
|
||||
|
||||
### Defer execution on multiple GPUs
|
||||
|
||||
When you run your usual script, instructions are executed in order. Using 🤗 Accelerate to deploy your script on several
|
||||
GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be
|
||||
faster than others.
|
||||
|
||||
You might need to wait for all processes to have reached a certain point before executing a given instruction. For
|
||||
instance, you shouldn't save a model before making sure every process is done with training. To do this, add the
|
||||
following line in your code:
|
||||
|
||||
```
|
||||
accelerator.wait_for_everyone()
|
||||
```
|
||||
|
||||
This instruction will block all the processes that arrive first until all the other processes have reached that
|
||||
point (if you run your script on just one GPU or CPU, this won't do anything).
|
||||
|
||||
|
||||
### Save/load a model in a distributed setup
|
||||
|
||||
Saving the model you trained might need a bit of adjustment: first you should wait for all processes to reach that
|
||||
point in the script as shown above, and then, you should unwrap your model before saving it. This is because when going
|
||||
through the [`~Accelerator.prepare`] method, your model may have been placed inside a bigger model,
|
||||
which deals with the distributed training. This in turn means that saving your model state dictionary without taking
|
||||
any precaution will take that potential extra layer into account, and you will end up with weights you can't load back
|
||||
in your base model. The [`~Accelerator.save_model`] method will help you to achieve that. It will unwrap your model and save
|
||||
the model state dictionary.
|
||||
|
||||
Here is an example:
|
||||
|
||||
```
|
||||
accelerator.wait_for_everyone()
|
||||
accelerator.save_model(model, save_directory)
|
||||
```
|
||||
|
||||
The [`~Accelerator.save_model`] method can also save a model into sharded checkpoints or with safetensors format:
|
||||
|
||||
```python
|
||||
accelerator.wait_for_everyone()
|
||||
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
|
||||
```
|
||||
|
||||
If your script contains logic to load a checkpoint, we also recommend you load your weights in the unwrapped model
|
||||
(this is only useful if you use the load function after making your model go through
|
||||
[`~Accelerator.prepare`]). Here is an example:
|
||||
|
||||
```python
|
||||
unwrapped_model = accelerator.unwrap_model(model)
|
||||
path_to_checkpoint = os.path.join(save_directory,"pytorch_model.bin")
|
||||
unwrapped_model.load_state_dict(torch.load(path_to_checkpoint))
|
||||
```
|
||||
|
||||
Note that since all the model parameters are references to tensors, this will load your weights inside `model`.
|
||||
|
||||
If you want to load a sharded checkpoint or a checkpoint with safetensors format into the model with a specific `device`,
|
||||
we recommend you to load it with [`~utils.load_checkpoint_in_model`] function. Here's an example:
|
||||
|
||||
```python
|
||||
load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"":device})
|
||||
```
|
||||
|
||||
|
||||
### Save/load entire states
|
||||
|
||||
When training your model, you may want to save the current state of the model, optimizer, random generators, and potentially
|
||||
learning rate schedulers to be restored in the _same script_.
|
||||
You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so.
|
||||
|
||||
To further customize where and how states are saved through [`~Accelerator.save_state`], the [`~utils.ProjectConfiguration`] class can be used. For example, if `automatic_checkpoint_naming` is enabled, each saved checkpoint will be located at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.
|
||||
|
||||
If you have registered any other stateful items to be stored through [`~Accelerator.register_for_checkpointing`] they will also be saved and/or loaded.
|
||||
|
||||
<Tip>
|
||||
|
||||
Every object passed to [`~Accelerator.register_for_checkpointing`] must have a `load_state_dict` and `state_dict` function to be stored
|
||||
|
||||
</Tip>
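
A minimal sketch tying these pieces together (the `my_custom_scheduler` object is a placeholder for any stateful object of your own):

```python
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

project_config = ProjectConfiguration(project_dir="my_project", automatic_checkpoint_naming=True)
accelerator = Accelerator(project_config=project_config)

# Any extra object with `state_dict`/`load_state_dict` methods can be tracked as well.
accelerator.register_for_checkpointing(my_custom_scheduler)

accelerator.save_state()  # saved under my_project/checkpoints/checkpoint_0
accelerator.load_state("my_project/checkpoints/checkpoint_0")  # restores everything, including registered objects
```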
|
||||
|
||||
|
||||
### Use gradient clipping
|
||||
|
||||
If you are using gradient clipping in your script, you should replace the calls to
|
||||
`torch.nn.utils.clip_grad_norm_` or `torch.nn.utils.clip_grad_value_` with [`~Accelerator.clip_grad_norm_`]
|
||||
and [`~Accelerator.clip_grad_value_`] respectively.
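
For example, a sketch inside the training loop (where `model`, `loss`, and `optimizer` come from your existing script):

```python
accelerator.backward(loss)
# Only clip when gradients are actually being synchronized/applied this step.
if accelerator.sync_gradients:
    accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
```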
|
||||
|
||||
|
||||
### Train with mixed precision
|
||||
|
||||
If you are running your training in Mixed Precision with 🤗 Accelerate, you will get the best result with your loss being
|
||||
computed inside your model (like in Transformer models for instance). Every computation outside of the model will be
|
||||
executed in full precision (which is generally what you want for loss computation, especially if it involves a
|
||||
softmax). However, you might want to put your loss computation inside the [`~Accelerator.autocast`] context manager:
|
||||
|
||||
```
with accelerator.autocast():
    loss = complex_loss_function(outputs, target)
```
|
||||
|
||||
Another caveat with Mixed Precision training is that the gradient updates will be skipped a few times at the beginning and sometimes during training: because of the dynamic loss scaling strategy, there are points during training where the gradients have overflowed, and the loss scaling factor is reduced to avoid this happening again at the next step.
|
||||
|
||||
This means that you may update your learning rate scheduler when there was no update, which is fine in general, but may
|
||||
have an impact when you have very little training data, or if the first learning rate values of your scheduler are very
|
||||
important. In this case, you can skip the learning rate scheduler updates when the optimizer step was not done like
|
||||
this:
|
||||
|
||||
```
|
||||
if not accelerator.optimizer_step_was_skipped:
|
||||
lr_scheduler.step()
|
||||
```
|
||||
|
||||
### Use gradient accumulation
|
||||
|
||||
To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a `gradient_accumulation_steps`.
|
||||
This will also automatically ensure the gradients are synced or unsynced when on multi-device training, check if the step should
|
||||
actually be performed, and auto-scale the loss:
|
||||
|
||||
```python
accelerator = Accelerator(gradient_accumulation_steps=2)
model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader)

for input, label in training_dataloader:
    with accelerator.accumulate(model):
        predictions = model(input)
        loss = loss_function(predictions, label)
        accelerator.backward(loss)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
```
|
||||
* Check out the [tutorials](basic_tutorials/overview) for a gentle walkthrough of Accelerate. This is especially useful if you're new to distributed training and the library.
|
||||
* Dive into the [guides](usage_guides/explore) to see how to use Accelerate for specific use-cases.
|
||||
* Deepen your conceptual understanding of how Accelerate works internally by reading the [concept guides](concept_guides/internal_mechanism).
|
||||
* Look up classes and commands in the [API reference](package_reference/accelerator) to see what parameters and options are available.
|
||||
|
||||
@ -52,7 +52,7 @@ will attempt to fill all the space in your GPU(s), then loading them to the CPU,
|
||||
|
||||
<Tip>
|
||||
|
||||
For more details on designing your own device map, see this section of the [concept guide](../concept_guides/big_model_inference#designing-a-device-map)
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
@ -9,7 +9,7 @@ Unless required by applicable law or agreed to in writing, software distributed
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
@ -353,7 +353,7 @@ accelerate launch examples/by_feature/deepspeed_with_config_support.py \
|
||||
```
|
||||
|
||||
**ZeRO++ Config Example**
|
||||
You can use the features of ZeRO++ by using the appropriate config parameters. Note that ZeRO++ is an extension for ZeRO Stage 3. Here is how the config file can be modified, from [DeepSpeed's ZeRO++ tutorial](https://www.deepspeed.ai/tutorials/zeropp/):
|
||||
|
||||
```json
|
||||
{
|
||||
@ -519,7 +519,7 @@ ValueError: When using `deepspeed_config_file`, the following accelerate config
|
||||
['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device',
|
||||
'zero3_save_16bit_model', 'mixed_precision'].
|
||||
Please specify them appropriately in the DeepSpeed config file.
|
||||
If you are using an accelerate config file, remove other config variables mentioned in the above specified list.
|
||||
The easiest method is to create a new config following the questionnaire via `accelerate config`.
|
||||
It will only ask for the necessary config variables when using `deepspeed_config_file`.
|
||||
```
|
||||
@ -656,7 +656,7 @@ ZeRO Stage-3 has 2 options:
|
||||
Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
|
||||
```python
|
||||
success = model.save_checkpoint(PATH, ckpt_id, checkpoint_state_dict)
|
||||
status_msg = "checkpointing: PATH={}, ckpt_id={}".format(PATH, ckpt_id)
|
||||
status_msg = f"checkpointing: PATH={PATH}, ckpt_id={ckpt_id}"
|
||||
if success:
|
||||
logging.info(f"Success {status_msg}")
|
||||
else:
|
||||
|
||||
@ -15,12 +15,18 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# Distributed Inference with 🤗 Accelerate
|
||||
|
||||
Distributed inference can fall into three brackets:

1. Loading an entire model onto each GPU and sending chunks of a batch through each GPU's model copy at a time
2. Loading parts of a model onto each GPU and processing a single input at one time
3. Loading parts of a model onto each GPU and using what is called scheduled Pipeline Parallelism to combine the two prior techniques

We're going to go through the first and the last bracket, showcasing how to do each as they are more realistic scenarios.

|
||||
|
||||
|
||||
## Sending chunks of a batch automatically to each loaded model
|
||||
|
||||
This is the most memory-intensive solution, as it requires each GPU to keep a full copy of the model in memory at a given time.
|
||||
|
||||
Normally when doing this, users send the model to a specific device to load it from the CPU, and then move each prompt to a different device.
|
||||
|
||||
@ -55,7 +61,6 @@ a simple way to manage this. (To learn more, check out the relevant section in t
|
||||
|
||||
Can it manage it? Yes. Does it add unneeded extra code however: also yes.
|
||||
|
||||
## The Solution
|
||||
|
||||
With 🤗 Accelerate, we can simplify this process by using the [`Accelerator.split_between_processes`] context manager (which also exists in `PartialState` and `AcceleratorState`).
|
||||
This function will automatically split whatever data you pass to it (be it a prompt, a set of tensors, a dictionary of the prior data, etc.) across all the processes (with a potential
|
||||
@ -134,3 +139,97 @@ with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"],
|
||||
|
||||
On the first GPU, the prompts will be `["a dog", "a cat"]`, and on the second GPU it will be `["a chicken", "a chicken"]`.
|
||||
Make sure to drop the final sample, as it will be a duplicate of the previous one.
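
Pieced together, a minimal end-to-end sketch looks like the following (the prompt list and the `pipe(prompt)` call are placeholders for your own model):

```python
from accelerate import PartialState

distributed_state = PartialState()
prompts = ["a dog", "a cat", "a chicken"]

# Each process receives its own slice of `prompts`; with `apply_padding=True` the
# last prompt is repeated so every process gets the same number of items.
with distributed_state.split_between_processes(prompts, apply_padding=True) as subset:
    for prompt in subset:
        result = pipe(prompt)  # placeholder: run your model on this prompt
```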
|
||||
|
||||
## Memory-efficient pipeline parallelism (experimental)
|
||||
|
||||
This next part will discuss using *pipeline parallelism*. This is an **experimental** API utilizing the [PiPPy library by PyTorch](https://github.com/pytorch/PiPPy/) as a native solution.
|
||||
|
||||
The general idea with pipeline parallelism is: say you have 4 GPUs and a model big enough that it can be *split* across four GPUs using `device_map="auto"`. With this method you can send in 4 inputs at a time (for example here, though any amount works) and each model chunk will work on an input, then receive the next input once the prior chunk has finished, making it *much* more efficient **and faster** than the method described earlier. Here's a visual taken from the PyTorch repository:
|
||||
|
||||

|
||||
|
||||
To illustrate how you can use this with Accelerate, we have created an [example zoo](https://github.com/huggingface/accelerate/tree/main/examples/inference) showcasing a number of different models and situations. In this tutorial, we'll show this method for GPT2 across two GPUs.
|
||||
|
||||
Before you proceed, please make sure you have the latest pippy installed by running the following:
|
||||
|
||||
```bash
|
||||
pip install torchpippy
|
||||
```
|
||||
|
||||
We require at least version 0.2.0. To confirm that you have the correct version, run `pip show torchpippy`.
|
||||
|
||||
Start by creating the model on the CPU:
|
||||
|
||||
```{python}
|
||||
from transformers import GPT2ForSequenceClassification, GPT2Config
|
||||
|
||||
config = GPT2Config()
|
||||
model = GPT2ForSequenceClassification(config)
|
||||
model.eval()
|
||||
```
|
||||
|
||||
Next you'll need to create some example inputs to use. These help PiPPy trace the model.
|
||||
|
||||
<Tip warning={true}>
|
||||
How you make this example will determine the relative batch size that will be used/passed
|
||||
through the model at a given time, so make sure to remember how many items there are!
|
||||
</Tip>
|
||||
|
||||
```{python}
|
||||
input = torch.randint(
|
||||
low=0,
|
||||
high=config.vocab_size,
|
||||
size=(2, 1024), # bs x seq_len
|
||||
device="cpu",
|
||||
dtype=torch.int64,
|
||||
requires_grad=False,
|
||||
)
|
||||
```
|
||||
Next we need to actually perform the tracing and get the model ready. To do so, use the [`inference.prepare_pippy`] function and it will fully wrap the model for pipeline parallelism automatically:
|
||||
|
||||
```{python}
|
||||
from accelerate.inference import prepare_pippy
|
||||
example_inputs = {"input_ids": input}
|
||||
model = prepare_pippy(model, example_args=(input,))
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
There are a variety of parameters you can pass through to `prepare_pippy`:
|
||||
|
||||
* `split_points` lets you determine what layers to split the model at. By default we use wherever `device_map="auto"` declares, such as `fc` or `conv1` (see the sketch after this tip).
|
||||
|
||||
* `num_chunks` determines how the batch will be split and sent to the model itself (so `num_chunks=1` with four split points/four GPUs will have a naive MP where a single input gets passed between the four layer split points)
|
||||
|
||||
</Tip>
|
||||
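For example, a hedged sketch of overriding both (reusing `model`, `input`, and the `prepare_pippy` import from the snippets above; the layer name `transformer.h.6` is an assumption and depends on the actual architecture):

```{python}
model = prepare_pippy(
    model,
    split_points=["transformer.h.6"],  # assumed layer name: split GPT-2 roughly in half
    num_chunks=4,                      # each batch is split into 4 micro-batches
    example_args=(input,),
)
```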
|
||||
From here, all that's left is to actually perform the distributed inference!
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
When passing inputs, we highly recommend passing them in as a tuple of arguments. Using `kwargs` is supported; however, this approach is experimental.
|
||||
</Tip>
|
||||
|
||||
```{python}
|
||||
args = some_more_arguments
|
||||
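# e.g. the example inputs created earlier, moved to the first device: (input.to("cuda:0"),)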
with torch.no_grad():
|
||||
output = model(*args)
|
||||
```
|
||||
|
||||
When finished, all the data will be on the last process only:
|
||||
|
||||
```{python}
|
||||
from accelerate import PartialState
|
||||
if PartialState().is_last_process:
|
||||
print(output)
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
If you pass in `gather_output=True` to [`inference.prepare_pippy`], the output will be sent
|
||||
across to all the GPUs afterwards without needing the `is_last_process` check. This is
|
||||
`False` by default as it incurs a communication call.
|
||||
|
||||
</Tip>
|
||||
|
||||
And that's it! To explore more, please check out the inference examples in the [Accelerate repo](https://github.com/huggingface/accelerate/tree/main/examples/inference) and our [documentation](../package_reference/inference) as we work on improving this integration.
|
||||
|
||||
@ -73,7 +73,7 @@ accelerate launch examples/nlp_example.py
|
||||
|
||||
Currently, `Accelerate` supports the following config through the CLI:
|
||||
|
||||
`fsdp_sharding_strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has full copy)
|
||||
`fsdp_sharding_strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has full copy). For more information, please refer to the official [PyTorch docs](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.ShardingStrategy).
|
||||
|
||||
`fsdp_offload_params` : Decides Whether to offload parameters and gradients to CPU
|
||||
|
||||
@ -91,7 +91,7 @@ Currently, `Accelerate` supports the following config through the CLI:
|
||||
|
||||
`fsdp_use_orig_params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. This setting is useful in cases such as parameter-efficient fine-tuning as discussed in [this post](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). This option also allows one to have multiple optimizer param groups. This should be `True` when creating an optimizer before preparing/wrapping the model with FSDP.
|
||||
|
||||
`fsdp_cpu_ram_efficient_loading`: Only applicable for 🤗 Transformers models. If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. This should be set to False if you experience errors when loading the pretrained 🤗 Transformers model via `from_pretrained` method. When this setting is True `fsdp_sync_module_states` also must to be True, otherwise all the processes except the main process would have random weights leading to unexpected behaviour during training.
|
||||
`fsdp_cpu_ram_efficient_loading`: Only applicable for 🤗 Transformers models. If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. This should be set to False if you experience errors when loading the pretrained 🤗 Transformers model via the `from_pretrained` method. When this setting is True, `fsdp_sync_module_states` must also be True, otherwise all the processes except the main process would have random weights, leading to unexpected behaviour during training. For this to work, make sure the distributed process group is initialized before calling the Transformers `from_pretrained` method. When using the 🤗 Trainer API, the distributed process group is initialized when you create an instance of the `TrainingArguments` class.
|
||||
|
||||
`fsdp_sync_module_states`: If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0.
|
||||
|
||||
@ -161,6 +161,13 @@ When using transformers `save_pretrained`, pass `state_dict=accelerator.get_stat
|
||||
|
||||
You can then pass `state` into the `save_pretrained` method. There are several modes for `StateDictType` and `FullStateDictConfig` that you can use to control the behavior of `state_dict`. For more information, see the [PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html).
|
||||
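As a minimal sketch (assuming a recent PyTorch with FSDP support), the same behavior can also be configured up front through the `FullyShardedDataParallelPlugin` before handing it to the `Accelerator`:

```python
from torch.distributed.fsdp.fully_sharded_data_parallel import (
    FullOptimStateDictConfig,
    FullStateDictConfig,
)

from accelerate import Accelerator, FullyShardedDataParallelPlugin

# Gather full state dicts on rank 0 and offload them to CPU when saving
fsdp_plugin = FullyShardedDataParallelPlugin(
    state_dict_config=FullStateDictConfig(offload_to_cpu=True, rank0_only=True),
    optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True),
)
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```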
|
||||
|
||||
## Mapping between FSDP sharding strategies and DeepSpeed ZeRO Stages
|
||||
* `FULL_SHARD` maps to the DeepSpeed `ZeRO Stage-3`. Shards optimizer states, gradients and parameters.
|
||||
* `SHARD_GRAD_OP` maps to the DeepSpeed `ZeRO Stage-2`. Shards optimizer states and gradients.
|
||||
* `NO_SHARD` maps to `ZeRO Stage-0`. No sharding wherein each GPU has full copy of model, optimizer states and gradients.
|
||||
* `HYBRID_SHARD` maps to `ZeRO++ Stage-3` wherein `zero_hpz_partition_size=<num_gpus_per_node>`. Here, this will shard optimizer states, gradients and parameters within each node while each node has full copy.
|
||||
|
||||
## A few caveats to be aware of
|
||||
|
||||
- In case of multiple models, pass the optimizers to the prepare call in the same order as the corresponding models, else `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour.
|
||||
|
||||
@ -115,8 +115,11 @@ What is the IP address of the machine that will host the main process? 36.112.23
|
||||
What is the port you will use to communicate with the main process? 29500
|
||||
Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: yes
|
||||
Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:yes
|
||||
Do you want accelerate to launch mpirun? [yes/NO]: yes
|
||||
Please enter the path to the hostfile to use with mpirun [~/hostfile]: ~/hostfile
|
||||
Enter the number of oneCCL worker threads [1]: 1
|
||||
Do you wish to optimize your script with torch dynamo?[yes/NO]:NO
|
||||
How many CPU(s) should be used for distributed training? [1]:16
|
||||
How many processes should be used for distributed training? [1]:16
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
Do you wish to use FP16 or BF16 (mixed precision)?
|
||||
bf16
|
||||
@ -135,6 +138,9 @@ main_process_ip: 36.112.23.24
|
||||
main_process_port: 29500
|
||||
main_training_function: main
|
||||
mixed_precision: bf16
|
||||
mpirun_config:
|
||||
mpirun_ccl: '1'
|
||||
mpirun_hostfile: /home/user/hostfile
|
||||
num_machines: 4
|
||||
num_processes: 16
|
||||
rdzv_backend: static
|
||||
@ -148,6 +154,7 @@ use_cpu: true
|
||||
Set following env and using intel MPI to launch the training
|
||||
|
||||
On node0, you need to create a configuration file which contains the IP addresses of each node (for example, `hostfile`) and pass that configuration file path as an argument.
|
||||
If you selected to have Accelerate launch `mpirun`, ensure that the location of your hostfile matches the path in the config.
|
||||
```bash
|
||||
$ cat hostfile
|
||||
xxx.xxx.xxx.xxx #node0 ip
|
||||
@ -155,7 +162,18 @@ xxx.xxx.xxx.xxx #node1 ip
|
||||
xxx.xxx.xxx.xxx #node2 ip
|
||||
xxx.xxx.xxx.xxx #node3 ip
|
||||
```
|
||||
Now, run the following command in node0 and **16DDP** will be enabled in node0,node1,node2,node3 with BF16 mixed precision:
|
||||
When Accelerate is launching `mpirun`, source the oneCCL bindings setvars.sh to get your Intel MPI environment, and then
|
||||
run your script using `accelerate launch`. Note that the python script and environment need to exist on all of the
|
||||
machines being used for multi-CPU training.
|
||||
```bash
|
||||
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
|
||||
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
|
||||
|
||||
accelerate launch examples/nlp_example.py
|
||||
```
|
||||
Otherwise, if you selected not to have Accelerate launch `mpirun`, run the following command in node0 and **16DDP** will
|
||||
be enabled in node0,node1,node2,node3 with BF16 mixed precision. When using this method, the python script, python
|
||||
environment, and accelerate config file need to be present on all of the machines used for multi-CPU training.
|
||||
```bash
|
||||
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
|
||||
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
|
||||
|
||||
@ -88,7 +88,7 @@ achieved by adding one `with LocalSGD` statement and one call `local_sgd.step()`
|
||||
+ local_sgd.step()
|
||||
```
|
||||
|
||||
Under the hood, the Local SGD code **disables** automatic gradient synchornization (but accumulation still works as expected!). Instead it averages model parameters every `local_sgd_steps` steps (as well as in the end of the training loop).
|
||||
Under the hood, the Local SGD code **disables** automatic gradient synchronization (but accumulation still works as expected!). Instead it averages model parameters every `local_sgd_steps` steps (as well as at the end of the training loop).
|
||||
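Putting it together, a condensed sketch of the full pattern (assuming `accelerator`, `model`, `optimizer`, and `training_dataloader` have already gone through `accelerator.prepare`, and that batches are `(inputs, targets)` pairs):

```python
import torch
from accelerate.local_sgd import LocalSGD

with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for batch in training_dataloader:
        with accelerator.accumulate(model):
            inputs, targets = batch
            outputs = model(inputs)
            loss = torch.nn.functional.mse_loss(outputs, targets)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            # parameters are averaged across workers every `local_sgd_steps` optimizer steps
            local_sgd.step()
```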
|
||||
## Limitations
|
||||
|
||||
|
||||
@ -57,7 +57,7 @@ Of the two, `MS-AMP` is traditionally the easier one to configure as there is on
|
||||
Currently two levels of optimization are supported in the 🤗 Accelerate integration, `"O1"` and `"O2"` (using the letter 'o', not zero).
|
||||
|
||||
* `"O1"` will cast the weight gradients and `all_reduce` communications to happen in 8-bit, while the rest are done in 16 bit. This reduces the general GPU memory usage and speeds up communication bandwidths.
|
||||
* `"O2"` will also cast first-order optimizer states into 8 bit, while the second order states are in FP16. (Currently just the `Adam` optimizer is supported). This tries it's best to minimize final accuracy degradation and will save the highest potential memory.
|
||||
* `"O2"` will also cast first-order optimizer states into 8 bit, while the second order states are in FP16. (Currently just the `Adam` optimizer is supported). This tries its best to minimize final accuracy degradation and will save the highest potential memory.
|
||||
|
||||
To specify an optimization level, pass it to the `FP8KwargsHandler` by setting the `optimization_level` argument:
|
||||
|
||||
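A minimal sketch of that call (keyword names are an assumption and may differ slightly between Accelerate versions; recent releases spell the level argument `opt_level`):

```python
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

# assumed keywords: backend="msamp" selects MS-AMP, opt_level picks "O1" or "O2"
kwargs = [FP8RecipeKwargs(backend="msamp", opt_level="O2")]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```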
@ -70,7 +70,7 @@ accelerator = Accelerator(mixed_precision="fp8", kwarg_handlers=kwargs)
|
||||
|
||||
## Configuring TransformersEngine
|
||||
|
||||
TransformersEngine has much more available for customizing how and what FP8 calculations are performed. A full list of supported arguments and what they mean are available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html), however they are restated as part of [`FP8KwargsHandler`]'s docstring for your convience.
|
||||
TransformersEngine has much more available for customizing how and what FP8 calculations are performed. A full list of supported arguments and what they mean are available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html), however they are restated as part of [`FP8KwargsHandler`]'s docstring for your convenience.
|
||||
|
||||
🤗 Accelerate tries to set sensible defaults, but exploring and tweaking the various parameters yourself can lead to better performance potentially.
|
||||
|
||||
@ -83,10 +83,10 @@ kwargs = [FP8RecipeKwargs(backend="te", ...)]
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwarg_handlers=kwargs)
|
||||
```
|
||||
|
||||
## Futher Reading
|
||||
## Further Reading
|
||||
|
||||
To learn more about training in FP8 please check out the following resources:
|
||||
|
||||
* [Our concept guide](../concept_guides/low_precision_training.md) detailing into more about both TransformersEngine and MS-AMP
|
||||
* [The `transformers-engine` documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html)
|
||||
* [The `MS-AMP` documentation](https://azure.github.io/MS-AMP/docs/)
|
||||
* [The `MS-AMP` documentation](https://azure.github.io/MS-AMP/docs/)
|
||||
|
||||
@ -9,7 +9,7 @@ Unless required by applicable law or agreed to in writing, software distributed
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
@ -542,7 +542,7 @@ megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args)
|
||||
This covers Decoder only, Encoder only and Encoder-Decoder model classes.
|
||||
|
||||
2. Only loss is returned from model forward pass as
|
||||
there is quite complex interplay of pipeline, tensor and data parallelsim behind the scenes.
|
||||
there is quite complex interplay of pipeline, tensor and data parallelism behind the scenes.
|
||||
The `model(**batch_data)` call returns loss(es) averaged across the data parallel ranks.
|
||||
This is fine for most cases wherein pre-training jobs are run using Megatron-LM features and
|
||||
you can easily compute the `perplexity` using the loss.
|
||||
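For example, a tiny sketch (the `loss` value here is a stand-in for whatever `model(**batch_data)` returned):

```python
import torch

loss = torch.tensor(2.3)  # stand-in for the averaged loss from model(**batch_data)
perplexity = torch.exp(loss)
print(f"perplexity: {perplexity.item():.2f}")
```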
@ -580,4 +580,4 @@ b. Megatron-LM [GPTModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatr
|
||||
c. Megatron-LM [T5Model](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/t5_model.py) :
|
||||
🤗 transformers models with `t5` in config's model type, e.g.,
|
||||
[T5](https://huggingface.co/docs/transformers/model_doc/t5) and
|
||||
[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)
|
||||
[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)
|
||||
|
||||
@ -51,7 +51,7 @@ Below are a few gradio demos related to what was described above. The first is t
|
||||
></iframe>
|
||||
</div>
|
||||
|
||||
A community member has taken the idea and expended it further, allowing you to filter models directly and see if you can run a particular LLM given GPU constraints and LoRA configurations. To play with it, see [here](https://huggingface.co/spaces/Vokturz/can-it-run-llm) for more details.
|
||||
A community member has taken the idea and expanded it further, allowing you to filter models directly and see if you can run a particular LLM given GPU constraints and LoRA configurations. To play with it, see [here](https://huggingface.co/spaces/Vokturz/can-it-run-llm) for more details.
|
||||
|
||||
## The Command
|
||||
|
||||
@ -134,4 +134,4 @@ This calculator will tell you how much memory is needed to purely load the model
|
||||
This calculation is accurate within a few % of the actual value, so it is a very good view of just how much memory it will take. For instance loading `bert-base-cased` actually takes `413.68 MB` when loaded on CUDA in full precision, and the calculator estimates `413.18 MB`.
|
||||
|
||||
When performing inference you can expect to add up to an additional 20% as found by [EleutherAI](https://blog.eleuther.ai/transformer-math/). We'll be conducting research into finding a more accurate estimate to these values, and will update
|
||||
this calculator once done.
|
||||
this calculator once done.
|
||||
|
||||
@ -28,6 +28,7 @@ pip install datasets evaluate transformers
|
||||
|
||||
The same script can be run in any of the following configurations:
|
||||
- single CPU or single GPU
|
||||
- multi CPUs
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
- (multi) TPUs
|
||||
- fp16 (mixed-precision) or fp32 (normal precision)
|
||||
@ -58,6 +59,18 @@ To run it in each of these various modes, use the following commands:
|
||||
* from any server with Accelerate launcher
|
||||
```bash
|
||||
accelerate launch --mixed_precision fp16 ./nlp_example.py
|
||||
- multi CPUs (requires Open MPI, Intel MPI, or MVAPICH)
|
||||
* With Accelerate config and launcher, execute the following from node 0:
|
||||
```bash
|
||||
accelerate config # Select to have accelerate launch mpirun
|
||||
accelerate launch ./nlp_example.py # This will run the script on each server
|
||||
```
|
||||
* With Intel MPI:
|
||||
```bash
|
||||
export CCL_WORKER_COUNT=1
|
||||
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
|
||||
mpirun -f hostfile -n 16 -ppn 4 python ./nlp_example.py
|
||||
```
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher
|
||||
```bash
|
||||
@ -100,6 +113,7 @@ The [cv_example.py](./cv_example.py) script is a simple example to fine-tune a R
|
||||
|
||||
The same script can be run in any of the following configurations:
|
||||
- single CPU or single GPU
|
||||
- multi CPUs
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
- (multi) TPUs
|
||||
- fp16 (mixed-precision) or fp32 (normal precision)
|
||||
@ -143,6 +157,18 @@ To run it in each of these various modes, use the following commands:
|
||||
* from any server with Accelerate launcher
|
||||
```bash
|
||||
accelerate launch --mixed_precision fp16 ./cv_example.py --data_dir path_to_data
|
||||
- multi CPUs (requires Open MPI, Intel MPI, or MVAPICH)
|
||||
* With Accelerate config and launcher, run the following from node 0:
|
||||
```bash
|
||||
accelerate config --config_file config.yaml # Select to have accelerate launch mpirun
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
```
|
||||
* With Intel MPI, execute mpirun from node 0:
|
||||
```bash
|
||||
export CCL_WORKER_COUNT=1
|
||||
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
|
||||
mpirun -f hostfile -n 16 -ppn 4 python ./cv_example.py --data_dir path_to_data
|
||||
```
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher
|
||||
```bash
|
||||
@ -207,6 +233,22 @@ In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) the only parameter in
|
||||
|
||||
In [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many GPUs we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip` which will be the address of the master node and the `--main_process_port`.
|
||||
|
||||
In both scripts, we run `activateEnvironment.sh` at the beginning. This script should contain the necessary instructions to initialize the environment for execution. Below, we show an example that loads the necessary libraries ([Environment modules](https://github.com/cea-hpc/modules)), activates the Python environment, and sets up various environment variables, most of them to run the scripts in offline mode in case we don't have an internet connection from the cluster.
|
||||
|
||||
```bash
|
||||
# activateEnvironment.sh
|
||||
module purge
|
||||
module load anaconda3/2020.02 cuda/10.2 cudnn/8.0.5 nccl/2.9.9 arrow/7.0.0 openmpi
|
||||
source activate /home/nct01/nct01328/pytorch_antoni_local
|
||||
|
||||
export HF_HOME=/gpfs/projects/nct01/nct01328/
|
||||
export HF_LOCAL_HOME=/gpfs/projects/nct01/nct01328/HF_LOCAL
|
||||
export HF_DATASETS_OFFLINE=1
|
||||
export TRANSFORMERS_OFFLINE=1
|
||||
export PYTHONPATH=/home/nct01/nct01328/transformers-in-supercomputers:$PYTHONPATH
|
||||
export GPUS_PER_NODE=4
|
||||
```
|
||||
|
||||
## Finer Examples
|
||||
|
||||
While the first two scripts are extremely barebones when it comes to what you can do with accelerate, more advanced features are documented in two other locations.
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -86,7 +85,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -154,7 +153,7 @@ def training_function(config, args):
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -106,7 +105,7 @@ def get_fold_dataloaders(
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -157,7 +156,7 @@ def training_function(config, args):
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
# coding=utf-8
|
||||
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -512,7 +511,7 @@ def main():
|
||||
optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate)
|
||||
|
||||
# On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
if accelerator.distributed_type == DistributedType.XLA:
|
||||
model.tie_weights()
|
||||
|
||||
# Scheduler and math around the number of training steps.
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -81,7 +80,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -151,7 +150,7 @@ def training_function(config, args):
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -209,13 +208,13 @@ def training_function(config, args):
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -334,13 +333,11 @@ def training_function(config, args):
|
||||
accelerator.save_state(output_dir)
|
||||
# New Code #
|
||||
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
|
||||
accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
|
||||
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
|
||||
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
|
||||
accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}")
|
||||
accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}")
|
||||
accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}")
|
||||
accelerator.print(
|
||||
"Total Peak Memory consumed during the train (max): {}".format(
|
||||
tracemalloc.peaked + b2mb(tracemalloc.begin)
|
||||
)
|
||||
f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
|
||||
)
|
||||
# Logging the peak memory usage of the GPU to the tracker
|
||||
if args.with_tracking:
|
||||
@ -387,11 +384,11 @@ def training_function(config, args):
|
||||
accelerator.save_state(output_dir)
|
||||
# New Code #
|
||||
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
|
||||
accelerator.print("Memory before entering the eval : {}".format(b2mb(tracemalloc.begin)))
|
||||
accelerator.print("Memory consumed at the end of the eval (end-begin): {}".format(tracemalloc.used))
|
||||
accelerator.print("Peak Memory consumed during the eval (max-begin): {}".format(tracemalloc.peaked))
|
||||
accelerator.print(f"Memory before entering the eval : {b2mb(tracemalloc.begin)}")
|
||||
accelerator.print(f"Memory consumed at the end of the eval (end-begin): {tracemalloc.used}")
|
||||
accelerator.print(f"Peak Memory consumed during the eval (max-begin): {tracemalloc.peaked}")
|
||||
accelerator.print(
|
||||
"Total Peak Memory consumed during the eval (max): {}".format(tracemalloc.peaked + b2mb(tracemalloc.begin))
|
||||
f"Total Peak Memory consumed during the eval (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
|
||||
)
|
||||
# Logging the peak memory usage of the GPU to the tracker
|
||||
if args.with_tracking:
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -81,7 +80,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -126,7 +125,7 @@ def training_function(config, args):
|
||||
accelerator = Accelerator(
|
||||
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
|
||||
)
|
||||
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
|
||||
if accelerator.distributed_type == DistributedType.XLA and gradient_accumulation_steps > 1:
|
||||
raise NotImplementedError(
|
||||
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
|
||||
)
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -84,7 +83,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -130,8 +129,6 @@ def training_function(config, args):
|
||||
accelerator = Accelerator(
|
||||
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
|
||||
)
|
||||
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
|
||||
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
|
||||
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
|
||||
lr = config["lr"]
|
||||
num_epochs = int(config["num_epochs"])
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -506,7 +505,7 @@ def main():
|
||||
)
|
||||
|
||||
# On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
if accelerator.distributed_type == DistributedType.XLA:
|
||||
model.tie_weights()
|
||||
|
||||
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
|
||||
|
||||
@ -86,7 +86,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -88,7 +87,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -139,7 +138,7 @@ def training_function(config, args):
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -86,7 +85,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -149,7 +148,7 @@ def training_function(config, args):
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -103,13 +102,13 @@ def training_function(config, args):
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
|
||||
62
examples/inference/README.md
Normal file
62
examples/inference/README.md
Normal file
@ -0,0 +1,62 @@
|
||||
# Distributed inference examples with PiPPy
|
||||
|
||||
This repo contains a variety of tutorials for using the [PiPPy](https://github.com/PyTorch/PiPPy) pipeline parallelism library with accelerate. You will find examples covering:
|
||||
|
||||
1. How to trace the model using `accelerate.prepare_pippy`
|
||||
2. How to specify inputs based on what the model expects (when to use `kwargs`, `args`, and such)
|
||||
3. How to gather the results at the end.
|
||||
|
||||
## Installation
|
||||
|
||||
This requires the `main` branch of accelerate (or version 0.27.0 or later), a `pippy` version of 0.2.0 or greater, and at least Python 3.9. Please install using `pip install .` to pull from the `setup.py` in this repo, or run manually:
|
||||
|
||||
```bash
|
||||
pip install 'accelerate>=0.27.0' 'torchpippy>=0.2.0'
|
||||
```
|
||||
|
||||
## Running code
|
||||
|
||||
You can use either `torchrun` or the recommended `accelerate launch` (without needing to run `accelerate config`) on each script:
|
||||
|
||||
```bash
|
||||
accelerate launch bert.py
|
||||
```
|
||||
|
||||
Or:
|
||||
|
||||
```bash
|
||||
accelerate launch --num_processes {NUM_GPUS} bert.py
|
||||
```
|
||||
|
||||
Or:
|
||||
|
||||
```bash
|
||||
torchrun --nproc-per-node {NUM_GPUS} bert.py
|
||||
```
|
||||
|
||||
## General speedups
|
||||
|
||||
One can expect that PiPPy will outperform native model parallelism by a multiplicative factor since all GPUs are running at all times with inputs, rather than one input being passed through a GPU at a time while the others wait for the prior one to finish.
|
||||
|
||||
Below are some benchmarks we have found when using the accelerate-pippy integration for a few models when running on 2x4090's:
|
||||
|
||||
### Bert
|
||||
|
||||
| | Accelerate/Sequential | PiPPy + Accelerate |
|
||||
|---|---|---|
|
||||
| First batch | 0.2137s | 0.3119s |
|
||||
| Average of 5 batches | 0.0099s | **0.0062s** |
|
||||
|
||||
### GPT2
|
||||
|
||||
| | Accelerate/Sequential | PiPPy + Accelerate |
|
||||
|---|---|---|
|
||||
| First batch | 0.1959s | 0.4189s |
|
||||
| Average of 5 batches | 0.0205s | **0.0126s** |
|
||||
|
||||
### T5
|
||||
|
||||
| | Accelerate/Sequential | PiPPy + Accelerate |
|
||||
|---|---|---|
|
||||
| First batch | 0.2789s | 0.3809s |
|
||||
| Average of 5 batches | 0.0198s | **0.0166s** |
|
||||
78
examples/inference/bert.py
Normal file
78
examples/inference/bert.py
Normal file
@ -0,0 +1,78 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import time
|
||||
|
||||
import torch
|
||||
from transformers import AutoModelForMaskedLM
|
||||
|
||||
from accelerate import PartialState, prepare_pippy
|
||||
from accelerate.utils import set_seed
|
||||
|
||||
|
||||
# Set the random seed to have reproducible outputs
|
||||
set_seed(42)
|
||||
|
||||
# Create an example model
|
||||
model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")
|
||||
model.eval()
|
||||
|
||||
# Input configs
|
||||
# Create example inputs for the model
|
||||
input = torch.randint(
|
||||
low=0,
|
||||
high=model.config.vocab_size,
|
||||
size=(2, 512), # bs x seq_len
|
||||
device="cpu",
|
||||
dtype=torch.int64,
|
||||
requires_grad=False,
|
||||
)
|
||||
|
||||
|
||||
# Create a pipeline stage from the model
|
||||
# Using `auto` is equivalent to letting `device_map="auto"` figure
|
||||
# out device mapping and will also split the model according to the
|
||||
# number of total GPUs available if it fits on one GPU
|
||||
model = prepare_pippy(model, split_points="auto", example_args=(input,))
|
||||
|
||||
# You can pass `gather_output=True` to have the output from the model
|
||||
# available on all GPUs
|
||||
# model = prepare_pippy(model, split_points="auto", example_args=(input,), gather_output=True)
|
||||
|
||||
# Move the inputs to the first device
|
||||
input = input.to("cuda:0")
|
||||
|
||||
# Take an average of 5 times
|
||||
# Measure first batch
|
||||
torch.cuda.synchronize()
|
||||
start_time = time.time()
|
||||
with torch.no_grad():
|
||||
output = model(input)
|
||||
torch.cuda.synchronize()
|
||||
end_time = time.time()
|
||||
first_batch = end_time - start_time
|
||||
|
||||
# Now that CUDA is init, measure after
|
||||
torch.cuda.synchronize()
|
||||
start_time = time.time()
|
||||
for i in range(5):
|
||||
with torch.no_grad():
|
||||
output = model(input)
|
||||
torch.cuda.synchronize()
|
||||
end_time = time.time()
|
||||
|
||||
# The outputs are only on the final process by default
|
||||
if PartialState().is_last_process:
|
||||
output = torch.stack(tuple(output[0]))
|
||||
print(f"Time of first pass: {first_batch}")
|
||||
print(f"Average time per batch: {(end_time - start_time) / 5}")
|
||||
77
examples/inference/gpt2.py
Normal file
77
examples/inference/gpt2.py
Normal file
@ -0,0 +1,77 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import time
|
||||
|
||||
import torch
|
||||
from transformers import AutoModelForSequenceClassification
|
||||
|
||||
from accelerate import PartialState, prepare_pippy
|
||||
from accelerate.utils import set_seed
|
||||
|
||||
|
||||
# Set the random seed to have reproducible outputs
|
||||
set_seed(42)
|
||||
|
||||
# Create an example model
|
||||
model = AutoModelForSequenceClassification.from_pretrained("gpt2")
|
||||
model.eval()
|
||||
|
||||
# Input configs
|
||||
# Create example inputs for the model
|
||||
input = torch.randint(
|
||||
low=0,
|
||||
high=model.config.vocab_size,
|
||||
size=(2, 1024), # bs x seq_len
|
||||
device="cpu",
|
||||
dtype=torch.int64,
|
||||
requires_grad=False,
|
||||
)
|
||||
|
||||
# Create a pipeline stage from the model
|
||||
# Using `auto` is equivalent to letting `device_map="auto"` figure
|
||||
# out device mapping and will also split the model according to the
|
||||
# number of total GPUs available if it fits on one GPU
|
||||
model = prepare_pippy(model, split_points="auto", example_args=(input,))
|
||||
|
||||
# You can pass `gather_output=True` to have the output from the model
|
||||
# available on all GPUs
|
||||
# model = prepare_pippy(model, split_points="auto", example_args=(input,), gather_output=True)
|
||||
|
||||
# Move the inputs to the first device
|
||||
input = input.to("cuda:0")
|
||||
|
||||
# Take an average of 5 times
|
||||
# Measure first batch
|
||||
torch.cuda.synchronize()
|
||||
start_time = time.time()
|
||||
with torch.no_grad():
|
||||
output = model(input)
|
||||
torch.cuda.synchronize()
|
||||
end_time = time.time()
|
||||
first_batch = end_time - start_time
|
||||
|
||||
# Now that CUDA is init, measure after
|
||||
torch.cuda.synchronize()
|
||||
start_time = time.time()
|
||||
for i in range(5):
|
||||
with torch.no_grad():
|
||||
output = model(input)
|
||||
torch.cuda.synchronize()
|
||||
end_time = time.time()
|
||||
|
||||
# The outputs are only on the final process by default
|
||||
if PartialState().is_last_process:
|
||||
output = torch.stack(tuple(output[0]))
|
||||
print(f"Time of first pass: {first_batch}")
|
||||
print(f"Average time per batch: {(end_time - start_time) / 5}")
|
||||
54
examples/inference/llama.py
Normal file
54
examples/inference/llama.py
Normal file
@ -0,0 +1,54 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
from accelerate import PartialState, prepare_pippy
|
||||
|
||||
|
||||
# The sdpa attention implementation (the default for torch>2.1.2) fails with tracing + the attention mask kwarg
|
||||
# with attn_implementation="eager" mode, the forward is very slow for some reason
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"meta-llama/Llama-2-7b-chat-hf", low_cpu_mem_usage=True, attn_implementation="sdpa"
|
||||
)
|
||||
model.eval()
|
||||
|
||||
# Input configs
|
||||
# Create example inputs for the model
|
||||
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
|
||||
prompts = ("I would like to", "I really like to", "The weather is pretty") # bs = 3
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
inputs = tokenizer(prompts, return_tensors="pt", padding=True)
|
||||
|
||||
# Create a pipeline stage from the model
|
||||
# Using `auto` is equivalent to letting `device_map="auto"` figure
|
||||
# out device mapping and will also split the model according to the
|
||||
# number of total GPUs available if it fits on one GPU
|
||||
model = prepare_pippy(model, split_points="auto", example_kwargs=inputs)
|
||||
|
||||
# You can pass `gather_output=True` to have the output from the model
|
||||
# available on all GPUs
|
||||
# model = prepare_pippy(model, split_points="auto", example_args=(input,), gather_output=True)
|
||||
|
||||
# currently we don't support `model.generate`
|
||||
# output = model.generate(**inputs, max_new_tokens=1)
|
||||
inputs = inputs.to(0)
|
||||
with torch.no_grad():
|
||||
output = model(**inputs)
|
||||
|
||||
# The outputs are only on the final process by default
|
||||
if PartialState().is_last_process:
|
||||
next_token_logits = output[0][:, -1, :]
|
||||
next_token = torch.argmax(next_token_logits, dim=-1)
|
||||
print(tokenizer.batch_decode(next_token))
|
||||
2
examples/inference/requirements.txt
Normal file
2
examples/inference/requirements.txt
Normal file
@ -0,0 +1,2 @@
|
||||
accelerate
|
||||
pippy>=0.2.0
|
||||
89
examples/inference/t5.py
Normal file
89
examples/inference/t5.py
Normal file
@ -0,0 +1,89 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import time
|
||||
|
||||
import torch
|
||||
from transformers import AutoModelForSeq2SeqLM
|
||||
|
||||
from accelerate import PartialState, prepare_pippy
|
||||
from accelerate.utils import set_seed
|
||||
|
||||
|
||||
# Set the random seed to have reproducible outputs
|
||||
set_seed(42)
|
||||
|
||||
# Create an example model
|
||||
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
|
||||
model.eval()
|
||||
|
||||
# Input configs
|
||||
# Create example inputs for the model
|
||||
input = torch.randint(
|
||||
low=0,
|
||||
high=model.config.vocab_size,
|
||||
size=(2, 1024), # bs x seq_len
|
||||
device="cpu",
|
||||
dtype=torch.int64,
|
||||
requires_grad=False,
|
||||
)
|
||||
|
||||
example_inputs = {"input_ids": input, "decoder_input_ids": input}
|
||||
|
||||
# Create a pipeline stage from the model
|
||||
# Using `auto` is equivalent to letting `device_map="auto"` figure
|
||||
# out device mapping and will also split the model according to the
|
||||
# number of total GPUs available if it fits on one GPU
|
||||
model = prepare_pippy(
|
||||
model,
|
||||
no_split_module_classes=["T5Block"],
|
||||
example_kwargs=example_inputs,
|
||||
)
|
||||
|
||||
# You can pass `gather_output=True` to have the output from the model
|
||||
# available on all GPUs
|
||||
# model = prepare_pippy(
|
||||
# model,
|
||||
# no_split_module_classes=["T5Block"],
|
||||
# example_kwargs=example_inputs,
|
||||
# gather_outputs=True
|
||||
# )
|
||||
|
||||
# The model expects a tuple during real inference
|
||||
# with the data on the first device
|
||||
args = (example_inputs["input_ids"].to("cuda:0"), example_inputs["decoder_input_ids"].to("cuda:0"))
|
||||
|
||||
# Take an average of 5 times
|
||||
# Measure first batch
|
||||
torch.cuda.synchronize()
|
||||
start_time = time.time()
|
||||
with torch.no_grad():
|
||||
output = model(*args)
|
||||
torch.cuda.synchronize()
|
||||
end_time = time.time()
|
||||
first_batch = end_time - start_time
|
||||
|
||||
# Now that CUDA is init, measure after
|
||||
torch.cuda.synchronize()
|
||||
start_time = time.time()
|
||||
for i in range(5):
|
||||
with torch.no_grad():
|
||||
output = model(*args)
|
||||
torch.cuda.synchronize()
|
||||
end_time = time.time()
|
||||
|
||||
# The outputs are only on the final process by default
|
||||
if PartialState().is_last_process:
|
||||
output = torch.stack(tuple(output[0]))
|
||||
print(f"Time of first pass: {first_batch}")
|
||||
print(f"Average time per batch: {(end_time - start_time) / 5}")
|
||||
@ -1,3 +1,16 @@
|
||||
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
import runhouse as rh
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -78,8 +77,8 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# For Torchxla, it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -124,7 +123,7 @@ def training_function(config, args):
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
|
||||
@@ -1,17 +1,44 @@
[tool.black]
line-length = 119
target-version = ['py37']

[tool.ruff]
# Never enforce `E501` (line length violations).
ignore = ["E501", "E741", "W605"]
select = ["E", "F", "I", "W"]
line-length = 119
target-version = "py38"

# Ignore import violations in all `__init__.py` files.
[tool.ruff.per-file-ignores]
"__init__.py" = ["E402", "F401", "F403", "F811"]
[tool.ruff.lint]
preview = true
ignore-init-module-imports = true
extend-select = [
    "B009", # static getattr
    "B010", # static setattr
    "CPY", # Copyright
    "E", # PEP8 errors
    "F", # PEP8 formatting
    "I", # Import sorting
    "TID251", # Banned API
    "UP", # Pyupgrade
    "W", # PEP8 warnings
]
ignore = [
    "E501", # Line length (handled by ruff-format)
    "E741", # Ambiguous variable name
    "W605", # Invalid escape sequence
    "UP007", # X | Y type annotations
]

[tool.ruff.isort]
[tool.ruff.lint.per-file-ignores]
"__init__.py" = [
    "F401", # Ignore seemingly unused imports (they're meant for re-export)
]
"manim_animations/*" = ["ALL"]

[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["accelerate"]

[tool.ruff.format]
exclude = [
    "manim_animations/*"
]

[tool.ruff.lint.flake8-tidy-imports.banned-api]
"os.getenv".msg = "Use os.environ instead"
"os.putenv".msg = "Use os.environ instead"
"os.unsetenv".msg = "Use os.environ instead"

setup.cfg
@@ -1,14 +0,0 @@
[isort]
default_section = FIRSTPARTY
ensure_newline_before_comments = True
force_grid_wrap = 0
include_trailing_comma = True
known_first_party = accelerate
line_length = 119
lines_after_imports = 2
multi_line_output = 3
use_parentheses = True

[flake8]
ignore = E203, E722, E501, E741, W503, W605
max-line-length = 119

setup.py
@@ -12,15 +12,28 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from setuptools import setup
from setuptools import find_packages
from setuptools import find_packages, setup


extras = {}
extras["quality"] = ["black ~= 23.1", "ruff >= 0.0.241", "hf-doc-builder >= 0.3.0", "urllib3 < 2.0.0"]
extras["quality"] = [
    "black ~= 23.1",  # hf-doc-builder has a hidden dependency on `black`
    "hf-doc-builder >= 0.3.0",
    "ruff ~= 0.2.1",
]
extras["docs"] = []
extras["test_prod"] = ["pytest", "pytest-xdist", "pytest-subtests", "parameterized"]
extras["test_prod"] = ["pytest>=7.2.0,<=8.0.0", "pytest-xdist", "pytest-subtests", "parameterized"]
extras["test_dev"] = [
    "datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed<0.13.0", "tqdm", "bitsandbytes", "timm"
    "datasets",
    "evaluate",
    "torchpippy>=0.2.0",
    "transformers",
    "scipy",
    "scikit-learn",
    "deepspeed",
    "tqdm",
    "bitsandbytes",
    "timm",
]
extras["testing"] = extras["test_prod"] + extras["test_dev"]
extras["rich"] = ["rich"]
@@ -34,14 +47,14 @@ extras["sagemaker"] = [

setup(
    name="accelerate",
    version="0.27.0.dev0",
    version="0.29.0",
    description="Accelerate",
    long_description=open("README.md", "r", encoding="utf-8").read(),
    long_description=open("README.md", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    keywords="deep learning",
    license="Apache",
    author="The HuggingFace team",
    author_email="sylvain@huggingface.co",
    author_email="zach.mueller@huggingface.co",
    url="https://github.com/huggingface/accelerate",
    package_dir={"": "src"},
    packages=find_packages("src"),
@@ -54,7 +67,15 @@ setup(
        ]
    },
    python_requires=">=3.8.0",
    install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.10.0", "huggingface_hub", "safetensors>=0.3.1"],
    install_requires=[
        "numpy>=1.17",
        "packaging>=20.0",
        "psutil",
        "pyyaml",
        "torch>=1.10.0",
        "huggingface_hub",
        "safetensors>=0.3.1",
    ],
    extras_require=extras,
    classifiers=[
        "Development Status :: 5 - Production/Stable",

@@ -1,4 +1,17 @@
__version__ = "0.27.0.dev0"
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.29.0"

from .accelerator import Accelerator
from .big_modeling import (
@@ -11,10 +24,12 @@ from .big_modeling import (
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .inference import prepare_pippy
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    AutocastKwargs,
    DataLoaderConfiguration,
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,

@@ -47,6 +47,7 @@ from .utils import (
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
    AutocastKwargs,
    DataLoaderConfiguration,
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
@@ -79,10 +80,11 @@ from .utils import (
    is_fp8_available,
    is_ipex_available,
    is_megatron_lm_available,
    is_mlu_available,
    is_msamp_available,
    is_npu_available,
    is_torch_version,
    is_tpu_available,
    is_torch_xla_available,
    is_xpu_available,
    load_fsdp_model,
    load_fsdp_optimizer,
@@ -133,7 +135,8 @@ if is_megatron_lm_available():
from torch.distributed.algorithms.join import Join


if is_tpu_available(check_device=False):
if is_torch_xla_available():
    import torch_xla.amp as xamp
    import torch_xla.core.xla_model as xm
    import torch_xla.distributed.xla_multiprocessing as xmp

@@ -149,6 +152,12 @@ except ImportError:

logger = get_logger(__name__)

# Sentinel values for defaults
_split_batches = object()
_dispatch_batches = object()
_even_batches = object()
_use_seedable_sampler = object()
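The `object()` sentinels above let the constructor tell "argument not passed" apart from an explicit value such as `None` or `False`, which is what makes the deprecation warnings further down possible. A generic sketch of the idiom (names here are illustrative, not from the codebase):

```python
# Generic illustration of the sentinel-default idiom used above.
_unset = object()


def configure(even_batches=_unset):
    if even_batches is not _unset:
        # The caller explicitly passed something (possibly False or None),
        # so a deprecation warning can be raised while still honoring the value.
        print(f"`even_batches={even_batches}` was passed explicitly")
```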
class Accelerator:
|
||||
"""
|
||||
@ -158,11 +167,6 @@ class Accelerator:
|
||||
device_placement (`bool`, *optional*, defaults to `True`):
|
||||
Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model,
|
||||
etc...).
|
||||
split_batches (`bool`, *optional*, defaults to `False`):
|
||||
Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If
|
||||
`True` the actual batch size used will be the same on any kind of distributed processes, but it must be a
|
||||
round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set
|
||||
in your script multiplied by the number of processes.
|
||||
mixed_precision (`str`, *optional*):
|
||||
Whether or not to use mixed precision training. Choose from 'no','fp16','bf16 or 'fp8'. Will default to the
|
||||
value in the environment variable `ACCELERATE_MIXED_PRECISION`, which will use the default value in the
|
||||
@ -175,13 +179,15 @@ class Accelerator:
|
||||
cpu (`bool`, *optional*):
|
||||
Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force
|
||||
the execution on one process only.
|
||||
deepspeed_plugin (`DeepSpeedPlugin`, *optional*):
|
||||
dataloader_config (`DataLoaderConfiguration`, *optional*):
|
||||
A configuration for how the dataloaders should be handled in distributed scenarios.
|
||||
deepspeed_plugin ([`~utils.DeepSpeedPlugin`], *optional*):
|
||||
Tweak your DeepSpeed related args using this argument. This argument is optional and can be configured
|
||||
directly using *accelerate config*
|
||||
fsdp_plugin (`FullyShardedDataParallelPlugin`, *optional*):
|
||||
fsdp_plugin ([`~utils.FullyShardedDataParallelPlugin`], *optional*):
|
||||
Tweak your FSDP related args using this argument. This argument is optional and can be configured directly
|
||||
using *accelerate config*
|
||||
megatron_lm_plugin (`MegatronLMPlugin`, *optional*):
|
||||
megatron_lm_plugin ([`~utils.MegatronLMPlugin`], *optional*):
|
||||
Tweak your MegatronLM related args using this argument. This argument is optional and can be configured
|
||||
directly using *accelerate config*
|
||||
rng_types (list of `str` or [`~utils.RNGType`]):
|
||||
@ -204,33 +210,20 @@ class Accelerator:
|
||||
- `"comet_ml"`
|
||||
If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can
|
||||
also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`.
|
||||
project_config (`ProjectConfiguration`, *optional*):
|
||||
project_config ([`~utils.ProjectConfiguration`], *optional*):
|
||||
A configuration for how saving the state can be handled.
|
||||
project_dir (`str`, `os.PathLike`, *optional*):
|
||||
A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved
|
||||
checkpoints.
|
||||
dispatch_batches (`bool`, *optional*):
|
||||
If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process
|
||||
and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose
|
||||
underlying dataset is an `IterableDataset`, `False` otherwise.
|
||||
even_batches (`bool`, *optional*, defaults to `True`):
|
||||
If set to `True`, in cases where the total batch size across all processes does not exactly divide the
|
||||
dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among
|
||||
all workers.
|
||||
use_seedable_sampler (`bool`, *optional*, defaults to `False`):
|
||||
Whether or not use a fully seedable random sampler ([`~data_loader.SeedableRandomSampler`]). Ensures
|
||||
training results are fully reproducable using a different sampling technique. While seed-to-seed results
|
||||
may differ, on average the differences are neglible when using multiple different seeds to compare. Should
|
||||
also be ran with [`~utils.set_seed`] for the best results.
|
||||
step_scheduler_with_optimizer (`bool`, *optional`, defaults to `True`):
|
||||
Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only
|
||||
done under certain circumstances (at the end of each epoch, for instance).
|
||||
kwargs_handlers (`list[KwargHandler]`, *optional*)
|
||||
A list of `KwargHandler` to customize how the objects related to distributed training or mixed precision
|
||||
are created. See [kwargs](kwargs) for more information.
|
||||
dynamo_backend (`str` or `DynamoBackend`, *optional*, defaults to `"no"`):
|
||||
kwargs_handlers (list of [`~utils.KwargsHandler`], *optional*)
|
||||
A list of [`~utils.KwargsHandler`] to customize how the objects related to distributed training or mixed
|
||||
precision are created. See [kwargs](kwargs) for more information.
|
||||
dynamo_backend (`str` or [`~utils.DynamoBackend`], *optional*, defaults to `"no"`):
|
||||
Set to one of the possible dynamo backends to optimize your training with torch dynamo.
|
||||
gradient_accumulation_plugin (`GradientAccumulationPlugin`, *optional*):
|
||||
gradient_accumulation_plugin ([`~utils.GradientAccumulationPlugin`], *optional*):
|
||||
A configuration for how gradient accumulation should be handled, if more tweaking than just the
|
||||
`gradient_accumulation_steps` is needed.
|
||||
|
||||
@ -253,10 +246,11 @@ class Accelerator:
|
||||
def __init__(
|
||||
self,
|
||||
device_placement: bool = True,
|
||||
split_batches: bool = False,
|
||||
split_batches: bool = _split_batches,
|
||||
mixed_precision: PrecisionType | str | None = None,
|
||||
gradient_accumulation_steps: int = 1,
|
||||
cpu: bool = False,
|
||||
dataloader_config: DataLoaderConfiguration | None = None,
|
||||
deepspeed_plugin: DeepSpeedPlugin | None = None,
|
||||
fsdp_plugin: FullyShardedDataParallelPlugin | None = None,
|
||||
megatron_lm_plugin: MegatronLMPlugin | None = None,
|
||||
@ -265,9 +259,9 @@ class Accelerator:
|
||||
project_dir: str | os.PathLike | None = None,
|
||||
project_config: ProjectConfiguration | None = None,
|
||||
gradient_accumulation_plugin: GradientAccumulationPlugin | None = None,
|
||||
dispatch_batches: bool | None = None,
|
||||
even_batches: bool = True,
|
||||
use_seedable_sampler: bool = False,
|
||||
dispatch_batches: bool | None = _dispatch_batches,
|
||||
even_batches: bool = _even_batches,
|
||||
use_seedable_sampler: bool = _use_seedable_sampler,
|
||||
step_scheduler_with_optimizer: bool = True,
|
||||
kwargs_handlers: list[KwargsHandler] | None = None,
|
||||
dynamo_backend: DynamoBackend | str | None = None,
|
||||
@ -300,7 +294,10 @@ class Accelerator:
|
||||
if deepspeed_plugin:
|
||||
if not is_deepspeed_available():
|
||||
raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.")
|
||||
if compare_versions("deepspeed", "<", "0.9.3"):
|
||||
if is_mlu_available():
|
||||
if compare_versions("deepspeed-mlu", "<", "0.10.1"):
|
||||
raise ImportError("DeepSpeed MLU version must be >= 0.10.1. Please update DeepSpeed MLU.")
|
||||
elif compare_versions("deepspeed", "<", "0.9.3"):
|
||||
raise ImportError("DeepSpeed version must be >= 0.9.3. Please update DeepSpeed.")
|
||||
|
||||
mixed_precision = (
|
||||
@ -373,8 +370,6 @@ class Accelerator:
|
||||
raise ValueError("You can only pass one `AutocastKwargs` in `kwargs_handler`.")
|
||||
else:
|
||||
self.autocast_handler = handler
|
||||
if self.fp8_recipe_handler is None and mixed_precision == "fp8":
|
||||
self.fp8_recipe_handler = FP8RecipeKwargs()
|
||||
|
||||
kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}
|
||||
self.state = AcceleratorState(
|
||||
@ -388,6 +383,9 @@ class Accelerator:
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
if self.fp8_recipe_handler is None and self.state.mixed_precision == "fp8":
|
||||
self.fp8_recipe_handler = FP8RecipeKwargs(backend="MSAMP" if is_msamp_available() else "TE")
|
||||
|
||||
trackers = filter_trackers(log_with, self.logging_dir)
|
||||
if len(trackers) < 1 and log_with is not None:
|
||||
warnings.warn(f"`log_with={log_with}` was passed but no supported trackers are currently installed.")
|
||||
@ -396,7 +394,7 @@ class Accelerator:
|
||||
if (
|
||||
(mixed_precision != "bf16")
|
||||
and getattr(self.state, "downcast_bfloat", False)
|
||||
and (self.state.distributedType != DistributedType.TPU)
|
||||
and (self.state.distributedType != DistributedType.XLA)
|
||||
):
|
||||
raise ValueError("Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU")
|
||||
|
||||
@@ -413,36 +411,58 @@
        self.gradient_state = GradientState(
            gradient_accumulation_plugin=gradient_accumulation_plugin,
        )
        if self.state.distributed_type == DistributedType.TPU:
            if self.gradient_state.num_steps != 1:
                raise ValueError(
                    "Gradient accumulation is not supported on TPU. Please set `gradient_accumulation_steps` to 1 and don't pass in a `GradientAccumulationPlugin` object."
                )

        self.device_placement = device_placement
        self.split_batches = split_batches
        self.dispatch_batches = dispatch_batches
        self.even_batches = even_batches
        self.use_seedable_sampler = use_seedable_sampler
        if dataloader_config is None:
            dataloader_config = DataLoaderConfiguration()
        self.dataloader_config = dataloader_config
        # Deal with deprecated args
        # TODO: Remove in v1.0.0
        deprecated_dl_args = {}
        if dispatch_batches is not _dispatch_batches:
            deprecated_dl_args["dispatch_batches"] = dispatch_batches
            self.dataloader_config.dispatch_batches = dispatch_batches
        if split_batches is not _split_batches:
            deprecated_dl_args["split_batches"] = split_batches
            self.dataloader_config.split_batches = split_batches
        if even_batches is not _even_batches:
            deprecated_dl_args["even_batches"] = even_batches
            self.dataloader_config.even_batches = even_batches
        if use_seedable_sampler is not _use_seedable_sampler:
            deprecated_dl_args["use_seedable_sampler"] = use_seedable_sampler
            self.dataloader_config.use_seedable_sampler = use_seedable_sampler
        if len(deprecated_dl_args) > 0:
            values = ", ".join([f"{k}={v}" for k, v in deprecated_dl_args.items()])
            warnings.warn(
                f"Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: {deprecated_dl_args.keys()}. "
                "Please pass an `accelerate.DataLoaderConfiguration` instead: \n"
                f"dataloader_config = DataLoaderConfiguration({values})",
                FutureWarning,
            )
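In practical terms, the deprecation path above means call sites move the four dataloader-related flags into a single object. A minimal sketch of the migration the warning asks for (the specific values are illustrative):

```python
# Before (deprecated as of this release, slated for removal in v1.0):
# accelerator = Accelerator(split_batches=True, dispatch_batches=True, even_batches=False)

# After: group the same options in a DataLoaderConfiguration and pass that instead.
from accelerate import Accelerator, DataLoaderConfiguration

dataloader_config = DataLoaderConfiguration(
    split_batches=True,
    dispatch_batches=True,
    even_batches=False,
    use_seedable_sampler=True,
)
accelerator = Accelerator(dataloader_config=dataloader_config)
```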
self.step_scheduler_with_optimizer = step_scheduler_with_optimizer
|
||||
|
||||
# Mixed precision attributes
|
||||
self.scaler = None
|
||||
self.native_amp = False
|
||||
err = "{mode} mixed precision requires {requirement}"
|
||||
if (
|
||||
self.state.mixed_precision == "fp16"
|
||||
and self.device.type != "cpu"
|
||||
and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM)
|
||||
):
|
||||
self.native_amp = True
|
||||
if self.device.type not in ("xpu", "cuda", "mps", "npu"):
|
||||
raise ValueError(err.format(mode="fp16", requirement="a GPU"))
|
||||
if self.device.type not in ("xpu", "cuda", "mps", "npu", "xla", "mlu") or is_torch_xla_available(
|
||||
check_is_tpu=True
|
||||
):
|
||||
raise ValueError(f"fp16 mixed precision requires a GPU (not {self.device.type!r}).")
|
||||
kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}
|
||||
if self.distributed_type == DistributedType.FSDP:
|
||||
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
|
||||
|
||||
self.scaler = ShardedGradScaler(**kwargs)
|
||||
elif is_torch_xla_available(check_is_gpu=True):
|
||||
self.scaler = xamp.GradScaler(**kwargs)
|
||||
elif is_mlu_available():
|
||||
self.scaler = torch.mlu.amp.GradScaler(**kwargs)
|
||||
elif is_npu_available():
|
||||
self.scaler = torch.npu.amp.GradScaler(**kwargs)
|
||||
else:
|
||||
@ -456,8 +476,8 @@ class Accelerator:
|
||||
self.native_amp = True
|
||||
else:
|
||||
self.native_amp = is_bf16_available(True)
|
||||
if mixed_precision == "bf16" and not self.native_amp and not is_tpu_available():
|
||||
raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device."))
|
||||
if mixed_precision == "bf16" and not self.native_amp and not is_torch_xla_available():
|
||||
raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")
|
||||
|
||||
# Start of internal step tracking
|
||||
self.step = 0
|
||||
@@ -510,6 +530,26 @@
    def device(self):
        return self.state.device

    @property
    def split_batches(self):
        return self.dataloader_config.split_batches

    @property
    def dispatch_batches(self):
        return self.dataloader_config.dispatch_batches

    @property
    def even_batches(self):
        return self.dataloader_config.even_batches

    @even_batches.setter
    def even_batches(self, value: bool):
        self.dataloader_config.even_batches = value

    @property
    def use_seedable_sampler(self):
        return self.dataloader_config.use_seedable_sampler

    @property
    def project_dir(self):
        return self.project_configuration.project_dir
@@ -938,14 +978,14 @@
        model.require_backward_grad_sync = old_require_backward_grad_sync
        model.require_forward_param_sync = old_require_forward_param_sync

    def _do_sync(self):
    def _do_sync(self, force: bool = False):
        "Sets the right `sync_gradients` context and either resets or increases `self.step`"
        if self.gradient_state.sync_with_dataloader and self.gradient_state.end_of_dataloader:
            self.step = 0
            self.gradient_state._set_sync_gradients(True)
        else:
            self.step += 1
            self.gradient_state._set_sync_gradients((self.step % self.gradient_state.num_steps) == 0)
            self.gradient_state._set_sync_gradients(force or ((self.step % self.gradient_state.num_steps) == 0))

    @property
    def sync_gradients(self):
@@ -991,7 +1031,9 @@
        ... optimizer.zero_grad()
        ```
        """
        self._do_sync()
        # sync_each_batch=True will guarantee below that self.sync_gradients=True, therefore
        # resulting in the nullcontext always being selected.
        self._do_sync(force=self.gradient_state.plugin_kwargs.get("sync_each_batch", False))
        with contextlib.ExitStack() as cm_stack:
            for m in models:
                cm_stack.enter_context(contextlib.nullcontext() if self.sync_gradients else self.no_sync(m))
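The new `force` flag exists so that a `sync_each_batch` option on the gradient accumulation plugin can override the modulo check above. A hedged usage sketch, assuming `model`, `optimizer`, and `train_dataloader` are already defined and that `GradientAccumulationPlugin` accepts `sync_each_batch` as this release's gradient-state code suggests:

```python
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

plugin = GradientAccumulationPlugin(num_steps=4, sync_each_batch=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)

for batch in train_dataloader:
    # With sync_each_batch=True, gradients are synced on every batch even though
    # the optimizer still only steps every `num_steps` batches.
    with accelerator.accumulate(model):
        loss = model(**batch).loss
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```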
@ -1042,7 +1084,12 @@ class Accelerator:
|
||||
... optimizer.zero_grad()
|
||||
```
|
||||
"""
|
||||
if self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU):
|
||||
if self.distributed_type in (
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_XPU,
|
||||
):
|
||||
dl_even_batches_values = []
|
||||
|
||||
if even_batches is not None:
|
||||
@ -1193,7 +1240,7 @@ class Accelerator:
|
||||
# On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will
|
||||
# have parameters disconnected from the model (so no training :-( ).
|
||||
# If the model and optimizer have parameters on different devices we raise an error.
|
||||
if self.distributed_type == DistributedType.TPU:
|
||||
if self.distributed_type == DistributedType.XLA:
|
||||
model_device, optimizer_device = self._get_devices()
|
||||
if model_device is not None and optimizer_device is not None and model_device != optimizer_device:
|
||||
raise ValueError(
|
||||
@ -1205,7 +1252,7 @@ class Accelerator:
|
||||
)
|
||||
|
||||
# If we're dealing with device placement, this deals with that by...
|
||||
tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.TPU
|
||||
tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.XLA
|
||||
if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"):
|
||||
# 1. grabbing old model parameters
|
||||
old_named_params = self._get_named_parameters(*args)
|
||||
@ -1244,7 +1291,7 @@ class Accelerator:
|
||||
item in container
|
||||
for container in (self._dataloaders, self._models, self._optimizers, self._schedulers)
|
||||
):
|
||||
setattr(item, "_is_accelerate_prepared", True)
|
||||
item._is_accelerate_prepared = True
|
||||
|
||||
return result if len(result) > 1 else result[0]
|
||||
|
||||
@ -1341,6 +1388,7 @@ class Accelerator:
|
||||
if not evaluation_mode:
|
||||
if self.distributed_type in (
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
):
|
||||
@ -1406,7 +1454,7 @@ class Accelerator:
|
||||
elif self.distributed_type == DistributedType.MULTI_CPU:
|
||||
kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
|
||||
model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)
|
||||
elif self.distributed_type == DistributedType.TPU and self.state.fork_launched:
|
||||
elif self.distributed_type == DistributedType.XLA and self.state.fork_launched:
|
||||
model = xmp.MpModelWrapper(model).to(self.device)
|
||||
# torch.compile should be called last and only if the model isn't already compiled.
|
||||
if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model):
|
||||
@ -1753,10 +1801,11 @@ class Accelerator:
|
||||
for obj in result:
|
||||
if isinstance(obj, torch.nn.Module):
|
||||
model = obj
|
||||
model.train()
|
||||
elif isinstance(obj, (torch.optim.Optimizer)):
|
||||
optimizer = obj
|
||||
if optimizer is not None and model is not None:
|
||||
dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else torch.float32
|
||||
dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else None
|
||||
if self.device.type == "xpu" and is_xpu_available():
|
||||
model = model.to(self.device)
|
||||
model, optimizer = torch.xpu.optimize(
|
||||
@ -1842,7 +1891,7 @@ class Accelerator:
|
||||
self._dataloaders.append(data_loader)
|
||||
return data_loader
|
||||
if device_placement is None:
|
||||
device_placement = self.device_placement if self.distributed_type != DistributedType.TPU else False
|
||||
device_placement = self.device_placement if self.distributed_type != DistributedType.XLA else False
|
||||
prepared_data_loader = prepare_data_loader(
|
||||
data_loader,
|
||||
self.device,
|
||||
@@ -2055,10 +2104,6 @@
        for opt in optimizer:
            while isinstance(opt, AcceleratedOptimizer):
                opt = opt.optimizer
            # Reduce gradients first for XLA
            if self.distributed_type == DistributedType.TPU:
                gradients = xm._fetch_gradients(opt)
                self.reduce(gradients, scale=1.0 / self.num_processes)
            self.scaler.unscale_(opt)

    def clip_grad_norm_(self, parameters, max_norm, norm_type=2):
@@ -2096,6 +2141,19 @@
            # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
            # We cannot return the gradient norm because DeepSpeed does it.
            return None
        elif self.distributed_type == DistributedType.XLA:
            # Reduce gradients first for XLA
            for acc_opt in self._optimizers:
                if not acc_opt.gradient_state.is_xla_gradients_synced:
                    opt = acc_opt
                    while isinstance(opt, AcceleratedOptimizer):
                        opt = opt.optimizer
                    gradients = xm._fetch_gradients(opt)
                    # Use xm.all_reduce to perform an in-place all-reduce. Recursive all-reduce each tensor
                    # one by one in self.reduce is non-inplace.
                    xm.all_reduce("sum", gradients, scale=1.0 / self.num_processes)
                    # Set is_xla_gradients_synced to True to avoid all-reduce twice in the AcceleratedOptimizer step.
                    acc_opt.gradient_state.is_xla_gradients_synced = True
        self.unscale_gradients()
        return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)
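For reference, the method above replaces a direct `torch.nn.utils.clip_grad_norm_` call inside an Accelerate training loop; a short sketch, assuming `accelerator`, `model`, `optimizer`, and `dataloader` have already been created and prepared:

```python
for batch in dataloader:
    optimizer.zero_grad()
    loss = model(**batch).loss
    accelerator.backward(loss)
    if accelerator.sync_gradients:
        # On XLA this also triggers the in-place all-reduce shown above before clipping.
        accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
    optimizer.step()
```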
|
||||
@ -2384,7 +2442,7 @@ class Accelerator:
|
||||
self.trackers.append(tracker)
|
||||
else:
|
||||
tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)]
|
||||
if getattr(tracker_init, "requires_logging_directory"):
|
||||
if tracker_init.requires_logging_directory:
|
||||
# We can skip this check since it was done in `__init__`
|
||||
self.trackers.append(
|
||||
tracker_init(project_name, self.logging_dir, **init_kwargs.get(str(tracker), {}))
|
||||
@ -2713,7 +2771,7 @@ class Accelerator:
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
logger.info(f"Saving current state to {output_dir}")
|
||||
|
||||
if self.distributed_type == DistributedType.TPU:
|
||||
if self.distributed_type == DistributedType.XLA:
|
||||
# Finish running the previous step before checkpointing
|
||||
xm.mark_step()
|
||||
|
||||
@ -2911,6 +2969,7 @@ class Accelerator:
|
||||
if map_location is None:
|
||||
if self.num_processes > 1 and self.distributed_type in (
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_NPU,
|
||||
):
|
||||
map_location = "on_device"
|
||||
@ -3137,6 +3196,7 @@ class Accelerator:
|
||||
autocast_handler = self.autocast_handler
|
||||
autocast_context = get_mixed_precision_context_manager(self.native_amp, autocast_handler)
|
||||
autocast_context.__enter__()
|
||||
# TODO: should the `yield` be in a try/finally block?
|
||||
yield
|
||||
autocast_context.__exit__(*sys.exc_info())
|
||||
|
||||
|
||||
@ -31,13 +31,16 @@ from .hooks import (
|
||||
)
|
||||
from .utils import (
|
||||
OffloadedWeightsLoader,
|
||||
check_cuda_p2p_ib_support,
|
||||
check_device_map,
|
||||
extract_submodules_state_dict,
|
||||
find_tied_parameters,
|
||||
get_balanced_memory,
|
||||
infer_auto_device_map,
|
||||
is_mlu_available,
|
||||
is_npu_available,
|
||||
is_torch_version,
|
||||
is_xpu_available,
|
||||
load_checkpoint_in_model,
|
||||
offload_state_dict,
|
||||
parse_flag_from_env,
|
||||
@ -440,7 +443,13 @@ def dispatch_model(
|
||||
def add_warning(fn, model):
|
||||
@wraps(fn)
|
||||
def wrapper(*args, **kwargs):
|
||||
logger.warning("You shouldn't move a model when it is dispatched on multiple devices.")
|
||||
warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks."
|
||||
if str(fn.__name__) == "to":
|
||||
to_device = torch._C._nn._parse_to(*args, **kwargs)[0]
|
||||
if to_device is not None:
|
||||
logger.warning(warning_msg)
|
||||
else:
|
||||
logger.warning(warning_msg)
|
||||
for param in model.parameters():
|
||||
if param.device == torch.device("meta"):
|
||||
raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
|
||||
@ -451,14 +460,30 @@ def dispatch_model(
|
||||
model.to = add_warning(model.to, model)
|
||||
if is_npu_available():
|
||||
model.npu = add_warning(model.npu, model)
|
||||
elif is_mlu_available():
|
||||
model.mlu = add_warning(model.mlu, model)
|
||||
elif is_xpu_available():
|
||||
model.xpu = add_warning(model.xpu, model)
|
||||
else:
|
||||
model.cuda = add_warning(model.cuda, model)
|
||||
|
||||
# Check if we are using multi-gpus with RTX 4000 series
|
||||
use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1
|
||||
if use_multi_gpu and not check_cuda_p2p_ib_support():
|
||||
logger.warning(
|
||||
"We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. "
|
||||
"This can affect the multi-gpu inference when using accelerate device_map."
|
||||
"Please make sure to update your driver to the latest version which resolves this."
|
||||
)
|
||||
else:
|
||||
device = list(device_map.values())[0]
|
||||
# `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
|
||||
if is_npu_available() and isinstance(device, int):
|
||||
device = f"npu:{device}"
|
||||
elif is_mlu_available() and isinstance(device, int):
|
||||
device = f"mlu:{device}"
|
||||
elif is_xpu_available() and isinstance(device, int):
|
||||
device = f"xpu:{device}"
|
||||
if device != "disk":
|
||||
model.to(device)
|
||||
else:
|
||||
@ -567,7 +592,11 @@ def load_checkpoint_and_dispatch(
|
||||
low_zero=(device_map == "balanced_low_0"),
|
||||
)
|
||||
device_map = infer_auto_device_map(
|
||||
model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype
|
||||
model,
|
||||
max_memory=max_memory,
|
||||
no_split_module_classes=no_split_module_classes,
|
||||
dtype=dtype,
|
||||
offload_buffers=offload_buffers,
|
||||
)
|
||||
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
|
||||
offload_state_dict = True
|
||||
|
||||
@ -32,13 +32,13 @@ from .utils import (
|
||||
SCHEDULER_NAME,
|
||||
WEIGHTS_NAME,
|
||||
get_pretty_name,
|
||||
is_tpu_available,
|
||||
is_torch_xla_available,
|
||||
is_xpu_available,
|
||||
save,
|
||||
)
|
||||
|
||||
|
||||
if is_tpu_available(check_device=False):
|
||||
if is_torch_xla_available():
|
||||
import torch_xla.core.xla_model as xm
|
||||
|
||||
from .logging import get_logger
|
||||
@ -142,7 +142,7 @@ def save_accelerator_state(
|
||||
states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all()
|
||||
else:
|
||||
states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all()
|
||||
if is_tpu_available():
|
||||
if is_torch_xla_available():
|
||||
states["xm_seed"] = xm.get_rng_state()
|
||||
output_states_file = output_dir.joinpath(states_name)
|
||||
torch.save(states, output_states_file)
|
||||
@ -249,7 +249,7 @@ def load_accelerator_state(
|
||||
torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"])
|
||||
else:
|
||||
torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"])
|
||||
if is_tpu_available():
|
||||
if is_torch_xla_available():
|
||||
xm.set_rng_state(states["xm_seed"])
|
||||
logger.info("All random states loaded successfully")
|
||||
except Exception:
|
||||
|
||||
@ -0,0 +1,13 @@
|
||||
# Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
@ -14,18 +14,17 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from argparse import ArgumentParser
|
||||
|
||||
from accelerate.commands.config import get_config_parser
|
||||
from accelerate.commands.env import env_command_parser
|
||||
from accelerate.commands.estimate import estimate_command_parser
|
||||
from accelerate.commands.launch import launch_command_parser
|
||||
from accelerate.commands.test import test_command_parser
|
||||
from accelerate.commands.tpu import tpu_command_parser
|
||||
from accelerate.commands.utils import CustomArgumentParser
|
||||
|
||||
|
||||
def main():
|
||||
parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
|
||||
parser = CustomArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
|
||||
subparsers = parser.add_subparsers(help="accelerate command helpers")
|
||||
|
||||
# Register commands
|
||||
|
||||
@ -20,6 +20,7 @@ from ...utils import (
|
||||
ComputeEnvironment,
|
||||
DistributedType,
|
||||
is_deepspeed_available,
|
||||
is_mlu_available,
|
||||
is_mps_available,
|
||||
is_npu_available,
|
||||
is_transformers_available,
|
||||
@ -48,7 +49,7 @@ from .config_utils import (
|
||||
def get_cluster_input():
|
||||
distributed_type = _ask_options(
|
||||
"Which type of machine are you using?",
|
||||
["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "TPU"],
|
||||
["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "multi-MLU", "TPU"],
|
||||
_convert_distributed_mode,
|
||||
)
|
||||
|
||||
@ -64,6 +65,7 @@ def get_cluster_input():
|
||||
|
||||
if distributed_type in [
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.MULTI_CPU,
|
||||
@ -116,6 +118,7 @@ def get_cluster_input():
|
||||
use_cpu = False
|
||||
|
||||
ipex_config = {}
|
||||
mpirun_config = {}
|
||||
if use_cpu:
|
||||
ipex_config["ipex"] = _ask_field(
|
||||
"Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:",
|
||||
@ -123,10 +126,26 @@ def get_cluster_input():
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if distributed_type == DistributedType.MULTI_CPU:
|
||||
use_mpirun = _ask_field(
|
||||
"Do you want accelerate to launch mpirun? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if use_mpirun:
|
||||
mpirun_hostfile = _ask_field(
|
||||
"Please enter the path to the hostfile to use with mpirun [~/hostfile]: ",
|
||||
str,
|
||||
default="~/hostfile",
|
||||
)
|
||||
mpirun_config["mpirun_hostfile"] = os.path.expanduser(mpirun_hostfile.strip())
|
||||
mpirun_config["mpirun_ccl"] = _ask_field("Enter the number of oneCCL worker threads [1]: ", default=1)
|
||||
if (
|
||||
not use_cpu
|
||||
and is_xpu_available()
|
||||
and distributed_type not in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.TPU]
|
||||
and distributed_type
|
||||
not in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.XLA]
|
||||
):
|
||||
ipex_config["use_xpu"] = _ask_field(
|
||||
"Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:",
|
||||
@ -181,7 +200,13 @@ def get_cluster_input():
|
||||
deepspeed_config = {}
|
||||
if (
|
||||
distributed_type
|
||||
in [DistributedType.MULTI_GPU, DistributedType.MULTI_XPU, DistributedType.MULTI_NPU, DistributedType.NO]
|
||||
in [
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.NO,
|
||||
]
|
||||
and not use_mps
|
||||
):
|
||||
use_deepspeed = _ask_field(
|
||||
@ -317,7 +342,12 @@ def get_cluster_input():
|
||||
)
|
||||
|
||||
fsdp_config = {}
|
||||
if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU]:
|
||||
if distributed_type in [
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_XPU,
|
||||
]:
|
||||
use_fsdp = _ask_field(
|
||||
"Do you want to use FullyShardedDataParallel? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
@ -480,12 +510,15 @@ def get_cluster_input():
|
||||
DistributedType.MULTI_CPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.TPU,
|
||||
DistributedType.XLA,
|
||||
]:
|
||||
machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "")
|
||||
if machine_type == "TPU":
|
||||
machine_type += " cores"
|
||||
elif machine_type == "CPU":
|
||||
machine_type = "processes"
|
||||
else:
|
||||
machine_type += "(s)"
|
||||
num_processes = _ask_field(
|
||||
@ -513,6 +546,7 @@ def get_cluster_input():
|
||||
distributed_type
|
||||
in [
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.NO,
|
||||
@ -522,6 +556,8 @@ def get_cluster_input():
|
||||
):
|
||||
if is_npu_available():
|
||||
machine_type = "NPU(s)"
|
||||
elif is_mlu_available():
|
||||
machine_type = "MLU(s)"
|
||||
else:
|
||||
machine_type = "GPU(s)"
|
||||
gpu_ids = _ask_field(
|
||||
@ -529,7 +565,17 @@ def get_cluster_input():
|
||||
default="all",
|
||||
)
|
||||
|
||||
if distributed_type == DistributedType.TPU:
|
||||
# CPU affinity is only supported on NVIDIA hardware for now
|
||||
enable_cpu_affinity = False
|
||||
if distributed_type == (DistributedType.NO, DistributedType.MULTI_GPU) and not use_cpu and not use_mps:
|
||||
enable_cpu_affinity = _ask_field(
|
||||
"Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
if distributed_type == DistributedType.XLA:
|
||||
mixed_precision = "no"
|
||||
main_training_function = _ask_field(
|
||||
"What is the name of the function in your script that should be launched in all parallel scripts? [main]: ",
|
||||
@ -620,7 +666,7 @@ def get_cluster_input():
|
||||
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
|
||||
)
|
||||
|
||||
if distributed_type == DistributedType.TPU and mixed_precision == "bf16":
|
||||
if distributed_type == DistributedType.XLA and mixed_precision == "bf16":
|
||||
tpu_downcast_bf16 = _ask_field(
|
||||
"Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no"
|
||||
)
|
||||
@ -641,6 +687,7 @@ def get_cluster_input():
|
||||
fsdp_config=fsdp_config,
|
||||
megatron_lm_config=megatron_lm_config,
|
||||
ipex_config=ipex_config,
|
||||
mpirun_config=mpirun_config,
|
||||
use_cpu=use_cpu,
|
||||
rdzv_backend=rdzv_backend,
|
||||
same_network=same_network,
|
||||
@ -654,4 +701,5 @@ def get_cluster_input():
|
||||
tpu_use_cluster=tpu_use_cluster,
|
||||
dynamo_config=dynamo_config,
|
||||
debug=debug,
|
||||
enable_cpu_affinity=enable_cpu_affinity,
|
||||
)
|
||||
|
||||
@ -27,7 +27,7 @@ from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSI
|
||||
|
||||
|
||||
hf_cache_home = os.path.expanduser(
|
||||
os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
|
||||
os.environ.get("HF_HOME", os.path.join(os.environ.get("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
|
||||
)
|
||||
cache_dir = os.path.join(hf_cache_home, "accelerate")
|
||||
default_json_config_file = os.path.join(cache_dir, "default_config.yaml")
|
||||
@ -45,13 +45,13 @@ def load_config_from_file(config_file):
|
||||
if not os.path.isfile(config_file):
|
||||
raise FileNotFoundError(
|
||||
f"The passed configuration file `{config_file}` does not exist. "
|
||||
"Please pass an existing file to `accelerate launch`, or use the the default one "
|
||||
"Please pass an existing file to `accelerate launch`, or use the default one "
|
||||
"created through `accelerate config` and run `accelerate launch` "
|
||||
"without the `--config_file` argument."
|
||||
)
|
||||
else:
|
||||
config_file = default_config_file
|
||||
with open(config_file, "r", encoding="utf-8") as f:
|
||||
with open(config_file, encoding="utf-8") as f:
|
||||
if config_file.endswith(".json"):
|
||||
if (
|
||||
json.load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
|
||||
@ -94,7 +94,7 @@ class BaseConfig:
|
||||
@classmethod
|
||||
def from_json_file(cls, json_file=None):
|
||||
json_file = default_json_config_file if json_file is None else json_file
|
||||
with open(json_file, "r", encoding="utf-8") as f:
|
||||
with open(json_file, encoding="utf-8") as f:
|
||||
config_dict = json.load(f)
|
||||
if "compute_environment" not in config_dict:
|
||||
config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
|
||||
@ -109,6 +109,8 @@ class BaseConfig:
|
||||
config_dict["use_cpu"] = False
|
||||
if "debug" not in config_dict:
|
||||
config_dict["debug"] = False
|
||||
if "enable_cpu_affinity" not in config_dict:
|
||||
config_dict["enable_cpu_affinity"] = False
|
||||
extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
|
||||
if len(extra_keys) > 0:
|
||||
raise ValueError(
|
||||
@ -126,7 +128,7 @@ class BaseConfig:
|
||||
@classmethod
|
||||
def from_yaml_file(cls, yaml_file=None):
|
||||
yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
|
||||
with open(yaml_file, "r", encoding="utf-8") as f:
|
||||
with open(yaml_file, encoding="utf-8") as f:
|
||||
config_dict = yaml.safe_load(f)
|
||||
if "compute_environment" not in config_dict:
|
||||
config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
|
||||
@ -143,6 +145,8 @@ class BaseConfig:
|
||||
config_dict["use_cpu"] = False
|
||||
if "debug" not in config_dict:
|
||||
config_dict["debug"] = False
|
||||
if "enable_cpu_affinity" not in config_dict:
|
||||
config_dict["enable_cpu_affinity"] = False
|
||||
extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
|
||||
if len(extra_keys) > 0:
|
||||
raise ValueError(
|
||||
@ -163,7 +167,7 @@ class BaseConfig:
|
||||
self.distributed_type = SageMakerDistributedType(self.distributed_type)
|
||||
else:
|
||||
self.distributed_type = DistributedType(self.distributed_type)
|
||||
if self.dynamo_config is None:
|
||||
if getattr(self, "dynamo_config", None) is None:
|
||||
self.dynamo_config = {}
|
||||
|
||||
|
||||
@ -178,6 +182,7 @@ class ClusterConfig(BaseConfig):
|
||||
rdzv_backend: Optional[str] = "static"
|
||||
same_network: Optional[bool] = False
|
||||
main_training_function: str = "main"
|
||||
enable_cpu_affinity: bool = False
|
||||
|
||||
# args for deepspeed_plugin
|
||||
deepspeed_config: dict = None
|
||||
@ -187,6 +192,8 @@ class ClusterConfig(BaseConfig):
|
||||
megatron_lm_config: dict = None
|
||||
# args for ipex
|
||||
ipex_config: dict = None
|
||||
# args for mpirun
|
||||
mpirun_config: dict = None
|
||||
# args for TPU
|
||||
downcast_bf16: bool = False
|
||||
|
||||
@ -212,6 +219,8 @@ class ClusterConfig(BaseConfig):
|
||||
self.megatron_lm_config = {}
|
||||
if self.ipex_config is None:
|
||||
self.ipex_config = {}
|
||||
if self.mpirun_config is None:
|
||||
self.mpirun_config = {}
|
||||
return super().__post_init__()
|
||||
|
||||
|
||||
|
||||
@@ -68,7 +68,7 @@ def _convert_compute_environment(value):

def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "MULTI_MLU", "XLA"][value])


def _convert_dynamo_backend(value):

@@ -18,7 +18,7 @@ from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from ...utils import is_mlu_available, is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter

@@ -57,7 +57,15 @@ def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
    if is_mlu_available():
        num_mlus = torch.mlu.device_count()
        config["num_processes"] = num_mlus
        config["use_cpu"] = False
        if num_mlus > 1:
            config["distributed_type"] = "MULTI_MLU"
        else:
            config["distributed_type"] = "NO"
    elif torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
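`write_basic_config` is also exposed as a utility, so the default (now MLU-aware) config file can be generated without the interactive prompt. A small sketch of that use:

```python
# Sketch: writes a default_config.yaml without running `accelerate config` interactively.
from accelerate.utils import write_basic_config

# Picks MULTI_MLU / MULTI_GPU / NO automatically based on the detected devices,
# mirroring the branching shown above.
write_basic_config(mixed_precision="bf16")
```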
|
||||
@ -17,6 +17,7 @@
|
||||
import argparse
|
||||
import os
|
||||
import platform
|
||||
import subprocess
|
||||
|
||||
import numpy as np
|
||||
import psutil
|
||||
@ -25,7 +26,7 @@ import torch
|
||||
from accelerate import __version__ as version
|
||||
from accelerate.commands.config import default_config_file, load_config_from_file
|
||||
|
||||
from ..utils import is_npu_available, is_xpu_available
|
||||
from ..utils import is_mlu_available, is_npu_available, is_xpu_available
|
||||
|
||||
|
||||
def env_command_parser(subparsers=None):
|
||||
@ -47,6 +48,7 @@ def env_command(args):
|
||||
pt_version = torch.__version__
|
||||
pt_cuda_available = torch.cuda.is_available()
|
||||
pt_xpu_available = is_xpu_available()
|
||||
pt_mlu_available = is_mlu_available()
|
||||
pt_npu_available = is_npu_available()
|
||||
|
||||
accelerate_config = "Not found"
|
||||
@ -54,14 +56,25 @@ def env_command(args):
|
||||
if args.config_file is not None or os.path.isfile(default_config_file):
|
||||
accelerate_config = load_config_from_file(args.config_file).to_dict()
|
||||
|
||||
# if we can run which, get it
|
||||
command = None
|
||||
bash_location = "Not found"
|
||||
if os.name == "nt":
|
||||
command = ["where", "accelerate"]
|
||||
elif os.name == "posix":
|
||||
command = ["which", "accelerate"]
|
||||
if command is not None:
|
||||
bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip()
|
||||
info = {
|
||||
"`Accelerate` version": version,
|
||||
"Platform": platform.platform(),
|
||||
"`accelerate` bash location": bash_location,
|
||||
"Python version": platform.python_version(),
|
||||
"Numpy version": np.__version__,
|
||||
"PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
|
||||
"PyTorch XPU available": str(pt_xpu_available),
|
||||
"PyTorch NPU available": str(pt_npu_available),
|
||||
"PyTorch MLU available": str(pt_mlu_available),
|
||||
"System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
|
||||
}
|
||||
if pt_cuda_available:
|
||||
|
||||
@ -13,12 +13,11 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
from huggingface_hub import model_info
|
||||
from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
|
||||
|
||||
from accelerate import init_empty_weights
|
||||
from accelerate.commands.utils import CustomArgumentParser
|
||||
from accelerate.utils import (
|
||||
calculate_maximum_sizes,
|
||||
convert_bytes,
|
||||
@ -105,10 +104,11 @@ def create_empty_model(model_name: str, library_name: str, trust_remote_code: bo
|
||||
f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`"
|
||||
)
|
||||
print(f"Loading pretrained config for `{model_name}` from `transformers`...")
|
||||
if model_info.config is None:
|
||||
raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.")
|
||||
|
||||
auto_map = model_info.config.get("auto_map", False)
|
||||
config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token)
|
||||
|
||||
with init_empty_weights():
|
||||
# remote code could specify a specific `AutoModel` class in the `auto_map`
|
||||
constructor = AutoModel
|
||||
@ -181,7 +181,7 @@ def estimate_command_parser(subparsers=None):
|
||||
if subparsers is not None:
|
||||
parser = subparsers.add_parser("estimate-memory")
|
||||
else:
|
||||
parser = argparse.ArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
|
||||
parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
|
||||
|
||||
parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
|
||||
parser.add_argument(
|
||||
@ -204,6 +204,7 @@ def estimate_command_parser(subparsers=None):
|
||||
help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag
|
||||
should only be used for repositories you trust and in which you have read the code, as it will execute
|
||||
code present on the Hub on your local machine.""",
|
||||
default=False,
|
||||
)
|
||||
|
||||
if subparsers is not None:
|
||||
@ -211,6 +212,41 @@ def estimate_command_parser(subparsers=None):
|
||||
return parser
|
||||
|
||||
|
||||
def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict:
|
||||
"""
|
||||
Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of
|
||||
1.
|
||||
|
||||
Args:
|
||||
bytes (`int`):
|
||||
The size of the model being trained.
|
||||
mixed_precision (`str`):
|
||||
The mixed precision that would be ran.
|
||||
msamp_config (`str`):
|
||||
The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`.
|
||||
"""
|
||||
memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1}
|
||||
fp32_size = bytes
|
||||
fp16_size = bytes // 2
|
||||
|
||||
if mixed_precision == "float32":
|
||||
memory_sizes["model"] = fp32_size
|
||||
memory_sizes["gradients"] = fp32_size
|
||||
memory_sizes["optimizer"] = fp32_size * 2
|
||||
memory_sizes["step"] = fp32_size * 4
|
||||
elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None):
|
||||
# With native `TransformersEngine`, there is no memory savings with FP8
|
||||
# With mixed precision training, the model has weights stored
|
||||
# in FP16 and FP32
|
||||
memory_sizes["model"] = fp32_size
|
||||
# 1.5 from weight gradient + computation (GEMM)
|
||||
memory_sizes["gradients"] = fp32_size + fp16_size
|
||||
# 2x from optimizer states
|
||||
memory_sizes["optimizer"] = fp32_size * 2 # Optimizer states
|
||||
memory_sizes["step"] = memory_sizes["optimizer"]
|
||||
return memory_sizes
|
||||
|
||||
|
||||
def gather_data(args):
|
||||
"Creates an empty model and gathers the data for the sizes"
|
||||
try:
|
||||
@ -232,6 +268,7 @@ def gather_data(args):
|
||||
for dtype in args.dtypes:
|
||||
dtype_total_size = total_size
|
||||
dtype_largest_layer = largest_layer[0]
|
||||
dtype_training_size = estimate_training_usage(dtype_total_size, dtype)
|
||||
if dtype == "float16":
|
||||
dtype_total_size /= 2
|
||||
dtype_largest_layer /= 2
|
||||
@ -241,7 +278,6 @@ def gather_data(args):
|
||||
elif dtype == "int4":
|
||||
dtype_total_size /= 8
|
||||
dtype_largest_layer /= 8
|
||||
dtype_training_size = dtype_total_size * 4
|
||||
data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size])
|
||||
return data
|
||||
|
||||
@ -252,6 +288,9 @@ def estimate_command(args):
|
||||
for i, item in enumerate(row):
|
||||
if isinstance(item, (int, float)):
|
||||
row[i] = convert_bytes(item)
|
||||
elif isinstance(item, dict):
|
||||
training_usage = max(item.values())
|
||||
row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A"
|
||||
|
||||
headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]
|
||||
|
||||
|
||||
@ -28,6 +28,7 @@ import torch
|
||||
from accelerate.commands.config import default_config_file, load_config_from_file
|
||||
from accelerate.commands.config.config_args import SageMakerConfig
|
||||
from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
|
||||
from accelerate.commands.utils import CustomArgumentParser
|
||||
from accelerate.state import get_int_from_env
|
||||
from accelerate.utils import (
|
||||
ComputeEnvironment,
|
||||
@ -35,13 +36,15 @@ from accelerate.utils import (
|
||||
PrepareForLaunch,
|
||||
_filter_args,
|
||||
check_cuda_p2p_ib_support,
|
||||
convert_dict_to_env_variables,
|
||||
is_bf16_available,
|
||||
is_deepspeed_available,
|
||||
is_mlu_available,
|
||||
is_npu_available,
|
||||
is_rich_available,
|
||||
is_sagemaker_available,
|
||||
is_torch_version,
|
||||
is_tpu_available,
|
||||
is_torch_xla_available,
|
||||
is_xpu_available,
|
||||
patch_environment,
|
||||
prepare_deepspeed_cmd_env,
|
||||
@ -63,80 +66,93 @@ if is_rich_available():
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
options_to_group = {
|
||||
"--multi-gpu": "Distributed GPUs",
|
||||
"--tpu": "TPU",
|
||||
"--use_deepspeed": "DeepSpeed Arguments",
|
||||
"--use_fsdp": "FSDP Arguments",
|
||||
"--use_megatron_lm": "Megatron-LM Arguments",
|
||||
"multi_gpu": "Distributed GPUs",
|
||||
"tpu": "TPU",
|
||||
"use_deepspeed": "DeepSpeed Arguments",
|
||||
"use_fsdp": "FSDP Arguments",
|
||||
"use_megatron_lm": "Megatron-LM Arguments",
|
||||
}
|
||||
|
||||
|
||||
def clean_option(option):
|
||||
"Finds all cases of - after the first two characters and changes them to _"
|
||||
if option.startswith("--"):
|
||||
return option[:3] + option[3:].replace("-", "_")
|
||||
return option[2:].replace("-", "_")
|
||||
|
||||
|
||||
class _CustomHelpAction(argparse._HelpAction):
|
||||
class CustomHelpFormatter(argparse.HelpFormatter):
|
||||
"""
|
||||
This is a custom help action that will hide all arguments that are not used in the command line when the help is
|
||||
This is a custom help formatter that will hide all arguments that are not used in the command line when the help is
|
||||
called. This is useful for the case where the user is using a specific platform and only wants to see the arguments
|
||||
for that platform.
|
||||
"""
|
||||
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
|
||||
args = sys.argv[2:]
|
||||
else:
|
||||
args = sys.argv[1:]
|
||||
opts = parser._actions
|
||||
titles = [
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.titles = [
|
||||
"Hardware Selection Arguments",
|
||||
"Resource Selection Arguments",
|
||||
"Training Paradigm Arguments",
|
||||
"positional arguments",
|
||||
"optional arguments",
|
||||
]
|
||||
if len(args) > 1:
|
||||
used_platforms = [arg for arg in args if arg in options_to_group.keys()]
|
||||
args = list(map(clean_option, args))
|
||||
used_titles = [options_to_group[o] for o in used_platforms]
|
||||
for i, arg in enumerate(opts):
|
||||
# If the argument's container is outside of the used titles, hide it
|
||||
if arg.container.title not in titles + used_titles:
|
||||
setattr(opts[i], "help", argparse.SUPPRESS)
|
||||
# If the argument is hardware selection, but not being passed, hide it
|
||||
elif arg.container.title == "Hardware Selection Arguments":
|
||||
if set(arg.option_strings).isdisjoint(set(args)):
|
||||
setattr(opts[i], "help", argparse.SUPPRESS)
|
||||
else:
|
||||
setattr(opts[i], "help", arg.help + " (currently selected)")
|
||||
# If the argument is a training paradigm, but not being passed, hide it
|
||||
elif arg.container.title == "Training Paradigm Arguments":
|
||||
if set(arg.option_strings).isdisjoint(set(used_platforms)):
|
||||
setattr(opts[i], "help", argparse.SUPPRESS)
|
||||
else:
|
||||
setattr(opts[i], "help", arg.help + " (currently selected)")
|
||||
for i, group in enumerate(list(parser._action_groups)):
|
||||
# If all arguments in the group are hidden, hide the group
|
||||
if all([arg.help == argparse.SUPPRESS for arg in group._group_actions]):
|
||||
parser._action_groups.remove(group)
|
||||
|
||||
super().__call__(parser, namespace, values, option_string)
|
||||
def add_argument(self, action: argparse.Action):
|
||||
if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
|
||||
args = sys.argv[2:]
|
||||
else:
|
||||
args = sys.argv[1:]
|
||||
|
||||
if len(args) > 1:
|
||||
args = list(map(clean_option, args))
|
||||
used_platforms = [arg for arg in args if arg in options_to_group.keys()]
|
||||
used_titles = [options_to_group[o] for o in used_platforms]
|
||||
if action.container.title not in self.titles + used_titles:
|
||||
action.help = argparse.SUPPRESS
|
||||
elif action.container.title == "Hardware Selection Arguments":
|
||||
if set(action.option_strings).isdisjoint(set(args)):
|
||||
action.help = argparse.SUPPRESS
|
||||
else:
|
||||
action.help = action.help + " (currently selected)"
|
||||
elif action.container.title == "Training Paradigm Arguments":
|
||||
if set(action.option_strings).isdisjoint(set(args)):
|
||||
action.help = argparse.SUPPRESS
|
||||
else:
|
||||
action.help = action.help + " (currently selected)"
|
||||
|
||||
action.option_strings = [s for s in action.option_strings if "-" not in s[2:]]
|
||||
super().add_argument(action)
|
||||
|
||||
def end_section(self):
|
||||
if len(self._current_section.items) < 2:
|
||||
self._current_section.items = []
|
||||
self._current_section.heading = ""
|
||||
super().end_section()
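For reference, the hiding above relies on stock `argparse` behaviour: an action whose help is set to `argparse.SUPPRESS` is simply omitted from the rendered help. A tiny self-contained illustration (not part of the diff):

    import argparse

    demo = argparse.ArgumentParser(prog="demo")
    demo.add_argument("--hidden", help=argparse.SUPPRESS)            # omitted from --help
    demo.add_argument("--visible", help="Shown in the help output.")
    demo.print_help()                                                # only -h/--help and --visible appear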
def launch_command_parser(subparsers=None):
|
||||
description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)"
|
||||
if subparsers is not None:
|
||||
parser = subparsers.add_parser("launch", add_help=False, allow_abbrev=False)
|
||||
parser = subparsers.add_parser(
|
||||
"launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter
|
||||
)
|
||||
else:
|
||||
parser = argparse.ArgumentParser("Accelerate launch command", add_help=False, allow_abbrev=False)
|
||||
parser = CustomArgumentParser(
|
||||
"Accelerate launch command",
|
||||
description=description,
|
||||
add_help=False,
|
||||
allow_abbrev=False,
|
||||
formatter_class=CustomHelpFormatter,
|
||||
)
|
||||
|
||||
parser.register("action", "help", _CustomHelpAction)
|
||||
parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
|
||||
|
||||
parser.add_argument(
|
||||
"--config_file", default=None, help="The config file to use for the default values in the launching script."
|
||||
"--config_file",
|
||||
default=None,
|
||||
help="The config file to use for the default values in the launching script.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--quiet",
|
||||
@@ -191,6 +207,12 @@ def launch_command_parser(subparsers=None):
|
||||
default=None,
|
||||
help="The number of CPU threads per process. Can be tuned for optimal performance.",
|
||||
)
|
||||
resource_args.add_argument(
|
||||
"--enable_cpu_affinity",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.",
|
||||
)
|
||||
|
||||
# Dynamo arguments
|
||||
resource_args.add_argument(
|
||||
@@ -625,6 +647,22 @@ def launch_command_parser(subparsers=None):
|
||||
),
|
||||
)
|
||||
|
||||
# MPI arguments
|
||||
mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU")
|
||||
mpirun_args.add_argument(
|
||||
"--mpirun_hostfile",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will "
|
||||
"get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.",
|
||||
)
|
||||
mpirun_args.add_argument(
|
||||
"--mpirun_ccl",
|
||||
type=int,
|
||||
default=1,
|
||||
help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.",
|
||||
)
|
||||
|
||||
# Other arguments of the training scripts
|
||||
parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")
|
||||
|
||||
@@ -667,6 +705,7 @@ def multi_gpu_launcher(args):
|
||||
distrib_run.get_args_parser(),
|
||||
["--training_script", args.training_script, "--training_script_args", args.training_script_args],
|
||||
)
|
||||
|
||||
with patch_environment(**current_env):
|
||||
try:
|
||||
distrib_run.run(args)
|
||||
@ -684,6 +723,8 @@ def deepspeed_launcher(args):
|
||||
|
||||
if not is_deepspeed_available():
|
||||
raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")
|
||||
else:
|
||||
from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME
|
||||
|
||||
cmd, current_env = prepare_deepspeed_cmd_env(args)
|
||||
if not check_cuda_p2p_ib_support():
|
||||
@ -699,11 +740,10 @@ def deepspeed_launcher(args):
|
||||
logger.warning(message)
|
||||
|
||||
if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
|
||||
with open(".deepspeed_env", "a") as f:
|
||||
for key, value in current_env.items():
|
||||
if ";" in value or " " in value:
|
||||
continue
|
||||
f.write(f"{key}={value}\n")
|
||||
with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f:
|
||||
valid_env_items = convert_dict_to_env_variables(current_env)
|
||||
if len(valid_env_items) > 1:
|
||||
f.writelines(valid_env_items)
|
||||
|
||||
process = subprocess.Popen(cmd, env=current_env)
|
||||
process.wait()
|
||||
@ -868,10 +908,15 @@ def _validate_launch_command(args):
|
||||
args.multi_gpu = (
|
||||
True
|
||||
if defaults.distributed_type
|
||||
in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU)
|
||||
in (
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_XPU,
|
||||
)
|
||||
else False
|
||||
)
|
||||
args.tpu = defaults.distributed_type == DistributedType.TPU
|
||||
args.tpu = defaults.distributed_type == DistributedType.XLA
|
||||
args.use_fsdp = defaults.distributed_type == DistributedType.FSDP
|
||||
args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM
|
||||
args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False
|
||||
@ -906,6 +951,8 @@ def _validate_launch_command(args):
|
||||
setattr(args, k, defaults.dynamo_config[k])
|
||||
for k in defaults.ipex_config:
|
||||
setattr(args, k, defaults.ipex_config[k])
|
||||
for k in defaults.mpirun_config:
|
||||
setattr(args, k, defaults.mpirun_config[k])
|
||||
continue
|
||||
|
||||
# Those args are handled separately
|
||||
@ -924,14 +971,16 @@ def _validate_launch_command(args):
|
||||
args.mixed_precision = defaults.mixed_precision
|
||||
mp_from_config_flag = True
|
||||
else:
|
||||
native_amp = False
|
||||
err = "{mode} mixed precision requires {requirement}"
|
||||
if args.use_cpu or (args.use_xpu and torch.xpu.is_available()):
|
||||
native_amp = is_torch_version(">=", "1.10")
|
||||
else:
|
||||
native_amp = is_bf16_available(True)
|
||||
if args.mixed_precision == "bf16" and not native_amp and not (args.tpu and is_tpu_available()):
|
||||
raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device."))
|
||||
if (
|
||||
args.mixed_precision == "bf16"
|
||||
and not native_amp
|
||||
and not (args.tpu and is_torch_xla_available(check_is_tpu=True))
|
||||
):
|
||||
raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")
|
||||
|
||||
# Silently set the default here
|
||||
if args.dynamo_backend is None:
|
||||
@ -940,6 +989,8 @@ def _validate_launch_command(args):
|
||||
if args.num_processes is None:
|
||||
if args.use_xpu and is_xpu_available():
|
||||
args.num_processes = torch.xpu.device_count()
|
||||
elif is_mlu_available():
|
||||
args.num_processes = torch.mlu.device_count()
|
||||
elif is_npu_available():
|
||||
args.num_processes = torch.npu.device_count()
|
||||
else:
|
||||
@ -949,6 +1000,7 @@ def _validate_launch_command(args):
|
||||
args.debug = False
|
||||
if not args.multi_gpu and (
|
||||
(args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1)
|
||||
or (is_mlu_available() and torch.mlu.device_count() > 1)
|
||||
or (is_npu_available() and torch.npu.device_count() > 1)
|
||||
or (torch.cuda.device_count() > 1)
|
||||
):
|
||||
|
||||
@ -1 +1,14 @@
|
||||
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from .selection_menu import BulletMenu
|
||||
|
||||
@ -30,7 +30,7 @@ def mark(key: str):
|
||||
def decorator(func):
|
||||
handle = getattr(func, "handle_key", [])
|
||||
handle += [key]
|
||||
setattr(func, "handle_key", handle)
|
||||
func.handle_key = handle
|
||||
return func
|
||||
|
||||
return decorator
|
||||
@ -44,7 +44,7 @@ def mark_multiple(*keys: List[str]):
|
||||
def decorator(func):
|
||||
handle = getattr(func, "handle_key", [])
|
||||
handle += keys
|
||||
setattr(func, "handle_key", handle)
|
||||
func.handle_key = handle
|
||||
return func
|
||||
|
||||
return decorator
|
||||
@ -58,8 +58,8 @@ class KeyHandler(type):
|
||||
def __new__(cls, name, bases, attrs):
|
||||
new_cls = super().__new__(cls, name, bases, attrs)
|
||||
if not hasattr(new_cls, "key_handler"):
|
||||
setattr(new_cls, "key_handler", {})
|
||||
setattr(new_cls, "handle_input", KeyHandler.handle_input)
|
||||
new_cls.key_handler = {}
|
||||
new_cls.handle_input = KeyHandler.handle_input
|
||||
|
||||
for value in attrs.values():
|
||||
handled_keys = getattr(value, "handle_key", [])
|
||||
|
||||
@ -16,7 +16,6 @@
|
||||
Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet
|
||||
"""
|
||||
|
||||
|
||||
import os
|
||||
import string
|
||||
import sys
|
||||
|
||||
@ -15,6 +15,7 @@
|
||||
"""
|
||||
Main driver for the selection menu, based on https://github.com/bchao1/bullet
|
||||
"""
|
||||
|
||||
import builtins
|
||||
import sys
|
||||
|
||||
|
||||
@ -15,9 +15,8 @@
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import os
|
||||
|
||||
from accelerate.test_utils import execute_subprocess_async
|
||||
from accelerate.test_utils import execute_subprocess_async, path_in_accelerate_package
|
||||
|
||||
|
||||
def test_command_parser(subparsers=None):
|
||||
@ -43,15 +42,15 @@ def test_command_parser(subparsers=None):
|
||||
|
||||
|
||||
def test_command(args):
|
||||
script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
|
||||
script_name = path_in_accelerate_package("test_utils", "scripts", "test_script.py")
|
||||
|
||||
if args.config_file is None:
|
||||
test_args = script_name
|
||||
test_args = [script_name]
|
||||
else:
|
||||
test_args = f"--config_file={args.config_file} {script_name}"
|
||||
test_args = f"--config_file={args.config_file} {script_name}".split()
|
||||
|
||||
cmd = ["accelerate-launch"] + test_args.split()
|
||||
result = execute_subprocess_async(cmd, env=os.environ.copy())
|
||||
cmd = ["accelerate-launch"] + test_args
|
||||
result = execute_subprocess_async(cmd)
|
||||
if result.returncode == 0:
|
||||
print("Test is a success! You are ready for your distributed training!")
|
||||
|
||||
|
||||
@ -112,7 +112,7 @@ def tpu_command_launcher(args):
|
||||
raise ValueError("You must specify either a command file or a command to run on the pod.")
|
||||
|
||||
if args.command_file:
|
||||
with open(args.command_file, "r") as f:
|
||||
with open(args.command_file) as f:
|
||||
args.command = [f.read().splitlines()]
|
||||
|
||||
# To turn list of lists into list of strings
src/accelerate/commands/utils.py (new file, 120 lines)
@@ -0,0 +1,120 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
|
||||
|
||||
class _StoreAction(argparse.Action):
|
||||
"""
|
||||
Custom action that allows for `-` or `_` to be passed in for an argument.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
new_option_strings = []
|
||||
for option_string in self.option_strings:
|
||||
new_option_strings.append(option_string)
|
||||
if "_" in option_string[2:]:
|
||||
# Add `-` version to the option string
|
||||
new_option_strings.append(option_string.replace("_", "-"))
|
||||
self.option_strings = new_option_strings
|
||||
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
setattr(namespace, self.dest, values)
|
||||
|
||||
|
||||
class _StoreConstAction(_StoreAction):
|
||||
"""
|
||||
Same as `argparse._StoreConstAction` but uses the custom `_StoreAction`.
|
||||
"""
|
||||
|
||||
def __init__(self, option_strings, dest, const, default=None, required=False, help=None):
|
||||
super().__init__(
|
||||
option_strings=option_strings,
|
||||
dest=dest,
|
||||
nargs=0,
|
||||
const=const,
|
||||
default=default,
|
||||
required=required,
|
||||
help=help,
|
||||
)
|
||||
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
setattr(namespace, self.dest, self.const)
|
||||
|
||||
|
||||
class _StoreTrueAction(_StoreConstAction):
|
||||
"""
|
||||
Same as `argparse._StoreTrueAction` but uses the custom `_StoreConstAction`.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
option_strings,
|
||||
dest,
|
||||
default=None,
|
||||
required=False,
|
||||
help=None,
|
||||
):
|
||||
super().__init__(
|
||||
option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help
|
||||
)
|
||||
|
||||
|
||||
class CustomArgumentGroup(argparse._ArgumentGroup):
|
||||
"""
|
||||
Custom argument group that allows for the use of `-` or `_` in arguments passed and overrides the help for each
|
||||
when applicable.
|
||||
"""
|
||||
|
||||
def _add_action(self, action):
|
||||
args = vars(action)
|
||||
if isinstance(action, argparse._StoreTrueAction):
|
||||
action = _StoreTrueAction(
|
||||
args["option_strings"], args["dest"], args["default"], args["required"], args["help"]
|
||||
)
|
||||
elif isinstance(action, argparse._StoreConstAction):
|
||||
action = _StoreConstAction(
|
||||
args["option_strings"],
|
||||
args["dest"],
|
||||
args["const"],
|
||||
args["default"],
|
||||
args["required"],
|
||||
args["help"],
|
||||
)
|
||||
elif isinstance(action, argparse._StoreAction):
|
||||
action = _StoreAction(**args)
|
||||
action = super()._add_action(action)
|
||||
return action
|
||||
|
||||
|
||||
class CustomArgumentParser(argparse.ArgumentParser):
|
||||
"""
|
||||
Custom argument parser that allows for the use of `-` or `_` in arguments passed and overrides the help for each
|
||||
when applicable.
|
||||
"""
|
||||
|
||||
def add_argument(self, *args, **kwargs):
|
||||
if "action" in kwargs:
|
||||
# Translate action -> class
|
||||
if kwargs["action"] == "store_true":
|
||||
kwargs["action"] = _StoreTrueAction
|
||||
else:
|
||||
kwargs["action"] = _StoreAction
|
||||
super().add_argument(*args, **kwargs)
|
||||
|
||||
def add_argument_group(self, *args, **kwargs):
|
||||
group = CustomArgumentGroup(self, *args, **kwargs)
|
||||
self._action_groups.append(group)
|
||||
return group
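A hypothetical usage sketch of the parser defined above: because `_StoreAction` registers a dashed alias for every underscored option string, both spellings should resolve to the same destination.

    parser = CustomArgumentParser("demo")
    parser.add_argument("--num_processes", type=int, default=1)
    parser.add_argument("--use_cpu", action="store_true")

    args = parser.parse_args(["--num-processes", "2", "--use-cpu"])
    print(args.num_processes, args.use_cpu)  # 2 True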
@@ -20,7 +20,7 @@ import torch
|
||||
from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler
|
||||
|
||||
from .logging import get_logger
|
||||
from .state import AcceleratorState, DistributedType, GradientState, is_tpu_available
|
||||
from .state import AcceleratorState, DistributedType, GradientState, is_torch_xla_available
|
||||
from .utils import (
|
||||
RNGType,
|
||||
broadcast,
|
||||
@@ -78,15 +78,16 @@ class SeedableRandomSampler(RandomSampler):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.epoch = 0
|
||||
self.seed = torch.random.initial_seed()
|
||||
self.initial_seed = torch.random.initial_seed()
|
||||
|
||||
def __iter__(self):
|
||||
if self.generator is None:
|
||||
self.generator = torch.Generator()
|
||||
else:
|
||||
self.seed = self.generator.initial_seed()
|
||||
self.generator.manual_seed(self.initial_seed)
|
||||
|
||||
# Allow `self.epoch` to modify the seed of the generator
|
||||
seed = self.epoch + self.seed
|
||||
seed = self.epoch + self.initial_seed
|
||||
# print("Setting seed at epoch", self.epoch, seed)
|
||||
self.generator.manual_seed(seed)
|
||||
yield from super().__iter__()
|
||||
self.set_epoch(self.epoch + 1)
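A rough usage sketch (assumes a map-style dataset and that `SeedableRandomSampler` is imported from `accelerate.data_loader`): because `__iter__` re-seeds from `initial_seed + epoch`, rewinding the epoch reproduces the exact shuffle.

    import torch
    from torch.utils.data import TensorDataset

    dataset = TensorDataset(torch.arange(10))
    sampler = SeedableRandomSampler(dataset)
    first_order = list(sampler)   # iterating also bumps sampler.epoch to 1
    sampler.set_epoch(0)          # rewind -> same seed -> same permutation
    assert list(sampler) == first_order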
@@ -408,7 +409,7 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
|
||||
A random number generator to keep synchronized across processes.
|
||||
skip_batches (`int`, *optional*, defaults to 0):
|
||||
The number of batches to skip at the beginning.
|
||||
kwargs:
|
||||
**kwargs (additional keyword arguments, *optional*):
|
||||
All other keyword arguments to pass to the regular `DataLoader` initialization.
|
||||
|
||||
**Available attributes:**
|
||||
@ -500,7 +501,7 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
|
||||
return len(self.dataset)
|
||||
|
||||
|
||||
if is_tpu_available(check_device=False):
|
||||
if is_torch_xla_available():
|
||||
import torch_xla.distributed.parallel_loader as xpl
|
||||
|
||||
class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):
|
||||
@ -809,7 +810,8 @@ def prepare_data_loader(
|
||||
use_seedable_sampler (`bool`, *optional*, defaults to `False`):
|
||||
Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better
|
||||
reproducibility. Comes at a cost of potentially different performance due to different shuffling
|
||||
algorithms but ensures results will be the *exact* same.
|
||||
algorithms but ensures results will be the *exact* same. Should be paired with `set_seed()` at every
|
||||
`self.set_epoch` call.
|
||||
|
||||
Returns:
|
||||
`torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches
|
||||
@@ -837,12 +839,26 @@ def prepare_data_loader(
|
||||
process_index = state.process_index
|
||||
|
||||
# Sanity check
|
||||
batch_size = dataloader.batch_size if dataloader.batch_size is not None else dataloader.batch_sampler.batch_size
|
||||
if split_batches and batch_size > 1 and batch_size % num_processes != 0:
|
||||
raise ValueError(
|
||||
f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) "
|
||||
f"needs to be a round multiple of the number of processes ({num_processes})."
|
||||
)
|
||||
if split_batches:
|
||||
if dataloader.batch_size is not None:
|
||||
batch_size_for_check = dataloader.batch_size
|
||||
else:
|
||||
# For custom batch_sampler
|
||||
if hasattr(dataloader.batch_sampler, "batch_size"):
|
||||
batch_size_for_check = dataloader.batch_sampler.batch_size
|
||||
else:
|
||||
raise ValueError(
|
||||
"In order to use `split_batches==True` you must have a `batch_size` attribute either in the passed "
|
||||
"`dataloader` or `dataloader.batch_sampler` objects, and it has to return a natural number. "
|
||||
"Your `dataloader.batch_size` is None and `dataloader.batch_sampler` "
|
||||
f"(`{type(dataloader.batch_sampler)}`) does not have the `batch_size` attribute set."
|
||||
)
|
||||
|
||||
if batch_size_for_check > 1 and batch_size_for_check % num_processes != 0:
|
||||
raise ValueError(
|
||||
f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) "
|
||||
f"needs to be a round multiple of the number of processes ({num_processes})."
|
||||
)
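The guard above is plain arithmetic; a tiny illustration with made-up numbers: with 4 processes, a global batch of 32 splits to 8 per process, while 30 would raise.

    num_processes = 4
    for global_batch in (32, 30):
        if global_batch > 1 and global_batch % num_processes != 0:
            print(global_batch, "-> ValueError in split_batches mode")
        else:
            print(global_batch, "->", global_batch // num_processes, "per process")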
new_dataset = dataloader.dataset
|
||||
# Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it
|
||||
@ -866,6 +882,11 @@ def prepare_data_loader(
|
||||
generator=getattr(sampler, "generator", torch.Generator()),
|
||||
)
|
||||
|
||||
if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA:
|
||||
# isinstance(dataloader.sampler, RandomSampler) indicates the original dataloader has `shuffle` enabled.
|
||||
generator = torch.Generator().manual_seed(42)
|
||||
dataloader.generator = generator
|
||||
dataloader.sampler.generator = generator
|
||||
# No change if no multiprocess
|
||||
if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:
|
||||
if isinstance(new_dataset, IterableDataset):
|
||||
@ -913,11 +934,6 @@ def prepare_data_loader(
|
||||
kwargs["batch_size"] = (
|
||||
dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size
|
||||
)
|
||||
if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler:
|
||||
if sampler_is_batch_sampler:
|
||||
dataloader.sampler.sampler = sampler
|
||||
else:
|
||||
dataloader.batch_sampler.sampler = sampler
|
||||
if dispatch_batches:
|
||||
kwargs.pop("generator")
|
||||
dataloader = DataLoaderDispatcher(
|
||||
@ -931,7 +947,7 @@ def prepare_data_loader(
|
||||
elif sampler_is_batch_sampler:
|
||||
dataloader = DataLoaderShard(
|
||||
new_dataset,
|
||||
device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,
|
||||
device=device if put_on_device and state.distributed_type != DistributedType.XLA else None,
|
||||
sampler=new_batch_sampler,
|
||||
batch_size=dataloader.batch_size,
|
||||
rng_types=rng_types,
|
||||
@ -942,7 +958,7 @@ def prepare_data_loader(
|
||||
else:
|
||||
dataloader = DataLoaderShard(
|
||||
new_dataset,
|
||||
device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,
|
||||
device=device if put_on_device and state.distributed_type != DistributedType.XLA else None,
|
||||
batch_sampler=new_batch_sampler,
|
||||
rng_types=rng_types,
|
||||
synchronized_generator=synchronized_generator,
|
||||
@ -950,7 +966,14 @@ def prepare_data_loader(
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
if state.distributed_type == DistributedType.TPU:
|
||||
if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler:
|
||||
if sampler_is_batch_sampler:
|
||||
dataloader.sampler.sampler = sampler
|
||||
else:
|
||||
dataloader.batch_sampler.sampler = sampler
|
||||
if hasattr(dataloader.batch_sampler, "batch_sampler"):
|
||||
dataloader.batch_sampler.batch_sampler.sampler = sampler
|
||||
if state.distributed_type == DistributedType.XLA:
|
||||
return MpDeviceLoaderWrapper(dataloader, device)
|
||||
return dataloader
|
||||
|
||||
|
||||
@ -374,7 +374,7 @@ class AlignDevicesHook(ModelHook):
|
||||
# this dictionary to allow the garbage collector to do its job.
|
||||
for value_pointer, device in self.tied_pointers_to_remove:
|
||||
del self.tied_params_map[value_pointer][device]
|
||||
self.tied_pointers_to_remove = None
|
||||
self.tied_pointers_to_remove = set()
|
||||
|
||||
if self.io_same_device and self.input_device is not None:
|
||||
output = send_to_device(output, self.input_device, skip_keys=self.skip_keys)
|
||||
src/accelerate/inference.py (new file, 188 lines)
@@ -0,0 +1,188 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import math
|
||||
from types import MethodType
|
||||
from typing import Any, Dict, List, Optional, Tuple, Union
|
||||
|
||||
from .state import PartialState
|
||||
from .utils import (
|
||||
calculate_maximum_sizes,
|
||||
convert_bytes,
|
||||
copy_tensor_to_devices,
|
||||
ignorant_find_batch_size,
|
||||
infer_auto_device_map,
|
||||
is_pippy_available,
|
||||
pad_input_tensors,
|
||||
send_to_device,
|
||||
)
|
||||
|
||||
|
||||
if is_pippy_available():
|
||||
from pippy.IR import Pipe, PipeSplitWrapper, annotate_split_points
|
||||
from pippy.PipelineStage import PipelineStage
|
||||
|
||||
|
||||
def generate_device_map(model, num_processes: int = 1, no_split_module_classes=None, max_memory: dict = None):
|
||||
"""
|
||||
Calculates the device map for `model` with an offset for PiPPy
|
||||
"""
|
||||
if num_processes == 1:
|
||||
return infer_auto_device_map(model, no_split_module_classes=no_split_module_classes, clean_result=False)
|
||||
if max_memory is None:
|
||||
model_size, shared = calculate_maximum_sizes(model)
|
||||
|
||||
# Split into `n` chunks for each GPU
|
||||
memory = (model_size + shared[0]) / num_processes
|
||||
memory = convert_bytes(memory)
|
||||
value, ending = memory.split(" ")
|
||||
|
||||
# Add a chunk to deal with potential extra shared memory instances
|
||||
memory = math.ceil(float(value)) * 1.1
|
||||
memory = f"{memory} {ending}"
|
||||
max_memory = {i: memory for i in range(num_processes)}
|
||||
device_map = infer_auto_device_map(
|
||||
model,
|
||||
max_memory=max_memory,
|
||||
no_split_module_classes=no_split_module_classes,
|
||||
clean_result=False,
|
||||
)
|
||||
return device_map
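A sketch of the per-rank budget arithmetic above with made-up numbers (the real code round-trips through `convert_bytes` before padding by 10%):

    import math

    model_size, shared = 10 * 1024**3, (512 * 1024**2,)   # 10 GiB weights + 0.5 GiB shared
    num_processes = 4
    per_rank = (model_size + shared[0]) / num_processes    # bytes each rank must hold
    padded = math.ceil(per_rank / 1024**3) * 1.1           # +10% slack, as in the code above
    max_memory = {i: f"{padded} GiB" for i in range(num_processes)}
    print(max_memory)                                      # roughly 3.3 GiB per rank here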
def find_pippy_batch_size(args, kwargs):
|
||||
found_batch_size = None
|
||||
if args is not None:
|
||||
for arg in args:
|
||||
found_batch_size = ignorant_find_batch_size(arg)
|
||||
if found_batch_size is not None:
|
||||
break
|
||||
if kwargs is not None and found_batch_size is None:
|
||||
for kwarg in kwargs.values():
|
||||
found_batch_size = ignorant_find_batch_size(kwarg)
|
||||
if found_batch_size is not None:
|
||||
break
|
||||
return found_batch_size
|
||||
|
||||
|
||||
def build_pipeline(model, split_points, args, kwargs, num_chunks):
|
||||
"""
|
||||
Attaches the split points to the model based on `self.device_map` and generates a `PipelineStage`. Requires passing
|
||||
in needed `args` and `kwargs` as the model needs on the CPU.
|
||||
|
||||
Users can pass in a custom `num_chunks` as an optional hyper-parameter. By default it will use
|
||||
`AcceleratorState.num_processes`
|
||||
"""
|
||||
# We need to annotate the split points in the model for PiPPy
|
||||
state = PartialState()
|
||||
annotate_split_points(model, {split_point: PipeSplitWrapper.SplitPoint.BEGINNING for split_point in split_points})
|
||||
found_batch_size = find_pippy_batch_size(args, kwargs)
|
||||
if found_batch_size != num_chunks:
|
||||
if args is not None:
|
||||
args = pad_input_tensors(args, found_batch_size, num_chunks)
|
||||
if kwargs is not None:
|
||||
kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
|
||||
pipe = Pipe.from_tracing(model, num_chunks=num_chunks, example_args=args, example_kwargs=kwargs)
|
||||
stage = PipelineStage(pipe, state.local_process_index, device=state.device)
|
||||
|
||||
return stage
|
||||
|
||||
|
||||
def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs):
|
||||
state = PartialState()
|
||||
output = None
|
||||
|
||||
if state.num_processes == 1:
|
||||
output = forward(*args, **kwargs)
|
||||
elif state.is_local_main_process:
|
||||
found_batch_size = find_pippy_batch_size(args, kwargs)
|
||||
if found_batch_size is None:
|
||||
raise ValueError("Could not find batch size from args or kwargs")
|
||||
else:
|
||||
if found_batch_size != num_chunks:
|
||||
args = pad_input_tensors(args, found_batch_size, num_chunks)
|
||||
kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
|
||||
forward(*args, **kwargs)
|
||||
elif state.is_last_process:
|
||||
output = forward()
|
||||
else:
|
||||
forward()
|
||||
if gather_output:
|
||||
# Each node will get a copy of the full output which is only on the last GPU
|
||||
output = copy_tensor_to_devices(output)
|
||||
return output
|
||||
|
||||
|
||||
def prepare_pippy(
|
||||
model,
|
||||
split_points: Optional[Union[str, List[str]]] = "auto",
|
||||
no_split_module_classes: Optional[List[str]] = None,
|
||||
example_args: Optional[Tuple[Any]] = (),
|
||||
example_kwargs: Optional[Dict[str, Any]] = None,
|
||||
num_chunks: Optional[int] = None,
|
||||
gather_output: Optional[bool] = False,
|
||||
):
|
||||
"""
|
||||
Wraps `model` for pipeline parallel inference.
|
||||
|
||||
Args:
|
||||
model (`torch.nn.Module`):
|
||||
A model we want to split for pipeline-parallel inference
|
||||
split_points (`str` or `List[str]`, defaults to 'auto'):
|
||||
How to generate the split points and chunk the model across each GPU. 'auto' will find the best balanced
|
||||
split given any model. Should be a list of layer names in the model to split by otherwise.
|
||||
no_split_module_classes (`List[str]`):
|
||||
A list of class names for layers we don't want to be split.
|
||||
example_args (tuple of model inputs):
|
||||
The expected inputs for the model that uses order-based inputs. Recommended to use this method if possible.
|
||||
example_kwargs (dict of model inputs)
|
||||
The expected inputs for the model that uses dictionary-based inputs. This is a *highly* limiting structure
|
||||
that requires the same keys be present at *all* inference calls. Not recommended unless the prior condition
|
||||
is true for all cases.
|
||||
num_chunks (`int`, defaults to the number of available GPUs):
|
||||
The number of different stages the Pipeline will have. By default it will assign one chunk per GPU, but
|
||||
this can be tuned and played with. In general one should have num_chunks >= num_gpus.
|
||||
gather_output (`bool`, defaults to `False`):
|
||||
If `True`, the output from the last GPU (which holds the true outputs) is sent across to all GPUs.
|
||||
"""
|
||||
if not is_pippy_available():
|
||||
raise ImportError(
|
||||
"`pippy` was not found to be installed on your system. Please "
|
||||
"install using `pip install torchpippy` or ensure you have at least version 0.2.0"
|
||||
)
|
||||
state = PartialState()
|
||||
example_args = send_to_device(example_args, "cpu")
|
||||
example_kwargs = send_to_device(example_kwargs, "cpu")
|
||||
if num_chunks is None:
|
||||
num_chunks = state.num_processes
|
||||
if split_points == "auto":
|
||||
device_map = generate_device_map(model, num_chunks, no_split_module_classes=no_split_module_classes)
|
||||
split_points = []
|
||||
for i in range(1, num_chunks):
|
||||
split_points.append(next(k for k, v in device_map.items() if v == i))
|
||||
model.hf_split_points = split_points
|
||||
stage = build_pipeline(model, split_points, example_args, example_kwargs, num_chunks)
|
||||
model._original_forward = model.forward
|
||||
model._original_call = model.__call__
|
||||
model.pippy_stage = stage
|
||||
model.hf_split_points = split_points
|
||||
|
||||
def forward(*args, **kwargs):
|
||||
return pippy_forward(stage.forward, num_chunks, gather_output, *args, **kwargs)
|
||||
|
||||
# To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
|
||||
# Note: creates an infinite recursion loop with `generate`
|
||||
model_forward = MethodType(forward, model)
|
||||
forward.__wrapped__ = model_forward
|
||||
model.forward = forward
|
||||
return model
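A minimal usage sketch of the function above (assumes `torchpippy` is installed and the script is started with `accelerate launch` on more than one GPU; whether a given model traces cleanly is up to PiPPy):

    import torch

    from accelerate.inference import prepare_pippy

    model = torch.nn.Sequential(
        torch.nn.Linear(512, 512), torch.nn.ReLU(), torch.nn.Linear(512, 10)
    )
    example_args = (torch.randn(2, 512),)

    model = prepare_pippy(model, example_args=example_args, gather_output=True)
    with torch.no_grad():
        output = model(*example_args)   # every rank gets the output when gather_output=True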
@@ -24,6 +24,7 @@ from .utils import (
|
||||
PrepareForLaunch,
|
||||
are_libraries_initialized,
|
||||
check_cuda_p2p_ib_support,
|
||||
get_gpu_info,
|
||||
is_mps_available,
|
||||
patch_environment,
|
||||
)
|
||||
@ -124,7 +125,7 @@ def notebook_launcher(
|
||||
launcher = PrepareForLaunch(function, distributed_type="TPU")
|
||||
print(f"Launching a training on {num_processes} TPU cores.")
|
||||
xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
|
||||
elif in_colab:
|
||||
elif in_colab and get_gpu_info()[1] < 2:
|
||||
# No need for a distributed launch otherwise as it's either CPU or one GPU.
|
||||
if torch.cuda.is_available():
|
||||
print("Launching training on one GPU.")
|
||||
|
||||
@ -69,6 +69,8 @@ class LocalSGD:
|
||||
DistributedType.NO,
|
||||
DistributedType.MULTI_CPU,
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_NPU,
|
||||
]:
|
||||
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
|
||||
self.enabled = enabled and accelerator.distributed_type != DistributedType.NO
|
||||
|
||||
@ -18,10 +18,10 @@ import warnings
|
||||
import torch
|
||||
|
||||
from .state import AcceleratorState, GradientState
|
||||
from .utils import DistributedType, honor_type, is_tpu_available
|
||||
from .utils import DistributedType, honor_type, is_torch_xla_available
|
||||
|
||||
|
||||
if is_tpu_available(check_device=False):
|
||||
if is_torch_xla_available():
|
||||
import torch_xla.core.xla_model as xm
|
||||
|
||||
|
||||
@ -68,7 +68,7 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
|
||||
# Handle device placement
|
||||
if device_placement:
|
||||
state_dict = self.optimizer.state_dict()
|
||||
if self.accelerator_state.distributed_type == DistributedType.TPU:
|
||||
if self.accelerator_state.distributed_type == DistributedType.XLA:
|
||||
xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
|
||||
else:
|
||||
state_dict = move_to_device(state_dict, self.accelerator_state.device)
|
||||
@ -102,7 +102,7 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
|
||||
self.optimizer.add_param_group(param_group)
|
||||
|
||||
def load_state_dict(self, state_dict):
|
||||
if self.accelerator_state.distributed_type == DistributedType.TPU and self.device_placement:
|
||||
if self.accelerator_state.distributed_type == DistributedType.XLA and self.device_placement:
|
||||
xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
|
||||
self.optimizer.load_state_dict(state_dict)
|
||||
|
||||
@ -114,7 +114,7 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
|
||||
accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters
|
||||
if accept_arg:
|
||||
if set_to_none is None:
|
||||
set_to_none = False
|
||||
set_to_none = True
|
||||
self.optimizer.zero_grad(set_to_none=set_to_none)
|
||||
else:
|
||||
if set_to_none is not None:
|
||||
@ -122,11 +122,15 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
|
||||
self.optimizer.zero_grad()
|
||||
|
||||
def step(self, closure=None):
|
||||
if (
|
||||
not self.gradient_state.is_xla_gradients_synced
|
||||
and self.accelerator_state.distributed_type == DistributedType.XLA
|
||||
):
|
||||
gradients = xm._fetch_gradients(self.optimizer)
|
||||
xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size())
|
||||
self.gradient_state.is_xla_gradients_synced = True
|
||||
if self.gradient_state.sync_gradients:
|
||||
if self.accelerator_state.distributed_type == DistributedType.TPU:
|
||||
optimizer_args = {"closure": closure} if closure is not None else {}
|
||||
xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)
|
||||
elif self.scaler is not None:
|
||||
if self.scaler is not None:
|
||||
self.optimizer.step = self._optimizer_patched_step_method
|
||||
|
||||
self.scaler.step(self.optimizer, closure)
|
||||
@ -143,6 +147,8 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
|
||||
self._accelerate_step_called = False
|
||||
else:
|
||||
self.optimizer.step(closure)
|
||||
if self.accelerator_state.distributed_type == DistributedType.XLA:
|
||||
self.gradient_state.is_xla_gradients_synced = False
|
||||
|
||||
def _switch_parameters(self, parameters_map):
|
||||
for param_group in self.optimizer.param_groups:
|
||||
|
||||
@ -32,24 +32,30 @@ from .utils import (
|
||||
check_cuda_p2p_ib_support,
|
||||
check_fp8_capability,
|
||||
get_ccl_version,
|
||||
get_cpu_distributed_information,
|
||||
get_int_from_env,
|
||||
is_ccl_available,
|
||||
is_datasets_available,
|
||||
is_deepspeed_available,
|
||||
is_fp8_available,
|
||||
is_ipex_available,
|
||||
is_mlu_available,
|
||||
is_mps_available,
|
||||
is_npu_available,
|
||||
is_tpu_available,
|
||||
is_torch_xla_available,
|
||||
is_xpu_available,
|
||||
parse_choice_from_env,
|
||||
parse_flag_from_env,
|
||||
set_numa_affinity,
|
||||
)
|
||||
from .utils.dataclasses import SageMakerDistributedType
|
||||
|
||||
|
||||
if is_tpu_available(check_device=False):
|
||||
if is_torch_xla_available():
|
||||
import torch_xla.core.xla_model as xm
|
||||
|
||||
if is_mlu_available(check_device=False):
|
||||
import torch_mlu # noqa: F401
|
||||
|
||||
if is_npu_available(check_device=False):
|
||||
import torch_npu # noqa: F401
|
||||
@ -98,7 +104,7 @@ class ThreadLocalSharedDict(threading.local):
|
||||
|
||||
|
||||
# Prefer global shared dictionary, except when using TPU.
|
||||
SharedDict = dict if not is_tpu_available(check_device=False) else ThreadLocalSharedDict
|
||||
SharedDict = dict if not is_torch_xla_available() else ThreadLocalSharedDict
|
||||
|
||||
|
||||
# Inspired by Alex Martelli's 'Borg'.
|
||||
@ -108,6 +114,14 @@ class PartialState:
|
||||
control. Designed to be used when only process control and device execution states are needed. Does *not* need to
|
||||
be initialized from `Accelerator`.
|
||||
|
||||
Args:
|
||||
cpu (`bool`, *optional*):
|
||||
Whether or not to force the script to execute on CPU. Will ignore any accelerators available if set to
|
||||
`True` and force the execution on the CPU.
|
||||
kwargs (additional keyword arguments, *optional*):
|
||||
Additional keyword arguments to pass to the relevant `init_process_group` function. Valid `kwargs` can be
|
||||
found in [`utils.InitProcessGroupKwargs`]. See the example section for detailed usage.
|
||||
|
||||
**Available attributes:**
|
||||
|
||||
- **device** (`torch.device`) -- The device to use.
|
||||
@ -122,9 +136,31 @@ class PartialState:
|
||||
- **is_main_process** (`bool`) -- Whether or not the current process is the main one.
|
||||
- **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
|
||||
- **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
|
||||
|
||||
Example:
|
||||
```python
|
||||
from accelerate.utils import InitProcessGroupKwargs
|
||||
|
||||
# To include `InitProcessGroupKwargs`, init then call `.to_kwargs()`
|
||||
kwargs = InitProcessGroupKwargs(...).to_kwargs()
|
||||
state = PartialState(**kwargs)
|
||||
```
|
||||
"""
|
||||
|
||||
_shared_state = SharedDict()
|
||||
_known_attrs = [
|
||||
"_cpu",
|
||||
"_mixed_precision",
|
||||
"_shared_state",
|
||||
"backend",
|
||||
"debug",
|
||||
"device",
|
||||
"distributed_type",
|
||||
"fork_launched",
|
||||
"local_process_index",
|
||||
"num_processes",
|
||||
"process_index",
|
||||
]
|
||||
|
||||
def __init__(self, cpu: bool = False, **kwargs):
|
||||
self.__dict__ = self._shared_state
|
||||
@ -135,171 +171,88 @@ class PartialState:
|
||||
self.device = torch.device(env_device) if env_device is not None else None
|
||||
self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE")
|
||||
use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None)
|
||||
dist_information = None
|
||||
if use_sagemaker_dp is None:
|
||||
use_sagemaker_dp = (
|
||||
os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true"
|
||||
and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO
|
||||
)
|
||||
|
||||
if use_sagemaker_dp and not cpu:
|
||||
if (
|
||||
os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL
|
||||
) or use_sagemaker_dp:
|
||||
self.distributed_type = DistributedType.MULTI_GPU
|
||||
import smdistributed.dataparallel.torch.torch_smddp # noqa
|
||||
|
||||
if not torch.distributed.is_initialized():
|
||||
torch.distributed.init_process_group(backend="smddp")
|
||||
self.backend = "smddp"
|
||||
self.num_processes = torch.distributed.get_world_size()
|
||||
self.process_index = torch.distributed.get_rank()
|
||||
self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
|
||||
if self.device is None:
|
||||
self.device = torch.device("cuda", self.local_process_index)
|
||||
torch.cuda.set_device(self.device)
|
||||
elif is_tpu_available() and not cpu:
|
||||
self.distributed_type = DistributedType.TPU
|
||||
self.num_processes = xm.xrt_world_size()
|
||||
self.process_index = xm.get_ordinal()
|
||||
self.local_process_index = xm.get_local_ordinal()
|
||||
self.device = xm.xla_device()
|
||||
elif (
|
||||
os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true"
|
||||
and int(os.environ.get("LOCAL_RANK", -1)) != -1
|
||||
and not cpu
|
||||
):
|
||||
assert (
|
||||
is_deepspeed_available()
|
||||
), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source"
|
||||
self.distributed_type = DistributedType.DEEPSPEED
|
||||
if not torch.distributed.is_initialized():
|
||||
from deepspeed import comm as dist
|
||||
|
||||
# DeepSpeed always uses nccl
|
||||
kwargs.pop("backend", None)
|
||||
if is_xpu_available and is_ccl_available():
|
||||
# Set DeepSpeed backend to ccl for xpu
|
||||
self.backend = "ccl"
|
||||
os.environ["CCL_PROCESS_LAUNCHER"] = "none"
|
||||
os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1")
|
||||
os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0")
|
||||
elif is_npu_available():
|
||||
self.backend = "hccl"
|
||||
# Sets up self.backend + imports
|
||||
backend, distributed_type = self._prepare_backend(cpu, use_sagemaker_dp, kwargs.pop("backend", None))
|
||||
self.backend = backend
|
||||
self.distributed_type = distributed_type
|
||||
use_deepspeed = False
|
||||
if not cpu:
|
||||
# Deal with XLA
|
||||
if is_torch_xla_available():
|
||||
self.device = xm.xla_device()
|
||||
xm.set_replication(self.device, xm.get_xla_supported_devices())
|
||||
self.num_processes = xm.xrt_world_size()
|
||||
self.process_index = xm.get_ordinal()
|
||||
if is_torch_xla_available(check_is_tpu=True):
|
||||
self.local_process_index = xm.get_local_ordinal()
|
||||
else:
|
||||
self.backend = "nccl"
|
||||
dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
|
||||
self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
|
||||
self.distributed_type = DistributedType.XLA
|
||||
if int(os.environ.get("LOCAL_RANK", -1)) != -1:
|
||||
# Deal with spawning deepspeed
|
||||
if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true":
|
||||
if not is_deepspeed_available():
|
||||
raise ImportError(
|
||||
"DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source"
|
||||
)
|
||||
from deepspeed import comm as dist
|
||||
|
||||
self.num_processes = torch.distributed.get_world_size()
|
||||
self.process_index = torch.distributed.get_rank()
|
||||
self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
|
||||
if self.device is None:
|
||||
if is_xpu_available():
|
||||
self.device = torch.device("xpu", self.local_process_index)
|
||||
if self.device is not None:
|
||||
torch.xpu.set_device(self.device)
|
||||
elif is_npu_available():
|
||||
self.device = torch.device("npu", self.local_process_index)
|
||||
if self.device is not None:
|
||||
torch.npu.set_device(self.device)
|
||||
else:
|
||||
self.device = torch.device("cuda", self.local_process_index)
|
||||
if self.device is not None:
|
||||
torch.cuda.set_device(self.device)
|
||||
if self.device.type == "cuda" and not check_cuda_p2p_ib_support():
|
||||
if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ:
|
||||
raise NotImplementedError(
|
||||
"Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. "
|
||||
'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which '
|
||||
"will do this automatically."
|
||||
)
|
||||
self._mixed_precision = "no" # deepspeed handles mixed_precision using deepspeed_config
|
||||
elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu and torch.cuda.is_available():
|
||||
self.distributed_type = DistributedType.MULTI_GPU
|
||||
if not torch.distributed.is_initialized():
|
||||
self.backend = kwargs.pop("backend", "nccl")
|
||||
# Special case for `TrainingArguments`, where `backend` will be `None`
|
||||
if self.backend is None:
|
||||
self.backend = "nccl"
|
||||
torch.distributed.init_process_group(backend=self.backend, **kwargs)
|
||||
if not check_cuda_p2p_ib_support():
|
||||
if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ:
|
||||
raise NotImplementedError(
|
||||
"Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. "
|
||||
'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which '
|
||||
"will do this automatically."
|
||||
)
|
||||
self.num_processes = torch.distributed.get_world_size()
|
||||
self.process_index = torch.distributed.get_rank()
|
||||
self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
|
||||
if self.device is None:
|
||||
self.device = torch.device("cuda", self.local_process_index)
|
||||
torch.cuda.set_device(self.device)
|
||||
elif is_npu_available() and not cpu and int(os.environ.get("LOCAL_RANK", -1)) != -1:
|
||||
self.distributed_type = DistributedType.MULTI_NPU
|
||||
if not torch.distributed.is_initialized():
|
||||
# Backend is not set by the user, we set it here
|
||||
kwargs.pop("backend", None)
|
||||
self.backend = "hccl"
|
||||
torch.distributed.init_process_group(backend=self.backend, **kwargs)
|
||||
self.num_processes = torch.distributed.get_world_size()
|
||||
self.process_index = torch.distributed.get_rank()
|
||||
self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
|
||||
if self.device is None:
|
||||
self.device = torch.device("npu", self.local_process_index)
|
||||
torch.npu.set_device(self.device)
|
||||
elif get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1:
|
||||
if not cpu and is_xpu_available():
|
||||
self.distributed_type = DistributedType.MULTI_XPU
|
||||
else:
|
||||
self.distributed_type = DistributedType.MULTI_CPU
|
||||
# Actually, CCL_WORKER_COUNT is a CPU only env var in CCL, no need to set it for XPU.
|
||||
if is_ccl_available() and (
|
||||
get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or self.distributed_type == DistributedType.MULTI_XPU
|
||||
):
|
||||
if get_ccl_version() >= "1.12":
|
||||
import oneccl_bindings_for_pytorch # noqa: F401
|
||||
else:
|
||||
import torch_ccl # noqa: F401
|
||||
backend = "ccl"
|
||||
elif torch.distributed.is_mpi_available():
|
||||
backend = "mpi"
|
||||
else:
|
||||
backend = "gloo"
|
||||
# Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH
|
||||
rank = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0)
|
||||
size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1)
|
||||
local_rank = get_int_from_env(
|
||||
["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0
|
||||
)
|
||||
local_size = get_int_from_env(
|
||||
["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"],
|
||||
1,
|
||||
)
|
||||
self.local_process_index = local_rank
|
||||
os.environ["RANK"] = str(rank)
|
||||
os.environ["WORLD_SIZE"] = str(size)
|
||||
os.environ["LOCAL_RANK"] = str(local_rank)
|
||||
os.environ["LOCAL_WORLD_SIZE"] = str(local_size)
|
||||
if is_xpu_available and is_ccl_available():
|
||||
os.environ["CCL_PROCESS_LAUNCHER"] = "none"
|
||||
os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1")
|
||||
os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0")
|
||||
|
||||
if backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU:
|
||||
if not dist.is_initialized():
|
||||
dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
|
||||
# We need to flag to `use_deepspeed` to be True to override `distributed_type` later
|
||||
use_deepspeed = True
|
||||
# Deal with all other backends but XPU and CPU, that gets handled special later
|
||||
elif (
|
||||
self.distributed_type not in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU)
|
||||
and not torch.distributed.is_initialized()
|
||||
):
|
||||
torch.distributed.init_process_group(backend=self.backend, **kwargs)
|
||||
# XPU and CPU require special env configs to be set
|
||||
if self.distributed_type in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU):
|
||||
dist_information = get_cpu_distributed_information()
|
||||
os.environ["RANK"] = str(dist_information.rank)
|
||||
os.environ["WORLD_SIZE"] = str(dist_information.world_size)
|
||||
os.environ["LOCAL_RANK"] = str(dist_information.local_rank)
|
||||
os.environ["LOCAL_WORLD_SIZE"] = str(dist_information.local_world_size)
|
||||
if self.backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU:
|
||||
os.environ["CCL_PROCESS_LAUNCHER"] = "none"
|
||||
os.environ["CCL_LOCAL_SIZE"] = str(local_size)
|
||||
os.environ["CCL_LOCAL_RANK"] = str(local_rank)
|
||||
os.environ["CCL_LOCAL_SIZE"] = os.environ["LOCAL_WORLD_SIZE"]
|
||||
os.environ["CCL_LOCAL_RANK"] = os.environ["LOCAL_RANK"]
|
||||
if not os.environ.get("MASTER_PORT", None):
|
||||
os.environ["MASTER_PORT"] = "29500"
|
||||
if not os.environ.get("MASTER_ADDR", None):
|
||||
if local_size != size and backend != "mpi":
|
||||
raise ValueError(
|
||||
"Looks like distributed multinode run but MASTER_ADDR env not set, "
|
||||
"please try exporting rank 0's hostname as MASTER_ADDR"
|
||||
)
|
||||
if (
|
||||
not os.environ.get("MASTER_ADDR", None)
|
||||
and dist_information.local_world_size != dist_information.world_size
|
||||
and self.backend != "mpi"
|
||||
):
|
||||
raise ValueError(
|
||||
"Tried to launch on distributed with multinode, but `MASTER_ADDR` env was not set, "
|
||||
"please try exporting rank 0's hostname as `MASTER_ADDR`"
|
||||
)
|
||||
kwargs["rank"] = dist_information.rank
|
||||
kwargs["world_size"] = dist_information.world_size
|
||||
|
||||
if (
|
||||
self.distributed_type == DistributedType.MULTI_CPU
|
||||
and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0
|
||||
and get_int_from_env(["OMP_NUM_THREADS", "OMP_NUM_THREADS"], 0) > 0
|
||||
):
|
||||
import psutil
|
||||
|
||||
num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)
|
||||
num_cpu_threads_per_process = int(
|
||||
psutil.cpu_count(logical=False) / dist_information.local_world_size
|
||||
)
|
||||
if num_cpu_threads_per_process == 0:
|
||||
num_cpu_threads_per_process = 1
|
||||
torch.set_num_threads(num_cpu_threads_per_process)
|
||||
@ -307,34 +260,41 @@ class PartialState:
|
||||
f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob"
|
||||
" performance."
|
||||
)
|
||||
|
||||
if not torch.distributed.is_initialized():
|
||||
# Backend is not set by the user, we set it here
|
||||
kwargs.pop("backend", None)
|
||||
self.backend = backend
|
||||
torch.distributed.init_process_group(self.backend, rank=rank, world_size=size, **kwargs)
|
||||
torch.distributed.init_process_group(backend=self.backend, **kwargs)
|
||||
|
||||
# No backend == no distributed training
|
||||
if self.backend is None:
|
||||
self.distributed_type = DistributedType.NO
|
||||
self.num_processes = 1
|
||||
self.process_index = 0
|
||||
self.local_process_index = 0
|
||||
else:
|
||||
self.num_processes = torch.distributed.get_world_size()
|
||||
self.process_index = torch.distributed.get_rank()
|
||||
if cpu:
|
||||
self.device = torch.device("cpu")
|
||||
elif is_xpu_available():
|
||||
self.device = torch.device("xpu", self.local_process_index)
|
||||
torch.xpu.set_device(self.device)
|
||||
else:
|
||||
self.device = self.default_device
|
||||
else:
|
||||
self.distributed_type = (
|
||||
DistributedType.NO
|
||||
if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "false"
|
||||
else DistributedType.DEEPSPEED
|
||||
self.local_process_index = (
|
||||
int(os.environ.get("LOCAL_RANK", -1)) if dist_information is None else dist_information.local_rank
|
||||
)
|
||||
self.num_processes = 1
|
||||
self.process_index = self.local_process_index = 0
|
||||
|
||||
if self.device is None:
|
||||
self.device = torch.device("cpu") if cpu else self.default_device
|
||||
self.set_device()
|
||||
# Now we can change to deepspeed
|
||||
if use_deepspeed:
|
||||
self.distributed_type = DistributedType.DEEPSPEED
|
||||
|
||||
# Set CPU affinity if enabled
|
||||
if parse_flag_from_env("ACCELERATE_CPU_AFFINITY", False):
|
||||
set_numa_affinity(self.local_process_index)
|
||||
self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0)
|
||||
|
||||
# Check for old RTX 4000's that can't use P2P or IB and are on old drivers
|
||||
if self.device.type == "cuda" and not check_cuda_p2p_ib_support():
|
||||
if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ:
|
||||
raise NotImplementedError(
|
||||
"Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. "
|
||||
'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which '
|
||||
"will do this automatically."
|
||||
)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return (
|
||||
f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n"
|
||||
@ -406,6 +366,7 @@ class PartialState:
|
||||
"""
|
||||
if self.distributed_type in (
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.MULTI_CPU,
|
||||
@ -413,7 +374,7 @@ class PartialState:
|
||||
DistributedType.FSDP,
|
||||
):
|
||||
torch.distributed.barrier()
|
||||
elif self.distributed_type == DistributedType.TPU:
|
||||
elif self.distributed_type == DistributedType.XLA:
|
||||
xm.rendezvous("accelerate.utils.wait_for_everyone")
|
||||
|
||||
def _goes_first(self, is_main: bool):
|
||||
@ -434,7 +395,7 @@ class PartialState:
|
||||
Note that when using a `dict`, all keys need to have the same number of elements.
|
||||
|
||||
Args:
|
||||
inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
|
||||
inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`):
|
||||
The input to split between processes.
|
||||
apply_padding (`bool`, `optional`, defaults to `False`):
|
||||
Whether to apply padding by repeating the last element of the input so that all processes have the same
|
||||
@ -500,6 +461,18 @@ class PartialState:
|
||||
inputs[key] = _split_values(inputs[key], start_index, end_index)
|
||||
return inputs
|
||||
else:
|
||||
if is_datasets_available():
|
||||
from datasets import Dataset
|
||||
|
||||
if isinstance(inputs, Dataset):
|
||||
if start_index >= len(inputs):
|
||||
start_index = len(inputs) - 1
|
||||
if end_index > len(inputs):
|
||||
end_index = len(inputs)
|
||||
result_idcs = list(range(start_index, end_index))
|
||||
if apply_padding:
|
||||
result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs))
|
||||
return inputs.select(result_idcs)
|
||||
return inputs
|
||||
|
||||
yield _split_values(inputs, start_index, end_index)
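As a rough sketch of what the new `datasets.Dataset` branch enables (assuming the surrounding context manager is `PartialState.split_between_processes` and that the `datasets` package is installed), each rank receives a contiguous shard, padded with the last row when `apply_padding=True`:

from datasets import Dataset
from accelerate import PartialState

state = PartialState()
data = Dataset.from_dict({"text": [f"sample {i}" for i in range(10)]})

# Each process sees only its own slice; padding keeps shard sizes equal across ranks.
with state.split_between_processes(data, apply_padding=True) as shard:
    print(f"process {state.process_index} received {len(shard)} rows")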
@ -714,12 +687,15 @@ class PartialState:
Returns the default device which is:
- MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True.
- CUDA if `torch.cuda.is_available()`
- MLU if `is_mlu_available()`
- NPU if `is_npu_available()`
- CPU otherwise
"""
if is_mps_available():
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
return torch.device("mps")
elif is_mlu_available():
return torch.device("mlu")
elif torch.cuda.is_available():
return torch.device("cuda")
elif is_xpu_available():
@ -729,6 +705,92 @@ class PartialState:
else:
return torch.device("cpu")

def _prepare_backend(
self, cpu: bool = False, sagemaker_dp=False, backend: str = None
) -> tuple[str, DistributedType]:
"Prepares any imports needed before initializing the distributed backend and sets `self.backend` properly"
distributed_type = None
if sagemaker_dp:
import smdistributed.dataparallel.torch.torch_smddp  # noqa

backend = "smddp"
distributed_type = DistributedType.MULTI_GPU
elif int(os.environ.get("LOCAL_RANK", -1)) != -1:
if not cpu:
if is_mlu_available():
backend = "cncl"
distributed_type = DistributedType.MULTI_MLU
elif torch.cuda.is_available():
if backend is None:
backend = "nccl"
distributed_type = DistributedType.MULTI_GPU
elif is_npu_available():
backend = "hccl"
distributed_type = DistributedType.MULTI_NPU
if backend is None and (
int(os.environ.get("LOCAL_RANK", -1)) != -1
or get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1
):
if not cpu and is_xpu_available():
distributed_type = DistributedType.MULTI_XPU
else:
distributed_type = DistributedType.MULTI_CPU
if is_ccl_available() and (
get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or distributed_type == DistributedType.MULTI_XPU
):
if get_ccl_version() >= "1.12":
import oneccl_bindings_for_pytorch  # noqa: F401
else:
import torch_ccl  # noqa: F401

backend = "ccl"
elif torch.distributed.is_mpi_available():
backend = "mpi"
else:
backend = "gloo"
if distributed_type is None:
distributed_type = DistributedType.NO
return backend, distributed_type

def set_device(self):
"""
Sets the device in `self.device` to the current distributed environment.
"""
if self.device is not None:
return
if self.num_processes == 1:
self.device = torch.device("cpu") if self._cpu else self.default_device
return
device = str(self.distributed_type).split(".")[-1].replace("MULTI_", "").lower()
if device not in ("cpu", "gpu", "mlu", "npu", "xpu"):
raise ValueError(
f"Can't set device for {self.distributed_type} ({device}), verify we should be calling `_set_device()` for it!"
)
if device == "gpu":
device = "cuda"
self.device = torch.device(device, self.local_process_index)
if self.device is not None:
if device == "xpu":
torch.xpu.set_device(self.device)
elif device == "mlu":
torch.mlu.set_device(self.device)
elif device == "npu":
torch.npu.set_device(self.device)
elif device == "cuda":
torch.cuda.set_device(self.device)

def __getattr__(self, name: str):
# By this point we know that no attributes of `self` contain `name`,
# so we just modify the error message
if name in self._known_attrs:
raise AttributeError(
f"`PartialState` object has no attribute `{name}`. "
"This happens if `PartialState._reset_state()` was called and "
"an `Accelerator` or `PartialState` was not reinitialized."
)
# Raise a typical AttributeError
raise AttributeError(f"'PartialState' object has no attribute '{name}'")


class AcceleratorState:
"""
@ -752,6 +814,13 @@ class AcceleratorState:
"""

_shared_state = SharedDict()
_known_attrs = PartialState._known_attrs + [
"deepspeed_plugin",
"use_ipex",
"fsdp_plugin",
"megatron_lm_plugin",
"dynamo_plugin",
]

def __init__(
self,
@ -800,7 +869,7 @@ class AcceleratorState:
)
# deepspeed handles mixed_precision using deepspeed_config
self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision
if self.distributed_type == DistributedType.TPU:
if self.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_tpu=True):
if mixed_precision == "bf16":
if os.environ.get("ACCELERATE_DOWNCAST_BF16"):
os.environ["XLA_USE_BF16"] = str(0)
@ -812,35 +881,30 @@ class AcceleratorState:
self.downcast_bfloat = False
elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu:
self.deepspeed_plugin = deepspeed_plugin
elif self.distributed_type == DistributedType.MULTI_GPU:
elif self.distributed_type in [
DistributedType.MULTI_GPU,
DistributedType.MULTI_MLU,
DistributedType.MULTI_NPU,
DistributedType.MULTI_XPU,
]:
if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true":
self.distributed_type = DistributedType.FSDP
if self._mixed_precision != "no":
fsdp_plugin.set_mixed_precision(self._mixed_precision)
self.fsdp_plugin = fsdp_plugin
if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true":
if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" and self.distributed_type not in [
DistributedType.MULTI_NPU,
DistributedType.MULTI_XPU,
]:
self.distributed_type = DistributedType.MEGATRON_LM
megatron_lm_plugin.set_mixed_precision(self._mixed_precision)
self.megatron_lm_plugin = megatron_lm_plugin
elif self.distributed_type == DistributedType.MULTI_NPU:
if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true":
self.distributed_type = DistributedType.FSDP
if self._mixed_precision != "no":
fsdp_plugin.set_mixed_precision(self._mixed_precision)
self.fsdp_plugin = fsdp_plugin
elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
if is_ipex_available():
"check if user disables it explicitly"
# check if user disables it explicitly
self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True)
else:
self.use_ipex = False
if self.distributed_type == DistributedType.MULTI_XPU:
if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true":
self.distributed_type = DistributedType.FSDP
if self._mixed_precision != "no":
fsdp_plugin.set_mixed_precision(self._mixed_precision)
self.fsdp_plugin = fsdp_plugin

if (
self.dynamo_plugin.backend != DynamoBackend.NO
and self._mixed_precision == "no"
@ -993,6 +1057,18 @@ class AcceleratorState:
def print(self, *args, **kwargs):
PartialState().print(*args, **kwargs)

def __getattr__(self, name: str):
# By this point we know that no attributes of `self` contain `name`,
# so we just modify the error message
if name in self._known_attrs:
raise AttributeError(
f"`AcceleratorState` object has no attribute `{name}`. "
"This happens if `AcceleratorState._reset_state()` was called and "
"an `Accelerator` or `PartialState` was not reinitialized."
)
# Raise a typical AttributeError
raise AttributeError(f"'AcceleratorState' object has no attribute '{name}'")


class GradientState:
"""
@ -1011,6 +1087,10 @@ class GradientState:
accumulation
- **sync_with_dataloader** (`bool`) -- Whether the gradients should be synced at the end of the dataloader
iteration and the number of total steps reset
- **is_xla_gradients_synced** (`bool`) -- Whether the XLA gradients have been synchronized. It is initialized
as false. Once gradients have been reduced before the optimizer step, this flag is set to true. Subsequently,
after each step, the flag is reset to false. FSDP will always synchronize the gradients, hence
is_xla_gradients_synced is always true.
"""

_shared_state = SharedDict()
@ -1024,6 +1104,7 @@ class GradientState:
self.plugin_kwargs = (
gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {}
)
self._is_xla_gradients_synced = False

# Plugin args are different and can be updated
if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs():
@ -1071,9 +1152,28 @@ class GradientState:
f"Gradient accumulation plugin: {self.plugin_kwargs}\n"
)

@property
def is_xla_gradients_synced(self):
"Returns the value of is_xla_gradients_synced. FSDP will always synchronize the gradients, hence is_xla_gradients_synced is always true."
if parse_flag_from_env("ACCELERATE_USE_FSDP", default=False):
return True
return self._is_xla_gradients_synced

@is_xla_gradients_synced.setter
def is_xla_gradients_synced(self, is_synced):
"Set the _is_xla_gradients_synced attribute."
self._is_xla_gradients_synced = is_synced

def _set_sync_gradients(self, sync_gradients):
"Private function that sets whether gradients should be synchronized. Users should not have to call this."
self.sync_gradients = sync_gradients
# Allow grad-sync to automatically work on TPUs
if (
self.sync_gradients
and is_torch_xla_available(check_is_tpu=True)
and PartialState().distributed_type == DistributedType.XLA
):
xm.mark_step()

def _add_dataloader(self, dataloader):
"Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this."

@ -1,18 +1,39 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .testing import (
DEFAULT_LAUNCH_COMMAND,
are_the_same_tensors,
assert_exception,
device_count,
execute_subprocess_async,
get_launch_command,
memory_allocated_func,
path_in_accelerate_package,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mlu,
require_mps,
require_multi_device,
require_multi_gpu,
require_multi_xpu,
require_non_cpu,
require_non_torch_xla,
require_non_xpu,
require_npu,
require_pippy,
require_single_device,
require_single_gpu,
require_single_xpu,

@ -84,14 +84,14 @@ def compare_against_test(base_filename: str, feature_filename: str, parser_only:
functionalities off of "examples/nlp_example.py", so if `base_filename` is a script other than
`complete_nlp_example.py`, the template script should be included here. Such as `examples/cv_example.py`
"""
with open(base_filename, "r") as f:
with open(base_filename) as f:
base_file_contents = f.readlines()
with open(os.path.abspath(os.path.join("examples", "nlp_example.py")), "r") as f:
with open(os.path.abspath(os.path.join("examples", "nlp_example.py"))) as f:
full_file_contents = f.readlines()
with open(feature_filename, "r") as f:
with open(feature_filename) as f:
feature_file_contents = f.readlines()
if secondary_filename is not None:
with open(secondary_filename, "r") as f:
with open(secondary_filename) as f:
secondary_file_contents = f.readlines()

# This is our base, we remove all the code from here in our `full_filename` and `feature_filename` to find the new content

@ -0,0 +1,13 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

@ -0,0 +1,13 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@ -1,4 +1,3 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -61,7 +60,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name:

def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
if accelerator.distributed_type == DistributedType.XLA:
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
return tokenizer.pad(examples, padding="longest", return_tensors="pt")

@ -182,7 +181,7 @@ def training_function(config, args):
accelerator.print("resumed checkpoint performance:", accuracy)
accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json")) as f:
resumed_state = json.load(f)
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (

@ -25,10 +25,10 @@ from datasets import load_dataset
from torch.utils.data import DataLoader, IterableDataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate import Accelerator, DataLoaderConfiguration, DistributedType
from accelerate.data_loader import DataLoaderDispatcher
from accelerate.test_utils import RegressionDataset, RegressionModel, torch_device
from accelerate.utils import set_seed
from accelerate.utils import is_torch_xla_available, set_seed


os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
@ -36,7 +36,7 @@ os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"

class ListHandler(logging.Handler):
def __init__(self, *args, **kwargs):
super(ListHandler, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.logs = []

def emit(self, record):
@ -81,7 +81,8 @@ def get_dataloader(accelerator: Accelerator, use_longest=False):


def get_mrpc_setup(dispatch_batches, split_batches):
accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
dataloader_config = DataLoaderConfiguration(dispatch_batches=dispatch_batches, split_batches=split_batches)
accelerator = Accelerator(dataloader_config=dataloader_config)
dataloader = get_dataloader(accelerator, not dispatch_batches)
model = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased", return_dict=True
@ -112,8 +113,8 @@ def generate_predictions(model, dataloader, accelerator):
def test_torch_metrics(
accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
_, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
assert (
len(logits) == num_samples
), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}"
@ -161,8 +162,7 @@ def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset():
return len(self.data)

def __iter__(self):
for element in self.data:
yield element
yield from self.data

iterable_dataset = DummyIterableDataset([n for n in range(30)])
dataloader = DataLoader(iterable_dataset, batch_size=4)
@ -194,8 +194,7 @@ def test_gather_for_metrics_with_iterable_dataset():
return len(self.data)

def __iter__(self):
for element in self.data:
yield element
yield from self.data

iterable_dataset = DummyIterableDataset(torch.as_tensor(range(30)))
dataloader = DataLoader(iterable_dataset, batch_size=4)
@ -242,19 +241,26 @@ def test_gather_for_metrics_drop_last():


def main():
accelerator = Accelerator(split_batches=False, dispatch_batches=False)
dataloader_config = DataLoaderConfiguration(split_batches=False, dispatch_batches=False)
accelerator = Accelerator(dataloader_config=dataloader_config)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# TorchXLA does not support batch dispatching. 'put_on_device' is always False for
# TorchXLA, which can cause a value error in 'prepare_data_loader' function.
dispatch_batches_options = [False] if accelerator.state.distributed_type == DistributedType.XLA else [True, False]

# Temporarily close this test for TorchXLA due to the 'Cannot set version_counter for
# inference tensor' error in inference mode. Reopen it after TorchXLA fixes this bug.
# These are a bit slower so they should only be ran on the GPU or TPU
if accelerator.device.type != "cpu":
if accelerator.device.type != "cpu" and not is_torch_xla_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**")
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
for dispatch_batches in dispatch_batches_options:
if accelerator.is_local_main_process:
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
test_mrpc(dispatch_batches, split_batches)
@ -263,15 +269,23 @@ def main():
test_gather_for_metrics_with_iterable_dataset()
print("test gather_for_metrics_with_non_tensor_objects_iterable_dataset")
test_gather_for_metrics_with_non_tensor_objects_iterable_dataset()
if accelerator.is_local_main_process:
print("**Test torch metrics**")
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
if accelerator.is_local_main_process:
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
test_torch_metrics(accelerator, 99)
accelerator.state._reset_state()

# MpDeviceLoader in TorchXLA is an asynchronous loader that preloads several batches into cache.
# This can cause the 'end_of_dataloader' of DataLoaderStateMixin to be set earlier than intended.
# Skip this test when TorchXLA is enabled.
if accelerator.state.distributed_type != DistributedType.XLA:
if accelerator.is_local_main_process:
print("**Test torch metrics**")
for split_batches in [True, False]:
for dispatch_batches in dispatch_batches_options:
dataloader_config = DataLoaderConfiguration(
split_batches=split_batches, dispatch_batches=dispatch_batches
)
accelerator = Accelerator(dataloader_config=dataloader_config)
if accelerator.is_local_main_process:
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
test_torch_metrics(accelerator, 99)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**")
accelerator = Accelerator()

@ -1,4 +1,3 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -24,7 +23,7 @@ from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import is_npu_available, is_xpu_available
from accelerate.utils import is_mlu_available, is_npu_available, is_xpu_available
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


@ -45,6 +44,10 @@ class TorchTracemalloc:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
self.begin = torch.cuda.memory_allocated()
elif is_mlu_available():
torch.mlu.empty_cache()
torch.mlu.reset_max_memory_allocated()  # reset the peak gauge to zero
self.begin = torch.mlu.memory_allocated()
elif is_npu_available():
torch.npu.empty_cache()
torch.npu.reset_max_memory_allocated()  # reset the peak gauge to zero
@ -61,6 +64,10 @@ class TorchTracemalloc:
torch.cuda.empty_cache()
self.end = torch.cuda.memory_allocated()
self.peak = torch.cuda.max_memory_allocated()
elif is_mlu_available():
torch.mlu.empty_cache()
self.end = torch.mlu.memory_allocated()
self.peak = torch.mlu.max_memory_allocated()
elif is_npu_available():
torch.npu.empty_cache()
self.end = torch.npu.memory_allocated()
@ -117,7 +124,7 @@ def get_dataloaders(

def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
if accelerator.distributed_type == DistributedType.XLA:
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
return tokenizer.pad(examples, padding="longest", return_tensors="pt")

@ -209,13 +216,11 @@ def training_function(config, args):
overall_step += 1

# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}")
accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}")
accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}")
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + b2mb(tracemalloc.begin)
)
f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
)
train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
if args.peak_memory_upper_bound is not None:

@ -1,4 +1,3 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -61,7 +60,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name:

def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
if accelerator.distributed_type == DistributedType.XLA:
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
return tokenizer.pad(examples, padding="longest", return_tensors="pt")

src/accelerate/test_utils/scripts/external_deps/test_pippy.py (new file, 129 lines)
@ -0,0 +1,129 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torchvision.models import resnet34
from transformers import (
BertConfig,
BertForMaskedLM,
GPT2Config,
GPT2ForSequenceClassification,
T5Config,
T5ForConditionalGeneration,
)

from accelerate import PartialState
from accelerate.inference import prepare_pippy
from accelerate.utils import DistributedType, send_to_device, set_seed


model_to_config = {
"t5": (T5ForConditionalGeneration, T5Config, 1024),
"bert": (BertForMaskedLM, BertConfig, 512),
"gpt2": (GPT2ForSequenceClassification, GPT2Config, 1024),
}


def get_model_and_data_for_text(model_name, device, num_processes: int = 2):
initializer, config, seq_len = model_to_config[model_name]
config_args = {}
# Eventually needed for batch inference tests on gpt-2 when bs != 1
# if model_name == "gpt2":
# config_args["pad_token_id"] = 0
model_config = config(**config_args)
model = initializer(model_config)
return model, torch.randint(
low=0,
high=model_config.vocab_size,
size=(num_processes, seq_len),
device=device,
dtype=torch.int64,
requires_grad=False,
)


def test_gpt2(batch_size: int = 2):
set_seed(42)
state = PartialState()
model, inputs = get_model_and_data_for_text("gpt2", "cpu", batch_size)
model = prepare_pippy(model, example_args=(inputs,), no_split_module_classes=model._no_split_modules)
# For inference args need to be a tuple
inputs = inputs.to("cuda")
with torch.no_grad():
output = model(inputs)
# Zach: Check that we just grab the real outputs we need at the end
if not state.is_last_process:
assert output is None, "Output was not generated on just the last process!"
else:
assert output is not None, "Output was not generated in the last process!"


def test_t5(batch_size: int = 2):
set_seed(42)
state = PartialState()
model, inputs = get_model_and_data_for_text("t5", "cpu", batch_size)
example_inputs = {"input_ids": inputs, "decoder_input_ids": inputs}
model = prepare_pippy(
model,
no_split_module_classes=model._no_split_modules,
example_kwargs=example_inputs,
)
# For inference args need to be a tuple
inputs = send_to_device(example_inputs, "cuda:0")
with torch.no_grad():
output = model(*inputs.values())
# Zach: Check that we just grab the real outputs we need at the end
if not state.is_last_process:
assert output is None, "Output was not generated on just the last process!"
else:
assert output is not None, "Output was not generated in the last process!"


def test_resnet(batch_size: int = 2):
set_seed(42)
state = PartialState()
model = resnet34()
input_tensor = torch.rand(batch_size, 3, 224, 224)
model = prepare_pippy(
model,
example_args=(input_tensor,),
)
inputs = send_to_device(input_tensor, "cuda:0")
with torch.no_grad():
output = model(inputs)
# Zach: Check that we just grab the real outputs we need at the end
if not state.is_last_process:
assert output is None, "Output was not generated on just the last process!"
else:
assert output is not None, "Output was not generated in the last process!"


if __name__ == "__main__":
state = PartialState()
state.print("Testing pippy integration...")
if state.distributed_type == DistributedType.MULTI_GPU:
state.print("Testing GPT2...")
test_gpt2()
# Issue: When modifying the tokenizer for batch GPT2 inference, there's an issue
# due to references
# NameError: cannot access free variable 'chunk_args_list' where it is not associated with a value in enclosing scope
# test_gpt2(3)
state.print("Testing T5...")
test_t5()
test_t5(1)
test_t5(3)
state.print("Testing CV model...")
test_resnet()
test_resnet(3)
else:
print("Less than two GPUs found, not running tests!")
@ -0,0 +1,52 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch.distributed

from accelerate.test_utils import require_huggingface_suite
from accelerate.utils import is_transformers_available


if is_transformers_available():
from transformers import AutoModel, TrainingArguments


GPT2_TINY = "sshleifer/tiny-gpt2"


@require_huggingface_suite
def init_torch_dist_then_launch_deepspeed():
torch.distributed.init_process_group(backend="nccl")
deepspeed_config = {
"zero_optimization": {
"stage": 3,
},
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
}
train_args = TrainingArguments(
output_dir="./",
deepspeed=deepspeed_config,
)
model = AutoModel.from_pretrained(GPT2_TINY)
assert train_args is not None
assert model is not None


def main():
init_torch_dist_then_launch_deepspeed()


if __name__ == "__main__":
main()

@ -1,3 +1,16 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch


@ -22,7 +22,7 @@ from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset

from accelerate.accelerator import Accelerator
from accelerate.accelerator import Accelerator, DataLoaderConfiguration
from accelerate.utils.dataclasses import DistributedType


@ -31,12 +31,12 @@ class DummyIterableDataset(IterableDataset):
self.data = data

def __iter__(self):
for element in self.data:
yield element
yield from self.data


def create_accelerator(even_batches=True):
accelerator = Accelerator(even_batches=even_batches)
dataloader_config = DataLoaderConfiguration(even_batches=even_batches)
accelerator = Accelerator(dataloader_config=dataloader_config)
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
Some files were not shown because too many files have changed in this diff.