Mirror of https://github.com/huggingface/accelerate.git (synced 2025-11-14 22:24:32 +08:00)
Compare commits
83 Commits
| SHA1 |
|---|
| 8d1479def0 |
| 62fcf16429 |
| 00301b27b7 |
| eb8c535c17 |
| b7686ccb44 |
| f3229872bc |
| 7843286f2e |
| 11e2e99cfc |
| 07e745f1c4 |
| c7c99a30ea |
| 8f45a2eae8 |
| 9fd64b7ea9 |
| 5be16ad90b |
| dab62832de |
| caa9f9bcbb |
| 943efedb88 |
| 50acb0c2ec |
| e6d96e5f70 |
| 1dfb6e9304 |
| 4bef6bc511 |
| 73640d0463 |
| 7a1159143e |
| cbb0b82fa2 |
| 5ae6111180 |
| 230a5f541b |
| 956114ac92 |
| 76ee7f211d |
| 420743af22 |
| 206ab491ed |
| 936d2f4f5c |
| da98d601b5 |
| 658492fb41 |
| 80da9cfb09 |
| 03deec2a01 |
| 629d02c844 |
| a87c95da9e |
| bbcdbbaffc |
| ce53708e0e |
| 53209ce6d8 |
| bd083ae1bf |
| e5452a618d |
| 40a73e0ae0 |
| 937e08ce75 |
| 5d558f21e2 |
| d9b5ce60b3 |
| 61a87ab946 |
| 5dec654aae |
| b2a950205e |
| ca7b853abc |
| 6832aa51a6 |
| 4a1d5b1fb6 |
| 82369c8314 |
| cdb001ca5f |
| c72e22419b |
| c872c3086f |
| cec5ae8e4d |
| cd570b2e2a |
| 727d624322 |
| afed2f75f8 |
| 739b135f83 |
| 4a9dd1cd82 |
| feab09908d |
| e0baaa8df0 |
| 1b998f1695 |
| 7befe580c2 |
| cd3d3a37f9 |
| 81fffe51fd |
| 0b5ac0253e |
| a16b843a1b |
| bc86a9379f |
| 87a096f95e |
| 44adf1e14f |
| ce870e1ce1 |
| 1ace672d3e |
| e2ae254008 |
| 0fa291e707 |
| ba6f11ec3e |
| 430ee9df6b |
| 409a9df0a4 |
| acad5bae5c |
| 81b19c4094 |
| 3e97a9172b |
| 812719644d |
47 .github/PULL_REQUEST_TEMPLATE.md (vendored, new file)
@@ -0,0 +1,47 @@

# What does this PR do?

<!--

Congratulations! You've made it this far! You're not quite done yet though.

Once merged, your PR is going to appear in the release notes with the title you set, so make sure it's a great title that fully reflects the extent of your awesome contribution.

Then, please replace this with a description of the change and which issue is fixed (if applicable). Please also include relevant motivation and context. List any dependencies (if any) that are required for this change.

Once you're done, someone will review your PR shortly (see the section "Who can review?" below to tag some potential reviewers). They may suggest changes to make the code even better. If no one reviewed your PR after a week has passed, don't hesitate to post a new comment @-mentioning the same persons---sometimes notifications get lost.

-->

<!-- Remove if not applicable -->

Fixes # (issue)

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md#submitting-a-pull-request-pr),
  Pull Request section?
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link
  to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the
  [documentation guidelines](https://github.com/huggingface/accelerate/tree/main/docs), and
  [here are tips on formatting docstrings](https://github.com/huggingface/accelerate/tree/main/docs#writing-documentation---specification).
- [ ] Did you write any new necessary tests?

## Who can review?

Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
members/contributors who may be interested in your PR.

<!-- Your PR will be replied to more quickly if you can figure out the right person to tag with @

If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.

- Big modeling: @SunMarc
- Fully-Sharded Data Parallelism: @pacman100
- DeepSpeed: @pacman100
- Command Line Interface: @muellerzr
- Documentation: @muellerzr
- Core parts of the library: @muellerzr @BenjaminBossan
- Maintained examples: @muellerzr or @pacman100

-->
@@ -21,44 +21,40 @@ jobs:

version-cpu:
name: "Latest Accelerate CPU [version]"
runs-on: ubuntu-latest
runs-on: [self-hosted, docker-gpu, multi-gpu]
needs: get-version
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Check out code
uses: actions/checkout@v2
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}

- name: Build and Push CPU
uses: docker/build-push-action@v2
uses: docker/build-push-action@v4
with:
context: ./docker/accelerate-cpu
file: docker/accelerate-cpu/Dockerfile
push: true
tags: huggingface/accelerate-cpu:${{needs.get-version.outputs.version}}

version-cuda:
name: "Latest Accelerate GPU [version]"
runs-on: ubuntu-latest
runs-on: [self-hosted, docker-gpu, multi-gpu]
needs: get-version
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Check out code
uses: actions/checkout@v2
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}

- name: Build and Push GPU
uses: docker/build-push-action@v2
uses: docker/build-push-action@v4
with:
context: ./docker/accelerate-gpu
file: docker/accelerate-gpu/Dockerfile
push: true
tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}
tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}
38 .github/workflows/build_docker_images.yml (vendored)
@@ -11,44 +11,50 @@ concurrency:
cancel-in-progress: false

jobs:
clean-storage:
name: "Clean docker image storage"
runs-on: [self-hosted, docker-gpu, multi-gpu]
steps:
- name: Clean storage
run: |
docker image prune --all -f --filter "until=48h"
docker system prune --all -f --filter "until=48h"

latest-cpu:
name: "Latest Accelerate CPU [dev]"
runs-on: ubuntu-latest
runs-on: [self-hosted, docker-gpu, multi-gpu]
needs: clean-storage
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Check out code
uses: actions/checkout@v2
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}

- name: Build and Push CPU
uses: docker/build-push-action@v2
uses: docker/build-push-action@v4
with:
context: ./docker/accelerate-cpu
file: docker/accelerate-cpu/Dockerfile
push: true
tags: huggingface/accelerate-cpu

latest-cuda:
name: "Latest Accelerate GPU [dev]"
runs-on: ubuntu-latest
runs-on: [self-hosted, docker-gpu, multi-gpu]
needs: clean-storage
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Check out code
uses: actions/checkout@v2
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}

- name: Build and Push GPU
uses: docker/build-push-action@v2
uses: docker/build-push-action@v4
with:
context: ./docker/accelerate-gpu
file: docker/accelerate-gpu/Dockerfile
push: true
tags: huggingface/accelerate-gpu
tags: huggingface/accelerate-gpu
1 .github/workflows/build_documentation.yml (vendored)
@@ -14,5 +14,4 @@ jobs:
commit_sha: ${{ github.sha }}
package: accelerate
secrets:
token: ${{ secrets.HUGGINGFACE_PUSH }}
hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
3 .github/workflows/nightly.yml (vendored)
@@ -39,6 +39,7 @@ jobs:
make test

- name: Run examples on GPUs
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
@@ -79,11 +80,13 @@ jobs:
make test_cli

- name: Run Integration tests on GPUs
if: always()
run: |
source activate accelerate
make test_integrations

- name: Run examples on GPUs
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
3 .github/workflows/run_merge_tests.yml (vendored)
@@ -35,10 +35,12 @@ jobs:
make test_cli

- name: Run test on GPUs
if: always()
run: |
source activate accelerate
make test
- name: Run examples on GPUs
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
@@ -74,6 +76,7 @@ jobs:
make test

- name: Run examples on GPUs
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
@@ -51,9 +51,9 @@ jobs:
run: |
source activate accelerate
git config --global --add safe.directory '*'
git checkout main && git pull
git checkout main && git pull && git fetch --tags
if [[ ${{ matrix.transformers-version }} = pypi ]]; then
git checkout $(git describe --tags `git rev-list --tags --max-count=1`)
git checkout $(git tag --sort=taggerdate | tail -1)
fi
pip install .[torch,deepspeed-testing]

@@ -76,6 +76,7 @@ jobs:
env:
CUDA_VISIBLE_DEVICES: ${{ matrix.cuda_visible_devices }}
WANDB_DISABLED: true
if: always()
run: |
source activate accelerate;
pytest -sv tests/deepspeed
@@ -106,7 +107,7 @@ jobs:
run: |
source activate accelerate
git config --global --add safe.directory '*'
git checkout main && git pull
git checkout master && git pull
if [[ ${{ matrix.skorch-version }} = pypi ]]; then
git checkout $(git describe --tags `git rev-list --tags --max-count=1`)
fi
@@ -23,6 +23,8 @@
title: Example Zoo
- local: usage_guides/big_modeling
title: How to perform inference on large models with small resources
- local: usage_guides/model_size_estimator
title: Knowing how big of a model you can fit into memory
- local: usage_guides/quantization
title: How to quantize model
- local: usage_guides/distributed_inference
@@ -53,6 +55,8 @@
title: How to use 🤗 Accelerate with Intel® Extension for PyTorch for cpu
title: How-To Guides
- sections:
- local: concept_guides/internal_mechanism
title: 🤗 Accelerate's internal mechanism
- local: concept_guides/big_model_inference
title: Loading big models into memory
- local: concept_guides/performance
@@ -153,6 +153,15 @@ the below example enabling unbuffered stdout and stderr:
python -u -m accelerate.commands.launch --num_processes=2 {script_name.py} {--arg1} {--arg2}
```

<Tip>

You can run your code on CPU as well! This is helpful for debugging and testing purposes on toy models and datasets.

```bash
accelerate launch --cpu {script_name.py} {--arg1} {--arg2}
```

</Tip>

## Why you should always use `accelerate config`

@@ -200,3 +209,24 @@ Launching a script from the location of that custom yaml file looks like the fol

```bash
accelerate launch --config_file {path/to/config/my_config_file.yaml} {script_name.py} {--arg1} {--arg2} ...
```

## Multi-node training
Multi-node training with 🤗Accelerate is similar to [multi-node training with torchrun](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). The simplest way to launch a multi-node training run is to do the following:

- Copy your codebase and data to all nodes (or place them on a shared filesystem).
- Set up your Python packages on all nodes.
- Run `accelerate config` on the main node first. After specifying the number of nodes, you will be asked to specify the rank of each node (this will be 0 for the main/master node), along with the IP address and port for the main process. This is required for the worker nodes to communicate with the main process. Afterwards, copy or send this config file to all of your nodes, changing the `machine_rank` to 1, 2, 3, etc. so you don't have to re-run `accelerate config` on each node (or simply follow the launch instructions for `torchrun` directly instead).

Once you have done this, you can start your multi-node training run by running `accelerate launch` (or `torchrun`) on all nodes.

<Tip>
It is required that the command be run on all nodes for everything to start, not just running it from the main node. You can use something like SLURM or a different process executor to wrap around this requirement and call everything from a single command.
</Tip>

<Tip>

It is recommended to use the intranet IP of your main node over the public IP for better latency. This is the `192.168.x.x` or the `172.x.x.x` address you see when you run `hostname -I` on the main node.

</Tip>

To get a better idea about multi-node training, check out our example for [multi-node training with FSDP](https://huggingface.co/blog/ram-efficient-pytorch-fsdp).
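As an illustration of the per-node commands (the IP address, port, script name, and GPU counts below are placeholders), launching on two nodes with 8 GPUs each looks roughly like this:

```bash
# On the main node (machine_rank 0); --num_processes is the total number of processes across all machines
accelerate launch --num_machines 2 --machine_rank 0 --num_processes 16 \
    --main_process_ip 192.168.1.2 --main_process_port 29500 train.py

# On the second node, the same command with only the rank changed
accelerate launch --num_machines 2 --machine_rank 1 --num_processes 16 \
    --main_process_ip 192.168.1.2 --main_process_port 29500 train.py
```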
@@ -401,6 +401,26 @@ args = ("fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=2)
```

In the case of running on multiple nodes, you need to set up a Jupyter session at each node and run the launching cell at the same time.

For an environment containing 2 nodes (computers) with 8 GPUs each and the main computer with an IP address of "172.31.43.8", it would look like so:

```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=0, num_nodes=2, num_processes=8)
```

And in the second Jupyter session on the other machine:

<Tip>

Notice how the `node_rank` has changed

</Tip>

```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=1, num_nodes=2, num_processes=8)
```

In the case of running on the TPU, it would look like so:
@@ -108,3 +108,23 @@ with accelerator.main_process_first():
remove_columns=["idx", "sentence1", "sentence2"],
)
```

## Applying checks such as Early Stopping

To have a check that works with a flag set by a particular process, the `set_trigger` and `check_trigger` API should be used. Useful examples
for doing so can include situations such as using early stopping and monitoring the loss (as each loss slightly differs on each process).

Call [`Accelerator.set_trigger`] when your condition has been met, and [`Accelerator.check_trigger`] when checking if that condition has been met in any process:

```python
for (x, y) in data_loader:
    logits = model(x)
    loss = loss_func(logits, y)
    # Assume `should_do_early_stopping` is a custom defined function that returns a conditional
    if should_do_early_stopping(loss):
        accelerator.set_trigger()

    # Later in the training script when we need to check for the breakpoint
    if accelerator.check_trigger():
        break
```
72 docs/source/concept_guides/internal_mechanism.md (new file)
@@ -0,0 +1,72 @@

<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# 🤗 Accelerate's internal mechanisms

Internally, 🤗 Accelerate works by first analyzing the environment in which the script is launched to determine which
kind of distributed setup is used, how many different processes there are and which one the current script is in. All
that information is stored in the [`~AcceleratorState`].

This class is initialized the first time you instantiate an [`~Accelerator`] as well as performing any
specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of
[`~state.AcceleratorState`]. (The same can also be done with the [`PartialState`], a more barebones version that it inherits from.)

Then, when calling [`~Accelerator.prepare`], the library:

- wraps your model(s) in the container adapted for the distributed setup,
- wraps your optimizer(s) in an [`~optimizer.AcceleratedOptimizer`],
- wraps your scheduler(s) in an [`~scheduler.AcceleratedScheduler`]
- creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`] or [`~data_loader.DataLoaderDispatcher`]

While the model(s), optimizer(s), and scheduler(s) are just put in simple wrappers, the dataloader(s) are re-created. This is mostly
because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created and the
library handles the sharding of your data between processes by changing that `batch_sampler` to yield every other
`num_processes` batches (if enabled).

The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality:

- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any
randomization (like shuffling) is done the exact same way across processes.
- it puts the batches on the proper device before yielding them (unless you have opted out of
`device_placement=True`).

The [`~data_loader.DataLoaderDispatcher`] subclass differs from the [`~data_loader.DataLoaderShard`] in that when iterating through the `DataLoader`, the data all starts from process 0 and is *then* split and sent off to each process rather than it happening at the dataset level.

The random number generator synchronization will by default synchronize:

- the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6
- the main random number generator in PyTorch <=1.5.1

You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main
[`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid
setting the same seed in the main random number generator in all processes.

<Tip warning={true}>

Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random
artifacts you could have in your dataset (like random data augmentation) in the sense that all processes will get
the same random numbers from the torch random modules (so will apply the same random data augmentation if it's
controlled by torch).

</Tip>

<Tip>

The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local
`torch.Generator` object (in PyTorch >= 1.6), see the traditional `RandomSampler` as an example.

</Tip>

For more details about the internals, see the [Internals page](package_reference/torch_wrappers).
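To make the flow above concrete, here is a minimal runnable sketch (the toy model, optimizer, and dataloader are illustrative) showing what [`~Accelerator.prepare`] hands back:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()  # reads the launch environment into AcceleratorState

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
dataloader = DataLoader(TensorDataset(torch.randn(64, 10)), batch_size=8, shuffle=True)

# The model may come back wrapped for the distributed setup, the optimizer as an
# AcceleratedOptimizer, and the dataloader re-created as a DataLoaderShard/DataLoaderDispatcher.
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

print(type(dataloader))  # e.g. accelerate.data_loader.DataLoaderShard on a single process
```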
@@ -228,6 +228,36 @@ The following arguments are only useful when training in SageMaker
* `--aws_access_key_id AWS_ACCESS_KEY_ID` (`str`) -- The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job
* `--aws_secret_access_key AWS_SECRET_ACCESS_KEY` (`str`) -- The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job

## accelerate estimate-memory

**Command**:

`accelerate estimate-memory` or `accelerate-estimate-memory` or `python -m accelerate.commands.estimate`

Estimates the total vRAM a particular model hosted on the Hub needs to be loaded in with an estimate for training. Requires that `huggingface_hub` be installed.

<Tip>

When performing inference, typically add ≤20% to the result as overall allocation [as referenced here](https://blog.eleuther.ai/transformer-math/). We will have more extensive estimations in the future that will automatically be included in the calculation.

</Tip>

**Usage**:

```bash
accelerate estimate-memory {MODEL_NAME} --library_name {LIBRARY_NAME} --dtypes {dtype_1} {dtype_2} ...
```

**Required Arguments**:

* `MODEL_NAME` (`str`) -- The model name on the Hugging Face Hub

**Optional Arguments**:

* `--library_name {timm,transformers}` (`str`) -- The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub
* `--dtypes {float32,float16,int8,int4}` (`[{float32,float16,int8,int4} ...]`) -- The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`
* `--trust_remote_code` (`bool`) -- Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be passed for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.
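For instance, to estimate the memory needed for `bert-base-cased` (the model id here is just an illustration; any Hub model works) in float32 and float16:

```bash
accelerate estimate-memory bert-base-cased --library_name transformers --dtypes float32 float16
```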
## accelerate tpu-config

`accelerate tpu-config`
@@ -15,13 +15,20 @@ rendered properly in your Markdown viewer.

# Quick tour

Let's have a look at the 🤗 Accelerate main features and traps to avoid.
This guide aims to help you get started with 🤗 Accelerate quickly. It covers the essential steps you need to take to
enable distributed training, as well as the adjustments that you need to make in some common scenarios.

## Main use
To help you navigate, the guide is split into two sections:
* [Getting Started with 🤗 Accelerate](#getting-started-with--accelerate): start here to learn how to modify your script to enable distributed training with 🤗 Accelerate
* [Common adaptations to the base case](#common-adaptations-to-the-base-case): check out this section for common deviations from the baseline scenario and what adjustments may need to be made to support them.

To use 🤗 Accelerate in your own script, you have to change four things:
## Getting started with 🤗 Accelerate

1. Import the [`Accelerator`] main class and instantiate one in an `accelerator` object:
### Enable distributed training in your script

To use 🤗 Accelerate in your own training script, you have to modify four things:

1. Import the [`Accelerator`] main class and instantiate one in an `accelerator` object.

```python
from accelerate import Accelerator
@@ -29,27 +36,27 @@ from accelerate import Accelerator
accelerator = Accelerator()
```

This should happen as early as possible in your training script as it will initialize everything necessary for
distributed training. You don't need to indicate the kind of environment you are in (just one machine with a GPU, one
machines with several GPUs, several machines with multiple GPUs or a TPU), the library will detect this automatically.
Add this at the beginning of your training script as it will initialize everything necessary for distributed training.
You don't need to indicate the kind of environment you are in (a single machine with a GPU, a machine with several GPUs,
or several machines with multiple GPUs or a TPU), the library will detect this automatically.

2. Remove the call `.to(device)` or `.cuda()` for your model and input data. The `accelerator` object
will handle this for you and place all those objects on the right device for you. If you know what you're doing, you
can leave those `.to(device)` calls but you should use the device provided by the `accelerator` object:
`accelerator.device`.
2. Remove the `.to(device)` or `.cuda()` calls for your model and input data.

To fully deactivate the automatic device placement, pass along `device_placement=False` when initializing your
[`Accelerator`].
The `accelerator` object will handle placing these objects on the right device for you.
If you choose to leave those `.to(device)` calls, make sure to use the device provided by the `accelerator` object: `accelerator.device`.

<Tip warning={true}>

If you place your objects manually on the proper device, be careful to create your optimizer after putting your
You can fully deactivate the automatic device placement by passing along `device_placement=False` when
initializing the [`Accelerator`].
However, if you place your objects manually on the proper device, be careful to create your optimizer after putting your
model on `accelerator.device` or your training will fail on TPU.

</Tip>
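As a minimal sketch of relying on `accelerator.device` (the tensor and model here are placeholders):

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()

model = torch.nn.Linear(8, 2)
batch = torch.randn(4, 8)

# Instead of model.cuda() or batch.to("cuda"), use the device Accelerate selected
model = model.to(accelerator.device)
batch = batch.to(accelerator.device)
```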
3. Pass all objects relevant to training (optimizer, model, training dataloader, learning rate scheduler) to the
[`~Accelerator.prepare`] method. This will make sure everything is ready for training.
3. Pass all PyTorch objects relevant to training (optimizer, model, dataloader(s), learning rate scheduler) to the
[`~Accelerator.prepare`] method as soon as these objects are created, before starting your actual
training loop:

```python
model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
@@ -57,60 +64,42 @@ model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
)
```

In particular, your training dataloader will be sharded across all GPUs/TPU cores available so that each one sees a
different portion of the training dataset. Also, the random states of all processes will be synchronized at the
beginning of each iteration through your dataloader, to make sure the data is shuffled the same way (if you decided to
use `shuffle=True` or any kind of random sampler).
**Important notes**:

* You should always pass the learning rate scheduler to [`~Accelerator.prepare`], however if the scheduler should *not* be stepped at each optimization step, pass `step_with_optimizer=False` to the [`Accelerator`] init.
* While you can send your dataloader to [`~Accelerator.prepare`] on its own (and there are cases for doing so, such as distributed inference), it's best to send it to [`~Accelerator.prepare`] together with the model and optimizer.
* If you wish to run distributed evaluation, send your validation dataloader to [`~Accelerator.prepare`] as well. There are some nuances to distributed validation, check the [Distributed evaluation](#add-distributed-evaluation) section of the guide.
* Any instruction using your training dataloader length (for instance if you want to log the number of total training
steps) should go after the call to [`~Accelerator.prepare`].

Passing `DataLoader` objects to the [`~Accelerator.prepare`] method ensures that your dataloader will be sharded across
all GPUs/TPU cores available so that each one sees a different portion of the training dataset. In other words, if there are 8 processes and a dataset of 64 items, each process will see 8 of these items per iteration. Also, the random states
of all processes will be synchronized at the beginning of each iteration through your dataloader, to make sure the data
is shuffled the same way (if you decided to use `shuffle=True` or any kind of random sampler).

<Tip>

The actual batch size for your training will be the number of devices used multiplied by the batch size you set in
your script: for instance training on 4 GPUs with a batch size of 16 set when creating the training dataloader will
train at an actual batch size of 64.

</Tip>

Alternatively, you can use the option `split_batches=True` when creating and initializing your
[`Accelerator`], in which case the batch size will always stay the same, whether you run your
script on 1, 2, 4, or 64 GPUs.

You should execute this instruction as soon as all objects for training are created, before starting your actual
training loop.

<Tip warning={true}>

You should only pass the learning rate scheduler to [`~Accelerator.prepare`] when the scheduler needs to be stepped
at each optimizer step.

</Tip>

<Tip warning={true}>

your script. For instance, training on 4 GPUs with a batch size of 16 set when creating the training dataloader will
train at an actual batch size of 64 (4 * 16).
If you want the batch size to remain the same regardless of how many GPUs the script is run on, you can use the
option `split_batches=True` when creating and initializing [`Accelerator`].
Your training dataloader may change length when going through this method: if you run on X GPUs, it will have its
length divided by X (since your actual batch size will be multiplied by X), unless you set
`split_batches=True`.

</Tip>

Any instruction using your training dataloader length (for instance if you want to log the number of total training
steps) should go after the call to [`~Accelerator.prepare`].

You can perfectly send your dataloader to [`~Accelerator.prepare`] on its own, but it's best to send the
model and optimizer to [`~Accelerator.prepare`] together.

You may or may not want to send your validation dataloader to [`~Accelerator.prepare`], depending on
whether you want to run distributed evaluation or not (see below).

4. Replace the line `loss.backward()` by `accelerator.backward(loss)`.
4. Replace the `loss.backward()` line with `accelerator.backward(loss)`.

And you're all set! With all these changes, your script will run on your local machine as well as on multiple GPUs or a
TPU! You can either use your favorite tool to launch the distributed training, or you can use the 🤗 Accelerate
launcher.
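Putting the four changes together, a complete (if simplified) training script looks roughly like the sketch below; the toy model, data, and hyperparameters are placeholders for your own:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(256, 10), torch.randint(0, 2, (256,)))
train_dataloader = DataLoader(dataset, batch_size=16, shuffle=True)

model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)

for epoch in range(3):
    for inputs, targets in train_dataloader:
        optimizer.zero_grad()
        loss = torch.nn.functional.cross_entropy(model(inputs), targets)
        accelerator.backward(loss)  # replaces loss.backward()
        optimizer.step()
```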
### Add distributed evaluation

## Distributed evaluation

You can perform regular evaluation in your training script, if you leave your validation dataloader out of the
You can perform regular evaluation in your training script if you leave your validation dataloader out of the
[`~Accelerator.prepare`] method. In this case, you will need to put the input data on the
`accelerator.device` manually.

@@ -121,9 +110,9 @@ method:
validation_dataloader = accelerator.prepare(validation_dataloader)
```

As for your training dataloader, it will mean that (should you run your script on multiple devices) each device will
only see part of the evaluation data. This means you will need to group your predictions together. This is very easy to
do with the [`~Accelerator.gather_for_metrics`] method.
Same as with your training dataloader, each device will only see part of the evaluation data should you run your script
on multiple devices. This means you will need to group your predictions together which you can do with
the [`~Accelerator.gather_for_metrics`] method.

```python
for inputs, targets in validation_dataloader:
```
@@ -142,11 +131,9 @@ for inputs, targets in validation_dataloader:

</Tip>

Any instruction using your training dataloader length (for instance if you need the number of total training steps
to create a learning rate scheduler) should go after the call to [`~Accelerator.prepare`].

Some data at the end of the dataset may be duplicated so the batch can be divided equally among all workers. As a result, metrics
should be calculated through the [`~Accelerator.gather_for_metrics`] method to automatically remove the duplicated data while gathering.
Some data at the end of the dataset may be duplicated so the batch can be divided equally among all workers. As a result,
metrics should be calculated through the [`~Accelerator.gather_for_metrics`] method to automatically remove the duplicated
data while gathering and provide a more accurate metric.

<Tip>

@@ -165,36 +152,35 @@ should be calculated through the [`~Accelerator.gather_for_metrics`] method to a

</Tip>

## Launching your distributed script
### Launch your distributed script

You can use the regular commands to launch your distributed training (like `torch.distributed.run` for
PyTorch), they are fully compatible with 🤗 Accelerate.
PyTorch) - they are fully compatible with 🤗 Accelerate.

🤗 Accelerate also provides a CLI tool that unifies all launchers, so you only have to remember one command. To use it,
just run:
Alternatively, 🤗 Accelerate provides a CLI tool that unifies all launchers, so you only have to remember one command. \
To use it, run a quick configuration setup first on your machine and answer the questions:

```bash
accelerate config
```

on your machine and reply to the questions asked. This will save a *default_config.yaml* file in your cache folder for
🤗 Accelerate. That cache folder is (with decreasing order of priority):
At the end of the setup, a *default_config.yaml* file will be saved in your cache folder for 🤗 Accelerate. That cache
folder is (with decreasing order of priority):

- The content of your environment variable `HF_HOME` suffixed with *accelerate*.
- If it does not exist, the content of your environment variable `XDG_CACHE_HOME` suffixed with
*huggingface/accelerate*.
- If this does not exist either, the folder *~/.cache/huggingface/accelerate*
- If this does not exist either, the folder *~/.cache/huggingface/accelerate*.

You can also specify with the flag `--config_file` the location of the file you want to save.

Once this is done, you can test everything is going well on your setup by running:
By specifying the `--config_file` flag you can specify an alternative location of the configuration file.
Once the configuration setup is complete, you can test your setup by running:

```bash
accelerate test
```

This will launch a short script that will test the distributed environment. If it runs fine, you are ready for the next
step!
This will launch a short script that will test the distributed environment. If it runs without issues, you are ready for
the next step!

Note that if you specified a location for the config file in the previous step, you need to pass it here as well:

@@ -214,19 +200,23 @@ If you stored the config file in a non-default location, you can indicate it to
accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script
```

You can also override any of the arguments determined by your config file.
To see the complete list of parameters that you can pass in, run `accelerate launch -h`.
You can override any of the arguments determined by your config file. To see the complete list of parameters that you
can pass in, run `accelerate launch -h`. (And further niche argument help by passing in partial commands, such as `accelerate launch --multi_gpu -h` for all `multi_gpu` args)

Check out the [Launch tutorial](basic_tutorials/launch) for more information about launching your scripts.
Check out the [Launch tutorial](basic_tutorials/launch) for more information about launching your scripts.

## Common modifications of the base case

## Launching training from a notebook
The previous section covers the minimal essential steps to move a training script into a distributed setup with 🤗 Accelerate.
Here we describe common modifications/deviations from the base case scenario and the adjustments you need to make to accommodate them.

In Accelerate 0.3.0, a new [`notebook_launcher`] has been introduced to help you launch your training
function from a notebook. This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training
on several GPUs (if the machine on which you are running your notebook has them).
### Launch distributed training from a notebook

Just define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a
Accelerate has a [`notebook_launcher`] to help you launch your training function from a
notebook. This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training on several GPUs and machines
(if the machine on which you are running your notebook has them).

Define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a
cell with the following code:

@@ -242,10 +232,9 @@ notebook_launcher(training_function)

</Tip>

Check out the [Notebook Launcher tutorial](basic_tutorials/notebook) for more information about training on TPUs.
Check out the [Notebook Launcher tutorial](basic_tutorials/notebook) for more information about training on TPUs.

## Training on TPU
### Specifics of training on TPU

If you want to launch your script on TPUs, there are a few caveats you should be aware of. Behind the scenes, the TPUs
will create a graph of all the operations happening in your training step (forward pass, backward pass and optimizer
@@ -284,12 +273,7 @@ passed your model to [`~Accelerator.prepare`]) will break the tying. You will ne
after. You can find an example of this in the [run_clm_no_trainer](https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py) script in
the Transformers repository.

Check out the [TPU tutorial](concept_guides/training_tpu) for more information about training on TPUs.

## Other caveats

We list here all smaller issues you could have in your script conversion and how to resolve them.
Check out the [TPU tutorial](concept_guides/training_tpu) for more information about training on TPUs.

### Execute a statement only on one process

@@ -323,14 +307,14 @@ For printing statements you only want executed once per machine, you can just re
`accelerator.print`.

### Defer execution
### Defer execution on multiple GPUs

When you run your usual script, instructions are executed in order. Using 🤗 Accelerate to deploy your script on several
GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be
faster than others.

You might need to wait for all processes to have reached a certain point before executing a given instruction. For
instance, you shouldn't save a model before being sure every process is done with training. To do this, just write the
instance, you shouldn't save a model before making sure every process is done with training. To do this, add the
following line in your code:
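As a minimal sketch of this synchronization point (reusing the `accelerator` and `model` objects from earlier; the save directory is a placeholder), waiting for every process before saving looks like:

```python
# Block the processes that arrive here first until every other process has caught up
accelerator.wait_for_everyone()

# Now it is safe to save the model (see the saving section below)
accelerator.save_model(model, "my_checkpoint")
```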
@@ -341,7 +325,7 @@ This instruction will block all the processes that arrive first until all the ot
point (if you run your script on just one GPU or CPU, this won't do anything).

### Saving/loading a model
### Save/load a model in a distributed setup

Saving the model you trained might need a bit of adjustment: first you should wait for all processes to reach that
point in the script as shown above, and then, you should unwrap your model before saving it. This is because when going
@@ -349,15 +333,16 @@ through the [`~Accelerator.prepare`] method, your model may have been placed ins
which deals with the distributed training. This in turn means that saving your model state dictionary without taking
any precaution will take that potential extra layer into account, and you will end up with weights you can't load back
in your base model. The [`~Accelerator.save_model`] method will help you to achieve that. It will unwrap your model and save
the model state dictionnary.
the model state dictionary.

Here is an example:

```
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory)
```
The [`~Accelerator.save_model`] method can also save a model into sharded checkpoints or with safetensors format.
Here is an example:

The [`~Accelerator.save_model`] method can also save a model into sharded checkpoints or with safetensors format:

```python
accelerator.wait_for_everyone()
```
@@ -376,15 +361,18 @@ unwrapped_model.load_state_dict(torch.load(path_to_checkpoint))

Note that since all the model parameters are references to tensors, this will load your weights inside `model`.

If you want to load a sharded checkpoint or a checkpoint with safetensors format into the model with a specific `device`, we recommend you to load it with the [`~utils.load_checkpoint_in_model`] function. Here's an example:
If you want to load a sharded checkpoint or a checkpoint with safetensors format into the model with a specific `device`,
we recommend you to load it with the [`~utils.load_checkpoint_in_model`] function. Here's an example:

```python
load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"":device})
```

## Saving/loading entire states

When training your model, you may want to save the current state of the model, optimizer, random generators, and potentially LR schedulers to be restored in the _same script_.
### Save/load entire states

When training your model, you may want to save the current state of the model, optimizer, random generators, and potentially
learning rate schedulers to be restored in the _same script_.
You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so.

To further customize where and how states are saved through [`~Accelerator.save_state`], the [`~utils.ProjectConfiguration`] class can be used. For example:
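As a minimal sketch of the basic flow with the default configuration (the directory name is a placeholder; `ProjectConfiguration` options are described in the API reference), saving and restoring looks like:

```python
# Save model, optimizer, RNG states and any registered schedulers in one call
accelerator.save_state("my_training_state")

# ... later in the same script, restore everything to resume training
accelerator.load_state("my_training_state")
```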
@@ -399,19 +387,19 @@ If you have registered any other stateful items to be stored through [`~Accelera

</Tip>

### Gradient clipping
### Use gradient clipping

If you are using gradient clipping in your script, you should replace the calls to
`torch.nn.utils.clip_grad_norm_` or `torch.nn.utils.clip_grad_value_` with [`~Accelerator.clip_grad_norm_`]
and [`~Accelerator.clip_grad_value_`] respectively.
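For example, a short sketch of clipping inside the training loop from earlier (the maximum norm of 1.0 is arbitrary):

```python
accelerator.backward(loss)
# Replaces torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
optimizer.zero_grad()
```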
### Mixed Precision training
### Train with mixed precision

If you are running your training in Mixed Precision with 🤗 Accelerate, you will get the best result with your loss being
computed inside your model (like in Transformer models for instance). Every computation outside of the model will be
executed in full precision (which is generally what you want for loss computation, especially if it involves a
softmax). However you might want to put your loss computation inside the [`~Accelerator.autocast`] context manager:
softmax). However, you might want to put your loss computation inside the [`~Accelerator.autocast`] context manager:

```
with accelerator.autocast():
@@ -432,7 +420,7 @@ if not accelerator.optimizer_step_was_skipped:
lr_scheduler.step()
```

### Gradient Accumulation
### Use gradient accumulation

To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a `gradient_accumulation_steps`.
This will also automatically ensure the gradients are synced or unsynced when on multi-device training, check if the step should
@@ -451,70 +439,3 @@ for input, label in training_dataloader:
scheduler.step()
optimizer.zero_grad()
```

### DeepSpeed

DeepSpeed support is experimental, so the underlying API will evolve in the near future and may have some slight
breaking changes. In particular, 🤗 Accelerate does not support DeepSpeed config you have written yourself yet, this
will be added in a next version.

<Tip warning={true}>

The [`notebook_launcher`] does not support the DeepSpeed integration yet.

</Tip>

## Internal mechanism

Internally, the library works by first analyzing the environment in which the script is launched to determine which
kind of distributed setup is used, how many different processes there are and which one the current script is in. All
that information is stored in the [`~AcceleratorState`].

This class is initialized the first time you instantiate an [`~Accelerator`] as well as performing any
specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of
[`~state.AcceleratorState`].

Then, when calling [`~Accelerator.prepare`], the library:

- wraps your model(s) in the container adapted for the distributed setup,
- wraps your optimizer(s) in a [`~optimizer.AcceleratedOptimizer`],
- creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`].

While the model(s) and optimizer(s) are just put in simple wrappers, the dataloader(s) are re-created. This is mostly
because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created and the
library handles the sharding of your data between processes by changing that `batch_sampler` to yield every other
`num_processes` batches.

The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality:

- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any
randomization (like shuffling) is done the exact same way across processes.
- it puts the batches on the proper device before yielding them (unless you have opted out of
`device_placement=True`).

The random number generator synchronization will by default synchronize:

- the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6
- the main random number generator in PyTorch <=1.5.1

You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main
[`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid
setting the same seed in the main random number generator in all processes.

<Tip warning={true}>

Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random
artifacts you could have in your dataset (like random data augmentation) in the sense that all processes will get
the same random numbers from the torch random modules (so will apply the same random data augmentation if it's
controlled by torch).

</Tip>

<Tip>

The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local
`torch.Generator` object (in PyTorch >= 1.6), see the traditional `RandomSampler`, as an example.

</Tip>

For more details about the internals, see the [Internals page](package_reference/torch_wrappers).
@@ -52,7 +52,7 @@ will attempt to fill all the space in your GPU(s), then loading them to the CPU,

<Tip>

For more details on designing your own device map, see this section of the [concept guide](../concept_guide/big_model_inference#desigining-a-device-map)
For more details on designing your own device map, see this section of the [concept guide](../concept_guide/big_model_inference#designing-a-device-map)

</Tip>

@@ -130,7 +130,7 @@ As a brief example, we will look at using `transformers` and loading in Big Scie
```py
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM("bigscience/T0pp", device_map="auto")
model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto")
```

After loading the model in, the initial steps from before to prepare a model have all been done and the model is fully
@@ -140,11 +140,11 @@ specifying the precision the model is loaded into as well, through the `torch_dt
```py
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM("bigscience/T0pp", device_map="auto", torch_dtype=torch.float16)
model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto", torch_dtype=torch.float16)
```

To learn more about this, check out the 🤗 Transformers documentation available [here](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading).

## Where to go from here

For a much more detailed look at big model inference, be sure to check out the [Conceptual Guide on it](../concept_guides/big_model_inference)
For a much more detailed look at big model inference, be sure to check out the [Conceptual Guide on it](../concept_guides/big_model_inference)
@@ -585,8 +585,10 @@ Mixed precision type: fp16

ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}}
```

**Note**: Remaining `"auto"` values are handled in `accelerator.prepare()` call as explained in point 2 of
**Note**:
1. Remaining `"auto"` values are handled in the `accelerator.prepare()` call, as explained in point 2 of
`Important code changes when using DeepSpeed Config File`.
2. The value passed via `Accelerator(gradient_accumulation_steps=k)` is only used when `gradient_accumulation_steps` is set to `auto` in the DeepSpeed config file. When using the DeepSpeed plugin, the plugin's value is used and it overwrites the value passed while creating the `Accelerator` object.
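As a small illustrative sketch of the second point (the plugin arguments shown here are assumptions and running this requires DeepSpeed to be installed), the value set on the plugin is the one DeepSpeed will actually use:

```python
from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin

# gradient_accumulation_steps set on the plugin overrides whatever would be passed to
# Accelerator(gradient_accumulation_steps=...), unless the config file leaves it as "auto"
deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=4)
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)
```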
## Saving and loading
@@ -37,14 +37,14 @@ for batch in dataloader:

<div class="block dark:hidden">
<iframe
src="https://muellerzr-accelerate-examples.hf.space?__theme=light"
src="https://hf-accelerate-accelerate-examples.hf.space?__theme=light"
width="850"
height="1600"
></iframe>
</div>
<div class="hidden dark:block">
<iframe
src="https://muellerzr-accelerate-examples.hf.space?__theme=dark"
src="https://hf-accelerate-accelerate-examples.hf.space?__theme=dark"
width="850"
height="1600"
></iframe>
@ -49,7 +49,7 @@ fsdp_config:
|
||||
fsdp_offload_params: false
|
||||
fsdp_sharding_strategy: 1
|
||||
fsdp_state_dict_type: FULL_STATE_DICT
|
||||
fsdp_transformer_layer_cls_to_wrap: GPT2Block
|
||||
fsdp_transformer_layer_cls_to_wrap: BertLayer
|
||||
machine_rank: 0
|
||||
main_process_ip: null
|
||||
main_process_port: null
|
||||
@@ -67,19 +67,38 @@ accelerate launch examples/nlp_example.py
Currently, `Accelerate` supports the following config through the CLI:

```bash
`Sharding Strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has full copy)

`Offload Params`: Decides whether to offload parameters and gradients to CPU

`Auto Wrap Policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP

`Transformer Layer Class to Wrap`: When using `TRANSFORMER_BASED_WRAP`, the user specifies a comma-separated string of transformer layer class names (case-sensitive) to wrap, e.g.,
`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput`...
This is important because submodules that share weights (e.g., the embedding layer) should not end up in different FSDP wrapped units.
Using this policy, wrapping happens for each block containing Multi-Head Attention followed by a couple of MLP layers.
Remaining layers, including the shared embeddings, are conveniently wrapped in the same outermost FSDP unit.
Therefore, use this for transformer-based models.
You can use `model._no_split_modules` for 🤗 Transformers models by answering `yes` to
`Do you want to use the model's `_no_split_modules` to wrap. Only applicable for 🤗 Transformers`.
It will try to use `model._no_split_modules` when available.

`Min Num Params`: minimum number of parameters when using `SIZE_BASED_WRAP`

`Backward Prefetch`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH

`State Dict Type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT

`Forward Prefetch`: If True, FSDP explicitly prefetches the next upcoming
all-gather while executing in the forward pass. Only use with static graphs.

`Use Orig Params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters.
Useful in cases such as parameter-efficient fine-tuning.
Please refer to this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019)

`CPU RAM Efficient Model loading`: If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. Only applicable for 🤗 Transformers models. This should be set to False if you experience errors when loading the pretrained 🤗 Transformers model via the `from_pretrained` method. When using this, `Sync Module States` needs to be True, else all the processes except the main process would have random empty weights, leading to unexpected behaviour during training.

`Sync Module States`: If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0
```

For additional and more nuanced control, you can specify other FSDP parameters via `FullyShardedDataParallelPlugin`.
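As a hedged illustration of that kind of fine-grained control, a plugin can be constructed in code along these lines (a minimal sketch; the specific state-dict settings shown are illustrative placeholders, not a recommendation from this guide):

```python
from torch.distributed.fsdp.fully_sharded_data_parallel import FullOptimStateDictConfig, FullStateDictConfig

from accelerate import Accelerator, FullyShardedDataParallelPlugin

# Sketch: override a couple of FSDP knobs in code rather than via the CLI config.
fsdp_plugin = FullyShardedDataParallelPlugin(
    state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),
    optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False),
)

accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```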
@@ -137,7 +156,7 @@ When using transformers `save_pretrained`, pass `state_dict=accelerator.get_stat
      args.output_dir,
      is_main_process=accelerator.is_main_process,
      save_function=accelerator.save,
+     state_dict=accelerator.get_state_dict(model, unwrap=False),
  )
```
137	docs/source/usage_guides/model_size_estimator.md	Normal file
@@ -0,0 +1,137 @@
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Understanding how big of a model can fit on your machine

One very difficult aspect when exploring potential models to use on your machine is knowing just how big of a model will *fit* into memory with your current graphics card (such as loading the model onto CUDA).

To help alleviate this, 🤗 Accelerate has a CLI interface through `accelerate estimate-memory`. This tutorial will
help walk you through using it, what to expect, and at the end link to the interactive demo hosted on the 🤗 Hub which will
even let you post those results directly on the model repo!

Currently we support searching for models that can be used in `timm` and `transformers`.

<Tip>

This API will load the model into memory on the `meta` device, so we are not actually downloading
and loading the full weights of the model into memory, nor do we need to. As a result it's
perfectly fine to measure 8 billion parameter models (or more), without having to worry about
whether your CPU can handle it!

</Tip>

## Gradio Demos

Below are a few gradio demos related to what was described above. The first is the official Hugging Face memory estimation space, utilizing Accelerate directly:

<div class="block dark:hidden">
	<iframe
		src="https://hf-accelerate-model-memory-usage.hf.space?__theme=light"
		width="850"
		height="1600"
	></iframe>
</div>
<div class="hidden dark:block">
	<iframe
		src="https://hf-accelerate-model-memory-usage.hf.space?__theme=dark"
		width="850"
		height="1600"
	></iframe>
</div>

A community member has taken the idea and expanded it further, allowing you to filter models directly and see if you can run a particular LLM given GPU constraints and LoRA configurations. To play with it, see [here](https://huggingface.co/spaces/Vokturz/can-it-run-llm) for more details.

## The Command

When using `accelerate estimate-memory`, you need to pass in the name of the model you want to use, potentially the framework
that the model utilizes (if it can't be found automatically), and the data types you want the model to be loaded in with.

For example, here is how we can calculate the memory footprint for `bert-base-cased`:

```bash
accelerate estimate-memory bert-base-cased
```

This will download the `config.json` for `bert-base-cased`, load the model on the `meta` device, and report back how much space
it will use:

Memory Usage for loading `bert-base-cased`:

| dtype   | Largest Layer | Total Size | Training using Adam |
|---------|---------------|------------|---------------------|
| float32 | 84.95 MB      | 418.18 MB  | 1.61 GB             |
| float16 | 42.47 MB      | 206.59 MB  | 826.36 MB           |
| int8    | 21.24 MB      | 103.29 MB  | 413.18 MB           |
| int4    | 10.62 MB      | 51.65 MB   | 206.59 MB           |

By default it will return all the supported dtypes (`int4` through `float32`), but if you are interested in specific ones these can be filtered.

### Specific libraries

If the source library cannot be determined automatically (as it could be in the case of `bert-base-cased`), a library name can
be passed in.

```bash
accelerate estimate-memory HuggingFaceM4/idefics-80b-instruct --library_name transformers
```

Memory Usage for loading `HuggingFaceM4/idefics-80b-instruct`:

| dtype   | Largest Layer | Total Size | Training using Adam |
|---------|---------------|------------|---------------------|
| float32 | 3.02 GB       | 297.12 GB  | 1.16 TB             |
| float16 | 1.51 GB       | 148.56 GB  | 594.24 GB           |
| int8    | 772.52 MB     | 74.28 GB   | 297.12 GB           |
| int4    | 386.26 MB     | 37.14 GB   | 148.56 GB           |

```bash
accelerate estimate-memory timm/resnet50.a1_in1k --library_name timm
```

Memory Usage for loading `timm/resnet50.a1_in1k`:

| dtype   | Largest Layer | Total Size | Training using Adam |
|---------|---------------|------------|---------------------|
| float32 | 9.0 MB        | 97.7 MB    | 390.78 MB           |
| float16 | 4.5 MB        | 48.85 MB   | 195.39 MB           |
| int8    | 2.25 MB       | 24.42 MB   | 97.7 MB             |
| int4    | 1.12 MB       | 12.21 MB   | 48.85 MB            |

### Specific dtypes

As mentioned earlier, while we return `int4` through `float32` by default, any of `float32`, `float16`, `int8`, and `int4` can be used.

To do so, pass them in after specifying `--dtypes`:

```bash
accelerate estimate-memory bert-base-cased --dtypes float32 float16
```

Memory Usage for loading `bert-base-cased`:

| dtype   | Largest Layer | Total Size | Training using Adam |
|---------|---------------|------------|---------------------|
| float32 | 84.95 MB      | 413.18 MB  | 1.61 GB             |
| float16 | 42.47 MB      | 206.59 MB  | 826.36 MB           |

## Caveats with this calculator

This calculator will tell you how much memory is needed to purely load the model in, *not* to perform inference.

This calculation is accurate within a few % of the actual value, so it is a very good view of just how much memory it will take. For instance, loading `bert-base-cased` actually takes `413.68 MB` when loaded on CUDA in full precision, and the calculator estimates `413.18 MB`.

When performing inference you can expect to add up to an additional 20%, as found by [EleutherAI](https://blog.eleuther.ai/transformer-math/). We'll be conducting research into finding a more accurate estimate of these values, and will update
this calculator once done.
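To make the 20% rule concrete, here is a tiny sketch that applies it to the `bert-base-cased` float32 estimate from the tables above (the flat 20% factor and the resulting number are rough, illustrative assumptions, not output of the tool):

```python
# Rough back-of-the-envelope: pure load size plus up to ~20% inference overhead.
load_size_mb = 413.18          # estimated float32 load size for bert-base-cased
inference_upper_mb = load_size_mb * 1.20
print(f"~{inference_upper_mb:.2f} MB")  # ≈ 495.82 MB as a ballpark upper bound
```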
@ -602,15 +602,22 @@ def main():
|
||||
resume_step -= starting_epoch * num_update_steps_per_epoch
|
||||
completed_steps = resume_step
|
||||
|
||||
# update progress bar if resumed from checkpoint
|
||||
progress_bar.update(completed_steps)
|
||||
|
||||
for epoch in range(starting_epoch, args.num_train_epochs):
|
||||
model.train()
|
||||
if args.with_tracking:
|
||||
total_loss = 0
|
||||
|
||||
# skip new `skip_first_batches` to skip the batches when resuming from ckpt
|
||||
if args.resume_from_checkpoint:
|
||||
train_dataloader = accelerator.skip_first_batches(train_dataloader, num_batches=resume_step)
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
|
||||
# We need to skip steps until we reach the resumed step
|
||||
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
|
||||
else:
|
||||
# After the first iteration though, we need to go back to the original dataloader
|
||||
active_dataloader = train_dataloader
|
||||
for step, batch in enumerate(active_dataloader):
|
||||
# In particular, DeepSpeed handles `gradient_accumulation` via `DeepSpeedEngine`.
|
||||
# Below, we use `accelerator.accumulate` if the user
|
||||
# wants to switch to other approaches such as plain DDP, PyTorch FSDP ...
|
||||
|
||||
246	examples/by_feature/early_stopping.py	Normal file
@@ -0,0 +1,246 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate
|
||||
# specifically showcasing how to perform early stopping,
|
||||
# and builds off the `nlp_example.py` script
|
||||
#
|
||||
# This example trains a Bert base model on GLUE MRPC
|
||||
# in any of the following settings (with the same script):
|
||||
# - single CPU or single GPU
|
||||
# - multi GPUS (using PyTorch distributed mode)
|
||||
# - (multi) TPUs
|
||||
# - fp16 (mixed-precision) or fp32 (normal precision)
|
||||
#
|
||||
# To run it in each of these various modes, follow the instructions
|
||||
# in the readme for examples:
|
||||
# https://github.com/huggingface/accelerate/tree/main/examples
|
||||
#
|
||||
########################################################################
|
||||
|
||||
|
||||
MAX_GPU_BATCH_SIZE = 16
|
||||
EVAL_BATCH_SIZE = 32
|
||||
|
||||
|
||||
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
"""
|
||||
Creates a set of `DataLoader`s for the `glue` dataset,
|
||||
using "bert-base-cased" as the tokenizer.
|
||||
|
||||
Args:
|
||||
accelerator (`Accelerator`):
|
||||
An `Accelerator` object
|
||||
batch_size (`int`, *optional*):
|
||||
The batch size for the train and validation DataLoaders.
|
||||
"""
|
||||
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
|
||||
datasets = load_dataset("glue", "mrpc")
|
||||
|
||||
def tokenize_function(examples):
|
||||
# max_length=None => use the model max length (it's actually the default)
|
||||
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
|
||||
return outputs
|
||||
|
||||
# Apply the method we just defined to all the examples in all the splits of the dataset
|
||||
# starting with the main process first:
|
||||
with accelerator.main_process_first():
|
||||
tokenized_datasets = datasets.map(
|
||||
tokenize_function,
|
||||
batched=True,
|
||||
remove_columns=["idx", "sentence1", "sentence2"],
|
||||
)
|
||||
|
||||
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
|
||||
# transformers library
|
||||
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
|
||||
)
|
||||
eval_dataloader = DataLoader(
|
||||
tokenized_datasets["validation"],
|
||||
shuffle=False,
|
||||
collate_fn=collate_fn,
|
||||
batch_size=EVAL_BATCH_SIZE,
|
||||
drop_last=(accelerator.mixed_precision == "fp8"),
|
||||
)
|
||||
|
||||
return train_dataloader, eval_dataloader
|
||||
|
||||
|
||||
# New code
|
||||
class EarlyStoppingCallback:
|
||||
"A callback class that helps with early stopping"
|
||||
|
||||
def __init__(self, min_delta=0, patience=5):
|
||||
self.min_delta = min_delta
|
||||
self.patience = patience
|
||||
self.counter = 0
|
||||
self.lowest_loss = float("inf")
|
||||
|
||||
def check_early_stopping(self, eval_loss):
|
||||
delta = self.lowest_loss - eval_loss
|
||||
if delta >= self.min_delta:
|
||||
self.lowest_loss = eval_loss
|
||||
self.counter = 0
|
||||
else:
|
||||
self.counter += 1
|
||||
if self.counter >= self.patience:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
callback = EarlyStoppingCallback()
|
||||
|
||||
|
||||
def training_function(config, args):
|
||||
# Initialize accelerator
|
||||
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
|
||||
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
|
||||
lr = config["lr"]
|
||||
num_epochs = int(config["num_epochs"])
|
||||
seed = int(config["seed"])
|
||||
batch_size = int(config["batch_size"])
|
||||
|
||||
metric = evaluate.load("glue", "mrpc")
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
set_seed(seed)
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
|
||||
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
|
||||
|
||||
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
|
||||
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
|
||||
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
|
||||
model = model.to(accelerator.device)
|
||||
# Instantiate optimizer
|
||||
optimizer = AdamW(params=model.parameters(), lr=lr)
|
||||
|
||||
# Instantiate scheduler
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=100,
|
||||
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
|
||||
)
|
||||
|
||||
# Prepare everything
|
||||
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
|
||||
# prepare method.
|
||||
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
|
||||
# Now we train the model
|
||||
for epoch in range(num_epochs):
|
||||
model.train()
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
loss = loss / gradient_accumulation_steps
|
||||
accelerator.backward(loss)
|
||||
if step % gradient_accumulation_steps == 0:
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
# New code
|
||||
# Check if we should stop the training on any processes
|
||||
if callback.check_early_stopping(loss.item()):
|
||||
accelerator.set_trigger()
|
||||
|
||||
# If so, we break the loop
|
||||
if accelerator.check_trigger():
|
||||
break
|
||||
|
||||
model.eval()
|
||||
for step, batch in enumerate(eval_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch)
|
||||
predictions = outputs.logits.argmax(dim=-1)
|
||||
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
|
||||
metric.add_batch(
|
||||
predictions=predictions,
|
||||
references=references,
|
||||
)
|
||||
|
||||
eval_metric = metric.compute()
|
||||
|
||||
# Use accelerator.print to print only on the main process.
|
||||
accelerator.print(f"epoch {epoch}:", eval_metric)
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Simple example of training script.")
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
)
|
||||
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
|
||||
args = parser.parse_args()
|
||||
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
|
||||
training_function(config, args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
41	setup.py
@@ -19,7 +19,9 @@ extras = {}
|
||||
extras["quality"] = ["black ~= 23.1", "ruff >= 0.0.241", "hf-doc-builder >= 0.3.0", "urllib3 < 2.0.0"]
|
||||
extras["docs"] = []
|
||||
extras["test_prod"] = ["pytest", "pytest-xdist", "pytest-subtests", "parameterized"]
|
||||
extras["test_dev"] = ["datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed", "tqdm", "bitsandbytes"]
|
||||
extras["test_dev"] = [
|
||||
"datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed", "tqdm", "bitsandbytes", "timm"
|
||||
]
|
||||
extras["testing"] = extras["test_prod"] + extras["test_dev"]
|
||||
extras["rich"] = ["rich"]
|
||||
|
||||
@ -32,7 +34,7 @@ extras["sagemaker"] = [
|
||||
|
||||
setup(
|
||||
name="accelerate",
|
||||
version="0.22.0.dev0",
|
||||
version="0.24.1",
|
||||
description="Accelerate",
|
||||
long_description=open("README.md", "r", encoding="utf-8").read(),
|
||||
long_description_content_type="text/markdown",
|
||||
@ -47,11 +49,12 @@ setup(
|
||||
"console_scripts": [
|
||||
"accelerate=accelerate.commands.accelerate_cli:main",
|
||||
"accelerate-config=accelerate.commands.config:main",
|
||||
"accelerate-estimate-memory=accelerate.commands.estimate:main",
|
||||
"accelerate-launch=accelerate.commands.launch:main",
|
||||
]
|
||||
},
|
||||
python_requires=">=3.8.0",
|
||||
install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.10.0"],
|
||||
install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.10.0", "huggingface_hub"],
|
||||
extras_require=extras,
|
||||
classifiers=[
|
||||
"Development Status :: 5 - Production/Stable",
|
||||
@ -67,21 +70,29 @@ setup(
|
||||
)
|
||||
|
||||
# Release checklist
|
||||
# 1. Change the version in __init__.py and setup.py.
|
||||
# 2. Commit these changes with the message: "Release: VERSION"
|
||||
# 3. Add a tag in git to mark the release: "git tag VERSION -m 'Adds tag VERSION for pypi' "
|
||||
# Push the tag to git: git push --tags origin main
|
||||
# 4. Run the following commands in the top-level directory:
|
||||
# 1. Checkout the release branch (for a patch the current release branch, for a new minor version, create one):
|
||||
# git checkout -b vXX.xx-release
|
||||
# The -b is only necessary for creation (so remove it when doing a patch)
|
||||
# 2. Change the version in __init__.py and setup.py to the proper value.
|
||||
# 3. Commit these changes with the message: "Release: v<VERSION>"
|
||||
# 4. Add a tag in git to mark the release:
|
||||
# git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi'
|
||||
# Push the tag and release commit to git: git push --tags origin vXX.xx-release
|
||||
# 5. Run the following commands in the top-level directory:
|
||||
# rm -rf dist
|
||||
# rm -rf build
|
||||
# python setup.py bdist_wheel
|
||||
# python setup.py sdist
|
||||
# 5. Upload the package to the pypi test server first:
|
||||
# twine upload dist/* -r pypitest
|
||||
# twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
|
||||
# 6. Check that you can install it in a virtualenv by running:
|
||||
# 6. Upload the package to the pypi test server first:
|
||||
# twine upload dist/* -r testpypi
|
||||
# 7. Check that you can install it in a virtualenv by running:
|
||||
# pip install accelerate
|
||||
# pip uninstall accelerate
|
||||
# pip install -i https://testpypi.python.org/pypi accelerate
|
||||
# accelerate env
|
||||
# accelerate test
|
||||
# 7. Upload the final version to actual pypi:
|
||||
# 8. Upload the final version to actual pypi:
|
||||
# twine upload dist/* -r pypi
|
||||
# 8. Add release notes to the tag in github once everything is looking hunky-dory.
|
||||
# 9. Update the version in __init__.py, setup.py to the new version "-dev" and push to master
|
||||
# 9. Add release notes to the tag in github once everything is looking hunky-dory.
|
||||
# 10. Go back to the main branch and update the version in __init__.py, setup.py to the new version ".dev" and push to
|
||||
# main.
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
__version__ = "0.22.0.dev0"
|
||||
__version__ = "0.24.1"
|
||||
|
||||
from .accelerator import Accelerator
|
||||
from .big_modeling import (
|
||||
|
||||
@ -16,6 +16,7 @@ from __future__ import annotations
|
||||
|
||||
import collections
|
||||
import contextlib
|
||||
import functools
|
||||
import json
|
||||
import math
|
||||
import os
|
||||
@ -62,11 +63,13 @@ from .utils import (
|
||||
ProjectConfiguration,
|
||||
RNGType,
|
||||
TorchDynamoPlugin,
|
||||
check_os_kernel,
|
||||
compare_versions,
|
||||
convert_model,
|
||||
convert_outputs_to_fp32,
|
||||
extract_model_from_parallel,
|
||||
gather,
|
||||
gather_object,
|
||||
get_mixed_precision_context_manager,
|
||||
get_pretty_name,
|
||||
has_transformer_engine_layers,
|
||||
@ -95,11 +98,10 @@ from .utils import (
|
||||
wait_for_everyone,
|
||||
)
|
||||
from .utils.constants import FSDP_PYTORCH_VERSION
|
||||
from .utils.other import is_compiled_module
|
||||
|
||||
|
||||
if is_deepspeed_available():
|
||||
import deepspeed
|
||||
|
||||
from .utils import (
|
||||
DeepSpeedEngineWrapper,
|
||||
DeepSpeedOptimizerWrapper,
|
||||
@ -135,6 +137,10 @@ if is_tpu_available(check_device=False):
|
||||
import torch_xla.distributed.xla_multiprocessing as xmp
|
||||
|
||||
|
||||
if is_npu_available(check_device=False):
|
||||
import torch_npu # noqa: F401
|
||||
|
||||
|
||||
try:
|
||||
from torch.optim.lr_scheduler import LRScheduler
|
||||
except ImportError:
|
||||
@ -259,6 +265,7 @@ class Accelerator:
|
||||
kwargs_handlers: list[KwargsHandler] | None = None,
|
||||
dynamo_backend: DynamoBackend | str | None = None,
|
||||
):
|
||||
self.trackers = []
|
||||
if project_config is not None:
|
||||
self.project_configuration = project_config
|
||||
else:
|
||||
@ -416,11 +423,10 @@ class Accelerator:
|
||||
if (
|
||||
self.state.mixed_precision == "fp16"
|
||||
and self.device.type != "cpu"
|
||||
and self.device.type != "xpu"
|
||||
and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM)
|
||||
):
|
||||
self.native_amp = True
|
||||
if self.device.type not in ("cuda", "mps", "npu"):
|
||||
if self.device.type not in ("xpu", "cuda", "mps", "npu"):
|
||||
raise ValueError(err.format(mode="fp16", requirement="a GPU"))
|
||||
kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}
|
||||
if self.distributed_type == DistributedType.FSDP:
|
||||
@ -462,6 +468,11 @@ class Accelerator:
|
||||
if self.rng_types is None:
|
||||
self.rng_types = ["generator"]
|
||||
|
||||
# Set a flag tensor for early stopping and other breakpoints
|
||||
self.flag_tensor = None
|
||||
|
||||
check_os_kernel()
|
||||
|
||||
@property
|
||||
def use_distributed(self):
|
||||
"""
|
||||
@ -1193,10 +1204,12 @@ class Accelerator:
|
||||
)
|
||||
|
||||
for obj in args:
|
||||
# TODO: Look at enabling native TP training directly with a proper config
|
||||
if (
|
||||
isinstance(obj, torch.nn.Module)
|
||||
and self.verify_device_map(obj)
|
||||
and self.distributed_type != DistributedType.NO
|
||||
and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true"
|
||||
):
|
||||
raise ValueError(
|
||||
"You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
|
||||
@ -1212,7 +1225,12 @@ class Accelerator:
|
||||
for obj in args:
|
||||
if isinstance(obj, torch.nn.Module):
|
||||
model_count += 1
|
||||
is_type_fsdp = type(obj) == FSDP
|
||||
# if the model is compiled using PyTorch 2.0,
|
||||
# check that the wrapped model is FSDP or not;
|
||||
# else check if it is FSDP or not;
|
||||
is_type_fsdp = isinstance(obj, FSDP) or (
|
||||
is_compiled_module(obj) and isinstance(obj._orig_mod, FSDP)
|
||||
)
|
||||
if isinstance(obj, torch.optim.Optimizer):
|
||||
optimizer_present = True
|
||||
if model_count > 1 and optimizer_present:
|
||||
@ -1328,7 +1346,12 @@ class Accelerator:
|
||||
device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP
|
||||
self._models.append(model)
|
||||
|
||||
if self.verify_device_map(model) and self.distributed_type != DistributedType.NO:
|
||||
# TODO: Look at enabling native TP training directly with a proper config
|
||||
if (
|
||||
self.verify_device_map(model)
|
||||
and self.distributed_type != DistributedType.NO
|
||||
and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true"
|
||||
):
|
||||
raise ValueError(
|
||||
"You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
|
||||
" Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
|
||||
@ -1363,7 +1386,7 @@ class Accelerator:
|
||||
elif device_placement and not self.verify_device_map(model):
|
||||
model = model.to(self.device)
|
||||
|
||||
if self.native_amp and self.distributed_type != DistributedType.FSDP:
|
||||
if self.native_amp:
|
||||
model._original_forward = model.forward
|
||||
model_forward_func = model.forward.__func__ if hasattr(model.forward, "__func__") else model.forward
|
||||
autocast_context = get_mixed_precision_context_manager(self.native_amp, self.autocast_handler)
|
||||
@ -1401,15 +1424,27 @@ class Accelerator:
|
||||
):
|
||||
if any(p.requires_grad for p in model.parameters()):
|
||||
kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
|
||||
# TODO: Look at enabling native TP training directly with a proper config
|
||||
if os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true":
|
||||
device_ids, output_device = [self.local_process_index], self.local_process_index
|
||||
else:
|
||||
device_ids, output_device = None, None
|
||||
|
||||
model = torch.nn.parallel.DistributedDataParallel(
|
||||
model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs
|
||||
model, device_ids=device_ids, output_device=output_device, **kwargs
|
||||
)
|
||||
elif self.distributed_type == DistributedType.FSDP:
|
||||
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
|
||||
|
||||
# Check if the model is already a FSDP model due to `Manual Wrapping` and if so,
|
||||
# don't wrap it again
|
||||
if type(model) != FSDP:
|
||||
# In case the model is already compiled using PyTorch 2.0 and the wrapped model in it
|
||||
# is a FSDP model, don't wrap it again
|
||||
is_type_fsdp = isinstance(model, FSDP) or (
|
||||
is_compiled_module(model) and isinstance(model._orig_mod, FSDP)
|
||||
)
|
||||
|
||||
if not is_type_fsdp:
|
||||
self.state.fsdp_plugin.set_auto_wrap_policy(model)
|
||||
fsdp_plugin = self.state.fsdp_plugin
|
||||
kwargs = {
|
||||
@ -1427,20 +1462,40 @@ class Accelerator:
|
||||
"device_id": self.device,
|
||||
}
|
||||
model = FSDP(model, **kwargs)
|
||||
if fsdp_plugin.activation_checkpointing:
|
||||
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
|
||||
CheckpointImpl,
|
||||
apply_activation_checkpointing,
|
||||
checkpoint_wrapper,
|
||||
)
|
||||
|
||||
apply_activation_checkpointing(
|
||||
model,
|
||||
checkpoint_wrapper_fn=functools.partial(
|
||||
checkpoint_wrapper,
|
||||
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
|
||||
),
|
||||
auto_wrap_policy=fsdp_plugin.auto_wrap_policy,
|
||||
)
|
||||
# if the previous and current models are same, delete the previous one
|
||||
if len(self._models) > 1 and (self._models[-2] is self._models[-1]):
|
||||
del self._models[-2]
|
||||
self._models[-1] = model
|
||||
elif self.distributed_type == DistributedType.MULTI_CPU:
|
||||
kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
|
||||
model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)
|
||||
elif self.distributed_type == DistributedType.TPU and self.state.fork_launched:
|
||||
model = xmp.MpModelWrapper(model).to(self.device)
|
||||
# torch.compile should be called last.
|
||||
if self.state.dynamo_plugin.backend != DynamoBackend.NO:
|
||||
# torch.compile should be called last and only if the model isn't already compiled.
|
||||
if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model):
|
||||
if not is_torch_version(">=", "2.0"):
|
||||
raise ValueError("Using `torch.compile` requires PyTorch 2.0 or higher.")
|
||||
model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs())
|
||||
return model
|
||||
|
||||
def _prepare_deepspeed(self, *args):
|
||||
import deepspeed
|
||||
|
||||
deepspeed_plugin = self.state.deepspeed_plugin
|
||||
|
||||
is_dataloader_present = any(isinstance(obj, torch.utils.data.DataLoader) for obj in args)
|
||||
@ -1477,12 +1532,13 @@ class Accelerator:
|
||||
batch_size_per_device = deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"]
|
||||
result = [obj for obj in args]
|
||||
|
||||
if self.gradient_accumulation_steps != deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]:
|
||||
logger.info(
|
||||
f"Updating DeepSpeed's gradient accumulation steps to {self.gradient_accumulation_steps} from "
|
||||
f"{deepspeed_plugin.deepspeed_config['gradient_accumulation_steps']}."
|
||||
)
|
||||
deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"] = self.gradient_accumulation_steps
|
||||
# handle `gradient_accumulation_steps` when the value is `auto`
|
||||
deepspeed_plugin.fill_match(
|
||||
"gradient_accumulation_steps",
|
||||
must_match=False,
|
||||
gradient_accumulation_steps=self.gradient_accumulation_steps,
|
||||
)
|
||||
|
||||
config_kwargs = {
|
||||
"train_micro_batch_size_per_gpu": batch_size_per_device,
|
||||
"train_batch_size": batch_size_per_device
|
||||
@ -1527,9 +1583,14 @@ class Accelerator:
|
||||
"Please remove the scheduler from the config file or "
|
||||
"create `accelerate.utils.DummyScheduler` in the code."
|
||||
)
|
||||
elif "scheduler" not in deepspeed_plugin.deepspeed_config and isinstance(scheduler, (DummyScheduler)):
|
||||
elif (
|
||||
"scheduler" not in deepspeed_plugin.deepspeed_config
|
||||
and isinstance(scheduler, (DummyScheduler))
|
||||
and scheduler.lr_scheduler_callable is None
|
||||
):
|
||||
raise ValueError(
|
||||
"You cannot create a `DummyScheduler` without specifying a scheduler in the config file."
|
||||
"Either specify a scheduler in the config file or "
|
||||
"pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`."
|
||||
)
|
||||
|
||||
if optimizer is not None and scheduler is not None:
|
||||
@ -1559,7 +1620,7 @@ class Accelerator:
|
||||
config_kwargs.update(
|
||||
{"optimizer.params.lr": optimizer.lr, "optimizer.params.weight_decay": optimizer.weight_decay}
|
||||
)
|
||||
if isinstance(scheduler, (DummyScheduler)):
|
||||
if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is None:
|
||||
max_lr = (
|
||||
getattr(scheduler.optimizer, "lr", None)
|
||||
if getattr(scheduler.optimizer, "defaults", None) is None
|
||||
@ -1584,6 +1645,8 @@ class Accelerator:
|
||||
if optimizer is not None:
|
||||
if isinstance(optimizer, (DummyOptim)):
|
||||
kwargs["model_parameters"] = optimizer.params
|
||||
if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is not None:
|
||||
kwargs["lr_scheduler"] = scheduler.lr_scheduler_callable
|
||||
else:
|
||||
if self.deepspeed_config["zero_optimization"].get("offload_optimizer", {}).get(
|
||||
"device", "none"
|
||||
@ -1594,7 +1657,10 @@ class Accelerator:
|
||||
optimizer = DeepSpeedCPUAdam(optimizer.param_groups, **defaults)
|
||||
kwargs["optimizer"] = optimizer
|
||||
if scheduler is not None:
|
||||
if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES:
|
||||
if (
|
||||
isinstance(scheduler, LRScheduler)
|
||||
or type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES
|
||||
):
|
||||
kwargs["lr_scheduler"] = scheduler
|
||||
|
||||
engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs)
|
||||
@ -1922,6 +1988,65 @@ class Accelerator:
|
||||
else:
|
||||
loss.backward(**kwargs)
|
||||
|
||||
def set_trigger(self):
|
||||
"""
|
||||
Sets the internal trigger tensor to 1 on the current process. A later check should follow using this which
will check across all processes.
|
||||
|
||||
Note:
|
||||
Does not require `wait_for_everyone()`
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> from accelerate import Accelerator
|
||||
|
||||
>>> accelerator = Accelerator()
|
||||
>>> # Assume later in the training script
|
||||
>>> # `should_do_breakpoint` is a custom function to monitor when to break,
|
||||
>>> # e.g. when the loss is NaN
|
||||
>>> if should_do_breakpoint(loss):
|
||||
... accelerator.set_trigger()
|
||||
>>> # Assume later in the training script
|
||||
>>> if accelerator.check_trigger():
|
||||
... break
|
||||
```
|
||||
"""
|
||||
self.flag_tensor = torch.tensor(1, device=self.device)
|
||||
|
||||
def check_trigger(self):
|
||||
"""
|
||||
Checks if the internal trigger tensor has been set to 1 in any of the processes. If so, will return `True` and
|
||||
reset the trigger tensor to 0.
|
||||
|
||||
Note:
|
||||
Does not require `wait_for_everyone()`
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> from accelerate import Accelerator
|
||||
|
||||
>>> accelerator = Accelerator()
|
||||
>>> # Assume later in the training script
|
||||
>>> # `should_do_breakpoint` is a custom function to monitor when to break,
|
||||
>>> # e.g. when the loss is NaN
|
||||
>>> if should_do_breakpoint(loss):
|
||||
... accelerator.set_trigger()
|
||||
>>> # Assume later in the training script
|
||||
>>> if accelerator.check_trigger():
|
||||
... break
|
||||
```
|
||||
"""
|
||||
# Now that we are outside `__init__`, we can initialize it if it is `None` on device
|
||||
if self.flag_tensor is None:
|
||||
self.flag_tensor = torch.tensor(0, device=self.device)
|
||||
flag_tensor = self.reduce(self.flag_tensor)
|
||||
if flag_tensor.item() >= 1:
|
||||
self.flag_tensor = torch.tensor(0, device=self.device)
|
||||
return True
|
||||
return False
|
||||
|
||||
def unscale_gradients(self, optimizer=None):
|
||||
"""
|
||||
Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings.
|
||||
@ -1955,6 +2080,10 @@ class Accelerator:
|
||||
for opt in optimizer:
|
||||
while isinstance(opt, AcceleratedOptimizer):
|
||||
opt = opt.optimizer
|
||||
# Reduce gradients first for XLA
|
||||
if self.distributed_type == DistributedType.TPU:
|
||||
gradients = xm._fetch_gradients(opt)
|
||||
self.reduce(gradients, scale=1.0 / self.num_processes)
|
||||
self.scaler.unscale_(opt)
|
||||
|
||||
def clip_grad_norm_(self, parameters, max_norm, norm_type=2):
|
||||
@ -2054,14 +2183,14 @@ class Accelerator:
|
||||
"""
|
||||
return gather(tensor)
|
||||
|
||||
def gather_for_metrics(self, tensor):
|
||||
def gather_for_metrics(self, input_data):
|
||||
"""
|
||||
Gathers `tensor` and potentially drops duplicates in the last batch if on a distributed system. Should be used
|
||||
for gathering the inputs and targets for metric calculation.
|
||||
Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be
|
||||
used for gathering the inputs and targets for metric calculation.
|
||||
|
||||
Args:
|
||||
tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):
|
||||
The tensors for calculating metrics across all processes.
|
||||
input_data (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`):
The tensors or objects for calculating metrics across all processes.
|
||||
|
||||
Example:
|
||||
|
||||
@ -2079,7 +2208,17 @@ class Accelerator:
|
||||
9
|
||||
```
|
||||
"""
|
||||
tensor = self.gather(tensor)
|
||||
|
||||
try:
|
||||
recursively_apply(lambda x: x, input_data, error_on_other_type=True)
|
||||
all_tensors = True
|
||||
except TypeError:
|
||||
all_tensors = False
|
||||
|
||||
if not all_tensors:
|
||||
data = gather_object(input_data)
|
||||
else:
|
||||
data = self.gather(input_data)
|
||||
|
||||
try:
|
||||
if self.gradient_state.end_of_dataloader:
|
||||
@ -2089,24 +2228,24 @@ class Accelerator:
|
||||
logger.info(
|
||||
"The used dataset had no length, returning gathered tensors. You should drop the remainder yourself."
|
||||
)
|
||||
return tensor
|
||||
return data
|
||||
elif self.gradient_state.remainder > 0:
|
||||
# Last batch needs to be truncated on distributed systems as it contains additional samples
|
||||
def _adjust_samples(tensor):
|
||||
return tensor[: self.gradient_state.remainder]
|
||||
|
||||
return recursively_apply(_adjust_samples, tensor)
|
||||
return recursively_apply(_adjust_samples, data)
|
||||
else: # remainder is 0
|
||||
# no remainder even though at end of dataloader, so nothing to do.
|
||||
return tensor
|
||||
return data
|
||||
else:
|
||||
# Not at the end of the dataloader, no need to adjust the tensors
|
||||
return tensor
|
||||
return data
|
||||
except Exception:
|
||||
# Dataset had no length or raised an error
|
||||
return tensor
|
||||
return data
|
||||
|
||||
def reduce(self, tensor, reduction="sum"):
|
||||
def reduce(self, tensor, reduction="sum", scale=1.0):
|
||||
"""
|
||||
Reduce the values in *tensor* across all processes based on *reduction*.
|
||||
|
||||
@ -2118,6 +2257,8 @@ class Accelerator:
|
||||
The tensors to reduce across all processes.
|
||||
reduction (`str`, *optional*, defaults to "sum"):
|
||||
A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation.
|
||||
scale (`float`, *optional*, defaults to 1.0):
|
||||
A default scaling value to be applied after the reduce, only valid on XLA.
|
||||
|
||||
Returns:
|
||||
`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
|
||||
@ -2138,7 +2279,7 @@ class Accelerator:
|
||||
tensor([4, 6])
|
||||
```
|
||||
"""
|
||||
return reduce(tensor, reduction)
|
||||
return reduce(tensor, reduction, scale)
|
||||
|
||||
def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):
|
||||
"""
|
||||
@ -2262,7 +2403,6 @@ class Accelerator:
|
||||
... )
|
||||
```
|
||||
"""
|
||||
self.trackers = []
|
||||
for tracker in self.log_with:
|
||||
if issubclass(type(tracker), GeneralTracker):
|
||||
# Custom trackers are already initialized
|
||||
@ -2304,7 +2444,7 @@ class Accelerator:
|
||||
>>> tensorboard_tracker = accelerator.get_tracker("tensorboard")
|
||||
```
|
||||
"""
|
||||
if len(getattr(self, "trackers", [])) > 0:
|
||||
if len(self.trackers) > 0:
|
||||
for tracker in self.trackers:
|
||||
if tracker.name == name:
|
||||
return tracker.tracker if unwrap else tracker
|
||||
@ -2362,13 +2502,18 @@ class Accelerator:
|
||||
for tracker in self.trackers:
|
||||
tracker.finish()
|
||||
|
||||
def save(self, obj, f):
|
||||
def save(self, obj, f, safe_serialization=False):
|
||||
"""
|
||||
Save the object passed to disk once per machine. Use in place of `torch.save`.
|
||||
|
||||
Args:
|
||||
obj (`object`): The object to save.
|
||||
f (`str` or `os.PathLike`): Where to save the content of `obj`.
|
||||
safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors`
|
||||
|
||||
Note:
|
||||
If `save_on_each_node` was passed in as a `ProjectConfiguration`, will save the object once per node,
|
||||
rather than only once on the main node.
|
||||
|
||||
Example:
|
||||
|
||||
@ -2380,7 +2525,12 @@ class Accelerator:
|
||||
>>> accelerator.save(arr, "array.pkl")
|
||||
```
|
||||
"""
|
||||
save(obj, f)
|
||||
save(
|
||||
obj,
|
||||
f,
|
||||
save_on_each_node=self.project_configuration.save_on_each_node,
|
||||
safe_serialization=safe_serialization,
|
||||
)
|
||||
|
||||
def save_model(
|
||||
self,
|
||||
@ -2460,7 +2610,7 @@ class Accelerator:
|
||||
del state_dict[name]
|
||||
warn_names.add(name)
|
||||
if len(warn_names) > 0:
|
||||
logger.warning_once(
|
||||
logger.warning(
|
||||
f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading",
|
||||
)
|
||||
|
||||
@ -2491,7 +2641,7 @@ class Accelerator:
|
||||
|
||||
# Save the model
|
||||
for shard_file, shard in shards.items():
|
||||
self.save(shard, os.path.join(save_directory, shard_file))
|
||||
self.save(shard, os.path.join(save_directory, shard_file), safe_serialization=safe_serialization)
|
||||
|
||||
if index is None:
|
||||
path_to_weights = os.path.join(save_directory, WEIGHTS_NAME)
|
||||
@ -2582,8 +2732,10 @@ class Accelerator:
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
if self.project_configuration.automatic_checkpoint_naming:
|
||||
folders = [os.path.join(output_dir, folder) for folder in os.listdir(output_dir)]
|
||||
if self.project_configuration.total_limit is not None and (
|
||||
len(folders) + 1 > self.project_configuration.total_limit
|
||||
if (
|
||||
self.project_configuration.total_limit is not None
|
||||
and (len(folders) + 1 > self.project_configuration.total_limit)
|
||||
and self.is_main_process
|
||||
):
|
||||
|
||||
def _inner(folder):
|
||||
@ -2647,16 +2799,26 @@ class Accelerator:
|
||||
elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
|
||||
schedulers = self._schedulers
|
||||
|
||||
# Save the samplers of the dataloaders
|
||||
dataloaders = self._dataloaders
|
||||
|
||||
# Call model loading hooks that might have been registered with
|
||||
# accelerator.register_model_state_hook
|
||||
for hook in self._save_model_state_pre_hook.values():
|
||||
hook(self._models, weights, output_dir)
|
||||
|
||||
save_location = save_accelerator_state(
|
||||
output_dir, weights, optimizers, schedulers, self.state.process_index, self.scaler
|
||||
output_dir,
|
||||
weights,
|
||||
optimizers,
|
||||
schedulers,
|
||||
dataloaders,
|
||||
self.state.process_index,
|
||||
self.scaler,
|
||||
save_on_each_node=self.project_configuration.save_on_each_node,
|
||||
)
|
||||
for i, obj in enumerate(self._custom_objects):
|
||||
save_custom_state(obj, output_dir, i)
|
||||
save_custom_state(obj, output_dir, i, save_on_each_node=self.project_configuration.save_on_each_node)
|
||||
self.project_configuration.iteration += 1
|
||||
return save_location
|
||||
|
||||
@ -2736,7 +2898,7 @@ class Accelerator:
|
||||
return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0]
|
||||
|
||||
folders.sort(key=_inner)
|
||||
input_dir = os.path.join(input_dir, folders[-1])
|
||||
input_dir = folders[-1]
|
||||
else:
|
||||
raise ValueError("No input_dir provided and automatic checkpoint naming is disabled.")
|
||||
logger.info(f"Loading states from {input_dir}")
|
||||
@ -2780,6 +2942,8 @@ class Accelerator:
|
||||
elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
|
||||
schedulers = self._schedulers
|
||||
|
||||
dataloaders = self._dataloaders
|
||||
|
||||
# Call model loading hooks that might have been registered with
|
||||
# accelerator.register_model_state_hook
|
||||
for hook in self._load_model_state_pre_hook.values():
|
||||
@ -2800,6 +2964,7 @@ class Accelerator:
|
||||
models,
|
||||
optimizers,
|
||||
schedulers,
|
||||
dataloaders,
|
||||
self.state.process_index,
|
||||
self.scaler,
|
||||
map_location,
|
||||
|
||||
@ -304,6 +304,7 @@ def dispatch_model(
|
||||
offload_buffers: bool = False,
|
||||
skip_keys: Optional[Union[str, List[str]]] = None,
|
||||
preload_module_classes: Optional[List[str]] = None,
|
||||
force_hooks: bool = False,
|
||||
):
|
||||
"""
|
||||
Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on
|
||||
@ -334,6 +335,9 @@ def dispatch_model(
|
||||
of the forward. This should only be used for classes that have submodules which are registered but not
|
||||
called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
|
||||
`dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
|
||||
force_hooks (`bool`, *optional*, defaults to `False`):
|
||||
Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
|
||||
single device.
|
||||
"""
|
||||
# Error early if the device map is incomplete.
|
||||
check_device_map(model, device_map)
|
||||
@ -343,10 +347,11 @@ def dispatch_model(
|
||||
getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False)
|
||||
) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes"
|
||||
|
||||
# We attach hooks if the device_map have at least 2 different devices. Otherwise, the model in already loaded
|
||||
# We attach hooks if the device_map has at least 2 different devices or if
|
||||
# force_hooks is set to `True`. Otherwise, the model in already loaded
|
||||
# in the unique device and the user can decide where to dispatch the model.
|
||||
# If the model is quantized, we always force-dispatch the model
|
||||
if (len(set(device_map.values())) > 1) or is_bnb_quantized:
|
||||
if (len(set(device_map.values())) > 1) or is_bnb_quantized or force_hooks:
|
||||
if main_device is None:
|
||||
if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}:
|
||||
main_device = "cpu"
|
||||
@ -439,6 +444,7 @@ def load_checkpoint_and_dispatch(
|
||||
offload_state_dict: Optional[bool] = None,
|
||||
skip_keys: Optional[Union[str, List[str]]] = None,
|
||||
preload_module_classes: Optional[List[str]] = None,
|
||||
force_hooks: bool = False,
|
||||
):
|
||||
"""
|
||||
Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
|
||||
@ -481,6 +487,9 @@ def load_checkpoint_and_dispatch(
|
||||
of the forward. This should only be used for classes that have submodules which are registered but not
|
||||
called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
|
||||
`dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
|
||||
force_hooks (`bool`, *optional*, defaults to `False`):
|
||||
Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
|
||||
single device.
|
||||
|
||||
Example:
|
||||
|
||||
@ -541,4 +550,5 @@ def load_checkpoint_and_dispatch(
|
||||
offload_buffers=offload_buffers,
|
||||
skip_keys=skip_keys,
|
||||
preload_module_classes=preload_module_classes,
|
||||
force_hooks=force_hooks,
|
||||
)
|
||||
|
||||
@ -25,6 +25,7 @@ from .utils import (
|
||||
MODEL_NAME,
|
||||
OPTIMIZER_NAME,
|
||||
RNG_STATE_NAME,
|
||||
SAMPLER_NAME,
|
||||
SCALER_NAME,
|
||||
SCHEDULER_NAME,
|
||||
get_pretty_name,
|
||||
@ -49,8 +50,10 @@ def save_accelerator_state(
|
||||
model_states: List[dict],
|
||||
optimizers: list,
|
||||
schedulers: list,
|
||||
dataloaders: list,
|
||||
process_index: int,
|
||||
scaler: GradScaler = None,
|
||||
save_on_each_node: bool = False,
|
||||
):
|
||||
"""
|
||||
Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.
|
||||
@ -64,31 +67,49 @@ def save_accelerator_state(
|
||||
A list of optimizer instances
|
||||
schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
|
||||
A list of learning rate schedulers
|
||||
dataloaders (`List[torch.utils.data.DataLoader]`):
|
||||
A list of dataloader instances to save their sampler states
|
||||
process_index (`int`):
|
||||
The current process index in the Accelerator state
|
||||
scaler (`torch.cuda.amp.GradScaler`, *optional*):
|
||||
An optional gradient scaler instance to save
|
||||
save_on_each_node (`bool`, *optional*):
|
||||
Whether to save on every node, or only the main node.
|
||||
"""
|
||||
# Model states
|
||||
for i, state in enumerate(model_states):
|
||||
weights_name = f"{MODEL_NAME}.bin" if i == 0 else f"{MODEL_NAME}_{i}.bin"
|
||||
output_model_file = os.path.join(output_dir, weights_name)
|
||||
save(state, output_model_file)
|
||||
save(state, output_model_file, save_on_each_node=save_on_each_node)
|
||||
logger.info(f"Model weights saved in {output_model_file}")
|
||||
# Optimizer states
|
||||
for i, opt in enumerate(optimizers):
|
||||
state = opt.state_dict()
|
||||
optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
|
||||
output_optimizer_file = os.path.join(output_dir, optimizer_name)
|
||||
save(state, output_optimizer_file)
|
||||
save(state, output_optimizer_file, save_on_each_node=save_on_each_node)
|
||||
logger.info(f"Optimizer state saved in {output_optimizer_file}")
|
||||
# Scheduler states
|
||||
for i, scheduler in enumerate(schedulers):
|
||||
state = scheduler.state_dict()
|
||||
scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
|
||||
output_scheduler_file = os.path.join(output_dir, scheduler_name)
|
||||
save(state, output_scheduler_file)
|
||||
save(state, output_scheduler_file, save_on_each_node=save_on_each_node)
|
||||
logger.info(f"Scheduler state saved in {output_scheduler_file}")
|
||||
# DataLoader states
|
||||
for i, dataloader in enumerate(dataloaders):
|
||||
sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
|
||||
output_sampler_file = os.path.join(output_dir, sampler_name)
|
||||
# Only save if we have our custom sampler
|
||||
from .data_loader import IterableDatasetShard, SeedableRandomSampler
|
||||
|
||||
if isinstance(dataloader.dataset, IterableDatasetShard):
|
||||
sampler = dataloader.sampler.sampler
|
||||
|
||||
if isinstance(sampler, SeedableRandomSampler):
|
||||
save(sampler, output_sampler_file, save_on_each_node=save_on_each_node)
|
||||
logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}")
|
||||
|
||||
# GradScaler state
|
||||
if scaler is not None:
|
||||
state = scaler.state_dict()
|
||||
@ -118,6 +139,7 @@ def load_accelerator_state(
|
||||
models,
|
||||
optimizers,
|
||||
schedulers,
|
||||
dataloaders,
|
||||
process_index,
|
||||
scaler=None,
|
||||
map_location=None,
|
||||
@ -174,6 +196,19 @@ def load_accelerator_state(
|
||||
scheduler.load_state_dict(torch.load(input_scheduler_file))
|
||||
logger.info("All scheduler states loaded successfully")
|
||||
|
||||
for i, dataloader in enumerate(dataloaders):
|
||||
sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
|
||||
input_sampler_file = os.path.join(input_dir, sampler_name)
|
||||
# Only load if we have our custom sampler
|
||||
from .data_loader import IterableDatasetShard, SeedableRandomSampler
|
||||
|
||||
if isinstance(dataloader.dataset, IterableDatasetShard):
|
||||
sampler = dataloader.sampler.sampler
|
||||
|
||||
if isinstance(sampler, SeedableRandomSampler):
|
||||
dataloader.sampler.sampler = torch.load(input_sampler_file)
|
||||
logger.info("All dataloader sampler states loaded successfully")
|
||||
|
||||
# GradScaler state
|
||||
if scaler is not None:
|
||||
input_scaler_file = os.path.join(input_dir, SCALER_NAME)
|
||||
@ -197,14 +232,14 @@ def load_accelerator_state(
|
||||
logger.info("Could not load random states")
|
||||
|
||||
|
||||
def save_custom_state(obj, path, index: int = 0):
|
||||
def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False):
|
||||
"""
|
||||
Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`
|
||||
"""
|
||||
# Should this be the right way to get a qual_name type value from `obj`?
|
||||
save_location = Path(path) / f"custom_checkpoint_{index}.pkl"
|
||||
logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}")
|
||||
torch.save(obj.state_dict(), save_location)
|
||||
save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node)
|
||||
|
||||
|
||||
def load_custom_state(obj, path, index: int = 0):
|
||||
|
||||
@ -18,6 +18,7 @@ from argparse import ArgumentParser
|
||||
|
||||
from accelerate.commands.config import get_config_parser
|
||||
from accelerate.commands.env import env_command_parser
|
||||
from accelerate.commands.estimate import estimate_command_parser
|
||||
from accelerate.commands.launch import launch_command_parser
|
||||
from accelerate.commands.test import test_command_parser
|
||||
from accelerate.commands.tpu import tpu_command_parser
|
||||
@ -29,6 +30,7 @@ def main():
|
||||
|
||||
# Register commands
|
||||
get_config_parser(subparsers=subparsers)
|
||||
estimate_command_parser(subparsers=subparsers)
|
||||
env_command_parser(subparsers=subparsers)
|
||||
launch_command_parser(subparsers=subparsers)
|
||||
tpu_command_parser(subparsers=subparsers)
|
||||
|
||||
@ -21,6 +21,7 @@ from ...utils import (
|
||||
DistributedType,
|
||||
is_deepspeed_available,
|
||||
is_mps_available,
|
||||
is_npu_available,
|
||||
is_transformers_available,
|
||||
is_xpu_available,
|
||||
)
|
||||
@ -104,7 +105,7 @@ def get_cluster_input():
|
||||
|
||||
if distributed_type == DistributedType.NO:
|
||||
use_cpu = _ask_field(
|
||||
"Do you want to run your training on CPU only (even if a GPU / Apple Silicon device is available)? [yes/NO]:",
|
||||
"Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
@ -385,12 +386,21 @@ def get_cluster_input():
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
fsdp_config["fsdp_sync_module_states"] = _ask_field(
|
||||
"Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ",
|
||||
fsdp_config["fsdp_cpu_ram_efficient_loading"] = _ask_field(
|
||||
"Do you want to enable CPU RAM efficient model loading? Only applicable for 🤗 Transformers models. [YES/no]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=True,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if fsdp_config["fsdp_cpu_ram_efficient_loading"]:
|
||||
fsdp_config["fsdp_sync_module_states"] = True
|
||||
else:
|
||||
fsdp_config["fsdp_sync_module_states"] = _ask_field(
|
||||
"Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=True,
|
||||
error_message="Please enter yes or no.",
|
||||
)
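The branch above encodes an invariant: with CPU-RAM-efficient loading enabled, only rank 0 holds real weights at load time, so `fsdp_sync_module_states` must be `True` for rank 0 to broadcast them. A small illustrative sketch of the two resulting config entries (key names come from the prompts above, the values are examples):

```python
# Illustrative reconstruction of the two FSDP entries produced by the prompts above.
fsdp_config = {}
cpu_ram_efficient_loading = True  # answer to the first prompt

fsdp_config["fsdp_cpu_ram_efficient_loading"] = cpu_ram_efficient_loading
if cpu_ram_efficient_loading:
    # Non-zero ranks start with empty weights, so rank 0 has to broadcast module states.
    fsdp_config["fsdp_sync_module_states"] = True
else:
    fsdp_config["fsdp_sync_module_states"] = True  # would normally come from the second prompt (defaults to yes)

print(fsdp_config)
```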
|
||||
|
||||
megatron_lm_config = {}
|
||||
if distributed_type in [DistributedType.MULTI_GPU]:
|
||||
@ -507,8 +517,12 @@ def get_cluster_input():
|
||||
and not use_cpu
|
||||
and not use_mps
|
||||
):
|
||||
if is_npu_available():
|
||||
machine_type = "NPU(s)"
|
||||
else:
|
||||
machine_type = "GPU(s)"
|
||||
gpu_ids = _ask_field(
|
||||
"What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]:",
|
||||
f"What {machine_type} (by id) should be used for training on this machine as a comma-seperated list? [all]:",
|
||||
default="all",
|
||||
)
|
||||
|
||||
|
||||
@ -109,6 +109,13 @@ class BaseConfig:
|
||||
config_dict["use_cpu"] = False
|
||||
if "debug" not in config_dict:
|
||||
config_dict["debug"] = False
|
||||
extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
|
||||
if len(extra_keys) > 0:
|
||||
raise ValueError(
|
||||
f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
|
||||
" version or fix (and potentially remove) these keys from your config file."
|
||||
)
|
||||
|
||||
return cls(**config_dict)
|
||||
|
||||
def to_json_file(self, json_file):
|
||||
@ -123,7 +130,6 @@ class BaseConfig:
|
||||
config_dict = yaml.safe_load(f)
|
||||
if "compute_environment" not in config_dict:
|
||||
config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
|
||||
|
||||
if "mixed_precision" not in config_dict:
|
||||
config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
|
||||
if isinstance(config_dict["mixed_precision"], bool) and not config_dict["mixed_precision"]:
|
||||
@ -137,6 +143,12 @@ class BaseConfig:
|
||||
config_dict["use_cpu"] = False
|
||||
if "debug" not in config_dict:
|
||||
config_dict["debug"] = False
|
||||
extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
|
||||
if len(extra_keys) > 0:
|
||||
raise ValueError(
|
||||
f"The config file at {yaml_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
|
||||
" version or fix (and potentially remove) these keys from your config file."
|
||||
)
|
||||
return cls(**config_dict)
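The same unknown-key validation is applied to both the JSON and YAML loaders. A tiny self-contained sketch of the mechanism, using a made-up dataclass in place of `BaseConfig`:

```python
import dataclasses

@dataclasses.dataclass
class TinyConfig:  # stand-in for BaseConfig, illustration only
    compute_environment: str = "LOCAL_MACHINE"
    debug: bool = False

config_dict = {"compute_environment": "LOCAL_MACHINE", "debug": False, "not_a_real_key": 1}
extra_keys = sorted(set(config_dict.keys()) - set(TinyConfig.__dataclass_fields__.keys()))
if len(extra_keys) > 0:
    raise ValueError(
        f"The config file had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
        " version or fix (and potentially remove) these keys from your config file."
    )
```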
|
||||
|
||||
def to_yaml_file(self, yaml_file):
|
||||
|
||||
@ -30,13 +30,15 @@ DYNAMO_BACKENDS = [
|
||||
"EAGER",
|
||||
"AOT_EAGER",
|
||||
"INDUCTOR",
|
||||
"NVFUSER",
|
||||
"AOT_NVFUSER",
|
||||
"AOT_CUDAGRAPHS",
|
||||
"AOT_TS_NVFUSER",
|
||||
"NVPRIMS_NVFUSER",
|
||||
"CUDAGRAPHS",
|
||||
"OFI",
|
||||
"FX2TRT",
|
||||
"ONNXRT",
|
||||
"TENSORRT",
|
||||
"IPEX",
|
||||
"TVM",
|
||||
]
|
||||
|
||||
|
||||
|
||||
@ -221,6 +221,15 @@ def get_sagemaker_input():
|
||||
ec2_instance_query += "? [ml.p3.2xlarge]:"
|
||||
ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
|
||||
|
||||
debug = False
|
||||
if distributed_type != SageMakerDistributedType.NO:
|
||||
debug = _ask_field(
|
||||
"Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
num_machines = 1
|
||||
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
|
||||
num_machines = _ask_field(
|
||||
@ -254,4 +263,5 @@ def get_sagemaker_input():
|
||||
num_machines=num_machines,
|
||||
sagemaker_inputs_file=sagemaker_inputs_file,
|
||||
sagemaker_metrics_file=sagemaker_metrics_file,
|
||||
debug=debug,
|
||||
)
|
||||
|
||||
src/accelerate/commands/estimate.py (new file, 270 lines)
@ -0,0 +1,270 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
from huggingface_hub import model_info
|
||||
from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
|
||||
|
||||
from accelerate import init_empty_weights
|
||||
from accelerate.utils import (
|
||||
calculate_maximum_sizes,
|
||||
convert_bytes,
|
||||
is_timm_available,
|
||||
is_transformers_available,
|
||||
)
|
||||
|
||||
|
||||
if is_transformers_available():
|
||||
import transformers
|
||||
from transformers import AutoConfig, AutoModel
|
||||
|
||||
if is_timm_available():
|
||||
import timm
|
||||
|
||||
|
||||
def verify_on_hub(repo: str, token: str = None):
|
||||
"Verifies that the model is on the hub and returns the model info."
|
||||
try:
|
||||
return model_info(repo, token=token)
|
||||
except GatedRepoError:
|
||||
return "gated"
|
||||
except RepositoryNotFoundError:
|
||||
return "repo"
|
||||
|
||||
|
||||
def check_has_model(error):
|
||||
"""
|
||||
Checks what library spawned `error` when a model is not found
|
||||
"""
|
||||
if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]:
|
||||
return "timm"
|
||||
elif (
|
||||
is_transformers_available()
|
||||
and isinstance(error, OSError)
|
||||
and "does not appear to have a file named" in error.args[0]
|
||||
):
|
||||
return "transformers"
|
||||
else:
|
||||
return "unknown"
|
||||
|
||||
|
||||
def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None):
|
||||
"""
|
||||
Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption.
|
||||
|
||||
Args:
|
||||
model_name (`str`):
|
||||
The model name on the Hub
|
||||
library_name (`str`):
|
||||
The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no
|
||||
metadata on the Hub to determine the library.
|
||||
trust_remote_code (`bool`, `optional`, defaults to `False`):
|
||||
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
|
||||
should only be set to `True` for repositories you trust and in which you have read the code, as it will
|
||||
execute code present on the Hub on your local machine.
|
||||
access_token (`str`, `optional`, defaults to `None`):
|
||||
The access token to use to access private or gated models on the Hub. (for use on the Gradio app)
|
||||
|
||||
Returns:
|
||||
`torch.nn.Module`: The torch model that has been initialized on the `meta` device.
|
||||
|
||||
"""
|
||||
model_info = verify_on_hub(model_name, access_token)
|
||||
# Simplified errors
|
||||
if model_info == "gated":
|
||||
raise GatedRepoError(
|
||||
f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`."
|
||||
)
|
||||
elif model_info == "repo":
|
||||
raise RepositoryNotFoundError(
|
||||
f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo,"
|
||||
" make sure you are authenticated via `huggingface-cli login` and have access."
|
||||
)
|
||||
if library_name is None:
|
||||
library_name = getattr(model_info, "library_name", False)
|
||||
if not library_name:
|
||||
raise ValueError(
|
||||
f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)"
|
||||
)
|
||||
if library_name == "transformers":
|
||||
if not is_transformers_available():
|
||||
raise ImportError(
|
||||
f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`"
|
||||
)
|
||||
print(f"Loading pretrained config for `{model_name}` from `transformers`...")
|
||||
|
||||
auto_map = model_info.config.get("auto_map", False)
|
||||
config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code)
|
||||
|
||||
with init_empty_weights():
|
||||
# remote code could specify a specific `AutoModel` class in the `auto_map`
|
||||
constructor = AutoModel
|
||||
if isinstance(auto_map, dict):
|
||||
value = None
|
||||
for key in auto_map.keys():
|
||||
if key.startswith("AutoModelFor"):
|
||||
value = key
|
||||
break
|
||||
if value is not None:
|
||||
constructor = getattr(transformers, value)
|
||||
model = constructor.from_config(config, trust_remote_code=trust_remote_code)
|
||||
elif library_name == "timm":
|
||||
if not is_timm_available():
|
||||
raise ImportError(
|
||||
f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`"
|
||||
)
|
||||
print(f"Loading pretrained config for `{model_name}` from `timm`...")
|
||||
with init_empty_weights():
|
||||
model = timm.create_model(model_name, pretrained=False)
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support."
|
||||
)
|
||||
return model
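A short usage sketch for the helper above; the model id is just an example, and since the weights land on the `meta` device via `init_empty_weights`, no real memory is allocated:

```python
from accelerate.commands.estimate import create_empty_model

# Example model id; any Hub repo with `transformers` metadata would do.
model = create_empty_model("bert-base-cased", library_name="transformers")
num_params = sum(p.numel() for p in model.parameters())
print(f"{num_params / 1e6:.1f}M parameters, all on the meta device")
```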
|
||||
|
||||
|
||||
def create_ascii_table(headers: list, rows: list, title: str):
|
||||
"Creates a pretty table from a list of rows, minimal version of `tabulate`."
|
||||
sep_char, in_between = "│", "─"
|
||||
column_widths = []
|
||||
for i in range(len(headers)):
|
||||
column_values = [row[i] for row in rows] + [headers[i]]
|
||||
max_column_width = max(len(value) for value in column_values)
|
||||
column_widths.append(max_column_width)
|
||||
|
||||
formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))]
|
||||
|
||||
pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}"
|
||||
diff = 0
|
||||
|
||||
def make_row(left_char, middle_char, right_char):
|
||||
return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"
|
||||
|
||||
separator = make_row("├", "┼", "┤")
|
||||
if len(title) > sum(column_widths):
|
||||
diff = abs(len(title) - len(separator))
|
||||
column_widths[-1] += diff
|
||||
|
||||
# Update with diff
|
||||
separator = make_row("├", "┼", "┤")
|
||||
initial_rows = [
|
||||
make_row("┌", in_between, "┐"),
|
||||
f"{sep_char}{title.center(len(separator) - 2)}{sep_char}",
|
||||
make_row("├", "┬", "┤"),
|
||||
]
|
||||
table = "\n".join(initial_rows) + "\n"
|
||||
column_widths[-1] += diff
|
||||
centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)]
|
||||
table += f"{pattern % tuple(centered_line)}\n{separator}\n"
|
||||
for i, line in enumerate(rows):
|
||||
centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)]
|
||||
table += f"{pattern % tuple(centered_line)}\n"
|
||||
table += f'└{"┴".join([in_between * n for n in column_widths])}┘'
|
||||
|
||||
return table
|
||||
|
||||
|
||||
def estimate_command_parser(subparsers=None):
|
||||
if subparsers is not None:
|
||||
parser = subparsers.add_parser("estimate-memory")
|
||||
else:
|
||||
parser = argparse.ArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
|
||||
|
||||
parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
|
||||
parser.add_argument(
|
||||
"--library_name",
|
||||
type=str,
|
||||
help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.",
|
||||
choices=["timm", "transformers"],
|
||||
)
|
||||
parser.add_argument(
|
||||
"--dtypes",
|
||||
type=str,
|
||||
nargs="+",
|
||||
default=["float32", "float16", "int8", "int4"],
|
||||
help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`",
|
||||
choices=["float32", "float16", "int8", "int4"],
|
||||
)
|
||||
parser.add_argument(
|
||||
"--trust_remote_code",
|
||||
action="store_true",
|
||||
help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag
|
||||
should only be used for repositories you trust and in which you have read the code, as it will execute
|
||||
code present on the Hub on your local machine.""",
|
||||
)
|
||||
|
||||
if subparsers is not None:
|
||||
parser.set_defaults(func=estimate_command)
|
||||
return parser
|
||||
|
||||
|
||||
def gather_data(args):
|
||||
"Creates an empty model and gathers the data for the sizes"
|
||||
try:
|
||||
model = create_empty_model(
|
||||
args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code
|
||||
)
|
||||
except (RuntimeError, OSError) as e:
|
||||
library = check_has_model(e)
|
||||
if library != "unknown":
|
||||
raise RuntimeError(
|
||||
f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo."
|
||||
)
|
||||
raise e
|
||||
|
||||
total_size, largest_layer = calculate_maximum_sizes(model)
|
||||
|
||||
data = []
|
||||
|
||||
for dtype in args.dtypes:
|
||||
dtype_total_size = total_size
|
||||
dtype_largest_layer = largest_layer[0]
|
||||
if dtype == "float16":
|
||||
dtype_total_size /= 2
|
||||
dtype_largest_layer /= 2
|
||||
elif dtype == "int8":
|
||||
dtype_total_size /= 4
|
||||
dtype_largest_layer /= 4
|
||||
elif dtype == "int4":
|
||||
dtype_total_size /= 8
|
||||
dtype_largest_layer /= 8
|
||||
dtype_training_size = dtype_total_size * 4
|
||||
data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size])
|
||||
return data
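The divisors above scale a float32 byte count down to the chosen dtype (float16 divides by 2, int8 by 4, int4 by 8), and the `* 4` is a rough rule of thumb for Adam-style training (weights, gradients, and two optimizer states). A worked sketch with an assumed 7B-parameter model:

```python
# Assumed: 7e9 parameters stored in float32 (4 bytes each).
total_fp32 = 7e9 * 4             # bytes needed to load in float32
total_fp16 = total_fp32 / 2      # bytes needed to load in float16
training_fp16 = total_fp16 * 4   # rough training footprint with Adam at fp16 weights

for label, value in [("fp32 load", total_fp32), ("fp16 load", total_fp16), ("fp16 train", training_fp16)]:
    print(f"{label}: {value / 1e9:.1f} GB")  # ~28.0, ~14.0, ~56.0
```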
|
||||
|
||||
|
||||
def estimate_command(args):
|
||||
data = gather_data(args)
|
||||
for row in data:
|
||||
for i, item in enumerate(row):
|
||||
if isinstance(item, (int, float)):
|
||||
row[i] = convert_bytes(item)
|
||||
|
||||
headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]
|
||||
|
||||
title = f"Memory Usage for loading `{args.model_name}`"
|
||||
table = create_ascii_table(headers, data, title)
|
||||
print(table)
|
||||
|
||||
|
||||
def main():
|
||||
parser = estimate_command_parser()
|
||||
args = parser.parse_args()
|
||||
estimate_command(args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
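The same entry point is registered on the CLI as the `estimate-memory` subcommand (see `estimate_command_parser` above). An illustrative, programmatic equivalent using only the functions defined in this file; the model id is an example:

```python
from accelerate.commands.estimate import estimate_command, estimate_command_parser

parser = estimate_command_parser()
args = parser.parse_args(["bert-base-cased", "--dtypes", "float32", "float16"])
estimate_command(args)  # prints the ASCII table produced by create_ascii_table
```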
|
||||
@ -524,6 +524,14 @@ def launch_command_parser(subparsers=None):
|
||||
help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres."
|
||||
" (useful only when `use_fsdp` flag is passed).",
|
||||
)
|
||||
fsdp_args.add_argument(
|
||||
"--fsdp_cpu_ram_efficient_loading",
|
||||
default="true",
|
||||
type=str,
|
||||
help="If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. "
|
||||
"Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to True. "
|
||||
"(useful only when `use_fsdp` flag is passed).",
|
||||
)
|
||||
fsdp_args.add_argument(
|
||||
"--fsdp_sync_module_states",
|
||||
default="true",
|
||||
|
||||
@ -17,7 +17,7 @@ from contextlib import suppress
|
||||
from typing import Callable, List, Optional, Union
|
||||
|
||||
import torch
|
||||
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
|
||||
from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler
|
||||
|
||||
from .logging import get_logger
|
||||
from .state import AcceleratorState, DistributedType, GradientState, is_tpu_available
|
||||
@ -64,6 +64,41 @@ for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items():
|
||||
_PYTORCH_DATALOADER_KWARGS.update(additional_kwargs)
|
||||
|
||||
|
||||
class SeedableRandomSampler(RandomSampler):
|
||||
"""
|
||||
Same as a random sampler, except that in `__iter__` a seed can be used.
|
||||
|
||||
Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed
|
||||
and be fully reproducible on multiple iterations.
|
||||
|
||||
If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on
|
||||
(stored in `self.epoch`).
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.epoch = 0
|
||||
|
||||
def __iter__(self):
|
||||
g = torch.Generator()
|
||||
if self.generator is not None:
|
||||
seed = self.epoch + self.generator.initial_seed()
|
||||
else:
|
||||
seed = self.epoch
|
||||
g.manual_seed(seed)
|
||||
n = len(self.data_source)
|
||||
# Taken 1:1 from torch.utils.data.sampler.RandomSampler.__iter__
|
||||
if self.replacement:
|
||||
for _ in range(self.num_samples // 32):
|
||||
yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=g).tolist()
|
||||
else:
|
||||
yield from torch.randperm(n, generator=g).tolist()
|
||||
|
||||
def set_epoch(self, epoch: int):
|
||||
"Sets the current iteration of the sampler."
|
||||
self.epoch = epoch
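A small sketch of the behaviour this class is after: for a fixed epoch the ordering is identical on every process (same seed), and bumping the epoch reshuffles deterministically:

```python
import torch
from accelerate.data_loader import SeedableRandomSampler

data = list(range(8))
generator = torch.Generator().manual_seed(42)
sampler = SeedableRandomSampler(data_source=data, generator=generator)

sampler.set_epoch(0)
epoch0 = list(sampler)
sampler.set_epoch(0)
assert list(sampler) == epoch0  # same epoch + same initial seed -> identical order everywhere

sampler.set_epoch(1)
print(epoch0, list(sampler))  # epoch 1 seeds the generator with 1 + 42, giving a fresh permutation
```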
|
||||
|
||||
|
||||
class BatchSamplerShard(BatchSampler):
|
||||
"""
|
||||
Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will
|
||||
@ -271,7 +306,25 @@ class IterableDatasetShard(IterableDataset):
|
||||
self.process_index = process_index
|
||||
self.split_batches = split_batches
|
||||
|
||||
def set_epoch(self, epoch):
|
||||
self.epoch = epoch
|
||||
if hasattr(self.dataset, "set_epoch"):
|
||||
self.dataset.set_epoch(epoch)
|
||||
|
||||
def __len__(self):
|
||||
# We will just raise the downstream error if the underlying dataset is not sized
|
||||
if self.drop_last:
|
||||
return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size
|
||||
else:
|
||||
return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
|
||||
|
||||
def __iter__(self):
|
||||
if (
|
||||
not hasattr(self.dataset, "set_epoch")
|
||||
and hasattr(self.dataset, "generator")
|
||||
and isinstance(self.dataset.generator, torch.Generator)
|
||||
):
|
||||
self.dataset.generator.manual_seed(self.epoch)
|
||||
real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes)
|
||||
process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size
|
||||
process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size)
|
||||
@ -324,8 +377,9 @@ class DataLoaderStateMixin:
|
||||
"Prepares the gradient state for the current dataloader"
|
||||
self.reset()
|
||||
with suppress(Exception):
|
||||
length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
|
||||
self.remainder = length % self.total_batch_size
|
||||
if not self._drop_last:
|
||||
length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
|
||||
self.remainder = length % self.total_batch_size
|
||||
self.gradient_state._add_dataloader(self)
|
||||
|
||||
def end(self):
|
||||
@ -352,7 +406,7 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
|
||||
- `"generator"`: an optional `torch.Generator`
|
||||
synchronized_generator (`torch.Generator`, *optional*):
|
||||
A random number generator to keep synchronized across processes.
|
||||
split_batches (`int`, *optional*, defaults to 0):
|
||||
skip_batches (`int`, *optional*, defaults to 0):
|
||||
The number of batches to skip at the beginning.
|
||||
kwargs:
|
||||
All other keyword arguments to pass to the regular `DataLoader` initialization.
|
||||
@ -366,18 +420,31 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
|
||||
- **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
|
||||
"""
|
||||
|
||||
def __init__(self, dataset, device=None, rng_types=None, synchronized_generator=None, skip_batches=0, **kwargs):
|
||||
def __init__(
|
||||
self,
|
||||
dataset,
|
||||
device=None,
|
||||
rng_types=None,
|
||||
synchronized_generator=None,
|
||||
skip_batches=0,
|
||||
_drop_last: bool = False,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(dataset, **kwargs)
|
||||
self.device = device
|
||||
self.rng_types = rng_types
|
||||
self.synchronized_generator = synchronized_generator
|
||||
self.skip_batches = skip_batches
|
||||
self.gradient_state = GradientState()
|
||||
self._drop_last = _drop_last
|
||||
self.iteration = 0
|
||||
|
||||
def __iter__(self):
|
||||
if self.rng_types is not None:
|
||||
synchronize_rng_states(self.rng_types, self.synchronized_generator)
|
||||
self.begin()
|
||||
|
||||
self.set_epoch(self.iteration)
|
||||
dataloader_iter = super().__iter__()
|
||||
# We iterate one batch ahead to check when we are at the end
|
||||
try:
|
||||
@ -401,8 +468,21 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
|
||||
if batch_index >= self.skip_batches:
|
||||
yield current_batch
|
||||
break
|
||||
|
||||
self.iteration += 1
|
||||
self.end()
|
||||
|
||||
def set_epoch(self, epoch: int):
|
||||
# In case it is manually passed in, the user can set it to what they like
|
||||
if self.iteration != epoch:
|
||||
self.iteration = epoch
|
||||
if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"):
|
||||
self.batch_sampler.sampler.set_epoch(epoch)
|
||||
# We support if a custom `Dataset` implementation has `set_epoch`
|
||||
# or in general HF datasets `Datasets`
|
||||
elif hasattr(self.dataset, "set_epoch"):
|
||||
self.dataset.set_epoch(epoch)
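With the hook above, calling `set_epoch` on a prepared dataloader propagates to the `SeedableRandomSampler` (or to a dataset that defines `set_epoch`), so shuffling stays aligned across processes. A minimal sketch, which also runs on a single process:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()
dataset = TensorDataset(torch.arange(16, dtype=torch.float32))
dataloader = accelerator.prepare(DataLoader(dataset, batch_size=4, shuffle=True))

for epoch in range(2):
    dataloader.set_epoch(epoch)  # optional: __iter__ also advances self.iteration automatically
    for (batch,) in dataloader:
        pass  # training step would go here
```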
|
||||
|
||||
@property
|
||||
def total_batch_size(self):
|
||||
batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler
|
||||
@ -506,6 +586,7 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
|
||||
self.skip_batches = skip_batches
|
||||
|
||||
self.slice_fn = slice_tensors if slice_fn is None else slice_fn
|
||||
self.iteration = 0
|
||||
|
||||
def _fetch_batches(self, iterator):
|
||||
batches, batch = None, None
|
||||
@ -546,6 +627,7 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
|
||||
|
||||
def __iter__(self):
|
||||
self.begin()
|
||||
self.set_epoch(self.iteration)
|
||||
main_iterator = None
|
||||
if is_torch_version(">=", "2.0.1"):
|
||||
# NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts
|
||||
@ -615,8 +697,18 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
|
||||
if batch_index >= self.skip_batches:
|
||||
yield batch
|
||||
batch_index += 1
|
||||
self.iteration += 1
|
||||
self.end()
|
||||
|
||||
def set_epoch(self, epoch: int):
|
||||
# In case it is manually passed in, the user can set it to what they like
|
||||
if self.iteration != epoch:
|
||||
self.iteration = epoch
|
||||
if hasattr(self.batch_sampler.sampler, "set_epoch"):
|
||||
self.batch_sampler.sampler.set_epoch(epoch)
|
||||
elif hasattr(self.dataset, "set_epoch"):
|
||||
self.dataset.set_epoch(epoch)
|
||||
|
||||
def __len__(self):
|
||||
whole_length = super().__len__()
|
||||
if self.split_batches:
|
||||
@ -739,6 +831,23 @@ def prepare_data_loader(
|
||||
new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None
|
||||
sampler_is_batch_sampler = False
|
||||
synchronized_generator = None
|
||||
sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
|
||||
if sampler_is_batch_sampler:
|
||||
sampler = getattr(dataloader.sampler, "sampler", None)
|
||||
else:
|
||||
sampler = getattr(dataloader.batch_sampler, "sampler", None)
|
||||
if isinstance(sampler, RandomSampler) and num_processes > 1:
|
||||
# When iterating through the dataloader during distributed processes
|
||||
# we want to ensure that on each process we are iterating through the same
|
||||
# samples in the same order if a seed is set. This requires a tweak
|
||||
# to the `torch.utils.data.RandomSampler` class (if used).
|
||||
sampler = SeedableRandomSampler(
|
||||
data_source=sampler.data_source,
|
||||
replacement=sampler.replacement,
|
||||
num_samples=sampler._num_samples,
|
||||
generator=getattr(sampler, "generator", torch.Generator()),
|
||||
)
|
||||
|
||||
# No change if no multiprocess
|
||||
if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:
|
||||
if isinstance(new_dataset, IterableDataset):
|
||||
@ -753,17 +862,6 @@ def prepare_data_loader(
|
||||
split_batches=split_batches,
|
||||
)
|
||||
else:
|
||||
# New batch sampler for the current process.
|
||||
sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
|
||||
if sampler_is_batch_sampler:
|
||||
sampler = dataloader.sampler.sampler
|
||||
else:
|
||||
sampler = dataloader.batch_sampler.sampler
|
||||
if hasattr(sampler, "generator"):
|
||||
if sampler.generator is None:
|
||||
sampler.generator = torch.Generator()
|
||||
synchronized_generator = sampler.generator
|
||||
|
||||
batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
|
||||
new_batch_sampler = BatchSamplerShard(
|
||||
batch_sampler,
|
||||
@ -797,7 +895,11 @@ def prepare_data_loader(
|
||||
kwargs["batch_size"] = (
|
||||
dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size
|
||||
)
|
||||
|
||||
if isinstance(sampler, SeedableRandomSampler):
|
||||
if sampler_is_batch_sampler:
|
||||
dataloader.sampler.sampler = sampler
|
||||
else:
|
||||
dataloader.batch_sampler.sampler = sampler
|
||||
if dispatch_batches:
|
||||
kwargs.pop("generator")
|
||||
dataloader = DataLoaderDispatcher(
|
||||
@ -815,6 +917,7 @@ def prepare_data_loader(
|
||||
sampler=new_batch_sampler,
|
||||
batch_size=dataloader.batch_size,
|
||||
rng_types=rng_types,
|
||||
_drop_last=dataloader.drop_last,
|
||||
synchronized_generator=synchronized_generator,
|
||||
**kwargs,
|
||||
)
|
||||
@ -825,6 +928,7 @@ def prepare_data_loader(
|
||||
batch_sampler=new_batch_sampler,
|
||||
rng_types=rng_types,
|
||||
synchronized_generator=synchronized_generator,
|
||||
_drop_last=dataloader.drop_last,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
@ -155,17 +155,17 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False)
|
||||
module = hook.init_hook(module)
|
||||
module._hf_hook = hook
|
||||
|
||||
@functools.wraps(old_forward)
|
||||
def new_forward(*args, **kwargs):
|
||||
def new_forward(module, *args, **kwargs):
|
||||
args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
|
||||
if module._hf_hook.no_grad:
|
||||
with torch.no_grad():
|
||||
output = old_forward(*args, **kwargs)
|
||||
output = module._old_forward(*args, **kwargs)
|
||||
else:
|
||||
output = old_forward(*args, **kwargs)
|
||||
output = module._old_forward(*args, **kwargs)
|
||||
return module._hf_hook.post_forward(module, output)
|
||||
|
||||
module.forward = new_forward
|
||||
module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
|
||||
|
||||
return module
|
||||
|
||||
|
||||
@ -311,6 +311,7 @@ class AlignDevicesHook(ModelHook):
|
||||
for name, device in self.original_devices.items():
|
||||
if device != torch.device("meta"):
|
||||
set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None))
|
||||
return module
|
||||
|
||||
|
||||
def attach_execution_device_hook(
|
||||
|
||||
@ -27,10 +27,19 @@ def test_launch():
|
||||
_ = PartialState()
|
||||
|
||||
|
||||
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
|
||||
def notebook_launcher(
|
||||
function,
|
||||
args=(),
|
||||
num_processes=None,
|
||||
mixed_precision="no",
|
||||
use_port="29500",
|
||||
master_addr="127.0.0.1",
|
||||
node_rank=0,
|
||||
num_nodes=1,
|
||||
):
|
||||
"""
|
||||
Launches a training function, using several processes if it's possible in the current environment (TPU with
|
||||
multiple cores for instance).
|
||||
Launches a training function, using several processes or multiple nodes if it's possible in the current environment
|
||||
(TPU with multiple cores for instance).
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
@ -55,6 +64,12 @@ def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no
|
||||
If `fp16` or `bf16`, will use mixed precision training on multi-GPU.
|
||||
use_port (`str`, *optional*, defaults to `"29500"`):
|
||||
The port to use to communicate between processes when launching a multi-GPU training.
|
||||
master_addr (`str`, *optional*, defaults to `"127.0.0.1"`):
|
||||
The address to use for communication between processes.
|
||||
node_rank (`int`, *optional*, defaults to 0):
|
||||
The rank of the current node.
|
||||
num_nodes (`int`, *optional*, defaults to 1):
|
||||
The number of nodes to use for training.
|
||||
|
||||
Example:
|
||||
|
||||
@ -114,7 +129,8 @@ def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no
|
||||
raise ValueError(
|
||||
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
|
||||
)
|
||||
|
||||
if node_rank >= num_nodes:
|
||||
raise ValueError("The node_rank must be less than the number of nodes.")
|
||||
if num_processes > 1:
|
||||
# Multi-GPU launch
|
||||
from torch.multiprocessing import start_processes
|
||||
@ -129,7 +145,12 @@ def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no
|
||||
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
|
||||
with patch_environment(
|
||||
world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
|
||||
nproc=num_processes,
|
||||
node_rank=node_rank,
|
||||
world_size=num_nodes * num_processes,
|
||||
master_addr=master_addr,
|
||||
master_port=use_port,
|
||||
mixed_precision=mixed_precision,
|
||||
):
|
||||
# First dummy launch
|
||||
if os.environ.get("ACCELERATE_DEBUG_MODE", "false").lower() == "true":
|
||||
|
||||
@ -85,9 +85,11 @@ def get_logger(name: str, log_level: str = None):
|
||||
|
||||
```python
|
||||
>>> from accelerate.logging import get_logger
|
||||
>>> from accelerate import Accelerator
|
||||
|
||||
>>> logger = get_logger(__name__)
|
||||
|
||||
>>> accelerator = Accelerator()
|
||||
>>> logger.info("My log", main_process_only=False)
|
||||
>>> logger.debug("My log", main_process_only=True)
|
||||
|
||||
@ -95,9 +97,6 @@ def get_logger(name: str, log_level: str = None):
|
||||
>>> logger.info("My log")
|
||||
>>> logger.debug("My second log")
|
||||
|
||||
>>> from accelerate import Accelerator
|
||||
|
||||
>>> accelerator = Accelerator()
|
||||
>>> array = ["a", "b", "c", "d"]
|
||||
>>> letter_at_rank = array[accelerator.process_index]
|
||||
>>> logger.info(letter_at_rank, in_order=True)
|
||||
|
||||
@ -48,6 +48,10 @@ if is_tpu_available(check_device=False):
|
||||
import torch_xla.core.xla_model as xm
|
||||
|
||||
|
||||
if is_npu_available(check_device=False):
|
||||
import torch_npu # noqa: F401
|
||||
|
||||
|
||||
def is_initialized() -> bool:
|
||||
"""
|
||||
Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`,
|
||||
@ -168,7 +172,11 @@ class PartialState:
|
||||
|
||||
# DeepSpeed always uses nccl
|
||||
kwargs.pop("backend", None)
|
||||
self.backend = "nccl"
|
||||
if is_xpu_available() and is_ccl_available():
|
||||
# Set DeepSpeed backend to ccl for xpu
|
||||
self.backend = "ccl"
|
||||
else:
|
||||
self.backend = "nccl"
|
||||
dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
|
||||
|
||||
self.num_processes = torch.distributed.get_world_size()
|
||||
|
||||
@ -149,6 +149,39 @@ def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
|
||||
), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
|
||||
|
||||
|
||||
def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset():
|
||||
class DummyIterableDataset(IterableDataset):
|
||||
def __init__(self, data):
|
||||
self.data = data
|
||||
|
||||
def __len__(self):
|
||||
return len(self.data)
|
||||
|
||||
def __iter__(self):
|
||||
for element in self.data:
|
||||
yield element
|
||||
|
||||
iterable_dataset = DummyIterableDataset([n for n in range(30)])
|
||||
dataloader = DataLoader(iterable_dataset, batch_size=4)
|
||||
accelerator = Accelerator()
|
||||
prepared_dataloader = accelerator.prepare(dataloader)
|
||||
|
||||
if accelerator.is_main_process:
|
||||
logger = logging.root.manager.loggerDict["accelerate.accelerator"]
|
||||
list_handler = ListHandler()
|
||||
logger.addHandler(list_handler)
|
||||
|
||||
batches_for_metrics = []
|
||||
for batch in prepared_dataloader:
|
||||
batches_for_metrics.append(accelerator.gather_for_metrics(batch))
|
||||
|
||||
assert torch.cat(batches_for_metrics).size(0) == 30
|
||||
|
||||
if accelerator.is_main_process:
|
||||
assert len(list_handler.logs) == 0
|
||||
logger.removeHandler(list_handler)
|
||||
|
||||
|
||||
def test_gather_for_metrics_with_iterable_dataset():
|
||||
class DummyIterableDataset(IterableDataset):
|
||||
def __init__(self, data):
|
||||
@ -186,6 +219,25 @@ def test_gather_for_metrics_with_iterable_dataset():
|
||||
logger.removeHandler(list_handler)
|
||||
|
||||
|
||||
def test_gather_for_metrics_drop_last():
|
||||
accelerator = Accelerator()
|
||||
per_device_batch_size = 5
|
||||
num_items = (10 * accelerator.num_processes) + 1
|
||||
dataloader = DataLoader(range(num_items), batch_size=per_device_batch_size, drop_last=True)
|
||||
dataloader = accelerator.prepare(dataloader)
|
||||
|
||||
iterator = iter(dataloader)
|
||||
next(iterator) # Skip first batch tensor([0, 1, 2, 3, 4], device='cuda:0')
|
||||
batch = next(iterator)
|
||||
gathered_items = accelerator.gather_for_metrics(batch)
|
||||
|
||||
# Should return a full set of complete batches from each GPU
|
||||
num_expected_items = per_device_batch_size * accelerator.num_processes
|
||||
assert gathered_items.size(0) == (
|
||||
num_expected_items
|
||||
), f"Expected number of items: {num_expected_items}, Actual: {gathered_items.size(0)}"
|
||||
|
||||
|
||||
def main():
|
||||
accelerator = Accelerator(split_batches=False, dispatch_batches=False)
|
||||
if accelerator.is_local_main_process:
|
||||
@ -206,6 +258,8 @@ def main():
|
||||
accelerator.state._reset_state()
|
||||
print("test_gather_for_metrics_with_iterable_dataset")
|
||||
test_gather_for_metrics_with_iterable_dataset()
|
||||
print("test gather_for_metrics_with_non_tensor_objects_iterable_dataset")
|
||||
test_gather_for_metrics_with_non_tensor_objects_iterable_dataset()
|
||||
if accelerator.is_local_main_process:
|
||||
print("**Test torch metrics**")
|
||||
for split_batches in [True, False]:
|
||||
@ -220,6 +274,10 @@ def main():
|
||||
accelerator = Accelerator()
|
||||
test_torch_metrics(accelerator, 512)
|
||||
accelerator.state._reset_state()
|
||||
if accelerator.is_local_main_process:
|
||||
print("**Test that `drop_last` is taken into account**")
|
||||
test_gather_for_metrics_drop_last()
|
||||
accelerator.state._reset_state()
|
||||
|
||||
|
||||
def _mp_fn(index):
|
||||
|
||||
@ -21,11 +21,12 @@ import time
|
||||
from copy import deepcopy
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
from torch.utils.data import DataLoader, Dataset
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate.data_loader import prepare_data_loader
|
||||
from accelerate.data_loader import SeedableRandomSampler, prepare_data_loader
|
||||
from accelerate.state import AcceleratorState
|
||||
from accelerate.test_utils import RegressionDataset, are_the_same_tensors
|
||||
from accelerate.utils import (
|
||||
@ -288,11 +289,67 @@ def central_dl_preparation_check():
|
||||
print("Shuffled central dataloader passing.")
|
||||
|
||||
|
||||
def custom_sampler_check():
|
||||
state = AcceleratorState()
|
||||
|
||||
class CustomDataset(Dataset):
|
||||
def __init__(self, data):
|
||||
self.data = data
|
||||
|
||||
def __len__(self):
|
||||
return len(self.data)
|
||||
|
||||
def __getitem__(self, index):
|
||||
return self.data[index]
|
||||
|
||||
class CustomBatchSampler:
|
||||
def __init__(self, dataset_length: int, batch_size: int, shuffle: bool = True):
|
||||
self.batch_size = batch_size
|
||||
self.data_index = np.arange(dataset_length)
|
||||
self.shuffle = shuffle
|
||||
|
||||
def __iter__(self):
|
||||
num_batches = len(self)
|
||||
if self.shuffle:
|
||||
index = np.random.permutation(self.data_index)
|
||||
else:
|
||||
index = self.data_index
|
||||
output = np.array_split(index, num_batches)
|
||||
yield from output
|
||||
|
||||
def __len__(self):
|
||||
return math.ceil(len(self.data_index) / self.batch_size)
|
||||
|
||||
dataset = CustomDataset(range(32 * state.num_processes))
|
||||
sampler = CustomBatchSampler(len(dataset), batch_size=8)
|
||||
dl = DataLoader(dataset, batch_sampler=sampler)
|
||||
dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index)
|
||||
# We just need to ensure that `dl.batch_sampler` (or `dl.batch_sampler.batch_sampler`) is indeed the old batch sampler
|
||||
if hasattr(dl.batch_sampler, "batch_sampler"):
|
||||
assert isinstance(
|
||||
dl.batch_sampler.batch_sampler, CustomBatchSampler
|
||||
), "Custom sampler was changed after calling `prepare_data_loader`"
|
||||
else:
|
||||
assert isinstance(
|
||||
dl.batch_sampler, CustomBatchSampler
|
||||
), "Custom sampler was changed after calling `prepare_data_loader`"
|
||||
|
||||
|
||||
def mock_training(length, batch_size, generator):
|
||||
set_seed(42)
|
||||
generator.manual_seed(42)
|
||||
train_set = RegressionDataset(length=length, seed=42)
|
||||
train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
|
||||
if AcceleratorState().num_processes > 1:
|
||||
# The SeedableRandomSampler is needed during distributed setups
|
||||
# for full reproducibility across processes with the `DataLoader`
|
||||
sampler = SeedableRandomSampler(
|
||||
generator=generator,
|
||||
data_source=train_set,
|
||||
num_samples=len(train_set),
|
||||
)
|
||||
train_dl = DataLoader(train_set, batch_size=batch_size, sampler=sampler)
|
||||
else:
|
||||
train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
|
||||
model = RegressionModel()
|
||||
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
|
||||
for epoch in range(3):
|
||||
@ -541,6 +598,23 @@ def test_split_between_processes_tensor():
|
||||
state.wait_for_everyone()
|
||||
|
||||
|
||||
def test_trigger():
|
||||
accelerator = Accelerator()
|
||||
# should start with being false
|
||||
assert accelerator.check_trigger() is False
|
||||
|
||||
# set a breakpoint on the main process
|
||||
if accelerator.is_main_process:
|
||||
accelerator.set_trigger()
|
||||
|
||||
# check it's been activated across all processes
|
||||
# calls `all_reduce` and triggers a sync
|
||||
assert accelerator.check_trigger() is True
|
||||
|
||||
# check it's been reset after the sync
|
||||
assert accelerator.check_trigger() is False
|
||||
|
||||
|
||||
def main():
|
||||
accelerator = Accelerator()
|
||||
state = accelerator.state
|
||||
@ -581,6 +655,7 @@ def main():
|
||||
dl_preparation_check()
|
||||
if state.distributed_type != DistributedType.TPU:
|
||||
central_dl_preparation_check()
|
||||
custom_sampler_check()
|
||||
|
||||
# Trainings are not exactly the same in DeepSpeed and CPU mode
|
||||
if state.distributed_type == DistributedType.DEEPSPEED:
|
||||
@ -590,6 +665,10 @@ def main():
|
||||
print("\n**Training integration test**")
|
||||
training_check()
|
||||
|
||||
if state.local_process_index == 0:
|
||||
print("\n**Breakpoint trigger test**")
|
||||
test_trigger()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
@ -20,7 +20,6 @@ import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
from contextlib import contextmanager
|
||||
from distutils.util import strtobool
|
||||
from functools import partial
|
||||
from pathlib import Path
|
||||
from typing import List, Union
|
||||
@ -38,11 +37,13 @@ from ..utils import (
|
||||
is_mps_available,
|
||||
is_safetensors_available,
|
||||
is_tensorboard_available,
|
||||
is_timm_available,
|
||||
is_torch_version,
|
||||
is_tpu_available,
|
||||
is_transformers_available,
|
||||
is_wandb_available,
|
||||
is_xpu_available,
|
||||
str_to_bool,
|
||||
)
|
||||
|
||||
|
||||
@ -55,7 +56,7 @@ def parse_flag_from_env(key, default=False):
|
||||
else:
|
||||
# KEY is set, convert it to True or False.
|
||||
try:
|
||||
_value = strtobool(value)
|
||||
_value = str_to_bool(value)
|
||||
except ValueError:
|
||||
# More values are supported, but let's keep the message simple.
|
||||
raise ValueError(f"If set, {key} must be yes or no.")
|
||||
@ -116,6 +117,20 @@ def require_huggingface_suite(test_case):
|
||||
)(test_case)
|
||||
|
||||
|
||||
def require_transformers(test_case):
|
||||
"""
|
||||
Decorator marking a test that requires transformers. These tests are skipped when they are not.
|
||||
"""
|
||||
return unittest.skipUnless(is_transformers_available(), "test requires the transformers library")(test_case)
|
||||
|
||||
|
||||
def require_timm(test_case):
|
||||
"""
|
||||
Decorator marking a test that requires timm. These tests are skipped when timm is not installed.
|
||||
"""
|
||||
return unittest.skipUnless(is_timm_available(), "test requires the timm library")(test_case)
|
||||
|
||||
|
||||
def require_bnb(test_case):
|
||||
"""
|
||||
Decorator marking a test that requires bitsandbytes. These tests are skipped when they are not.
|
||||
|
||||
@ -39,31 +39,18 @@ from .utils import (
|
||||
_available_trackers = []
|
||||
|
||||
if is_tensorboard_available():
|
||||
try:
|
||||
from torch.utils import tensorboard
|
||||
except ModuleNotFoundError:
|
||||
import tensorboardX as tensorboard
|
||||
|
||||
_available_trackers.append(LoggerType.TENSORBOARD)
|
||||
|
||||
if is_wandb_available():
|
||||
import wandb
|
||||
|
||||
_available_trackers.append(LoggerType.WANDB)
|
||||
|
||||
if is_comet_ml_available():
|
||||
from comet_ml import Experiment
|
||||
|
||||
_available_trackers.append(LoggerType.COMETML)
|
||||
|
||||
if is_aim_available():
|
||||
from aim import Run
|
||||
|
||||
_available_trackers.append(LoggerType.AIM)
|
||||
|
||||
if is_mlflow_available():
|
||||
import mlflow
|
||||
|
||||
_available_trackers.append(LoggerType.MLFLOW)
|
||||
|
||||
logger = get_logger(__name__)
|
||||
@ -185,6 +172,10 @@ class TensorBoardTracker(GeneralTracker):
|
||||
|
||||
@on_main_process
|
||||
def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike], **kwargs):
|
||||
try:
|
||||
from torch.utils import tensorboard
|
||||
except ModuleNotFoundError:
|
||||
import tensorboardX as tensorboard
|
||||
super().__init__()
|
||||
self.run_name = run_name
|
||||
self.logging_dir = os.path.join(logging_dir, run_name)
|
||||
@ -293,6 +284,9 @@ class WandBTracker(GeneralTracker):
|
||||
def __init__(self, run_name: str, **kwargs):
|
||||
super().__init__()
|
||||
self.run_name = run_name
|
||||
|
||||
import wandb
|
||||
|
||||
self.run = wandb.init(project=self.run_name, **kwargs)
|
||||
logger.debug(f"Initialized WandB project {self.run_name}")
|
||||
logger.debug(
|
||||
@ -313,6 +307,8 @@ class WandBTracker(GeneralTracker):
|
||||
Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
|
||||
`str`, `float`, `int`, or `None`.
|
||||
"""
|
||||
import wandb
|
||||
|
||||
wandb.config.update(values, allow_val_change=True)
|
||||
logger.debug("Stored initial configuration hyperparameters to WandB")
|
||||
|
||||
@ -346,6 +342,8 @@ class WandBTracker(GeneralTracker):
|
||||
kwargs:
|
||||
Additional key word arguments passed along to the `wandb.log` method.
|
||||
"""
|
||||
import wandb
|
||||
|
||||
for k, v in values.items():
|
||||
self.log({k: [wandb.Image(image) for image in v]}, step=step, **kwargs)
|
||||
logger.debug("Successfully logged images to WandB")
|
||||
@ -376,6 +374,7 @@ class WandBTracker(GeneralTracker):
|
||||
step (`int`, *optional*):
|
||||
The run step. If included, the log will be affiliated with this step.
|
||||
"""
|
||||
import wandb
|
||||
|
||||
values = {table_name: wandb.Table(columns=columns, data=data, dataframe=dataframe)}
|
||||
self.log(values, step=step, **kwargs)
|
||||
@ -409,6 +408,9 @@ class CometMLTracker(GeneralTracker):
|
||||
def __init__(self, run_name: str, **kwargs):
|
||||
super().__init__()
|
||||
self.run_name = run_name
|
||||
|
||||
from comet_ml import Experiment
|
||||
|
||||
self.writer = Experiment(project_name=run_name, **kwargs)
|
||||
logger.debug(f"Initialized CometML project {self.run_name}")
|
||||
logger.debug(
|
||||
@ -484,6 +486,9 @@ class AimTracker(GeneralTracker):
|
||||
@on_main_process
|
||||
def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = ".", **kwargs):
|
||||
self.run_name = run_name
|
||||
|
||||
from aim import Run
|
||||
|
||||
self.writer = Run(repo=logging_dir, **kwargs)
|
||||
self.writer.name = self.run_name
|
||||
logger.debug(f"Initialized Aim project {self.run_name}")
|
||||
@ -581,6 +586,8 @@ class MLflowTracker(GeneralTracker):
|
||||
|
||||
nested_run = os.getenv("MLFLOW_NESTED_RUN", nested_run)
|
||||
|
||||
import mlflow
|
||||
|
||||
exps = mlflow.search_experiments(filter_string=f"name = '{experiment_name}'")
|
||||
if len(exps) > 0:
|
||||
if len(exps) > 1:
|
||||
@ -620,6 +627,7 @@ class MLflowTracker(GeneralTracker):
|
||||
values (`dict`):
|
||||
Values to be stored as initial hyperparameters as key-value pairs.
|
||||
"""
|
||||
import mlflow
|
||||
|
||||
for name, value in list(values.items()):
|
||||
# internally, all values are converted to str in MLflow
|
||||
@ -658,6 +666,7 @@ class MLflowTracker(GeneralTracker):
|
||||
f'MLflowTracker is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. '
|
||||
"MLflow's log_metric() only accepts float and int types so we dropped this attribute."
|
||||
)
|
||||
import mlflow
|
||||
|
||||
mlflow.log_metrics(metrics, step=step)
|
||||
logger.debug("Successfully logged to mlflow")
|
||||
@ -667,6 +676,8 @@ class MLflowTracker(GeneralTracker):
|
||||
"""
|
||||
End the active MLflow run.
|
||||
"""
|
||||
import mlflow
|
||||
|
||||
mlflow.end_run()
|
||||
|
||||
|
||||
|
||||
@ -4,6 +4,7 @@ from .constants import (
|
||||
RNG_STATE_NAME,
|
||||
SAFE_WEIGHTS_INDEX_NAME,
|
||||
SAFE_WEIGHTS_NAME,
|
||||
SAMPLER_NAME,
|
||||
SCALER_NAME,
|
||||
SCHEDULER_NAME,
|
||||
TORCH_DISTRIBUTED_OPERATION_TYPES,
|
||||
@ -35,7 +36,7 @@ from .dataclasses import (
|
||||
TensorInformation,
|
||||
TorchDynamoPlugin,
|
||||
)
|
||||
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
|
||||
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env, str_to_bool
|
||||
from .imports import (
|
||||
get_ccl_version,
|
||||
is_4bit_bnb_available,
|
||||
@ -59,12 +60,14 @@ from .imports import (
|
||||
is_safetensors_available,
|
||||
is_sagemaker_available,
|
||||
is_tensorboard_available,
|
||||
is_timm_available,
|
||||
is_tpu_available,
|
||||
is_transformers_available,
|
||||
is_wandb_available,
|
||||
is_xpu_available,
|
||||
)
|
||||
from .modeling import (
|
||||
calculate_maximum_sizes,
|
||||
check_device_map,
|
||||
check_tied_parameters_in_config,
|
||||
check_tied_parameters_on_same_device,
|
||||
@ -162,7 +165,9 @@ from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
|
||||
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
|
||||
from .memory import find_executable_batch_size, release_memory
|
||||
from .other import (
|
||||
check_os_kernel,
|
||||
clear_environment,
|
||||
convert_bytes,
|
||||
extract_model_from_parallel,
|
||||
get_pretty_name,
|
||||
is_port_in_use,
|
||||
|
||||
@ -20,6 +20,7 @@ MODEL_NAME = "pytorch_model"
|
||||
RNG_STATE_NAME = "random_states"
|
||||
OPTIMIZER_NAME = "optimizer"
|
||||
SCHEDULER_NAME = "scheduler"
|
||||
SAMPLER_NAME = "sampler"
|
||||
WEIGHTS_NAME = "pytorch_model.bin"
|
||||
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
|
||||
SAFE_WEIGHTS_NAME = "model.safetensors"
|
||||
@ -33,7 +34,7 @@ FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
|
||||
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
|
||||
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
|
||||
FSDP_PYTORCH_VERSION = "2.0.1"
|
||||
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
|
||||
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich", "mpich"]
|
||||
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
|
||||
|
||||
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
|
||||
|
||||
@ -26,12 +26,14 @@ import warnings
|
||||
from contextlib import contextmanager
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import timedelta
|
||||
from distutils.util import strtobool
|
||||
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
|
||||
|
||||
import torch
|
||||
|
||||
from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_STATE_DICT_TYPE
|
||||
from .environment import str_to_bool
|
||||
from .imports import is_xpu_available
|
||||
from .versions import compare_versions
|
||||
|
||||
|
||||
class KwargsHandler:
|
||||
@ -199,6 +201,29 @@ class FP8RecipeKwargs(KwargsHandler):
|
||||
raise ValueError("`amax_compute_algo` must be 'max' or 'most_recent'")
|
||||
|
||||
|
||||
class EnumWithContains(enum.EnumMeta):
|
||||
"A metaclass that adds the ability to check if `self` contains an item with the `in` operator"
|
||||
|
||||
def __contains__(cls, item):
|
||||
try:
|
||||
cls(item)
|
||||
except ValueError:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class BaseEnum(enum.Enum, metaclass=EnumWithContains):
|
||||
"An enum class that can get the value of an item with `str(Enum.key)`"
|
||||
|
||||
def __str__(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def list(cls):
|
||||
"Method to list all the possible items in `cls`"
|
||||
return list(map(str, cls))
|
||||
|
||||
|
||||
class DistributedType(str, enum.Enum):
|
||||
"""
|
||||
Represents a type of distributed environment.
|
||||
@ -258,7 +283,7 @@ class ComputeEnvironment(str, enum.Enum):
|
||||
AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER"
|
||||
|
||||
|
||||
class DynamoBackend(str, enum.Enum):
|
||||
class DynamoBackend(str, BaseEnum):
|
||||
"""
|
||||
Represents a dynamo backend (see https://github.com/pytorch/torchdynamo).
|
||||
|
||||
@ -272,19 +297,21 @@ class DynamoBackend(str, enum.Enum):
|
||||
- **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton
|
||||
kernels. [Read
|
||||
more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747)
|
||||
- **NVFUSER** -- nvFuser with TorchScript. [Read
|
||||
- **AOT_TS_NVFUSER** -- nvFuser with AotAutograd/TorchScript. [Read
|
||||
more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
|
||||
- **AOT_NVFUSER** -- nvFuser with AotAutograd. [Read
|
||||
- **NVPRIMS_NVFUSER** -- nvFuser with PrimTorch. [Read
|
||||
more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
|
||||
- **AOT_CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read
|
||||
more](https://github.com/pytorch/torchdynamo/pull/757)
|
||||
- **CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read more](https://github.com/pytorch/torchdynamo/pull/757)
|
||||
- **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read
|
||||
more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html)
|
||||
- **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. [Read
|
||||
more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst)
|
||||
- **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/)
|
||||
- **TENSORRT** -- Uses ONNXRT to run TensorRT for inference optimizations. [Read
|
||||
more](https://github.com/onnx/onnx-tensorrt)
|
||||
- **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read
|
||||
more](https://github.com/intel/intel-extension-for-pytorch).
|
||||
- **TVM** -- Uses Apache TVM for inference optimizations. [Read more](https://tvm.apache.org/)
|
||||
|
||||
"""
|
||||
|
||||
@ -293,36 +320,15 @@ class DynamoBackend(str, enum.Enum):
|
||||
EAGER = "EAGER"
|
||||
AOT_EAGER = "AOT_EAGER"
|
||||
INDUCTOR = "INDUCTOR"
|
||||
NVFUSER = "NVFUSER"
|
||||
AOT_NVFUSER = "AOT_NVFUSER"
|
||||
AOT_CUDAGRAPHS = "AOT_CUDAGRAPHS"
|
||||
AOT_TS_NVFUSER = "AOT_TS_NVFUSER"
|
||||
NVPRIMS_NVFUSER = "NVPRIMS_NVFUSER"
|
||||
CUDAGRAPHS = "CUDAGRAPHS"
|
||||
OFI = "OFI"
|
||||
FX2TRT = "FX2TRT"
|
||||
ONNXRT = "ONNXRT"
|
||||
TENSORRT = "TENSORRT"
|
||||
IPEX = "IPEX"
|
||||
|
||||
|
||||
class EnumWithContains(enum.EnumMeta):
|
||||
"A metaclass that adds the ability to check if `self` contains an item with the `in` operator"
|
||||
|
||||
def __contains__(cls, item):
|
||||
try:
|
||||
cls(item)
|
||||
except ValueError:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class BaseEnum(enum.Enum, metaclass=EnumWithContains):
|
||||
"An enum class that can get the value of an item with `str(Enum.key)`"
|
||||
|
||||
def __str__(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def list(cls):
|
||||
"Method to list all the possible items in `cls`"
|
||||
return list(map(str, cls))
|
||||
TVM = "TVM"
|
||||
|
||||
|
||||
class LoggerType(BaseEnum):
|
||||
@@ -414,6 +420,16 @@ class ProjectConfiguration:
        metadata={"help": "The current save iteration."},
    )

    save_on_each_node: bool = field(
        default=False,
        metadata={
            "help": (
                "When doing multi-node distributed training, whether to save models and checkpoints on each node, or"
                " only on the main one"
            )
        },
    )

    def set_directories(self, project_dir: str = None):
        "Sets `self.project_dir` and `self.logging_dir` to the appropriate values."
        self.project_dir = project_dir

@@ -471,9 +487,9 @@ class TorchDynamoPlugin(KwargsHandler):
        if self.mode is None:
            self.mode = os.environ.get(prefix + "MODE", "default")
        if self.fullgraph is None:
            self.fullgraph = strtobool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
            self.fullgraph = str_to_bool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
        if self.dynamic is None:
            self.dynamic = strtobool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1
            self.dynamic = str_to_bool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1

    def to_dict(self):
        dynamo_config = copy.deepcopy(self.__dict__)

@@ -494,7 +510,10 @@ class DeepSpeedPlugin:
        },
    )
    gradient_accumulation_steps: int = field(
        default=None, metadata={"help": "Number of steps to accumulate gradients before updating optimizer states"}
        default=None,
        metadata={
            "help": "Number of steps to accumulate gradients before updating optimizer states. If not set, will use the value from the `Accelerator` directly."
        },
    )
    gradient_clipping: float = field(default=None, metadata={"help": "Enable gradient clipping with value"})
    zero_stage: int = field(

@@ -537,7 +556,8 @@ class DeepSpeedPlugin:
        from .deepspeed import HfDeepSpeedConfig

        if self.gradient_accumulation_steps is None:
            self.gradient_accumulation_steps = int(os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", 1))
            gas = os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "auto")
            self.gradient_accumulation_steps = int(gas) if gas.isdigit() else gas

        if self.gradient_clipping is None:
            gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "none")

@@ -630,7 +650,7 @@ class DeepSpeedPlugin:
            self.deepspeed_config["steps_per_print"] = float("inf")  # this will stop deepspeed from logging @ stdout
        if self.zero3_init_flag is None:
            self.zero3_init_flag = (
                strtobool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1
                str_to_bool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1
            )
        if self.zero3_init_flag and not self.hf_ds_config.is_zero3():
            warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.")

@@ -722,10 +742,13 @@ class DeepSpeedPlugin:
                or ds_config["train_micro_batch_size_per_gpu"] == "auto"
            ):
                ds_config["train_micro_batch_size_per_gpu"] = 1
            if ds_config["train_batch_size"] == "auto":
            if ds_config.get("train_batch_size", None) == "auto":
                del ds_config["train_batch_size"]

            from transformers.deepspeed import HfDeepSpeedConfig
            if compare_versions("transformers", "<", "4.33"):
                from transformers.deepspeed import HfDeepSpeedConfig
            else:
                from transformers.integrations import HfDeepSpeedConfig

            self.dschf = HfDeepSpeedConfig(ds_config)  # keep this object alive # noqa
@@ -872,6 +895,14 @@ class FullyShardedDataParallelPlugin:
            "all-gather while executing in the forward pass. only use with Static graphs."
        },
    )
    activation_checkpointing: bool = field(
        default=False,
        metadata={
            "help": "If True, activation checkpointing is a technique to reduce memory usage by clearing activations of "
            "certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time "
            "for reduced memory usage."
        },
    )

    def __post_init__(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy

@@ -881,7 +912,7 @@ class FullyShardedDataParallelPlugin:
            self.sharding_strategy = ShardingStrategy(int(os.environ.get(prefix + "SHARDING_STRATEGY", 1)))

        if self.cpu_offload is None:
            if strtobool(os.environ.get(prefix + "OFFLOAD_PARAMS", "False")) == 1:
            if str_to_bool(os.environ.get(prefix + "OFFLOAD_PARAMS", "False")) == 1:
                self.cpu_offload = CPUOffload(offload_params=True)
            else:
                self.cpu_offload = CPUOffload(offload_params=False)

@@ -894,12 +925,14 @@ class FullyShardedDataParallelPlugin:
        if self.state_dict_type is None:
            state_dict_type_policy = os.environ.get(prefix + "STATE_DICT_TYPE", "FULL_STATE_DICT")
            self.set_state_dict_type(state_dict_type_policy)
        self.use_orig_params = strtobool(os.environ.get(prefix + "USE_ORIG_PARAMS", "False")) == 1
        self.sync_module_states = strtobool(os.environ.get(prefix + "SYNC_MODULE_STATES", "True")) == 1
        self.forward_prefetch = strtobool(os.environ.get(prefix + "FORWARD_PREFETCH", "False")) == 1
        self.use_orig_params = str_to_bool(os.environ.get(prefix + "USE_ORIG_PARAMS", "False")) == 1
        self.sync_module_states = str_to_bool(os.environ.get(prefix + "SYNC_MODULE_STATES", "True")) == 1
        self.forward_prefetch = str_to_bool(os.environ.get(prefix + "FORWARD_PREFETCH", "False")) == 1
        self.activation_checkpointing = str_to_bool(os.environ.get(prefix + "ACTIVATION_CHECKPOINTING", "False")) == 1

        if self.sync_module_states:
            self.param_init_fn = lambda x: x.to_empty(device=torch.cuda.current_device(), recurse=False)
            device = torch.cuda.current_device() if not is_xpu_available() else torch.xpu.current_device()
            self.param_init_fn = lambda x: x.to_empty(device=device, recurse=False)

    @staticmethod
    def get_module_class_from_name(module, name):
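The new `activation_checkpointing` field is also read from the `FSDP_ACTIVATION_CHECKPOINTING` environment variable in `__post_init__`. A hedged sketch of turning it on, assuming an FSDP-capable multi-GPU setup:

```python
from accelerate import Accelerator
from accelerate.utils import FullyShardedDataParallelPlugin

fsdp_plugin = FullyShardedDataParallelPlugin(activation_checkpointing=True)
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
# or, equivalently, export FSDP_ACTIVATION_CHECKPOINTING=true before `accelerate launch`
```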
@@ -1152,13 +1185,13 @@ class MegatronLMPlugin:
        if self.gradient_clipping is None:
            self.gradient_clipping = float(os.environ.get(prefix + "GRADIENT_CLIPPING", 1.0))
        if self.recompute_activation is None:
            self.recompute_activation = strtobool(os.environ.get(prefix + "RECOMPUTE_ACTIVATION", "False")) == 1
            self.recompute_activation = str_to_bool(os.environ.get(prefix + "RECOMPUTE_ACTIVATION", "False")) == 1
        if self.use_distributed_optimizer is None:
            self.use_distributed_optimizer = (
                strtobool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1
                str_to_bool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1
            )
        if self.sequence_parallelism is None:
            self.sequence_parallelism = strtobool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1
            self.sequence_parallelism = str_to_bool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1

        if self.pp_degree > 1 or self.use_distributed_optimizer:
            self.DDP_impl = "local"
@@ -254,16 +254,19 @@ class DummyScheduler:
    Args:
        optimizer (`torch.optim.optimizer.Optimizer`):
            The optimizer to wrap.
        total_num_steps (int):
        total_num_steps (int, *optional*):
            Total number of steps.
        warmup_num_steps (int):
        warmup_num_steps (int, *optional*):
            Number of steps for warmup.
        lr_scheduler_callable (callable, *optional*):
            A callable function that creates an LR Scheduler. It accepts only one argument `optimizer`.
        **kwargs:
            Other arguments.
    """

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, lr_scheduler_callable=None, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.lr_scheduler_callable = lr_scheduler_callable
        self.kwargs = kwargs
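With the new `lr_scheduler_callable` argument, a `DummyScheduler` can defer scheduler construction to Accelerate when the DeepSpeed config file does not define one. A sketch based on the test added later in this diff; `model` and `accelerator` are assumed to already exist:

```python
from accelerate.utils import DummyOptim, DummyScheduler
from transformers import get_scheduler

dummy_optimizer = DummyOptim(params=model.parameters(), lr=5e-5)

def _lr_scheduler_callable(optimizer):
    # called with the real optimizer once DeepSpeed has built it
    return get_scheduler("linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=1000)

dummy_lr_scheduler = DummyScheduler(dummy_optimizer, lr_scheduler_callable=_lr_scheduler_callable)
model, optimizer, lr_scheduler = accelerator.prepare(model, dummy_optimizer, dummy_lr_scheduler)
```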
@@ -13,7 +13,21 @@
# limitations under the License.

import os
from distutils.util import strtobool


def str_to_bool(value) -> int:
    """
    Converts a string representation of truth to `True` (1) or `False` (0).

    True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`;
    """
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return 1
    elif value in ("n", "no", "f", "false", "off", "0"):
        return 0
    else:
        raise ValueError(f"invalid truth value {value}")


def get_int_from_env(env_keys, default):

@@ -28,7 +42,7 @@ def get_int_from_env(env_keys, default):
def parse_flag_from_env(key, default=False):
    """Returns truthy value for `key` from the env if available else the default."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
    return str_to_bool(value) == 1  # As its name indicates `str_to_bool` actually returns an int...


def parse_choice_from_env(key, default="no"):
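`str_to_bool` replaces the deprecated `distutils.util.strtobool` with the same truthy/falsy vocabulary, and `parse_flag_from_env` now builds on it. A quick sketch (the flag name is made up, for illustration only):

```python
import os
from accelerate.utils.environment import parse_flag_from_env, str_to_bool

assert str_to_bool("YES") == 1 and str_to_bool("off") == 0
os.environ["MY_FAKE_FLAG"] = "true"                # hypothetical variable name
assert parse_flag_from_env("MY_FAKE_FLAG") is True
```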
@@ -16,14 +16,13 @@ import importlib
import importlib.metadata
import os
import warnings
from distutils.util import strtobool
from functools import lru_cache

import torch
from packaging import version
from packaging.version import parse

from .environment import parse_flag_from_env
from .environment import parse_flag_from_env, str_to_bool
from .versions import compare_versions, is_torch_version


@@ -117,8 +116,6 @@ def is_bf16_available(ignore_tpu=False):
        return not ignore_tpu
    if torch.cuda.is_available():
        return torch.cuda.is_bf16_supported()
    if is_npu_available():
        return False
    return True


@@ -143,7 +140,7 @@ def is_bnb_available():


def is_megatron_lm_available():
    if strtobool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1:
    if str_to_bool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1:
        package_exists = importlib.util.find_spec("megatron") is not None
        if package_exists:
            try:

@@ -166,6 +163,10 @@ def is_datasets_available():
    return _is_package_available("datasets")


def is_timm_available():
    return _is_package_available("timm")


def is_aim_available():
    package_exists = _is_package_available("aim")
    if package_exists:

@@ -210,7 +211,16 @@ def is_tqdm_available():


def is_mlflow_available():
    return _is_package_available("mlflow")
    if _is_package_available("mlflow"):
        return True

    if importlib.util.find_spec("mlflow") is not None:
        try:
            _ = importlib.metadata.metadata("mlflow-skinny")
            return True
        except importlib.metadata.PackageNotFoundError:
            return False
    return False


def is_mps_available():
@@ -21,7 +21,6 @@ from typing import Any, Dict, List, Tuple
import torch

from ..commands.config.config_args import SageMakerConfig
from ..commands.config.config_utils import DYNAMO_BACKENDS
from ..utils import (
    DynamoBackend,
    PrecisionType,

@@ -89,7 +88,9 @@ def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str]
    try:
        dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
    except ValueError:
        raise ValueError(f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.")
        raise ValueError(
            f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
        )
    current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value
    current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode
    current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph)

@@ -163,7 +164,9 @@ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
    try:
        dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
    except ValueError:
        raise ValueError(f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.")
        raise ValueError(
            f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
        )
    current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value
    current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode
    current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph)

@@ -171,6 +174,9 @@ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:

    if args.use_fsdp:
        current_env["ACCELERATE_USE_FSDP"] = "true"
        if args.fsdp_cpu_ram_efficient_loading and not args.fsdp_sync_module_states:
            raise ValueError("When using `--fsdp_cpu_ram_efficient_loading` set `--fsdp_sync_module_states` to `True`")

        current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy)
        current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower()
        current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params)

@@ -184,6 +190,7 @@ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
        current_env["FSDP_STATE_DICT_TYPE"] = str(args.fsdp_state_dict_type)
        current_env["FSDP_FORWARD_PREFETCH"] = str(args.fsdp_forward_prefetch).lower()
        current_env["FSDP_USE_ORIG_PARAMS"] = str(args.fsdp_use_orig_params).lower()
        current_env["FSDP_CPU_RAM_EFFICIENT_LOADING"] = str(args.fsdp_cpu_ram_efficient_loading).lower()
        current_env["FSDP_SYNC_MODULE_STATES"] = str(args.fsdp_sync_module_states).lower()

    if args.use_megatron_lm:

@@ -419,7 +426,9 @@ def prepare_sagemager_args_inputs(
    try:
        dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
    except ValueError:
        raise ValueError(f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.")
        raise ValueError(
            f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
        )

    # Environment variables to be set for use during training job
    environment = {

@@ -537,7 +546,9 @@ class PrepareForLaunch:
        ):
            # Prepare the environment for torch.distributed
            os.environ["LOCAL_RANK"] = str(index)
            os.environ["RANK"] = str(index)
            nproc = int(os.environ.get("NPROC", 1))
            node_rank = int(os.environ.get("NODE_RANK", 0))
            os.environ["RANK"] = str(nproc * node_rank + index)

            os.environ["FORK_LAUNCHED"] = str(1)
        self.launcher(*args)
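The `PrepareForLaunch` change above fixes the global rank computed for forked processes: it is now derived from the node rank and the number of processes per node instead of reusing the local index. The arithmetic, with made-up values:

```python
# Local index 3 on node 2 with 8 processes per node is global rank 19.
nproc, node_rank, local_index = 8, 2, 3
assert nproc * node_rank + local_index == 19
```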
@@ -35,6 +35,10 @@ from .offload import load_offloaded_weight, offload_weight, save_offload_index
from .tqdm import is_tqdm_available, tqdm


if is_npu_available(check_device=False):
    import torch_npu  # noqa: F401


if is_safetensors_available():
    from safetensors import safe_open
    from safetensors.torch import load_file as safe_load_file

@@ -246,7 +250,7 @@ def set_module_tensor_to_device(
    Args:
        module (`torch.nn.Module`):
            The module in which the tensor we want to move lives.
        param_name (`str`):
        tensor_name (`str`):
            The full name of the parameter/buffer.
        device (`int`, `str` or `torch.device`):
            The device on which to set the tensor.

@@ -766,9 +770,6 @@ def get_balanced_memory(
    user_not_set_max_memory = max_memory is None
    max_memory = get_max_memory(max_memory)

    if not (torch.cuda.is_available() or is_xpu_available()) or is_mps_available():
        return max_memory

    if not is_xpu_available():
        num_devices = len([d for d in max_memory if torch.device(d).type == "cuda" and max_memory[d] > 0])
    else:

@@ -784,6 +785,9 @@ def get_balanced_memory(
        ]
    )

    if num_devices == 0:
        return max_memory

    if num_devices == 1:
        # We cannot do low_zero on just one GPU, but we will still reserve some memory for the buffer
        low_zero = False

@@ -856,6 +860,24 @@ def get_balanced_memory(
    return max_memory


def calculate_maximum_sizes(model: torch.nn.Module):
    "Computes the total size of the model and its largest layer"
    sizes = compute_module_sizes(model)
    # `transformers` models store this information for us
    no_split_modules = getattr(model, "_no_split_modules", None)
    if no_split_modules is None:
        no_split_modules = []

    modules_to_treat = (
        list(model.named_parameters(recurse=False))
        + list(model.named_children())
        + list(model.named_buffers(recurse=False))
    )
    largest_layer = get_max_layer_size(modules_to_treat, sizes, no_split_modules)
    total_size = sizes[""]
    return total_size, largest_layer
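`calculate_maximum_sizes` is the helper the new `accelerate estimate-memory` command relies on. A rough sketch of calling it directly; the exact shape of the second return value comes from `get_max_layer_size` and is not spelled out in this hunk:

```python
import torch
from accelerate.utils.modeling import calculate_maximum_sizes

model = torch.nn.Sequential(torch.nn.Linear(512, 512), torch.nn.Linear(512, 10))
total_size, largest_layer = calculate_maximum_sizes(model)
print(total_size, largest_layer)  # total bytes, plus info about the biggest un-splittable layer
```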
def infer_auto_device_map(
    model: nn.Module,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
@@ -1429,15 +1451,14 @@ def get_mixed_precision_context_manager(native_amp: bool = False, autocast_kwarg
    autocast_kwargs = autocast_kwargs.to_kwargs()
    if native_amp:
        if state.mixed_precision == "fp16":
            if is_npu_available():
                return torch.npu.amp.autocast(dtype=torch.float16, **autocast_kwargs)
            else:
                return torch.autocast(device_type=state.device.type, dtype=torch.float16, **autocast_kwargs)
            return torch.autocast(device_type=state.device.type, dtype=torch.float16, **autocast_kwargs)
        elif state.mixed_precision == "bf16" and state.distributed_type in [
            DistributedType.NO,
            DistributedType.MULTI_CPU,
            DistributedType.MULTI_GPU,
            DistributedType.MULTI_NPU,
            DistributedType.MULTI_XPU,
            DistributedType.FSDP,
        ]:
            return torch.autocast(device_type=state.device.type, dtype=torch.bfloat16, **autocast_kwargs)
        else:

@@ -25,7 +25,7 @@ import torch
from ..state import PartialState
from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES
from .dataclasses import DistributedType, TensorInformation
from .imports import is_torch_distributed_available, is_tpu_available
from .imports import is_torch_distributed_available, is_torch_version, is_tpu_available


if is_tpu_available(check_device=False):

@@ -280,6 +280,12 @@ def _tpu_gather(tensor):


def _gpu_gather(tensor):
    state = PartialState()
    if is_torch_version(">=", "1.13"):
        gather_op = torch.distributed.all_gather_into_tensor
    else:
        gather_op = torch.distributed._all_gather_base

    def _gpu_gather_one(tensor):
        if tensor.ndim == 0:
            tensor = tensor.clone()[None]

@@ -287,9 +293,26 @@ def _gpu_gather(tensor):
        # Can only gather contiguous tensors
        if not tensor.is_contiguous():
            tensor = tensor.contiguous()
        output_tensors = [torch.empty_like(tensor) for _ in range(torch.distributed.get_world_size())]
        torch.distributed.all_gather(output_tensors, tensor)
        return torch.cat(output_tensors, dim=0)

        if state.backend is not None and state.backend != "gloo":
            # We use `empty` as `all_gather_into_tensor` slightly
            # differs from `all_gather` for better efficiency,
            # and we rely on the number of items in the tensor
            # rather than its direct shape
            output_tensors = torch.empty(
                state.num_processes * tensor.numel(),
                dtype=tensor.dtype,
                device=state.device,
            )
            gather_op(output_tensors, tensor)
            return output_tensors.view(-1, *tensor.size()[1:])
        else:
            # a backend of `None` is always CPU
            # also gloo does not support `all_gather_into_tensor`,
            # which will result in a larger memory overhead for the op
            output_tensors = [torch.empty_like(tensor) for _ in range(state.num_processes)]
            torch.distributed.all_gather(output_tensors, tensor)
            return torch.cat(output_tensors, dim=0)

    return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
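The rewritten `_gpu_gather` pre-allocates one flat buffer and calls `all_gather_into_tensor` (or the older `_all_gather_base`) on NCCL-style backends, keeping the per-rank `all_gather` path only for gloo/CPU. A standalone sketch of the two strategies, assuming `torch.distributed` is already initialized (for example via `accelerate launch`); this is not Accelerate's exact code:

```python
import torch
import torch.distributed as dist

def gather_example(tensor: torch.Tensor) -> torch.Tensor:
    world_size = dist.get_world_size()
    if dist.get_backend() != "gloo" and hasattr(dist, "all_gather_into_tensor"):
        # single pre-allocated flat buffer, reshaped once at the end
        out = torch.empty(world_size * tensor.numel(), dtype=tensor.dtype, device=tensor.device)
        dist.all_gather_into_tensor(out, tensor)
        return out.view(-1, *tensor.size()[1:])
    # gloo/CPU fallback: one buffer per rank, concatenated afterwards
    outs = [torch.empty_like(tensor) for _ in range(world_size)]
    dist.all_gather(outs, tensor)
    return torch.cat(outs, dim=0)
```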
@@ -547,7 +570,7 @@ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):


@verify_operation
def reduce(tensor, reduction="mean"):
def reduce(tensor, reduction="mean", scale=1.0):
    """
    Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes by the
    mean of a given operation.

@@ -557,25 +580,29 @@ def reduce(tensor, reduction="mean"):
            The data to reduce.
        reduction (`str`, *optional*, defaults to `"mean"`):
            A reduction method. Can be of "mean", "sum", or "none"
        scale (`float`, *optional*):
            A default scaling value to be applied after the reduce, only valid on XLA.

    Returns:
        The same data structure as `data` with all the tensors reduced.
    """

    def _reduce_across_processes(tensor, reduction="mean"):
    def _reduce_across_processes(tensor, reduction="mean", scale=1.0):
        state = PartialState()
        cloned_tensor = tensor.clone()
        if state.distributed_type == DistributedType.NO:
            return cloned_tensor
        if state.distributed_type == DistributedType.TPU:
            xm.all_reduce("sum", cloned_tensor)
            xm.all_reduce("sum", cloned_tensor, scale)
        elif state.distributed_type.value in TORCH_DISTRIBUTED_OPERATION_TYPES:
            torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)
        if reduction == "mean":
            cloned_tensor /= state.num_processes
        return cloned_tensor

    return recursively_apply(_reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction)
    return recursively_apply(
        _reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction, scale=scale
    )


def convert_to_fp32(tensor):
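`reduce` now forwards a `scale` factor that only takes effect on the XLA path (`xm.all_reduce("sum", tensor, scale)`). A hedged usage sketch, assuming an existing `accelerator`:

```python
import torch
from accelerate.utils import reduce

loss = torch.tensor(0.25, device=accelerator.device)
mean_loss = reduce(loss, reduction="mean")
scaled = reduce(loss, reduction="sum", scale=0.5)  # the scale is applied only on TPU/XLA
```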
@@ -13,27 +13,35 @@
# limitations under the License.

import os
import platform
import re
import socket
from contextlib import contextmanager
from functools import partial
from types import MethodType

import torch
from packaging.version import Version

from ..commands.config.default import write_basic_config  # noqa: F401
from ..logging import get_logger
from ..state import PartialState
from .constants import FSDP_PYTORCH_VERSION
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .imports import is_deepspeed_available, is_safetensors_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine
logger = get_logger(__name__)


if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm

if is_safetensors_available():
    from safetensors.torch import save_file as safe_save_file


def is_compiled_module(module):
    """

@@ -65,6 +73,8 @@ def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
        model = model._orig_mod

    if is_deepspeed_available():
        from deepspeed import DeepSpeedEngine

        options += (DeepSpeedEngine,)

    if is_torch_version(">=", FSDP_PYTORCH_VERSION):

@@ -107,18 +117,27 @@ def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = False):
    """
    Save the data to disk. Use in place of `torch.save()`.

    Args:
        obj: The data to save
        f: The file (or file-like object) to use to save the data
        obj:
            The data to save
        f:
            The file (or file-like object) to use to save the data
        save_on_each_node (`bool`, *optional*, defaults to `False`):
            Whether to only save on the global main process
        safe_serialization (`bool`, *optional*, defaults to `False`):
            Whether to save `obj` using `safetensors`
    """
    save_func = torch.save if not safe_serialization else partial(safe_save_file, metadata={"format": "pt"})
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
    elif PartialState().is_main_process and not save_on_each_node:
        save_func(obj, f)
    elif PartialState().is_local_main_process and save_on_each_node:
        save_func(obj, f)


@contextmanager
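`save` now takes `save_on_each_node` and `safe_serialization`; the latter swaps `torch.save` for safetensors' `save_file` with `{"format": "pt"}` metadata. A sketch, assuming `model` exists and safetensors is installed:

```python
from accelerate.utils import save

weights = model.state_dict()
save(weights, "model.safetensors", safe_serialization=True)   # safetensors file from the main process
save(weights, "model.bin", save_on_each_node=True)            # one copy per node in multi-node runs
```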
@@ -172,14 +191,22 @@ def patch_environment(**kwargs):
    >>> print(os.environ["FOO"])  # raises KeyError
    ```
    """
    existing_vars = {}
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
        key = key.upper()
        if key in os.environ:
            existing_vars[key] = os.environ[key]
        os.environ[key] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
        key = key.upper()
        if key in existing_vars:
            # restore previous value
            os.environ[key] = existing_vars[key]
        else:
            os.environ.pop(key, None)


def get_pretty_name(obj):
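The `patch_environment` fix restores variables that already existed before the context instead of deleting them on exit. The guaranteed behaviour, mirroring the regression test added further down in this diff:

```python
import os
from accelerate.utils import patch_environment

os.environ["AA"] = "1"
with patch_environment(aa=10, bb=20):
    assert os.environ["AA"] == "10" and os.environ["BB"] == "20"
assert os.environ["AA"] == "1"   # pre-existing value restored
assert "BB" not in os.environ    # newly added key removed
```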
@@ -222,3 +249,31 @@ def is_port_in_use(port: int = None) -> bool:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0


def convert_bytes(size):
    "Converts `size` from bytes to the largest possible unit"
    for x in ["bytes", "KB", "MB", "GB", "TB"]:
        if size < 1024.0:
            return f"{round(size, 2)} {x}"
        size /= 1024.0

    return f"{round(size, 2)} PB"


def check_os_kernel():
    """Warns if the kernel version is below the recommended minimum on Linux."""
    # see issue #1929
    info = platform.uname()
    system = info.system
    if system != "Linux":
        return

    _, version, *_ = re.split(r"(\d+\.\d+\.\d+)", info.release)
    min_version = "5.5.0"
    if Version(version) < Version(min_version):
        msg = (
            f"Detected kernel version {version}, which is below the recommended minimum of {min_version}; this can "
            "cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher."
        )
        logger.warning(msg, main_process_only=True)
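`convert_bytes` simply walks up the unit ladder in steps of 1024. For example:

```python
from accelerate.utils import convert_bytes

print(convert_bytes(1023))       # '1023 bytes'
print(convert_bytes(433249280))  # '413.18 MB', the bert-base-cased fp32 size used in the estimator tests
```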
@ -31,7 +31,6 @@ from transformers.utils import is_torch_bf16_available
|
||||
|
||||
import accelerate
|
||||
from accelerate.accelerator import Accelerator
|
||||
from accelerate.scheduler import AcceleratedScheduler
|
||||
from accelerate.state import AcceleratorState
|
||||
from accelerate.test_utils.testing import (
|
||||
AccelerateTestCase,
|
||||
@ -56,8 +55,6 @@ from accelerate.utils.other import patch_environment
|
||||
|
||||
set_seed(42)
|
||||
|
||||
T5_SMALL = "t5-small"
|
||||
T5_TINY = "patrickvonplaten/t5-tiny-random"
|
||||
GPT2_TINY = "sshleifer/tiny-gpt2"
|
||||
|
||||
ZERO2 = "zero2"
|
||||
@ -332,7 +329,8 @@ class DeepSpeedConfigIntegration(AccelerateTestCase):
|
||||
model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
|
||||
)
|
||||
self.assertTrue(
|
||||
"You cannot create a `DummyScheduler` without specifying a scheduler in the config file."
|
||||
"Either specify a scheduler in the config file or "
|
||||
"pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`."
|
||||
in str(cm.exception)
|
||||
)
|
||||
|
||||
@ -352,7 +350,7 @@ class DeepSpeedConfigIntegration(AccelerateTestCase):
|
||||
self.assertTrue(accelerator.deepspeed_config["train_batch_size"], 16)
|
||||
self.assertEqual(type(model), DeepSpeedEngine)
|
||||
self.assertEqual(type(optimizer), DeepSpeedOptimizerWrapper)
|
||||
self.assertEqual(type(lr_scheduler), AcceleratedScheduler)
|
||||
self.assertEqual(type(lr_scheduler), DeepSpeedSchedulerWrapper)
|
||||
self.assertEqual(type(accelerator.deepspeed_engine_wrapped), DeepSpeedEngineWrapper)
|
||||
|
||||
elif optim_type == DS_OPTIMIZER and scheduler_type == DS_SCHEDULER:
|
||||
@ -483,6 +481,31 @@ class DeepSpeedConfigIntegration(AccelerateTestCase):
|
||||
in str(cm.exception)
|
||||
)
|
||||
|
||||
# passing `DummyScheduler` without `lr_scheduler_callable` should fail
|
||||
with self.assertRaises(ValueError) as cm:
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
|
||||
)
|
||||
self.assertTrue(
|
||||
"Either specify a scheduler in the config file or "
|
||||
"pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`."
|
||||
in str(cm.exception)
|
||||
)
|
||||
|
||||
# passing `lr_scheduler_callable` to DummyScheduler should enable DS Optim + Custom Scheduler
|
||||
def _lr_scheduler_callable(optimizer):
|
||||
return get_scheduler(
|
||||
name="linear",
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=0,
|
||||
num_training_steps=1000,
|
||||
)
|
||||
|
||||
dummy_lr_scheduler = DummyScheduler(dummy_optimizer, lr_scheduler_callable=_lr_scheduler_callable)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
|
||||
)
|
||||
|
||||
def test_save_checkpoints(self):
|
||||
deepspeed_plugin = DeepSpeedPlugin(
|
||||
hf_ds_config=self.ds_config_file[ZERO3],
|
||||
@ -599,7 +622,7 @@ class DeepSpeedConfigIntegration(AccelerateTestCase):
|
||||
deepspeed_plugin = DeepSpeedPlugin(
|
||||
hf_ds_config=ds_config,
|
||||
zero3_init_flag=True,
|
||||
gradient_accumulation_steps=1,
|
||||
gradient_accumulation_steps=2,
|
||||
gradient_clipping=1.0,
|
||||
zero_stage=2,
|
||||
offload_optimizer_device="cpu",
|
||||
@ -611,7 +634,7 @@ class DeepSpeedConfigIntegration(AccelerateTestCase):
|
||||
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype)
|
||||
deepspeed_plugin = accelerator.state.deepspeed_plugin
|
||||
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_clipping"], 1.0)
|
||||
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 1)
|
||||
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 2)
|
||||
self.assertEqual(deepspeed_plugin.deepspeed_config["zero_optimization"]["stage"], 2)
|
||||
self.assertEqual(
|
||||
deepspeed_plugin.deepspeed_config["zero_optimization"]["offload_optimizer"]["device"], "cpu"
|
||||
@ -632,6 +655,42 @@ class DeepSpeedConfigIntegration(AccelerateTestCase):
|
||||
in str(cm.exception)
|
||||
)
|
||||
|
||||
            # base case of passing in `gradient_accumulation_steps` to `DeepSpeedPlugin`
            AcceleratorState._reset_state(True)
            deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=4)
            with mockenv_context(**self.dist_env):
                accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype)
                deepspeed_plugin = accelerator.state.deepspeed_plugin
                self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 4)

            # filling the `auto` gradient_accumulation_steps via Accelerator's value
            AcceleratorState._reset_state(True)
            deepspeed_plugin = DeepSpeedPlugin(
                hf_ds_config=ds_config,
                zero3_init_flag=True,
                gradient_clipping=1.0,
                zero_stage=2,
                offload_optimizer_device="cpu",
                offload_param_device="cpu",
                zero3_save_16bit_model=True,
            )
            with mockenv_context(**self.dist_env):
                accelerator = Accelerator(
                    deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype, gradient_accumulation_steps=8
                )
                train_set = RegressionDataset(length=80)
                eval_set = RegressionDataset(length=20)
                train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True)
                eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False)
                model = AutoModelForCausalLM.from_pretrained("gpt2")
                dummy_optimizer = DummyOptim(params=model.parameters(), lr=5e-5, weight_decay=1e-4)
                dummy_lr_scheduler = DummyScheduler(dummy_optimizer, warmup_num_steps=10, total_num_steps=1000)
                model, _, train_dataloader, eval_dataloader, _ = accelerator.prepare(
                    model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
                )
                deepspeed_plugin = accelerator.state.deepspeed_plugin
                self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 8)

    def test_ds_config_assertions(self):
        ambiguous_env = self.dist_env.copy()
        ambiguous_env[
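The test above pins down the new behaviour: an explicit `gradient_accumulation_steps` on the plugin wins, and an `"auto"` value in the DeepSpeed config is filled from the `Accelerator`. A condensed sketch, assuming a DeepSpeed-enabled environment:

```python
from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin

plugin = DeepSpeedPlugin(zero_stage=2)  # gradient_accumulation_steps left unset -> "auto"
accelerator = Accelerator(deepspeed_plugin=plugin, gradient_accumulation_steps=8)
# after accelerator.prepare(...), plugin.deepspeed_config["gradient_accumulation_steps"] == 8
```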
@ -10,9 +10,10 @@ from torch.utils.data import DataLoader, TensorDataset
|
||||
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
|
||||
from accelerate.accelerator import Accelerator
|
||||
from accelerate.state import GradientState, PartialState
|
||||
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
|
||||
from accelerate.test_utils import require_bnb, require_multi_gpu, require_safetensors, slow
|
||||
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
|
||||
from accelerate.utils import patch_environment
|
||||
from accelerate.utils.modeling import load_checkpoint_in_model
|
||||
|
||||
|
||||
def create_components():
|
||||
@ -114,6 +115,30 @@ class AcceleratorTester(AccelerateTestCase):
|
||||
accelerator.load_state(tmpdirname)
|
||||
self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
|
||||
|
||||
def test_save_model_pytorch(self):
|
||||
accelerator = Accelerator()
|
||||
model = torch.nn.Linear(10, 10)
|
||||
|
||||
model_signature = get_signature(model)
|
||||
with tempfile.TemporaryDirectory() as tmpdirname:
|
||||
accelerator.save_model(model, tmpdirname, safe_serialization=False)
|
||||
# make sure loaded weights match
|
||||
load_checkpoint_in_model(model, tmpdirname)
|
||||
self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
|
||||
|
||||
@require_safetensors
|
||||
def test_save_model_safetensors(self):
|
||||
accelerator = Accelerator()
|
||||
model = torch.nn.Linear(10, 10)
|
||||
|
||||
model_signature = get_signature(model)
|
||||
with tempfile.TemporaryDirectory() as tmpdirname:
|
||||
accelerator.save_model(model, tmpdirname, safe_serialization=True)
|
||||
|
||||
# make sure loaded weights match
|
||||
load_checkpoint_in_model(model, tmpdirname)
|
||||
self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
|
||||
|
||||
def test_save_load_model_with_hooks(self):
|
||||
accelerator = Accelerator()
|
||||
model, optimizer, scheduler, train_dl, valid_dl = create_components()
|
||||
|
||||
@ -11,7 +11,7 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import copy
|
||||
import os
|
||||
import unittest
|
||||
from tempfile import TemporaryDirectory
|
||||
@ -45,6 +45,18 @@ class ModelForTest(nn.Module):
|
||||
return self.linear2(self.batchnorm(self.linear1(x)))
|
||||
|
||||
|
||||
class ModelForTestCopy(nn.Module):
|
||||
def __init__(self, id: int):
|
||||
super().__init__()
|
||||
self.id = id
|
||||
self.linear1 = nn.Linear(3, 4)
|
||||
self.batchnorm = nn.BatchNorm1d(4)
|
||||
self.linear2 = nn.Linear(4, 5)
|
||||
|
||||
def forward(self, x):
|
||||
return self.linear2(self.batchnorm(self.linear1(x))), self.id
|
||||
|
||||
|
||||
class ModelForTestTiedWeights(nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
@ -325,6 +337,25 @@ class BigModelingTester(unittest.TestCase):
|
||||
output = model(x)
|
||||
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
|
||||
|
||||
@require_cuda
|
||||
def test_dispatch_model_copy(self):
|
||||
original_model = ModelForTestCopy(id=1)
|
||||
device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": 0}
|
||||
|
||||
x = torch.randn(2, 3)
|
||||
expected, original_output_id = original_model(x)
|
||||
|
||||
dispatch_model(original_model, device_map)
|
||||
|
||||
copied_model = copy.deepcopy(original_model)
|
||||
copied_model.id = 2
|
||||
output, copied_output_id = copied_model(x)
|
||||
|
||||
self.assertEqual(original_model.id, original_output_id)
|
||||
self.assertEqual(copied_model.id, copied_output_id)
|
||||
self.assertFalse(copied_model.linear1.forward is original_model.linear1.forward)
|
||||
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
|
||||
|
||||
@require_cuda
|
||||
def test_dispatch_model_move_offloaded_model(self):
|
||||
model = ModelForTest()
|
||||
@ -444,6 +475,18 @@ class BigModelingTester(unittest.TestCase):
|
||||
output = model(x)
|
||||
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
|
||||
|
||||
@require_cuda
|
||||
def test_dispatch_model_force_hooks(self):
|
||||
model = ModelForTest()
|
||||
device_map = {"": 0}
|
||||
|
||||
x = torch.randn(2, 3)
|
||||
expected = model(x)
|
||||
|
||||
dispatch_model(model, device_map, force_hooks=True)
|
||||
output = model(x)
|
||||
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
|
||||
|
||||
@require_cuda
|
||||
def test_load_checkpoint_and_dispatch(self):
|
||||
model = ModelForTest()
|
||||
@ -638,22 +681,16 @@ class BigModelingTester(unittest.TestCase):
|
||||
with init_empty_weights():
|
||||
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
|
||||
|
||||
# TODO: @younesbelkada remove the positional arg on the next `transformers` release
|
||||
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
|
||||
model = replace_with_bnb_linear(
|
||||
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
|
||||
)
|
||||
|
||||
# TODO: @younesbelkada remove this block on the next `transformers` release
|
||||
for p in model.parameters():
|
||||
p.requires_grad = False
|
||||
|
||||
model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin")
|
||||
|
||||
model = load_checkpoint_and_dispatch(
|
||||
model,
|
||||
checkpoint=model_path,
|
||||
# device_map="auto",
|
||||
device_map="balanced",
|
||||
)
|
||||
|
||||
@ -674,16 +711,11 @@ class BigModelingTester(unittest.TestCase):
|
||||
with init_empty_weights():
|
||||
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
|
||||
|
||||
# TODO: @younesbelkada remove the positional arg on the next `transformers` release
|
||||
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
|
||||
model = replace_with_bnb_linear(
|
||||
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
|
||||
)
|
||||
|
||||
# TODO: @younesbelkada remove this block on the next `transformers` release
|
||||
for p in model.parameters():
|
||||
p.requires_grad = False
|
||||
|
||||
model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin")
|
||||
|
||||
# test with auto
|
||||
@ -699,14 +731,10 @@ class BigModelingTester(unittest.TestCase):
|
||||
with init_empty_weights():
|
||||
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
|
||||
|
||||
# TODO: @younesbelkada remove the positional arg on the next `transformers` release
|
||||
model = replace_with_bnb_linear(
|
||||
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
|
||||
)
|
||||
|
||||
for p in model.parameters():
|
||||
p.requires_grad = False
|
||||
|
||||
# test with str device map
|
||||
model = load_checkpoint_and_dispatch(
|
||||
model,
|
||||
@ -720,15 +748,10 @@ class BigModelingTester(unittest.TestCase):
|
||||
with init_empty_weights():
|
||||
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
|
||||
|
||||
# TODO: @younesbelkada remove the positional arg on the next `transformers` release
|
||||
model = replace_with_bnb_linear(
|
||||
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
|
||||
)
|
||||
|
||||
# TODO: @younesbelkada remove this block on the next `transformers` release
|
||||
for p in model.parameters():
|
||||
p.requires_grad = False
|
||||
|
||||
# test with torch.device device map
|
||||
model = load_checkpoint_and_dispatch(
|
||||
model,
|
||||
@ -741,7 +764,6 @@ class BigModelingTester(unittest.TestCase):
|
||||
|
||||
@slow
|
||||
@require_bnb
|
||||
@unittest.skip("Un-skip in the next transformers release")
|
||||
def test_dipatch_model_fp4_simple(self):
|
||||
"""Tests that `dispatch_model` quantizes fp4 layers"""
|
||||
from huggingface_hub import hf_hub_download
|
||||
|
||||
@ -18,10 +18,17 @@ import unittest
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
|
||||
|
||||
import accelerate
|
||||
from accelerate.commands.estimate import estimate_command, estimate_command_parser, gather_data
|
||||
from accelerate.test_utils import execute_subprocess_async
|
||||
from accelerate.test_utils.testing import run_command
|
||||
from accelerate.test_utils.testing import (
|
||||
require_timm,
|
||||
require_transformers,
|
||||
run_command,
|
||||
)
|
||||
from accelerate.utils import patch_environment
|
||||
|
||||
|
||||
class AccelerateLauncherTester(unittest.TestCase):
|
||||
@ -60,10 +67,22 @@ class AccelerateLauncherTester(unittest.TestCase):
|
||||
|
||||
def test_config_compatibility(self):
|
||||
for config in sorted(self.test_config_path.glob("**/*.yaml")):
|
||||
with self.subTest(config_file=config):
|
||||
execute_subprocess_async(
|
||||
self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
|
||||
)
|
||||
if "invalid" not in str(config):
|
||||
with self.subTest(config_file=config):
|
||||
execute_subprocess_async(
|
||||
self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
|
||||
)
|
||||
|
||||
def test_invalid_keys(self):
|
||||
with self.assertRaises(
|
||||
RuntimeError,
|
||||
msg="The config file at 'invalid_keys.yaml' had unknown keys ('another_invalid_key', 'invalid_key')",
|
||||
):
|
||||
execute_subprocess_async(
|
||||
self.base_cmd
|
||||
+ ["--config_file", str(self.test_config_path / "invalid_keys.yaml"), self.test_file_path],
|
||||
env=os.environ.copy(),
|
||||
)
|
||||
|
||||
def test_accelerate_test(self):
|
||||
execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
|
||||
@ -211,3 +230,137 @@ class TpuConfigTester(unittest.TestCase):
|
||||
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
|
||||
output,
|
||||
)
|
||||
|
||||
|
||||
class ModelEstimatorTester(unittest.TestCase):
|
||||
"""
|
||||
Test case for checking the output of `accelerate estimate-memory` is correct.
|
||||
|
||||
- Uses `estimate_command` when trying to catch raised errors
|
||||
- Uses `gather_data` when just verifying the calculations are correct
|
||||
"""
|
||||
|
||||
parser = estimate_command_parser()
|
||||
|
||||
def test_invalid_model_name(self):
|
||||
with self.assertRaises(
|
||||
RepositoryNotFoundError, msg="Repo for model `somebrokenname` does not exist on the Hub"
|
||||
):
|
||||
args = self.parser.parse_args(["somebrokenname"])
|
||||
estimate_command(args)
|
||||
|
||||
@require_timm
|
||||
def test_invalid_model_name_timm(self):
|
||||
with self.assertRaises(RuntimeError, msg="Tried to load `muellerzr/dummy` with `timm` but"):
|
||||
args = self.parser.parse_args(["muellerzr/dummy", "--library_name", "timm"])
|
||||
estimate_command(args)
|
||||
|
||||
@require_transformers
|
||||
def test_invalid_model_name_transformers(self):
|
||||
with self.assertRaises(RuntimeError, msg="Tried to load `muellerzr/dummy` with `transformers` but"):
|
||||
args = self.parser.parse_args(["muellerzr/dummy", "--library_name", "transformers"])
|
||||
estimate_command(args)
|
||||
|
||||
def test_no_metadata(self):
|
||||
with self.assertRaises(
|
||||
ValueError, msg="Model `muellerzr/dummy` does not have any library metadata on the Hub"
|
||||
):
|
||||
args = self.parser.parse_args(["muellerzr/dummy"])
|
||||
estimate_command(args)
|
||||
|
||||
def test_gated(self):
|
||||
with self.assertRaises(GatedRepoError, msg="Repo for model `meta-llama/Llama-2-7b` is gated"):
|
||||
args = self.parser.parse_args(["meta-llama/Llama-2-7b"])
|
||||
with patch_environment(hf_hub_disable_implicit_token="1"):
|
||||
estimate_command(args)
|
||||
|
||||
@require_transformers
|
||||
def test_remote_code(self):
|
||||
# Also tests that custom `Auto` classes work
|
||||
args = self.parser.parse_args(["hf-internal-testing/test_dynamic_model"])
|
||||
with self.assertRaises(ValueError, msg="--trust_remote_code"):
|
||||
gather_data(args)
|
||||
|
||||
# Verify it works with the flag
|
||||
args = self.parser.parse_args(["hf-internal-testing/test_dynamic_model", "--trust_remote_code"])
|
||||
gather_data(args)
|
||||
|
||||
@require_transformers
|
||||
def test_explicit_dtypes(self):
|
||||
args = self.parser.parse_args(["bert-base-cased", "--dtypes", "float32", "float16"])
|
||||
output = gather_data(args)
|
||||
# The largest layer and total size of the model in bytes
|
||||
largest_layer, total_size = 89075712, 433249280
|
||||
# Check that full precision -> int4 is calculating correctly
|
||||
self.assertEqual(len(output), 2, f"Output was missing a precision, expected 2 but received {len(output)}")
|
||||
|
||||
for i, factor in enumerate([1, 2]):
|
||||
precision = 32 // factor
|
||||
precision_str = f"float{precision}"
|
||||
largest_layer_estimate = largest_layer / factor
|
||||
total_size_estimate = total_size / factor
|
||||
total_training_size_estimate = total_size_estimate * 4
|
||||
|
||||
self.assertEqual(precision_str, output[i][0], f"Output is missing precision `{precision_str}`")
|
||||
self.assertEqual(
|
||||
largest_layer_estimate,
|
||||
output[i][1],
|
||||
f"Calculation for largest layer size in `{precision_str}` is incorrect.",
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
total_size_estimate,
|
||||
output[i][2],
|
||||
msg=f"Calculation for total size in `{precision_str}` is incorrect.",
|
||||
)
|
||||
self.assertEqual(
|
||||
total_training_size_estimate,
|
||||
output[i][3],
|
||||
msg=f"Calculation for total training size in `{precision_str}` is incorrect.",
|
||||
)
|
||||
|
||||
@require_transformers
|
||||
def test_transformers_model(self):
|
||||
args = self.parser.parse_args(["bert-base-cased", "--dtypes", "float32"])
|
||||
output = gather_data(args)
|
||||
# The largest layer and total size of the model in bytes
|
||||
largest_layer, total_size = 89075712, 433249280
|
||||
self.assertEqual(
|
||||
largest_layer,
|
||||
output[0][1],
|
||||
f"Calculation for largest layer size in `fp32` is incorrect, expected {largest_layer} but received {output[0][1]}",
|
||||
)
|
||||
self.assertEqual(
|
||||
total_size,
|
||||
output[0][2],
|
||||
f"Calculation for total size in `fp32` is incorrect, expected {total_size} but received {output[0][2]}",
|
||||
)
|
||||
|
||||
@require_transformers
|
||||
def test_no_split_modules(self):
|
||||
# idefics-80b-instruct has ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"]
|
||||
args = self.parser.parse_args(["HuggingFaceM4/idefics-80b-instruct", "--dtypes", "float32"])
|
||||
output = gather_data(args)
|
||||
# without factoring in `no_split` modules, the largest layer is 721420288 bytes
|
||||
self.assertNotEqual(
|
||||
output[0][1], 721420288, "Largest layer calculation incorrect, did not factor in `no_split` modules."
|
||||
)
|
||||
# the real answer is 3240165632 bytes
|
||||
self.assertEqual(output[0][1], 3240165632)
|
||||
|
||||
@require_timm
|
||||
def test_timm_model(self):
|
||||
args = self.parser.parse_args(["timm/resnet50.a1_in1k", "--library_name", "timm"])
|
||||
output = gather_data(args)
|
||||
# The largest layer and total size of the model in bytes
|
||||
largest_layer, total_size = 9437184, 102441032
|
||||
self.assertEqual(
|
||||
largest_layer,
|
||||
output[0][1],
|
||||
f"Calculation for largest layer size in `fp32` is incorrect, expected {largest_layer} but received {output[0][1]}",
|
||||
)
|
||||
self.assertEqual(
|
||||
total_size,
|
||||
output[0][2],
|
||||
f"Calculation for total size in `fp32` is incorrect, expected {total_size} but received {output[0][2]}",
|
||||
)
|
||||
|
||||
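The estimator tests above drive the same entry points as the `accelerate estimate-memory` CLI. A sketch of calling them directly; each row is assumed here to be `(dtype, largest_layer, total_size, training_size)`, the ordering the tests check:

```python
from accelerate.commands.estimate import estimate_command_parser, gather_data

parser = estimate_command_parser()
args = parser.parse_args(["bert-base-cased", "--dtypes", "float32", "float16"])
for dtype, largest_layer, total_size, training_size in gather_data(args):
    print(dtype, largest_layer, total_size, training_size)
```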
15 tests/test_configs/invalid_keys.yaml (new file)
@@ -0,0 +1,15 @@
compute_environment: LOCAL_MACHINE
deepspeed_config: {}
distributed_type: 'NO'
downcast_bf16: 'no'
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: 'no'
num_machines: 1
num_processes: 1
use_cpu: false
invalid_key: "invalid_value"
another_invalid_key: "another_invalid_value"
@@ -41,6 +41,7 @@ EXCLUDE_EXAMPLES = [
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
    "early_stopping.py",
]


@@ -222,3 +223,7 @@ class FeatureExamplesTests(TempDirTestCase):
    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)

    def test_early_stopping(self):
        testargs = ["examples/by_feature/early_stopping.py"]
        run_command(self._launch_args + testargs)
@@ -92,11 +92,11 @@ class KwargsHandlerTester(unittest.TestCase):
        prefix = "ACCELERATE_DYNAMO_"
        # nvfuser's dynamo backend name is "nvprims_nvfuser"
        # use "nvfuser" here to cause exception if this test causes os.environ changed permanently
        os.environ[prefix + "BACKEND"] = "nvfuser"
        os.environ[prefix + "BACKEND"] = "aot_ts_nvfuser"
        os.environ[prefix + "MODE"] = "reduce-overhead"

        dynamo_plugin_kwargs = TorchDynamoPlugin().to_kwargs()
        self.assertEqual(dynamo_plugin_kwargs, {"backend": "nvfuser", "mode": "reduce-overhead"})
        self.assertEqual(dynamo_plugin_kwargs, {"backend": "aot_ts_nvfuser", "mode": "reduce-overhead"})


if __name__ == "__main__":
@@ -547,6 +547,10 @@ class ModelingUtilsTester(unittest.TestCase):
        max_memory = get_balanced_memory(model, max_memory={0: 0, 1: 300, 2: 300})
        self.assertDictEqual({0: 0, 1: 215, 2: 300}, max_memory)

        # If we set a device to 0, it's not counted.
        max_memory = get_balanced_memory(model, max_memory={0: 0, "cpu": 100})
        self.assertDictEqual({0: 0, "cpu": 100}, max_memory)

    @require_cuda
    @require_safetensors
    def test_load_state_dict(self):
@@ -21,7 +21,7 @@ import torch
import accelerate
from accelerate import Accelerator
from accelerate.big_modeling import dispatch_model
from accelerate.test_utils import assert_exception, execute_subprocess_async, require_multi_gpu
from accelerate.test_utils import assert_exception, execute_subprocess_async, require_multi_gpu, skip
from accelerate.utils import patch_environment


@@ -66,6 +66,8 @@ class MultiGPUTester(unittest.TestCase):
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())

    # Need to see why this test raises forking issues when ran as a suite
    @skip
    @require_multi_gpu
    def test_notebook_launcher(self):
        """
@ -19,6 +19,8 @@ import random
|
||||
import shutil
|
||||
import tempfile
|
||||
import unittest
|
||||
import uuid
|
||||
from contextlib import contextmanager
|
||||
|
||||
import pytest
|
||||
import torch
|
||||
@ -201,6 +203,71 @@ class CheckpointTest(unittest.TestCase):
|
||||
self.assertEqual(opt_state1, opt_state3)
|
||||
self.assertEqual(ground_truth_rands, test_rands)
|
||||
|
||||
def test_can_resume_training_checkpoints_relative_path(self):
|
||||
# See #1983
|
||||
# This test is like test_can_resume_training but uses a relative path for the checkpoint and automatically
|
||||
# infers the checkpoint path when loading.
|
||||
@contextmanager
|
||||
def temporary_relative_directory():
|
||||
# This is equivalent to tempfile.TemporaryDirectory() except that it returns a relative path
|
||||
rand_dir = f"test_path_{uuid.uuid4()}"
|
||||
os.mkdir(rand_dir)
|
||||
try:
|
||||
yield rand_dir
|
||||
finally:
|
||||
shutil.rmtree(rand_dir)
|
||||
|
||||
with temporary_relative_directory() as tmpdir:
|
||||
set_seed(42)
|
||||
model = DummyModel()
|
||||
optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
|
||||
train_dataloader, valid_dataloader = dummy_dataloaders()
|
||||
project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
|
||||
|
||||
# Train baseline
|
||||
accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
|
||||
model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, valid_dataloader
|
||||
)
|
||||
# Save initial
|
||||
accelerator.save_state()
|
||||
(a, b) = model.a.item(), model.b.item()
|
||||
opt_state = optimizer.state_dict()
|
||||
ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
|
||||
(a1, b1) = model.a.item(), model.b.item()
|
||||
opt_state1 = optimizer.state_dict()
|
||||
|
||||
# Train partially
|
||||
set_seed(42)
|
||||
model = DummyModel()
|
||||
optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
|
||||
train_dataloader, valid_dataloader = dummy_dataloaders()
|
||||
project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
|
||||
accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
|
||||
model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, valid_dataloader
|
||||
)
|
||||
accelerator.load_state() # <= infer the directory automatically
|
||||
(a2, b2) = model.a.item(), model.b.item()
|
||||
opt_state2 = optimizer.state_dict()
|
||||
self.assertEqual(a, a2)
|
||||
self.assertEqual(b, b2)
|
||||
self.assertEqual(opt_state, opt_state2)
|
||||
|
||||
test_rands = train(2, model, train_dataloader, optimizer, accelerator)
|
||||
# Save everything
|
||||
accelerator.save_state()
|
||||
|
||||
# Load everything back in and make sure all states work
|
||||
accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
|
||||
test_rands += train(1, model, train_dataloader, optimizer, accelerator)
|
||||
(a3, b3) = model.a.item(), model.b.item()
|
||||
opt_state3 = optimizer.state_dict()
|
||||
self.assertEqual(a1, a3)
|
||||
self.assertEqual(b1, b3)
|
||||
self.assertEqual(opt_state1, opt_state3)
|
||||
self.assertEqual(ground_truth_rands, test_rands)
|
||||
|
||||
def test_invalid_registration(self):
|
||||
t = torch.tensor([1, 2, 3])
|
||||
t1 = torch.tensor([2, 3, 4])
|
||||
|
||||
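The relative-path test above exercises the resume flow end to end; the user-facing pattern looks roughly like this (`model` and `optimizer` are assumed to already exist):

```python
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

config = ProjectConfiguration(automatic_checkpoint_naming=True)
accelerator = Accelerator(project_dir="my_run", project_config=config)  # a relative path is fine now
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state()   # writes my_run/checkpoints/checkpoint_0
# ... in a later run with the same project_dir ...
accelerator.load_state()   # with no argument, infers the latest checkpoint automatically
```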
@ -15,13 +15,17 @@
import os
import pickle
import unittest
import warnings
from collections import UserDict, namedtuple
from unittest.mock import Mock, patch

import torch

from accelerate.state import PartialState
from accelerate.test_utils.testing import require_cuda, require_torch_min_version
from accelerate.test_utils.training import RegressionModel
from accelerate.utils import (
    check_os_kernel,
    convert_outputs_to_fp32,
    extract_model_from_parallel,
    find_device,
@ -36,6 +40,10 @@ ExampleNamedTuple = namedtuple("ExampleNamedTuple", "a b c")


class UtilsTester(unittest.TestCase):
    def setUp(self):
        # logging requires initialized state
        PartialState()

    def test_send_to_device(self):
        tensor = torch.randn(5, 2)
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
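The setUp comment is the point of this hunk: accelerate's multi-process logger consults the process state to decide which rank emits a record, so the state has to exist before anything logs. A rough illustration (an assumption for context, not part of the diff):

from accelerate.logging import get_logger
from accelerate.state import PartialState

PartialState()  # initialize the (single-process) state so logging calls can query it
logger = get_logger(__name__)
logger.warning("only the main process emits this", main_process_only=True)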
@ -103,6 +111,25 @@ class UtilsTester(unittest.TestCase):
        self.assertNotIn("AA", os.environ)
        self.assertNotIn("BB", os.environ)

    def test_patch_environment_key_exists(self):
        # check that patch_environment correctly restores pre-existing env vars
        with patch_environment(aa=1, BB=2):
            self.assertEqual(os.environ.get("AA"), "1")
            self.assertEqual(os.environ.get("BB"), "2")

            with patch_environment(Aa=10, bb="20", cC=30):
                self.assertEqual(os.environ.get("AA"), "10")
                self.assertEqual(os.environ.get("BB"), "20")
                self.assertEqual(os.environ.get("CC"), "30")

            self.assertEqual(os.environ.get("AA"), "1")
            self.assertEqual(os.environ.get("BB"), "2")
            self.assertNotIn("CC", os.environ)

        self.assertNotIn("AA", os.environ)
        self.assertNotIn("BB", os.environ)
        self.assertNotIn("CC", os.environ)

    def test_can_undo_convert_outputs(self):
        model = RegressionModel()
        model._original_forward = model.forward
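For context, a rough sketch of the restore-on-exit behavior this test pins down; this is an approximation of what patch_environment must do, not accelerate's actual implementation: keys are upper-cased, values stringified, and any pre-existing value is put back when the context exits.

import os
from contextlib import contextmanager

@contextmanager
def patch_environment_sketch(**kwargs):
    # Hypothetical stand-in for accelerate.utils.patch_environment
    saved = {}
    for key, value in kwargs.items():
        key = key.upper()
        if key in os.environ:
            saved[key] = os.environ[key]  # remember pre-existing values
        os.environ[key] = str(value)
    try:
        yield
    finally:
        for key in kwargs:
            key = key.upper()
            if key in saved:
                os.environ[key] = saved[key]  # restore what was there before
            else:
                os.environ.pop(key, None)  # drop keys this context introduced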
@ -154,3 +181,27 @@ class UtilsTester(unittest.TestCase):
        self.assertEqual(find_device([1, "a", torch.tensor([1, 2, 3])]), torch.device("cpu"))
        self.assertEqual(find_device({"a": 1, "b": torch.tensor([1, 2, 3])}), torch.device("cpu"))
        self.assertIsNone(find_device([1, "a"]))

    def test_check_os_kernel_no_warning_when_release_gt_min(self):
        # min version is 5.5
        with patch("platform.uname", return_value=Mock(release="5.15.0-35-generic", system="Linux")):
            with warnings.catch_warnings(record=True) as w:
                check_os_kernel()
            self.assertEqual(len(w), 0)

    def test_check_os_kernel_no_warning_when_not_linux(self):
        # system must be Linux
        with patch("platform.uname", return_value=Mock(release="5.4.0-35-generic", system="Darwin")):
            with warnings.catch_warnings(record=True) as w:
                check_os_kernel()
            self.assertEqual(len(w), 0)

    def test_check_os_kernel_warning_when_release_lt_min(self):
        # min version is 5.5
        with patch("platform.uname", return_value=Mock(release="5.4.0-35-generic", system="Linux")):
            with self.assertLogs() as ctx:
                check_os_kernel()
            self.assertEqual(len(ctx.records), 1)
            self.assertEqual(ctx.records[0].levelname, "WARNING")
            self.assertIn("5.4.0", ctx.records[0].msg)
            self.assertIn("5.5.0", ctx.records[0].msg)
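To see the warning path outside the test suite, the same platform.uname patch can be applied by hand; this sketch assumes, as the tests above do, that release and system are the only fields check_os_kernel inspects.

from unittest.mock import Mock, patch

from accelerate.state import PartialState
from accelerate.utils import check_os_kernel

PartialState()  # logging requires initialized state, as in the tests' setUp
with patch("platform.uname", return_value=Mock(release="5.4.0-35-generic", system="Linux")):
    check_os_kernel()  # expected to log a WARNING mentioning 5.4.0 and the 5.5.0 minimum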
@ -17,6 +17,7 @@ https://github.com/allenai/allennlp.
"""
import os
from datetime import datetime as dt
from datetime import timezone

from github import Github
@ -36,7 +37,7 @@ def main():
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        current_time = dt.now(timezone.utc)
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
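This hunk swaps dt.utcnow() for dt.now(timezone.utc): utcnow() returns a naive datetime, while now(timezone.utc) returns an aware one, and if the timestamps coming back from the GitHub API are timezone-aware, subtracting them from a naive value raises a TypeError. A small illustration (not part of the diff):

from datetime import datetime as dt
from datetime import timezone

aware = dt(2023, 1, 1, tzinfo=timezone.utc)   # e.g. an aware issue.updated_at
naive = dt.utcnow()                           # naive: no tzinfo attached
try:
    _ = naive - aware
except TypeError as err:
    print(err)  # can't subtract offset-naive and offset-aware datetimes
print((dt.now(timezone.utc) - aware).days)    # works: both operands are aware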