Compare commits

...

48 Commits

Author SHA1 Message Date
48d96319e0 Release: v0.23.0 2023-09-14 14:35:14 -04:00
bbcdbbaffc Remove checkpoints only on main process (#1974)
* Remove checkpoints only on main process

shutil.rmtree might throw errors if called on multiple processes. Make the call only on the main process

* Apply style
2023-09-14 08:31:55 -04:00
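A minimal sketch of the pattern this message describes, guarding the deletion so only the main process touches the filesystem (the checkpoint path is illustrative):

```python
import shutil

from accelerate import Accelerator

accelerator = Accelerator()
checkpoint_dir = "outputs/checkpoints/checkpoint_0"  # illustrative path

# Only the main process removes the folder; everyone else waits, so the
# deletion is never attempted concurrently from multiple processes.
if accelerator.is_main_process:
    shutil.rmtree(checkpoint_dir, ignore_errors=True)
accelerator.wait_for_everyone()
```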
ce53708e0e fix for xpu (#1972) 2023-09-14 08:18:20 -04:00
53209ce6d8 update FSDP and DeepSpeed docs (#1973) 2023-09-14 08:18:11 -04:00
bd083ae1bf Add force_hooks to dispatch_model (#1969)
* Add force_hooks to dispatch_model

* Minor documentation rephrasing
2023-09-14 07:57:19 -04:00
e5452a618d fix torch compile with FSDP (#1919)
* fix torch compile with FSDP

* Update accelerator.py

* fixes

* resolve comments

* fix bug

* address comments

* addressing comments

* address comments
2023-09-14 13:19:59 +05:30
40a73e0ae0 Introduce breakpoint API (#1940)
* early stopping

* Fix tests

* Works on multi-gpu, uncomment

* Rm reset

* Check for >=1

* equal

* Trigger

* Fix test

* Update docs/source/concept_guides/deferring_execution.md

Co-authored-by: Benjamin Bossan <BenjaminBossan@users.noreply.github.com>

* Explicit example loop

* Set to zero, not None

* rename test

* Check again to ensure it's been reset

---------

Co-authored-by: Benjamin Bossan <BenjaminBossan@users.noreply.github.com>
2023-09-13 12:42:38 -04:00
937e08ce75 add bf16 mixed precision support for NPU (#1949)
* add bf16 mixed precision support for NPU

* Explicitly register the NPU backend to PyTorch via `import torch_npu`

---------

Co-authored-by: statelesshz <jihuazhong1@huawei.com>
2023-09-13 09:56:24 -04:00
5d558f21e2 [WIP] Implementing gather_for_metrics with dedup for non tensor objects (#1937)
* [feat] implementing gather_for_metrics for objects

* [lint] make style result

* [docs] improve fn docs gather for metrics

Co-authored-by: Zach Mueller <muellerzr@gmail.com>

* [docs] update args description gather for metrics

Co-authored-by: Zach Mueller <muellerzr@gmail.com>

* [refactor] gather for metrics for non tensor obj

Co-authored-by: Zach Mueller <muellerzr@gmail.com>

* [fix] renaming tensor to data (was not defined and it is not just a tensor)

* [fix] else state

* [test] gather for metrics with non tensor objects

* [lint] make style result

* Update src/accelerate/accelerator.py

Co-authored-by: Zach Mueller <muellerzr@gmail.com>

* Update src/accelerate/accelerator.py

Co-authored-by: Zach Mueller <muellerzr@gmail.com>

* [test] removing useless assertion

Co-authored-by: Zach Mueller <muellerzr@gmail.com>

* [test] add running on main

* [lint] style autoformat

---------

Co-authored-by: Lorenzobattistela <lorenzobattistela@gmail.com>
Co-authored-by: Zach Mueller <muellerzr@gmail.com>
2023-09-12 12:17:43 -04:00
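A hedged sketch of what this change enables: `gather_for_metrics` can now gather plain Python objects, not just tensors (the string payload below is illustrative):

```python
from accelerate import Accelerator

accelerator = Accelerator()

# Each process contributes non-tensor data, e.g. decoded prediction strings.
local_predictions = [f"prediction from process {accelerator.process_index}"]

# Per this PR, non-tensor inputs are routed through `gather_object` under the hood,
# with the usual last-batch deduplication still applied for prepared dataloaders.
all_predictions = accelerator.gather_for_metrics(local_predictions)
if accelerator.is_main_process:
    print(all_predictions)
```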
d9b5ce60b3 Rm strtobool (#1964)
* Rm strtobool

* Reorganize

* c/p

* Signature
2023-09-12 11:21:09 -04:00
61a87ab946 finish all todos (#1957) 2023-09-12 17:13:00 +02:00
5dec654aae Better guards for slow imports (#1963)
* Start

* Deepspeed

* Clean
2023-09-12 10:54:19 -04:00
b2a950205e FIX: patch_environment restores pre-existing environment variables when finished (#1960)
Resolves #1832

This fixes a bug in patch_environment that currently leads to
pre-existing items being deleted completely from the environment
variables, when they were temporarily modified by patch_environment,
once the context has finished. Now, the env vars are restored to their
previous values.
2023-09-12 15:39:54 +02:00
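A small sketch of the behavior being fixed, using an illustrative variable name:

```python
import os

from accelerate.utils import patch_environment

os.environ["MY_VAR"] = "original"  # pre-existing value (illustrative)

with patch_environment(MY_VAR="temporary"):
    assert os.environ["MY_VAR"] == "temporary"

# With this fix, the pre-existing value is restored rather than deleted.
assert os.environ["MY_VAR"] == "original"
```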
ca7b853abc fix safetensor saving (#1954)
* fix safetensor saving

* fix test

* fix

* better save

* modify as keyword arg
2023-09-12 09:14:41 -04:00
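A brief sketch of the keyword argument this change threads through `Accelerator.save` (file names are illustrative; the second call assumes `safetensors` is installed):

```python
import torch

from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(8, 2)

# Default torch serialization ...
accelerator.save(model.state_dict(), "model.bin")
# ... or safetensors serialization via the new keyword argument.
accelerator.save(model.state_dict(), "model.safetensors", safe_serialization=True)
```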
6832aa51a6 move tensorflow dep (#1959) 2023-09-12 06:19:26 -04:00
4a1d5b1fb6 Fix docs (#1951)
Signed-off-by: Peng Gao <peng.gao.dut@gmail.com>
2023-09-11 10:40:14 -04:00
82369c8314 fix the fsdp docs (#1947) 2023-09-11 15:30:09 +05:30
cdb001ca5f Enhance multi-node notebook launching (#1913)
* Introduce new arguments: master_addr, node_rank, and num_nodes.
  Relocate these arguments to the end of the notebook_launcher
  function for compatibility.

* Set defaults for NPROC and NODE_RANK environment variables in the
  PrepareForLaunch function to ensure compatibility.

* Thoroughly document the process and usage guidelines for
  multi-node launching.
2023-09-08 07:53:21 -04:00
c72e22419b Bring back pypi to runners (#1939)
* Bring back pypi

* Flipflop
2023-09-08 07:51:17 -04:00
c872c3086f clean num devices (#1936) 2023-09-07 10:14:52 -04:00
cec5ae8e4d Check for invalid keys (#1935)
* Check for invalid keys

* Revert else

* Better error

* Weird space
2023-09-06 12:22:22 -04:00
cd570b2e2a reduce gradient first for XLA when unscaling the gradients in mixed precision training with AMP. (#1926)
* reduce gradient first for XLA when unscaling the gradients in mixed
precision training with AMP.

* Apply suggestions from code review

Co-authored-by: Zach Mueller <muellerzr@gmail.com>

* update acceleartor.reduce and accelerate.utils.operations.reduce

---------

Co-authored-by: Zach Mueller <muellerzr@gmail.com>
2023-09-06 11:00:24 -04:00
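For context, a hedged sketch of the `scale` argument this PR adds to `Accelerator.reduce`; per the change, the scaling is only meaningful on XLA, where it lets the averaging be folded into the collective:

```python
import torch

from accelerate import Accelerator

accelerator = Accelerator()

# A per-process value to aggregate.
local_metric = torch.tensor([1.0], device=accelerator.device)

# reduction="sum" with scale=1/num_processes yields a mean; on a single process
# this is a no-op (scale == 1.0).
averaged = accelerator.reduce(local_metric, reduction="sum", scale=1.0 / accelerator.num_processes)
```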
727d624322 Add support for deepspeed optimizer and custom scheduler (#1909)
* support for deepspeed optimizer and custom scheduler

* don't throw the error

* Add tests

* fix the tests

* fix the code quality

* Update tests/deepspeed/test_deepspeed.py

Co-authored-by: Benjamin Bossan <BenjaminBossan@users.noreply.github.com>

* fix the docstrings

---------

Co-authored-by: Benjamin Bossan <BenjaminBossan@users.noreply.github.com>
2023-09-05 22:30:46 +05:30
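A hedged sketch of the escape hatch described in this PR's error-message change: when no scheduler is defined in the DeepSpeed config, `DummyScheduler` can receive an `lr_scheduler_callable` that builds the real scheduler once the optimizer exists (the model and step counts are illustrative):

```python
import torch
from transformers import get_linear_schedule_with_warmup

from accelerate.utils import DummyOptim, DummyScheduler

model = torch.nn.Linear(8, 2)  # stand-in model

# The optimizer itself comes from the DeepSpeed config file, so only a placeholder is built in code.
optimizer = DummyOptim(params=model.parameters())

# New here: a callable that constructs the custom scheduler from the optimizer DeepSpeed creates.
lr_scheduler = DummyScheduler(
    optimizer,
    lr_scheduler_callable=lambda opt: get_linear_schedule_with_warmup(
        opt, num_warmup_steps=100, num_training_steps=1000  # illustrative values
    ),
)
```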
afed2f75f8 Expose auto in dataclass (#1914)
* Auto

* Update str
2023-09-05 09:23:10 -04:00
739b135f83 More CI fun - run all test parts always (#1916)
* Run always

* Populate
2023-08-31 12:32:28 -04:00
4a9dd1cd82 support logging with mlflow in case of mlflow-skinny installed (#1874)
* Support the case of mlflow-skinny being installed when `log_with` is set to mlflow.

* code beautification.
2023-08-31 12:11:02 -04:00
feab09908d improve help info when run accelerate config on npu (#1895) 2023-08-31 12:02:59 -04:00
e0baaa8df0 fix: add debug argument to sagemaker configuration (#1904)
* fix: add debug argument to sagemaker configuration #1903

* ignore:  address quality style

Signed-off-by: maximegmd <672982+maximegmd@users.noreply.github.com>

* tweak: ask if user wants debug information in SageMaker distributed operations

---------

Signed-off-by: maximegmd <672982+maximegmd@users.noreply.github.com>
2023-08-31 11:52:46 -04:00
1b998f1695 Use hosted CI runners for building docker images (#1915)
* New technique

* needs

* explicit all

* Volume prune not going

* Skip volume

* versions

* Avoid checkout perhaps?

* Working dir

* Don't include dot-slash?

* Accelerate prefix?

* Working directory?

* Context?

* other workingdir

* Faster iteration

* Right tag

* Full

* Release

* GPU
2023-08-31 11:28:48 -04:00
7befe580c2 Fix docker images (#1910)
* With driver

* Remove deps

* No bitsandbytes

* Try with raw push

* We can keep old docker images

* Also include release

* Skorch uses master

* Right tag
2023-08-31 07:14:38 -04:00
cd3d3a37f9 Skip pypi transformers until release (#1911)
* Skip release

* TODO comment
2023-08-31 07:14:06 -04:00
81fffe51fd deepspeed grad_acc_steps fixes (#1901)
* deepspeed grad_acc_steps fixes

* fix tests
2023-08-31 16:33:34 +05:30
0b5ac0253e Add PR template (#1906)
* Add PR template

* Sourab is not a fashion company
2023-08-30 03:19:15 -04:00
a16b843a1b deepspeed for ccl xpu (#1827) 2023-08-29 17:36:29 +05:30
bc86a9379f Solve at least one failing test (#1898) 2023-08-29 10:57:56 +05:30
87a096f95e Add FSDP activation checkpointing feature (#1891)
* add FSDP activation checkpointing feature

* fix formatting issue

* fix code formatting issue
2023-08-29 10:56:08 +05:30
44adf1e14f Fix nb launcher test (#1899)
* Try with raw subprocess

* Skip test for now

* Clean
2023-08-28 14:44:18 -04:00
ce870e1ce1 Final nits on model util (#1896)
* Nits

* Annoying markdown tables

* Try with one

* I give, try raw md

* Moot

* W/o code tick

* Markdown
2023-08-28 09:47:44 -04:00
1ace672d3e Update dataclasses.py (#1894) 2023-08-28 17:40:14 +05:30
e2ae254008 Add hub as core dep (#1885)
* Add hub as dep

* Missing refs
2023-08-25 10:05:58 -04:00
0fa291e707 Add doc on model memory usage (#1887)
* Doc

* Note on meta

* Phrase

* Apply suggestions from code review

Co-authored-by: Benjamin Bossan <BenjaminBossan@users.noreply.github.com>

* Clarity nit

* Nits

---------

Co-authored-by: Benjamin Bossan <BenjaminBossan@users.noreply.github.com>
2023-08-25 10:03:39 -04:00
ba6f11ec3e Enable a token to be used (#1886)
* Enable based on passing the token

* Doc more
2023-08-24 15:43:37 -04:00
430ee9df6b Update with new url (#1884) 2023-08-24 12:52:09 -04:00
409a9df0a4 Introduce model memory estimator (#1876)
* Estimator

* Right err

* Fixup tests

* trust remote code

* Print output for debugging purposes

* trust_remote_code

* Address some comments

* change doc to req arg

* Properly check for _no_split_modules in transformer models

* Note on transformer models

* Check/handle pentabytes

Co-authored-by: Benjamin Bossan <BenjaminBossan@users.noreply.github.com>

* Tests are passing locally again, better handle for no_split

* Adjust setup?

* Let's see if the cleaner version works

* Refactor and clean up for testing

* Specify in comments

* Better error handling

* A million tests later

* More tests + err handling

* Require hub

* More with remote code

* Clean up

* Add a test for no_split

* Apply suggestions from code review

Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>

* Docstring

* Address some comments

* rm einops

* Let it err out

* Adjust errs

* Tests

* Reduce test repeats

* Clean up borders

* Tip on 20%

---------

Co-authored-by: Benjamin Bossan <BenjaminBossan@users.noreply.github.com>
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
2023-08-24 12:12:01 -04:00
acad5bae5c Enable power users to bypass device_map="auto" training block (#1881)
* Enable TP greedy env var

* Right env setting

* Use true, not false

* Design nit

* ACCELERATE_BYPASS_DEVICE_MAP
2023-08-24 10:27:59 -04:00
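Based on the environment-variable name visible in the diff further down this page, usage would presumably look like this (script name is illustrative):

```bash
ACCELERATE_BYPASS_DEVICE_MAP=true accelerate launch my_training_script.py
```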
81b19c4094 fix detach_hook (#1880) 2023-08-23 15:15:27 -04:00
3e97a9172b Update release instructions (#1877)
* Update release instructions

* Update setup.py

Co-authored-by: Lysandre Debut <lysandre.debut@reseau.eseo.fr>

---------

Co-authored-by: Lysandre Debut <lysandre.debut@reseau.eseo.fr>
2023-08-23 16:04:09 +02:00
812719644d v0.23.0.dev0 2023-08-23 02:25:56 -04:00
50 changed files with 1659 additions and 203 deletions

.github/PULL_REQUEST_TEMPLATE.md (new file)

@ -0,0 +1,47 @@
# What does this PR do?
<!--
Congratulations! You've made it this far! You're not quite done yet though.
Once merged, your PR is going to appear in the release notes with the title you set, so make sure it's a great title that fully reflects the extent of your awesome contribution.
Then, please replace this with a description of the change and which issue is fixed (if applicable). Please also include relevant motivation and context. List any dependencies (if any) that are required for this change.
Once you're done, someone will review your PR shortly (see the section "Who can review?" below to tag some potential reviewers). They may suggest changes to make the code even better. If no one reviewed your PR after a week has passed, don't hesitate to post a new comment @-mentioning the same persons---sometimes notifications get lost.
-->
<!-- Remove if not applicable -->
Fixes # (issue)
## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md#submitting-a-pull-request-pr),
Pull Request section?
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link
to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the
[documentation guidelines](https://github.com/huggingface/accelerate/tree/main/docs), and
[here are tips on formatting docstrings](https://github.com/huggingface/accelerate/tree/main/docs#writing-documentation---specification).
- [ ] Did you write any new necessary tests?
## Who can review?
Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
members/contributors who may be interested in your PR.
<!-- Your PR will be replied to more quickly if you can figure out the right person to tag with @
If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
- Big modeling: @SunMarc
- Fully-Sharded Data Parallelism: @pacman100
- DeepSpeed: @pacman100
- Command Line Interface: @muellerzr
- Documentation: @muellerzr
- Core parts of the library: @muellerzr @BenjaminBossan
- Maintained examples: @muellerzr or @pacman100
-->


@ -21,44 +21,40 @@ jobs:
version-cpu:
name: "Latest Accelerate CPU [version]"
runs-on: ubuntu-latest
runs-on: [self-hosted, docker-gpu, multi-gpu]
needs: get-version
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Check out code
uses: actions/checkout@v2
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and Push CPU
uses: docker/build-push-action@v2
uses: docker/build-push-action@v4
with:
context: ./docker/accelerate-cpu
file: docker/accelerate-cpu/Dockerfile
push: true
tags: huggingface/accelerate-cpu:${{needs.get-version.outputs.version}}
version-cuda:
name: "Latest Accelerate GPU [version]"
runs-on: ubuntu-latest
runs-on: [self-hosted, docker-gpu, multi-gpu]
needs: get-version
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Check out code
uses: actions/checkout@v2
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and Push GPU
uses: docker/build-push-action@v2
uses: docker/build-push-action@v4
with:
context: ./docker/accelerate-gpu
file: docker/accelerate-gpu/Dockerfile
push: true
tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}
tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}


@ -11,44 +11,50 @@ concurrency:
cancel-in-progress: false
jobs:
clean-storage:
name: "Clean docker image storage"
runs-on: [self-hosted, docker-gpu, multi-gpu]
steps:
- name: Clean storage
run: |
docker image prune --all -f --filter "until=48h"
docker system prune --all -f --filter "until=48h"
latest-cpu:
name: "Latest Accelerate CPU [dev]"
runs-on: ubuntu-latest
runs-on: [self-hosted, docker-gpu, multi-gpu]
needs: clean-storage
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Check out code
uses: actions/checkout@v2
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and Push CPU
uses: docker/build-push-action@v2
uses: docker/build-push-action@v4
with:
context: ./docker/accelerate-cpu
file: docker/accelerate-cpu/Dockerfile
push: true
tags: huggingface/accelerate-cpu
latest-cuda:
name: "Latest Accelerate GPU [dev]"
runs-on: ubuntu-latest
runs-on: [self-hosted, docker-gpu, multi-gpu]
needs: clean-storage
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Check out code
uses: actions/checkout@v2
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and Push GPU
uses: docker/build-push-action@v2
uses: docker/build-push-action@v4
with:
context: ./docker/accelerate-gpu
file: docker/accelerate-gpu/Dockerfile
push: true
tags: huggingface/accelerate-gpu
tags: huggingface/accelerate-gpu


@ -39,6 +39,7 @@ jobs:
make test
- name: Run examples on GPUs
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
@ -79,11 +80,13 @@ jobs:
make test_cli
- name: Run Integration tests on GPUs
if: always()
run: |
source activate accelerate
make test_integrations
- name: Run examples on GPUs
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y


@ -35,10 +35,12 @@ jobs:
make test_cli
- name: Run test on GPUs
if: always()
run: |
source activate accelerate
make test
- name: Run examples on GPUs
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
@ -74,6 +76,7 @@ jobs:
make test
- name: Run examples on GPUs
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y


@ -76,6 +76,7 @@ jobs:
env:
CUDA_VISIBLE_DEVICES: ${{ matrix.cuda_visible_devices }}
WANDB_DISABLED: true
if: always()
run: |
source activate accelerate;
pytest -sv tests/deepspeed
@ -106,7 +107,7 @@ jobs:
run: |
source activate accelerate
git config --global --add safe.directory '*'
git checkout main && git pull
git checkout master && git pull
if [[ ${{ matrix.skorch-version }} = pypi ]]; then
git checkout $(git describe --tags `git rev-list --tags --max-count=1`)
fi


@ -23,6 +23,8 @@
title: Example Zoo
- local: usage_guides/big_modeling
title: How to perform inference on large models with small resources
- local: usage_guides/model_size_estimator
title: Knowing how big of a model you can fit into memory
- local: usage_guides/quantization
title: How to quantize model
- local: usage_guides/distributed_inference


@ -401,6 +401,26 @@ args = ("fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=2)
```
In the case of running on multiple nodes, you need to set up a Jupyter session at each node and run the launching cell at the same time.
For an environment containing 2 nodes (computers) with 8 GPUs each and the main computer with an IP address of "172.31.43.8", it would look like so:
```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=0, num_nodes=2, num_processes=8)
```
And in the second Jupyter session on the other machine:
<Tip>
Notice how the `node_rank` has changed
</Tip>
```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=1, num_nodes=2, num_processes=8)
```
In the case of running on the TPU, it would look like so:
```python


@ -108,3 +108,23 @@ with accelerator.main_process_first():
remove_columns=["idx", "sentence1", "sentence2"],
)
```
## Applying checks such as Early Stopping
To run a check based on a flag set by a particular process, use the `set_trigger` and `check_trigger` APIs. Useful examples
include early stopping while monitoring the loss (as the loss differs slightly on each process).
Call [`Accelerator.set_trigger`] when your condition has been met, and [`Accelerator.check_trigger`] when checking if that condition has been met in any process:
```python
for (x, y) in data_loader:
    logits = model(x)
    loss = loss_func(logits, y)
    # Assume `should_do_early_stopping` is a custom-defined function that returns a boolean
    if should_do_early_stopping(loss):
        accelerator.set_trigger()

    # Later in the training script when we need to check for the breakpoint
    if accelerator.check_trigger():
        break
```


@ -228,6 +228,36 @@ The following arguments are only useful when training in SageMaker
* `--aws_access_key_id AWS_ACCESS_KEY_ID` (`str`) -- The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job
* `--aws_secret_access_key AWS_SECRET_ACCESS_KEY` (`str`) -- The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job
## accelerate estimate-memory
**Command**:
`accelerate estimate-memory` or `accelerate-estimate-memory` or `python -m accelerate.commands.estimate`
Estimates the total vRAM a particular model hosted on the Hub needs to be loaded in, with an estimate for training. Requires that `huggingface_hub` be installed.
<Tip>
When performing inference, typically add ≤20% to the result as overall allocation [as referenced here](https://blog.eleuther.ai/transformer-math/). We will have more extensive estimations in the future that will automatically be included in the calculation.
</Tip>
**Usage**:
```bash
accelerate estimate-memory {MODEL_NAME} --library_name {LIBRARY_NAME} --dtypes {dtype_1} {dtype_2} ...
```
**Required Arguments**:
* `MODEL_NAME` (`str`) -- The model name on the Hugging Face Hub
**Optional Arguments**:
* `--library_name {timm,transformers}` (`str`) -- The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub
* `--dtypes {float32,float16,int8,int4}` (`[{float32,float16,int8,int4} ...]`) -- The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`
* `--trust_remote_code` (`bool`) -- Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be passed for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.
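Putting the arguments together, a combined invocation might look like the following (model name reused from the examples later on this page):

```bash
accelerate estimate-memory HuggingFaceM4/idefics-80b-instruct --library_name transformers --dtypes float16 int8
```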
## accelerate tpu-config
`accelerate tpu-config`


@ -585,8 +585,10 @@ Mixed precision type: fp16
ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}}
```
**Note**: Remaining `"auto"` values are handled in `accelerator.prepare()` call as explained in point 2 of
**Note**:
1. Remaining `"auto"` values are handled in `accelerator.prepare()` call as explained in point 2 of
`Important code changes when using DeepSpeed Config File`.
2. Only when `gradient_accumulation_steps` is `auto` will the value passed while creating the `Accelerator` object via `Accelerator(gradient_accumulation_steps=k)` be used. When using the DeepSpeed Plugin, the value from it will be used and it will overwrite the value passed while creating the Accelerator object.
## Saving and loading


@ -37,14 +37,14 @@ for batch in dataloader:
<div class="block dark:hidden">
<iframe
src="https://muellerzr-accelerate-examples.hf.space?__theme=light"
src="https://hf-accelerate-accelerate-examples.hf.space?__theme=light"
width="850"
height="1600"
></iframe>
</div>
<div class="hidden dark:block">
<iframe
src="https://muellerzr-accelerate-examples.hf.space?__theme=dark"
src="https://hf-accelerate-accelerate-examples.hf.space?__theme=dark"
width="850"
height="1600"
></iframe>


@ -49,7 +49,7 @@ fsdp_config:
fsdp_offload_params: false
fsdp_sharding_strategy: 1
fsdp_state_dict_type: FULL_STATE_DICT
fsdp_transformer_layer_cls_to_wrap: GPT2Block
fsdp_transformer_layer_cls_to_wrap: BertLayer
machine_rank: 0
main_process_ip: null
main_process_port: null
@ -67,19 +67,36 @@ accelerate launch examples/nlp_example.py
Currently, `Accelerate` supports the following config through the CLI:
```bash
`Sharding Strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD
`Sharding Strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has full copy)
`Offload Params`: Decides Whether to offload parameters and gradients to CPU
`Auto Wrap Policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP [4] "HYBRID_SHARD" [5] "HYBRID_SHARD_ZERO2"
`Auto Wrap Policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP
`Transformer Layer Class to Wrap`: When using `TRANSFORMER_BASED_WRAP`, the user specifies a comma-separated string of transformer layer class names (case-sensitive) to wrap, e.g.,
`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput`...
This is important because submodules that share weights (e.g., the embedding layer) should not end up in different FSDP-wrapped units.
Using this policy, wrapping happens for each block containing Multi-Head Attention followed by a couple of MLP layers.
Remaining layers, including the shared embeddings, are conveniently wrapped in the same outermost FSDP unit.
Therefore, use this for transformer-based models.
You can use the `model._no_split_modules` for 🤗 Transformer models by answering `yes` to
`Do you want to use the model's `_no_split_modules` to wrap. Only applicable for 🤗 Transformers`.
It will try to use `model._no_split_modules` when available.
`Min Num Params`: minimum number of parameters when using `SIZE_BASED_WRAP`
`Backward Prefetch`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH
`State Dict Type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT
`State Dict Type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT
`Forward Prefetch`: if True, then FSDP explicitly prefetches the next upcoming
all-gather while executing in the forward pass. only use with Static graphs.
`Use Orig Params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters.
Useful in cases such as parameter-efficient fine-tuning.
Please refer to this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019)
`Sync Module States`: If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0
`Forward Prefetch`: If True, then FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass
```
For additional and more nuanced control, you can specify other FSDP parameters via `FullyShardedDataParallelPlugin`.
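A minimal sketch of that approach, assuming the script is started with `accelerate launch` under an FSDP config; the state-dict settings shown are illustrative:

```python
from torch.distributed.fsdp.fully_sharded_data_parallel import (
    FullOptimStateDictConfig,
    FullStateDictConfig,
)

from accelerate import Accelerator, FullyShardedDataParallelPlugin

# Illustrative overrides; any field not set here still comes from the `accelerate config` answers above.
fsdp_plugin = FullyShardedDataParallelPlugin(
    state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),
    optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False),
)

accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```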


@ -0,0 +1,121 @@
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Understanding how big of a model can fit on your machine
One very difficult aspect when exploring potential models to use on your machine is knowing just how big of a model will *fit* into memory with your current graphics card (such as loading the model onto CUDA).
To help alleviate this, 🤗 Accelerate has a CLI interface through `accelerate estimate-memory`. This tutorial will
help walk you through using it, what to expect, and at the end link to the interactive demo hosted on the 🤗 Hub which will
even let you post those results directly on the model repo!
Currently we support searching for models that can be used in `timm` and `transformers`.
<Tip>
This API will load the model into memory on the `meta` device, so we are not actually downloading
and loading the full weights of the model into memory, nor do we need to. As a result it's
perfectly fine to measure 8 billion parameter models (or more) without having to worry about
whether your CPU can handle it!
</Tip>
## The Command
When using `accelerate estimate-memory`, you need to pass in the name of the model you want to use, potentially the framework
that model uses (if it can't be found automatically), and the data types you want the model to be loaded in with.
For example, here is how we can calculate the memory footprint for `bert-base-cased`:
```bash
accelerate estimate-memory bert-base-cased
```
This will download the `config.json` for `bert-base-cased`, load the model on the `meta` device, and report back how much space
it will use:
Memory Usage for loading `bert-base-cased`:
| dtype | Largest Layer | Total Size | Training using Adam |
|---------|---------------|------------|---------------------|
| float32 | 84.95 MB | 418.18 MB | 1.61 GB |
| float16 | 42.47 MB | 206.59 MB | 826.36 MB |
| int8 | 21.24 MB | 103.29 MB | 413.18 MB |
| int4 | 10.62 MB | 51.65 MB | 206.59 MB |
By default it will return all the supported dtypes (`int4` through `float32`), but if you are interested in specific ones these can be filtered.
### Specific libraries
If the source library cannot be determined automatically (like it could in the case of `bert-base-cased`), a library name can
be passed in.
```bash
accelerate estimate-memory HuggingFaceM4/idefics-80b-instruct --library_name transformers
```
Memory Usage for loading `HuggingFaceM4/idefics-80b-instruct`:
| dtype | Largest Layer | Total Size | Training using Adam |
|---------|---------------|------------|---------------------|
| float32 | 3.02 GB | 297.12 GB | 1.16 TB |
| float16 | 1.51 GB | 148.56 GB | 594.24 GB |
| int8 | 772.52 MB | 74.28 GB | 297.12 GB |
| int4 | 386.26 MB | 37.14 GB | 148.56 GB |
```bash
accelerate estimate-memory timm/resnet50.a1_in1k --library_name timm
```
Memory Usage for loading `timm/resnet50.a1_in1k`:
| dtype | Largest Layer | Total Size | Training using Adam |
|---------|---------------|------------|---------------------|
| float32 | 9.0 MB | 97.7 MB | 390.78 MB |
| float16 | 4.5 MB | 48.85 MB | 195.39 MB |
| int8 | 2.25 MB | 24.42 MB | 97.7 MB |
| int4 | 1.12 MB | 12.21 MB | 48.85 MB |
### Specific dtypes
As mentioned earlier, while we return `int4` through `float32` by default, any dtype can be used from `float32`, `float16`, `int8`, and `int4`.
To do so, pass them in after specifying `--dtypes`:
```bash
accelerate estimate-memory bert-base-cased --dtypes float32 float16
```
Memory Usage for loading `bert-base-cased`:
| dtype | Largest Layer | Total Size | Training using Adam |
|---------|---------------|------------|---------------------|
| float32 | 84.95 MB | 413.18 MB | 1.61 GB |
| float16 | 42.47 MB | 206.59 MB | 826.36 MB |
## Caveats with this calculator
This calculator will tell you how much memory is needed to purely load the model in, *not* to perform inference.
This calculation is accurate within a few % of the actual value, so it gives a very good view of just how much memory it will take. For instance, loading `bert-base-cased` actually takes `413.68 MB` when loaded on CUDA in full precision, and the calculator estimates `413.18 MB`.
When performing inference, you can expect to add up to an additional 20%, as found by [EleutherAI](https://blog.eleuther.ai/transformer-math/). We'll be conducting research into finding a more accurate estimate of these values, and will update
this calculator once done.
## Live Gradio Demo
Lastly, we invite you to try the [live Gradio demo](https://huggingface.co/spaces/hf-accelerate/model-memory-usage) of this utility,
which includes an option to post a discussion thread on a model's repository with this data. Doing so will help provide access to these numbers in the community faster and help users know what you've learned!


@ -0,0 +1,246 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# specifically showcasing how to perform early stopping,
# and builds off the `nlp_example.py` script
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""
Creates a set of `DataLoader`s for the `glue` dataset,
using "bert-base-cased" as the tokenizer.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
"""
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples,
padding="longest",
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"],
shuffle=False,
collate_fn=collate_fn,
batch_size=EVAL_BATCH_SIZE,
drop_last=(accelerator.mixed_precision == "fp8"),
)
return train_dataloader, eval_dataloader
# New code
class EarlyStoppingCallback:
"A callback class that helps with early stopping"
def __init__(self, min_delta=0, patience=5):
self.min_delta = min_delta
self.patience = patience
self.counter = 0
self.lowest_loss = float("inf")
def check_early_stopping(self, eval_loss):
delta = self.lowest_loss - eval_loss
if delta >= self.min_delta:
self.lowest_loss = eval_loss
self.counter = 0
else:
self.counter += 1
if self.counter >= self.patience:
return True
return False
callback = EarlyStoppingCallback()
def training_function(config, args):
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
metric = evaluate.load("glue", "mrpc")
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# New code
# Check if we should stop the training on any processes
if callback.check_early_stopping(loss.item()):
accelerator.set_trigger()
# If so, we break the loop
if accelerator.check_trigger():
break
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()


@ -19,7 +19,7 @@ extras = {}
extras["quality"] = ["black ~= 23.1", "ruff >= 0.0.241", "hf-doc-builder >= 0.3.0", "urllib3 < 2.0.0"]
extras["docs"] = []
extras["test_prod"] = ["pytest", "pytest-xdist", "pytest-subtests", "parameterized"]
extras["test_dev"] = ["datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed", "tqdm", "bitsandbytes"]
extras["test_dev"] = ["datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed", "tqdm", "bitsandbytes", "timm"]
extras["testing"] = extras["test_prod"] + extras["test_dev"]
extras["rich"] = ["rich"]
@ -32,7 +32,7 @@ extras["sagemaker"] = [
setup(
name="accelerate",
version="0.22.0.dev0",
version="0.23.0",
description="Accelerate",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
@ -47,11 +47,12 @@ setup(
"console_scripts": [
"accelerate=accelerate.commands.accelerate_cli:main",
"accelerate-config=accelerate.commands.config:main",
"accelerate-estimate-memory=accelerate.commands.estimate:main",
"accelerate-launch=accelerate.commands.launch:main",
]
},
python_requires=">=3.8.0",
install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.10.0"],
install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.10.0", "huggingface_hub"],
extras_require=extras,
classifiers=[
"Development Status :: 5 - Production/Stable",
@ -67,21 +68,29 @@ setup(
)
# Release checklist
# 1. Change the version in __init__.py and setup.py.
# 2. Commit these changes with the message: "Release: VERSION"
# 3. Add a tag in git to mark the release: "git tag VERSION -m 'Adds tag VERSION for pypi' "
# Push the tag to git: git push --tags origin main
# 4. Run the following commands in the top-level directory:
# 1. Checkout the release branch (for a patch the current release branch, for a new minor version, create one):
# git checkout -b vXX.xx-release
# The -b is only necessary for creation (so remove it when doing a patch)
# 2. Change the version in __init__.py and setup.py to the proper value.
# 3. Commit these changes with the message: "Release: v<VERSION>"
# 4. Add a tag in git to mark the release:
# git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi'
# Push the tag and release commit to git: git push --tags origin vXX.xx-release
# 5. Run the following commands in the top-level directory:
# rm -rf dist
# rm -rf build
# python setup.py bdist_wheel
# python setup.py sdist
# 5. Upload the package to the pypi test server first:
# twine upload dist/* -r pypitest
# twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
# 6. Check that you can install it in a virtualenv by running:
# 6. Upload the package to the pypi test server first:
# twine upload dist/* -r testpypi
# 7. Check that you can install it in a virtualenv by running:
# pip install accelerate
# pip uninstall accelerate
# pip install -i https://testpypi.python.org/pypi accelerate
# accelerate env
# accelerate test
# 7. Upload the final version to actual pypi:
# 8. Upload the final version to actual pypi:
# twine upload dist/* -r pypi
# 8. Add release notes to the tag in github once everything is looking hunky-dory.
# 9. Update the version in __init__.py, setup.py to the new version "-dev" and push to master
# 9. Add release notes to the tag in github once everything is looking hunky-dory.
# 10. Go back to the main branch and update the version in __init__.py, setup.py to the new version ".dev" and push to
# main.


@ -1,4 +1,4 @@
__version__ = "0.22.0.dev0"
__version__ = "0.23.0"
from .accelerator import Accelerator
from .big_modeling import (


@ -16,6 +16,7 @@ from __future__ import annotations
import collections
import contextlib
import functools
import json
import math
import os
@ -67,6 +68,7 @@ from .utils import (
convert_outputs_to_fp32,
extract_model_from_parallel,
gather,
gather_object,
get_mixed_precision_context_manager,
get_pretty_name,
has_transformer_engine_layers,
@ -95,11 +97,10 @@ from .utils import (
wait_for_everyone,
)
from .utils.constants import FSDP_PYTORCH_VERSION
from .utils.other import is_compiled_module
if is_deepspeed_available():
import deepspeed
from .utils import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
@ -135,6 +136,10 @@ if is_tpu_available(check_device=False):
import torch_xla.distributed.xla_multiprocessing as xmp
if is_npu_available(check_device=False):
import torch_npu # noqa: F401
try:
from torch.optim.lr_scheduler import LRScheduler
except ImportError:
@ -416,11 +421,10 @@ class Accelerator:
if (
self.state.mixed_precision == "fp16"
and self.device.type != "cpu"
and self.device.type != "xpu"
and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM)
):
self.native_amp = True
if self.device.type not in ("cuda", "mps", "npu"):
if self.device.type not in ("xpu", "cuda", "mps", "npu"):
raise ValueError(err.format(mode="fp16", requirement="a GPU"))
kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}
if self.distributed_type == DistributedType.FSDP:
@ -462,6 +466,9 @@ class Accelerator:
if self.rng_types is None:
self.rng_types = ["generator"]
# Set a flag tensor for early stopping and other breakpoints
self.flag_tensor = None
@property
def use_distributed(self):
"""
@ -1193,10 +1200,12 @@ class Accelerator:
)
for obj in args:
# TODO: Look at enabling native TP training directly with a proper config
if (
isinstance(obj, torch.nn.Module)
and self.verify_device_map(obj)
and self.distributed_type != DistributedType.NO
and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true"
):
raise ValueError(
"You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
@ -1212,7 +1221,12 @@ class Accelerator:
for obj in args:
if isinstance(obj, torch.nn.Module):
model_count += 1
is_type_fsdp = type(obj) == FSDP
# if the model is compiled using PyTorch 2.0,
# check that the wrapped model is FSDP or not;
# else check if it is FSDP or not;
is_type_fsdp = isinstance(obj, FSDP) or (
is_compiled_module(obj) and isinstance(obj._orig_mod, FSDP)
)
if isinstance(obj, torch.optim.Optimizer):
optimizer_present = True
if model_count > 1 and optimizer_present:
@ -1328,7 +1342,12 @@ class Accelerator:
device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP
self._models.append(model)
if self.verify_device_map(model) and self.distributed_type != DistributedType.NO:
# TODO: Look at enabling native TP training directly with a proper config
if (
self.verify_device_map(model)
and self.distributed_type != DistributedType.NO
and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true"
):
raise ValueError(
"You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
" Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
@ -1363,7 +1382,7 @@ class Accelerator:
elif device_placement and not self.verify_device_map(model):
model = model.to(self.device)
if self.native_amp and self.distributed_type != DistributedType.FSDP:
if self.native_amp:
model._original_forward = model.forward
model_forward_func = model.forward.__func__ if hasattr(model.forward, "__func__") else model.forward
autocast_context = get_mixed_precision_context_manager(self.native_amp, self.autocast_handler)
@ -1401,15 +1420,27 @@ class Accelerator:
):
if any(p.requires_grad for p in model.parameters()):
kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
# TODO: Look at enabling native TP training directly with a proper config
if os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true":
device_ids, output_device = [self.local_process_index], self.local_process_index
else:
device_ids, output_device = None, None
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs
model, device_ids=device_ids, output_device=output_device, **kwargs
)
elif self.distributed_type == DistributedType.FSDP:
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
# Check if the model is already a FSDP model due to `Manual Wrapping` and if so,
# don't wrap it again
if type(model) != FSDP:
# In case the model is already compiled using PyTorch 2.0 and the wrapped model in it
# is a FSDP model, don't wrap it again
is_type_fsdp = isinstance(model, FSDP) or (
is_compiled_module(model) and isinstance(model._orig_mod, FSDP)
)
if not is_type_fsdp:
self.state.fsdp_plugin.set_auto_wrap_policy(model)
fsdp_plugin = self.state.fsdp_plugin
kwargs = {
@ -1427,20 +1458,40 @@ class Accelerator:
"device_id": self.device,
}
model = FSDP(model, **kwargs)
if fsdp_plugin.activation_checkpointing:
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointImpl,
apply_activation_checkpointing,
checkpoint_wrapper,
)
apply_activation_checkpointing(
model,
checkpoint_wrapper_fn=functools.partial(
checkpoint_wrapper,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
),
auto_wrap_policy=fsdp_plugin.auto_wrap_policy,
)
# if the previous and current models are same, delete the previous one
if len(self._models) > 1 and (self._models[-2] is self._models[-1]):
del self._models[-2]
self._models[-1] = model
elif self.distributed_type == DistributedType.MULTI_CPU:
kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)
elif self.distributed_type == DistributedType.TPU and self.state.fork_launched:
model = xmp.MpModelWrapper(model).to(self.device)
# torch.compile should be called last.
if self.state.dynamo_plugin.backend != DynamoBackend.NO:
# torch.compile should be called last and only if the model isn't already compiled.
if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model):
if not is_torch_version(">=", "2.0"):
raise ValueError("Using `torch.compile` requires PyTorch 2.0 or higher.")
model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs())
return model
def _prepare_deepspeed(self, *args):
import deepspeed
deepspeed_plugin = self.state.deepspeed_plugin
is_dataloader_present = any(isinstance(obj, torch.utils.data.DataLoader) for obj in args)
@ -1477,12 +1528,13 @@ class Accelerator:
batch_size_per_device = deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"]
result = [obj for obj in args]
if self.gradient_accumulation_steps != deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]:
logger.info(
f"Updating DeepSpeed's gradient accumulation steps to {self.gradient_accumulation_steps} from "
f"{deepspeed_plugin.deepspeed_config['gradient_accumulation_steps']}."
)
deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"] = self.gradient_accumulation_steps
# handle `gradient_accumulation_steps` when the value is `auto`
deepspeed_plugin.fill_match(
"gradient_accumulation_steps",
must_match=False,
gradient_accumulation_steps=self.gradient_accumulation_steps,
)
config_kwargs = {
"train_micro_batch_size_per_gpu": batch_size_per_device,
"train_batch_size": batch_size_per_device
@ -1527,9 +1579,14 @@ class Accelerator:
"Please remove the scheduler from the config file or "
"create `accelerate.utils.DummyScheduler` in the code."
)
elif "scheduler" not in deepspeed_plugin.deepspeed_config and isinstance(scheduler, (DummyScheduler)):
elif (
"scheduler" not in deepspeed_plugin.deepspeed_config
and isinstance(scheduler, (DummyScheduler))
and scheduler.lr_scheduler_callable is None
):
raise ValueError(
"You cannot create a `DummyScheduler` without specifying a scheduler in the config file."
"Either specify a scheduler in the config file or "
"pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`."
)
if optimizer is not None and scheduler is not None:
@ -1559,7 +1616,7 @@ class Accelerator:
config_kwargs.update(
{"optimizer.params.lr": optimizer.lr, "optimizer.params.weight_decay": optimizer.weight_decay}
)
if isinstance(scheduler, (DummyScheduler)):
if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is None:
max_lr = (
getattr(scheduler.optimizer, "lr", None)
if getattr(scheduler.optimizer, "defaults", None) is None
@ -1584,6 +1641,8 @@ class Accelerator:
if optimizer is not None:
if isinstance(optimizer, (DummyOptim)):
kwargs["model_parameters"] = optimizer.params
if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is not None:
kwargs["lr_scheduler"] = scheduler.lr_scheduler_callable
else:
if self.deepspeed_config["zero_optimization"].get("offload_optimizer", {}).get(
"device", "none"
@ -1594,7 +1653,10 @@ class Accelerator:
optimizer = DeepSpeedCPUAdam(optimizer.param_groups, **defaults)
kwargs["optimizer"] = optimizer
if scheduler is not None:
if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES:
if (
isinstance(scheduler, LRScheduler)
or type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES
):
kwargs["lr_scheduler"] = scheduler
engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs)
@ -1922,6 +1984,65 @@ class Accelerator:
else:
loss.backward(**kwargs)
def set_trigger(self):
"""
Sets the internal trigger tensor to 1 on the current process. A later check should follow using this which
will check across all processes.
Note:
Does not require `wait_for_everyone()`
Example:
```python
>>> from accelerate import Accelerator
>>> accelerator = Accelerator()
>>> # Assume later in the training script
>>> # `should_do_breakpoint` is a custom function to monitor when to break,
>>> # e.g. when the loss is NaN
>>> if should_do_breakpoint(loss):
... accelerator.set_trigger()
>>> # Assume later in the training script
>>> if accelerator.check_trigger():
... break
```
"""
self.flag_tensor = torch.tensor(1, device=self.device)
def check_trigger(self):
"""
Checks if the internal trigger tensor has been set to 1 in any of the processes. If so, will return `True` and
reset the trigger tensor to 0.
Note:
Does not require `wait_for_everyone()`
Example:
```python
>>> from accelerate import Accelerator
>>> accelerator = Accelerator()
>>> # Assume later in the training script
>>> # `should_do_breakpoint` is a custom function to monitor when to break,
>>> # e.g. when the loss is NaN
>>> if should_do_breakpoint(loss):
... accelerator.set_trigger()
>>> # Assume later in the training script
>>> if accelerator.check_trigger():
... break
```
"""
# Now that we are outside `__init__`, we can initialize it if it is `None` on device
if self.flag_tensor is None:
self.flag_tensor = torch.tensor(0, device=self.device)
flag_tensor = self.reduce(self.flag_tensor)
if flag_tensor.item() >= 1:
self.flag_tensor = torch.tensor(0, device=self.device)
return True
return False
def unscale_gradients(self, optimizer=None):
"""
Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings.
@ -1955,6 +2076,10 @@ class Accelerator:
for opt in optimizer:
while isinstance(opt, AcceleratedOptimizer):
opt = opt.optimizer
# Reduce gradients first for XLA
if self.distributed_type == DistributedType.TPU:
gradients = xm._fetch_gradients(opt)
self.reduce(gradients, scale=1.0 / self.num_processes)
self.scaler.unscale_(opt)
def clip_grad_norm_(self, parameters, max_norm, norm_type=2):
@ -2054,14 +2179,14 @@ class Accelerator:
"""
return gather(tensor)
def gather_for_metrics(self, tensor):
def gather_for_metrics(self, input_data):
"""
Gathers `tensor` and potentially drops duplicates in the last batch if on a distributed system. Should be used
for gathering the inputs and targets for metric calculation.
Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be
used for gathering the inputs and targets for metric calculation.
Args:
tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):
The tensors for calculating metrics across all processes.
input_data (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`):
The tensors or objects for calculating metrics across all processes.
Example:
@ -2079,7 +2204,17 @@ class Accelerator:
9
```
"""
tensor = self.gather(tensor)
try:
recursively_apply(lambda x: x, input_data, error_on_other_type=True)
all_tensors = True
except TypeError:
all_tensors = False
if not all_tensors:
data = gather_object(input_data)
else:
data = self.gather(input_data)
try:
if self.gradient_state.end_of_dataloader:
@ -2089,24 +2224,24 @@ class Accelerator:
logger.info(
"The used dataset had no length, returning gathered tensors. You should drop the remainder yourself."
)
return tensor
return data
elif self.gradient_state.remainder > 0:
# Last batch needs to be truncated on distributed systems as it contains additional samples
def _adjust_samples(tensor):
return tensor[: self.gradient_state.remainder]
return recursively_apply(_adjust_samples, tensor)
return recursively_apply(_adjust_samples, data)
else: # remainder is 0
# no remainder even though at end of dataloader, so nothing to do.
return tensor
return data
else:
# Not at the end of the dataloader, no need to adjust the tensors
return tensor
return data
except Exception:
# Dataset had no length or raised an error
return tensor
return data
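A sketch of the two code paths, assuming a multi-process run; the fake predictions are illustrative only:

```python
import torch

from accelerate import Accelerator

accelerator = Accelerator()

# Tensors still go through `gather` and get truncated to the true dataset length.
logits = torch.ones(4, device=accelerator.device) * accelerator.process_index
all_logits = accelerator.gather_for_metrics(logits)

# Non-tensor objects (e.g. decoded strings) are now routed through `gather_object`.
decoded = [f"prediction {i} from rank {accelerator.process_index}" for i in range(4)]
all_decoded = accelerator.gather_for_metrics(decoded)
```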
def reduce(self, tensor, reduction="sum"):
def reduce(self, tensor, reduction="sum", scale=1.0):
"""
Reduce the values in *tensor* across all processes based on *reduction*.
@ -2118,6 +2253,8 @@ class Accelerator:
The tensors to reduce across all processes.
reduction (`str`, *optional*, defaults to "sum"):
A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation.
scale (`float`, *optional*, defaults to 1.0):
A default scaling value to be applied after the reduce, only valid on XLA.
Returns:
`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
@ -2138,7 +2275,7 @@ class Accelerator:
tensor([4, 6])
```
"""
return reduce(tensor, reduction)
return reduce(tensor, reduction, scale)
def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):
"""
@ -2362,13 +2499,14 @@ class Accelerator:
for tracker in self.trackers:
tracker.finish()
def save(self, obj, f):
def save(self, obj, f, safe_serialization=False):
"""
Save the object passed to disk once per machine. Use in place of `torch.save`.
Args:
obj (`object`): The object to save.
f (`str` or `os.PathLike`): Where to save the content of `obj`.
safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors`
Example:
@ -2380,7 +2518,7 @@ class Accelerator:
>>> accelerator.save(arr, "array.pkl")
```
"""
save(obj, f)
save(obj, f, safe_serialization=safe_serialization)
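A short sketch of the new flag; it assumes `safetensors` is installed and that the object is a flat tensor-only state dict, which is what `safetensors` can serialize:

```python
import torch

from accelerate import Accelerator

accelerator = Accelerator()
state_dict = {"weight": torch.ones(2, 2), "bias": torch.zeros(2)}

# Default behaviour is unchanged: a pickle-based `torch.save`.
accelerator.save(state_dict, "checkpoint.bin")

# With the new flag, the object is written with `safetensors` instead.
accelerator.save(state_dict, "checkpoint.safetensors", safe_serialization=True)
```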
def save_model(
self,
@ -2460,7 +2598,7 @@ class Accelerator:
del state_dict[name]
warn_names.add(name)
if len(warn_names) > 0:
logger.warning_once(
logger.warning(
f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading",
)
@ -2491,7 +2629,7 @@ class Accelerator:
# Save the model
for shard_file, shard in shards.items():
self.save(shard, os.path.join(save_directory, shard_file))
self.save(shard, os.path.join(save_directory, shard_file), safe_serialization=safe_serialization)
if index is None:
path_to_weights = os.path.join(save_directory, WEIGHTS_NAME)
@ -2582,8 +2720,10 @@ class Accelerator:
os.makedirs(output_dir, exist_ok=True)
if self.project_configuration.automatic_checkpoint_naming:
folders = [os.path.join(output_dir, folder) for folder in os.listdir(output_dir)]
if self.project_configuration.total_limit is not None and (
len(folders) + 1 > self.project_configuration.total_limit
if (
self.project_configuration.total_limit is not None
and (len(folders) + 1 > self.project_configuration.total_limit)
and self.is_main_process
):
def _inner(folder):

View File

@ -304,6 +304,7 @@ def dispatch_model(
offload_buffers: bool = False,
skip_keys: Optional[Union[str, List[str]]] = None,
preload_module_classes: Optional[List[str]] = None,
force_hooks: bool = False,
):
"""
Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on
@ -334,6 +335,9 @@ def dispatch_model(
of the forward. This should only be used for classes that have submodules which are registered but not
called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
`dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
force_hooks (`bool`, *optional*, defaults to `False`):
Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
single device.
"""
# Error early if the device map is incomplete.
check_device_map(model, device_map)
@ -343,10 +347,11 @@ def dispatch_model(
getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False)
) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes"
# We attach hooks if the device_map have at least 2 different devices. Otherwise, the model in already loaded
# We attach hooks if the device_map has at least 2 different devices or if
# force_hooks is set to `True`. Otherwise, the model is already loaded
# in the unique device and the user can decide where to dispatch the model.
# If the model is quantized, we always force-dispatch the model
if (len(set(device_map.values())) > 1) or is_bnb_quantized:
if (len(set(device_map.values())) > 1) or is_bnb_quantized or force_hooks:
if main_device is None:
if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}:
main_device = "cpu"
@ -439,6 +444,7 @@ def load_checkpoint_and_dispatch(
offload_state_dict: Optional[bool] = None,
skip_keys: Optional[Union[str, List[str]]] = None,
preload_module_classes: Optional[List[str]] = None,
force_hooks: bool = False,
):
"""
Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
@ -481,6 +487,9 @@ def load_checkpoint_and_dispatch(
of the forward. This should only be used for classes that have submodules which are registered but not
called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
`dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
force_hooks (`bool`, *optional*, defaults to `False`):
Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
single device.
Example:
@ -541,4 +550,5 @@ def load_checkpoint_and_dispatch(
offload_buffers=offload_buffers,
skip_keys=skip_keys,
preload_module_classes=preload_module_classes,
force_hooks=force_hooks,
)
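A sketch of the new argument with a single-device map; the toy model is a placeholder and a CUDA device is assumed:

```python
import torch

from accelerate import dispatch_model

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU())
device_map = {"": 0}  # every module on GPU 0, i.e. only one device in the map

# Without `force_hooks`, a single-device map attaches no hooks at all; with it,
# the usual Accelerate alignment hooks are attached anyway.
model = dispatch_model(model, device_map=device_map, force_hooks=True)
```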

View File

@ -18,6 +18,7 @@ from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.estimate import estimate_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
@ -29,6 +30,7 @@ def main():
# Register commands
get_config_parser(subparsers=subparsers)
estimate_command_parser(subparsers=subparsers)
env_command_parser(subparsers=subparsers)
launch_command_parser(subparsers=subparsers)
tpu_command_parser(subparsers=subparsers)

View File

@ -21,6 +21,7 @@ from ...utils import (
DistributedType,
is_deepspeed_available,
is_mps_available,
is_npu_available,
is_transformers_available,
is_xpu_available,
)
@ -104,7 +105,7 @@ def get_cluster_input():
if distributed_type == DistributedType.NO:
use_cpu = _ask_field(
"Do you want to run your training on CPU only (even if a GPU / Apple Silicon device is available)? [yes/NO]:",
"Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:",
_convert_yes_no_to_bool,
default=False,
error_message="Please enter yes or no.",
@ -507,8 +508,12 @@ def get_cluster_input():
and not use_cpu
and not use_mps
):
if is_npu_available():
machine_type = "NPU(s)"
else:
machine_type = "GPU(s)"
gpu_ids = _ask_field(
"What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]:",
f"What {machine_type} (by id) should be used for training on this machine as a comma-seperated list? [all]:",
default="all",
)

View File

@ -109,6 +109,13 @@ class BaseConfig:
config_dict["use_cpu"] = False
if "debug" not in config_dict:
config_dict["debug"] = False
extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
if len(extra_keys) > 0:
raise ValueError(
f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
" version or fix (and potentially remove) these keys from your config file."
)
return cls(**config_dict)
def to_json_file(self, json_file):
@ -123,7 +130,6 @@ class BaseConfig:
config_dict = yaml.safe_load(f)
if "compute_environment" not in config_dict:
config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
if "mixed_precision" not in config_dict:
config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
if isinstance(config_dict["mixed_precision"], bool) and not config_dict["mixed_precision"]:
@ -137,6 +143,12 @@ class BaseConfig:
config_dict["use_cpu"] = False
if "debug" not in config_dict:
config_dict["debug"] = False
extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
if len(extra_keys) > 0:
raise ValueError(
f"The config file at {yaml_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
" version or fix (and potentially remove) these keys from your config file."
)
return cls(**config_dict)
def to_yaml_file(self, yaml_file):

View File

@ -221,6 +221,15 @@ def get_sagemaker_input():
ec2_instance_query += "? [ml.p3.2xlarge]:"
ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
debug = False
if distributed_type != SageMakerDistributedType.NO:
debug = _ask_field(
"Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ",
_convert_yes_no_to_bool,
default=False,
error_message="Please enter yes or no.",
)
num_machines = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
num_machines = _ask_field(
@ -254,4 +263,5 @@ def get_sagemaker_input():
num_machines=num_machines,
sagemaker_inputs_file=sagemaker_inputs_file,
sagemaker_metrics_file=sagemaker_metrics_file,
debug=debug,
)

View File

@ -0,0 +1,270 @@
#!/usr/bin/env python
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from huggingface_hub import model_info
from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
from accelerate import init_empty_weights
from accelerate.utils import (
calculate_maximum_sizes,
convert_bytes,
is_timm_available,
is_transformers_available,
)
if is_transformers_available():
import transformers
from transformers import AutoConfig, AutoModel
if is_timm_available():
import timm
def verify_on_hub(repo: str, token: str = None):
"Verifies that the model is on the hub and returns the model info."
try:
return model_info(repo, token=token)
except GatedRepoError:
return "gated"
except RepositoryNotFoundError:
return "repo"
def check_has_model(error):
"""
Checks what library spawned `error` when a model is not found
"""
if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]:
return "timm"
elif (
is_transformers_available()
and isinstance(error, OSError)
and "does not appear to have a file named" in error.args[0]
):
return "transformers"
else:
return "unknown"
def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None):
"""
Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption.
Args:
model_name (`str`):
The model name on the Hub
library_name (`str`):
The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no
metadata on the Hub to determine the library.
trust_remote_code (`bool`, `optional`, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
access_token (`str`, `optional`, defaults to `None`):
The access token to use to access private or gated models on the Hub. (for use on the Gradio app)
Returns:
`torch.nn.Module`: The torch model that has been initialized on the `meta` device.
"""
model_info = verify_on_hub(model_name, access_token)
# Simplified errors
if model_info == "gated":
raise GatedRepoError(
f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`."
)
elif model_info == "repo":
raise RepositoryNotFoundError(
f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo,"
" make sure you are authenticated via `huggingface-cli login` and have access."
)
if library_name is None:
library_name = getattr(model_info, "library_name", False)
if not library_name:
raise ValueError(
f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)"
)
if library_name == "transformers":
if not is_transformers_available():
raise ImportError(
f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`"
)
print(f"Loading pretrained config for `{model_name}` from `transformers`...")
auto_map = model_info.config.get("auto_map", False)
config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code)
with init_empty_weights():
# remote code could specify a specific `AutoModel` class in the `auto_map`
constructor = AutoModel
if isinstance(auto_map, dict):
value = None
for key in auto_map.keys():
if key.startswith("AutoModelFor"):
value = key
break
if value is not None:
constructor = getattr(transformers, value)
model = constructor.from_config(config, trust_remote_code=trust_remote_code)
elif library_name == "timm":
if not is_timm_available():
raise ImportError(
f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`"
)
print(f"Loading pretrained config for `{model_name}` from `timm`...")
with init_empty_weights():
model = timm.create_model(model_name, pretrained=False)
else:
raise ValueError(
f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support."
)
return model
def create_ascii_table(headers: list, rows: list, title: str):
"Creates a pretty table from a list of rows, minimal version of `tabulate`."
sep_char, in_between = "│", "─"
column_widths = []
for i in range(len(headers)):
column_values = [row[i] for row in rows] + [headers[i]]
max_column_width = max(len(value) for value in column_values)
column_widths.append(max_column_width)
formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))]
pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}"
diff = 0
def make_row(left_char, middle_char, right_char):
return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"
separator = make_row("├", "┼", "┤")
if len(title) > sum(column_widths):
diff = abs(len(title) - len(separator))
column_widths[-1] += diff
# Update with diff
separator = make_row("├", "┼", "┤")
initial_rows = [
make_row("┌", in_between, "┐"),
f"{sep_char}{title.center(len(separator) - 2)}{sep_char}",
make_row("├", "┬", "┤"),
]
table = "\n".join(initial_rows) + "\n"
column_widths[-1] += diff
centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)]
table += f"{pattern % tuple(centered_line)}\n{separator}\n"
for i, line in enumerate(rows):
centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)]
table += f"{pattern % tuple(centered_line)}\n"
table += f'└{"┴".join([in_between * n for n in column_widths])}┘'
return table
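For illustration, the helper expects every cell (including the numbers) to already be a string; the values below are made up:

```python
headers = ["dtype", "Total Size"]
rows = [["float32", "400.0 MB"], ["float16", "200.0 MB"]]
print(create_ascii_table(headers, rows, title="Memory Usage for loading `some-model`"))
```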
def estimate_command_parser(subparsers=None):
if subparsers is not None:
parser = subparsers.add_parser("estimate-memory")
else:
parser = argparse.ArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
parser.add_argument(
"--library_name",
type=str,
help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.",
choices=["timm", "transformers"],
)
parser.add_argument(
"--dtypes",
type=str,
nargs="+",
default=["float32", "float16", "int8", "int4"],
help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`",
choices=["float32", "float16", "int8", "int4"],
)
parser.add_argument(
"--trust_remote_code",
action="store_true",
help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag
should only be used for repositories you trust and in which you have read the code, as it will execute
code present on the Hub on your local machine.""",
)
if subparsers is not None:
parser.set_defaults(func=estimate_command)
return parser
def gather_data(args):
"Creates an empty model and gathers the data for the sizes"
try:
model = create_empty_model(
args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code
)
except (RuntimeError, OSError) as e:
library = check_has_model(e)
if library != "unknown":
raise RuntimeError(
f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo."
)
raise e
total_size, largest_layer = calculate_maximum_sizes(model)
data = []
for dtype in args.dtypes:
dtype_total_size = total_size
dtype_largest_layer = largest_layer[0]
if dtype == "float16":
dtype_total_size /= 2
dtype_largest_layer /= 2
elif dtype == "int8":
dtype_total_size /= 4
dtype_largest_layer /= 4
elif dtype == "int4":
dtype_total_size /= 8
dtype_largest_layer /= 8
dtype_training_size = dtype_total_size * 4
data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size])
return data
def estimate_command(args):
data = gather_data(args)
for row in data:
for i, item in enumerate(row):
if isinstance(item, (int, float)):
row[i] = convert_bytes(item)
headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]
title = f"Memory Usage for loading `{args.model_name}`"
table = create_ascii_table(headers, data, title)
print(table)
def main():
parser = estimate_command_parser()
args = parser.parse_args()
estimate_command(args)
if __name__ == "__main__":
main()
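The estimator can also be driven from Python; the model name is only an example, and the call needs `transformers` installed plus access to the Hub:

```python
# Roughly equivalent to: accelerate estimate-memory bert-base-cased --dtypes float32 float16
parser = estimate_command_parser()
args = parser.parse_args(["bert-base-cased", "--dtypes", "float32", "float16"])
estimate_command(args)
```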

View File

@ -311,6 +311,7 @@ class AlignDevicesHook(ModelHook):
for name, device in self.original_devices.items():
if device != torch.device("meta"):
set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None))
return module
def attach_execution_device_hook(

View File

@ -27,10 +27,19 @@ def test_launch():
_ = PartialState()
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
def notebook_launcher(
function,
args=(),
num_processes=None,
mixed_precision="no",
use_port="29500",
master_addr="127.0.0.1",
node_rank=0,
num_nodes=1,
):
"""
Launches a training function, using several processes if it's possible in the current environment (TPU with
multiple cores for instance).
Launches a training function, using several processes or multiple nodes if it's possible in the current environment
(TPU with multiple cores for instance).
<Tip warning={true}>
@ -55,6 +64,12 @@ def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no
If `fp16` or `bf16`, will use mixed precision training on multi-GPU.
use_port (`str`, *optional*, defaults to `"29500"`):
The port to use to communicate between processes when launching a multi-GPU training.
master_addr (`str`, *optional*, defaults to `"127.0.0.1"`):
The address to use for communication between processes.
node_rank (`int`, *optional*, defaults to 0):
The rank of the current node.
num_nodes (`int`, *optional*, defaults to 1):
The number of nodes to use for training.
Example:
@ -114,7 +129,8 @@ def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
)
if node_rank >= num_nodes:
raise ValueError("The node_rank must be less than the number of nodes.")
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
@ -129,7 +145,12 @@ def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
nproc=num_processes,
node_rank=node_rank,
world_size=num_nodes * num_processes,
master_addr=master_addr,
master_port=use_port,
mixed_precision=mixed_precision,
):
# First dummy launch
if os.environ.get("ACCELERATE_DEBUG_MODE", "false").lower() == "true":

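A sketch of the extended signature for a two-node run; the address, port, and training function are placeholders:

```python
from accelerate import notebook_launcher

def training_loop():
    ...  # placeholder for the actual training function

# Run on node 0; the second machine runs the same call with node_rank=1.
notebook_launcher(
    training_loop,
    num_processes=8,             # processes per node
    num_nodes=2,
    node_rank=0,
    master_addr="192.168.0.10",  # placeholder address of the rank-0 node
    use_port="29500",
)
```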
View File

@ -48,6 +48,10 @@ if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
if is_npu_available(check_device=False):
import torch_npu # noqa: F401
def is_initialized() -> bool:
"""
Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`,
@ -168,7 +172,11 @@ class PartialState:
# DeepSpeed always uses nccl
kwargs.pop("backend", None)
self.backend = "nccl"
if is_xpu_available() and is_ccl_available():
# Set DeepSpeed backend to ccl for xpu
self.backend = "ccl"
else:
self.backend = "nccl"
dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
self.num_processes = torch.distributed.get_world_size()

View File

@ -149,6 +149,39 @@ def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset():
class DummyIterableDataset(IterableDataset):
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __iter__(self):
for element in self.data:
yield element
iterable_dataset = DummyIterableDataset([n for n in range(30)])
dataloader = DataLoader(iterable_dataset, batch_size=4)
accelerator = Accelerator()
prepared_dataloader = accelerator.prepare(dataloader)
if accelerator.is_main_process:
logger = logging.root.manager.loggerDict["accelerate.accelerator"]
list_handler = ListHandler()
logger.addHandler(list_handler)
batches_for_metrics = []
for batch in prepared_dataloader:
batches_for_metrics.append(accelerator.gather_for_metrics(batch))
assert torch.cat(batches_for_metrics).size(0) == 30
if accelerator.is_main_process:
assert len(list_handler.logs) == 0
logger.removeHandler(list_handler)
def test_gather_for_metrics_with_iterable_dataset():
class DummyIterableDataset(IterableDataset):
def __init__(self, data):
@ -206,6 +239,8 @@ def main():
accelerator.state._reset_state()
print("test_gather_for_metrics_with_iterable_dataset")
test_gather_for_metrics_with_iterable_dataset()
print("test gather_for_metrics_with_non_tensor_objects_iterable_dataset")
test_gather_for_metrics_with_non_tensor_objects_iterable_dataset()
if accelerator.is_local_main_process:
print("**Test torch metrics**")
for split_batches in [True, False]:

View File

@ -541,6 +541,23 @@ def test_split_between_processes_tensor():
state.wait_for_everyone()
def test_trigger():
accelerator = Accelerator()
# should start with being false
assert accelerator.check_trigger() is False
# set a breakpoint on the main process
if accelerator.is_main_process:
accelerator.set_trigger()
# check it's been activated across all processes
# calls `all_reduce` and triggers a sync
assert accelerator.check_trigger() is True
# check it's been reset after the sync
assert accelerator.check_trigger() is False
def main():
accelerator = Accelerator()
state = accelerator.state
@ -590,6 +607,10 @@ def main():
print("\n**Training integration test**")
training_check()
if state.local_process_index == 0:
print("\n**Breakpoint trigger test**")
test_trigger()
if __name__ == "__main__":
main()

View File

@ -20,7 +20,6 @@ import sys
import tempfile
import unittest
from contextlib import contextmanager
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
@ -38,11 +37,13 @@ from ..utils import (
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_timm_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
str_to_bool,
)
@ -55,7 +56,7 @@ def parse_flag_from_env(key, default=False):
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
_value = str_to_bool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
@ -116,6 +117,20 @@ def require_huggingface_suite(test_case):
)(test_case)
def require_transformers(test_case):
"""
Decorator marking a test that requires transformers. These tests are skipped when they are not.
"""
return unittest.skipUnless(is_transformers_available(), "test requires the transformers library")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires timm. These tests are skipped when they are not.
"""
return unittest.skipUnless(is_timm_available(), "test requires the timm library")(test_case)
def require_bnb(test_case):
"""
Decorator marking a test that requires bitsandbytes. These tests are skipped when they are not.

View File

@ -39,31 +39,18 @@ from .utils import (
_available_trackers = []
if is_tensorboard_available():
try:
from torch.utils import tensorboard
except ModuleNotFoundError:
import tensorboardX as tensorboard
_available_trackers.append(LoggerType.TENSORBOARD)
if is_wandb_available():
import wandb
_available_trackers.append(LoggerType.WANDB)
if is_comet_ml_available():
from comet_ml import Experiment
_available_trackers.append(LoggerType.COMETML)
if is_aim_available():
from aim import Run
_available_trackers.append(LoggerType.AIM)
if is_mlflow_available():
import mlflow
_available_trackers.append(LoggerType.MLFLOW)
logger = get_logger(__name__)
@ -185,6 +172,10 @@ class TensorBoardTracker(GeneralTracker):
@on_main_process
def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike], **kwargs):
try:
from torch.utils import tensorboard
except ModuleNotFoundError:
import tensorboardX as tensorboard
super().__init__()
self.run_name = run_name
self.logging_dir = os.path.join(logging_dir, run_name)
@ -293,6 +284,9 @@ class WandBTracker(GeneralTracker):
def __init__(self, run_name: str, **kwargs):
super().__init__()
self.run_name = run_name
import wandb
self.run = wandb.init(project=self.run_name, **kwargs)
logger.debug(f"Initialized WandB project {self.run_name}")
logger.debug(
@ -313,6 +307,8 @@ class WandBTracker(GeneralTracker):
Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
`str`, `float`, `int`, or `None`.
"""
import wandb
wandb.config.update(values, allow_val_change=True)
logger.debug("Stored initial configuration hyperparameters to WandB")
@ -346,6 +342,8 @@ class WandBTracker(GeneralTracker):
kwargs:
Additional key word arguments passed along to the `wandb.log` method.
"""
import wandb
for k, v in values.items():
self.log({k: [wandb.Image(image) for image in v]}, step=step, **kwargs)
logger.debug("Successfully logged images to WandB")
@ -376,6 +374,7 @@ class WandBTracker(GeneralTracker):
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
"""
import wandb
values = {table_name: wandb.Table(columns=columns, data=data, dataframe=dataframe)}
self.log(values, step=step, **kwargs)
@ -409,6 +408,9 @@ class CometMLTracker(GeneralTracker):
def __init__(self, run_name: str, **kwargs):
super().__init__()
self.run_name = run_name
from comet_ml import Experiment
self.writer = Experiment(project_name=run_name, **kwargs)
logger.debug(f"Initialized CometML project {self.run_name}")
logger.debug(
@ -484,6 +486,9 @@ class AimTracker(GeneralTracker):
@on_main_process
def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = ".", **kwargs):
self.run_name = run_name
from aim import Run
self.writer = Run(repo=logging_dir, **kwargs)
self.writer.name = self.run_name
logger.debug(f"Initialized Aim project {self.run_name}")
@ -581,6 +586,8 @@ class MLflowTracker(GeneralTracker):
nested_run = os.getenv("MLFLOW_NESTED_RUN", nested_run)
import mlflow
exps = mlflow.search_experiments(filter_string=f"name = '{experiment_name}'")
if len(exps) > 0:
if len(exps) > 1:
@ -620,6 +627,7 @@ class MLflowTracker(GeneralTracker):
values (`dict`):
Values to be stored as initial hyperparameters as key-value pairs.
"""
import mlflow
for name, value in list(values.items()):
# internally, all values are converted to str in MLflow
@ -658,6 +666,7 @@ class MLflowTracker(GeneralTracker):
f'MLflowTracker is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. '
"MLflow's log_metric() only accepts float and int types so we dropped this attribute."
)
import mlflow
mlflow.log_metrics(metrics, step=step)
logger.debug("Successfully logged to mlflow")
@ -667,6 +676,8 @@ class MLflowTracker(GeneralTracker):
"""
End the active MLflow run.
"""
import mlflow
mlflow.end_run()

View File

@ -35,7 +35,7 @@ from .dataclasses import (
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env, str_to_bool
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
@ -59,12 +59,14 @@ from .imports import (
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_timm_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
calculate_maximum_sizes,
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
@ -163,6 +165,7 @@ from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
clear_environment,
convert_bytes,
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,

View File

@ -33,7 +33,7 @@ FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich", "mpich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

View File

@ -26,12 +26,13 @@ import warnings
from contextlib import contextmanager
from dataclasses import dataclass, field
from datetime import timedelta
from distutils.util import strtobool
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import torch
from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_STATE_DICT_TYPE
from .environment import str_to_bool
from .versions import compare_versions
class KwargsHandler:
@ -471,9 +472,9 @@ class TorchDynamoPlugin(KwargsHandler):
if self.mode is None:
self.mode = os.environ.get(prefix + "MODE", "default")
if self.fullgraph is None:
self.fullgraph = strtobool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
self.fullgraph = str_to_bool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
if self.dynamic is None:
self.dynamic = strtobool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1
self.dynamic = str_to_bool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1
def to_dict(self):
dynamo_config = copy.deepcopy(self.__dict__)
@ -494,7 +495,10 @@ class DeepSpeedPlugin:
},
)
gradient_accumulation_steps: int = field(
default=None, metadata={"help": "Number of steps to accumulate gradients before updating optimizer states"}
default=None,
metadata={
"help": "Number of steps to accumulate gradients before updating optimizer states. If not set, will use the value from the `Accelerator` directly."
},
)
gradient_clipping: float = field(default=None, metadata={"help": "Enable gradient clipping with value"})
zero_stage: int = field(
@ -537,7 +541,8 @@ class DeepSpeedPlugin:
from .deepspeed import HfDeepSpeedConfig
if self.gradient_accumulation_steps is None:
self.gradient_accumulation_steps = int(os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", 1))
gas = os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "auto")
self.gradient_accumulation_steps = int(gas) if gas.isdigit() else gas
if self.gradient_clipping is None:
gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "none")
@ -630,7 +635,7 @@ class DeepSpeedPlugin:
self.deepspeed_config["steps_per_print"] = float("inf") # this will stop deepspeed from logging @ stdout
if self.zero3_init_flag is None:
self.zero3_init_flag = (
strtobool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1
str_to_bool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1
)
if self.zero3_init_flag and not self.hf_ds_config.is_zero3():
warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.")
@ -725,7 +730,10 @@ class DeepSpeedPlugin:
if ds_config["train_batch_size"] == "auto":
del ds_config["train_batch_size"]
from transformers.deepspeed import HfDeepSpeedConfig
if compare_versions("transformers", "<", "4.33"):
from transformers.deepspeed import HfDeepSpeedConfig
else:
from transformers.integrations import HfDeepSpeedConfig
self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa
@ -872,6 +880,14 @@ class FullyShardedDataParallelPlugin:
"all-gather while executing in the forward pass. only use with Static graphs."
},
)
activation_checkpointing: bool = field(
default=False,
metadata={
"help": "If True, activation checkpointing is a technique to reduce memory usage by clearing activations of "
"certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time "
"for reduced memory usage."
},
)
def __post_init__(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy
@ -881,7 +897,7 @@ class FullyShardedDataParallelPlugin:
self.sharding_strategy = ShardingStrategy(int(os.environ.get(prefix + "SHARDING_STRATEGY", 1)))
if self.cpu_offload is None:
if strtobool(os.environ.get(prefix + "OFFLOAD_PARAMS", "False")) == 1:
if str_to_bool(os.environ.get(prefix + "OFFLOAD_PARAMS", "False")) == 1:
self.cpu_offload = CPUOffload(offload_params=True)
else:
self.cpu_offload = CPUOffload(offload_params=False)
@ -894,9 +910,10 @@ class FullyShardedDataParallelPlugin:
if self.state_dict_type is None:
state_dict_type_policy = os.environ.get(prefix + "STATE_DICT_TYPE", "FULL_STATE_DICT")
self.set_state_dict_type(state_dict_type_policy)
self.use_orig_params = strtobool(os.environ.get(prefix + "USE_ORIG_PARAMS", "False")) == 1
self.sync_module_states = strtobool(os.environ.get(prefix + "SYNC_MODULE_STATES", "True")) == 1
self.forward_prefetch = strtobool(os.environ.get(prefix + "FORWARD_PREFETCH", "False")) == 1
self.use_orig_params = str_to_bool(os.environ.get(prefix + "USE_ORIG_PARAMS", "False")) == 1
self.sync_module_states = str_to_bool(os.environ.get(prefix + "SYNC_MODULE_STATES", "True")) == 1
self.forward_prefetch = str_to_bool(os.environ.get(prefix + "FORWARD_PREFETCH", "False")) == 1
self.activation_checkpointing = str_to_bool(os.environ.get(prefix + "ACTIVATION_CHECKPOINTING", "False")) == 1
if self.sync_module_states:
self.param_init_fn = lambda x: x.to_empty(device=torch.cuda.current_device(), recurse=False)
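A minimal sketch of turning the new `activation_checkpointing` field on, either in code or through the FSDP-prefixed environment variable read above; the training setup itself is omitted:

```python
from accelerate import Accelerator
from accelerate.utils import FullyShardedDataParallelPlugin

fsdp_plugin = FullyShardedDataParallelPlugin(activation_checkpointing=True)
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)

# Equivalent, when launching with `accelerate launch`:
#   FSDP_ACTIVATION_CHECKPOINTING=true accelerate launch train.py
```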
@ -1152,13 +1169,13 @@ class MegatronLMPlugin:
if self.gradient_clipping is None:
self.gradient_clipping = float(os.environ.get(prefix + "GRADIENT_CLIPPING", 1.0))
if self.recompute_activation is None:
self.recompute_activation = strtobool(os.environ.get(prefix + "RECOMPUTE_ACTIVATION", "False")) == 1
self.recompute_activation = str_to_bool(os.environ.get(prefix + "RECOMPUTE_ACTIVATION", "False")) == 1
if self.use_distributed_optimizer is None:
self.use_distributed_optimizer = (
strtobool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1
str_to_bool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1
)
if self.sequence_parallelism is None:
self.sequence_parallelism = strtobool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1
self.sequence_parallelism = str_to_bool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1
if self.pp_degree > 1 or self.use_distributed_optimizer:
self.DDP_impl = "local"

View File

@ -254,16 +254,19 @@ class DummyScheduler:
Args:
optimizer (`torch.optim.optimizer.Optimizer`):
The optimizer to wrap.
total_num_steps (int):
total_num_steps (int, *optional*):
Total number of steps.
warmup_num_steps (int):
warmup_num_steps (int, *optional*):
Number of steps for warmup.
lr_scheduler_callable (callable, *optional*):
A callable function that creates an LR Scheduler. It accepts only one argument `optimizer`.
**kwargs:
Other arguments.
"""
def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, lr_scheduler_callable=None, **kwargs):
self.optimizer = optimizer
self.total_num_steps = total_num_steps
self.warmup_num_steps = warmup_num_steps
self.lr_scheduler_callable = lr_scheduler_callable
self.kwargs = kwargs
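A sketch of the new `lr_scheduler_callable` escape hatch, assuming a DeepSpeed config that defines an optimizer but no scheduler; the model and scheduler choice are placeholders:

```python
import torch

from accelerate import Accelerator
from accelerate.utils import DummyOptim, DummyScheduler

accelerator = Accelerator()  # assumed to be configured for DeepSpeed
model = torch.nn.Linear(10, 10)
optimizer = DummyOptim(params=model.parameters(), lr=5e-5)

def lr_scheduler_callable(optim):
    # Any callable that takes the real optimizer and returns a scheduler works here.
    return torch.optim.lr_scheduler.LinearLR(optim, total_iters=1000)

scheduler = DummyScheduler(optimizer, lr_scheduler_callable=lr_scheduler_callable)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
```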

View File

@ -13,7 +13,21 @@
# limitations under the License.
import os
from distutils.util import strtobool
def str_to_bool(value) -> int:
"""
Converts a string representation of truth to `True` (1) or `False` (0).
True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`.
"""
value = value.lower()
if value in ("y", "yes", "t", "true", "on", "1"):
return 1
elif value in ("n", "no", "f", "false", "off", "0"):
return 0
else:
raise ValueError(f"invalid truth value {value}")
def get_int_from_env(env_keys, default):
@ -28,7 +42,7 @@ def get_int_from_env(env_keys, default):
def parse_flag_from_env(key, default=False):
"""Returns truthy value for `key` from the env if available else the default."""
value = os.environ.get(key, str(default))
return strtobool(value) == 1 # As its name indicates `strtobool` actually returns an int...
return str_to_bool(value) == 1  # Despite its name, `str_to_bool` actually returns an int...
def parse_choice_from_env(key, default="no"):

View File

@ -16,14 +16,13 @@ import importlib
import importlib.metadata
import os
import warnings
from distutils.util import strtobool
from functools import lru_cache
import torch
from packaging import version
from packaging.version import parse
from .environment import parse_flag_from_env
from .environment import parse_flag_from_env, str_to_bool
from .versions import compare_versions, is_torch_version
@ -117,8 +116,6 @@ def is_bf16_available(ignore_tpu=False):
return not ignore_tpu
if torch.cuda.is_available():
return torch.cuda.is_bf16_supported()
if is_npu_available():
return False
return True
@ -143,7 +140,7 @@ def is_bnb_available():
def is_megatron_lm_available():
if strtobool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1:
if str_to_bool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1:
package_exists = importlib.util.find_spec("megatron") is not None
if package_exists:
try:
@ -166,6 +163,10 @@ def is_datasets_available():
return _is_package_available("datasets")
def is_timm_available():
return _is_package_available("timm")
def is_aim_available():
package_exists = _is_package_available("aim")
if package_exists:
@ -210,7 +211,16 @@ def is_tqdm_available():
def is_mlflow_available():
return _is_package_available("mlflow")
if _is_package_available("mlflow"):
return True
if importlib.util.find_spec("mlflow") is not None:
try:
_ = importlib.metadata.metadata("mlflow-skinny")
return True
except importlib.metadata.PackageNotFoundError:
return False
return False
def is_mps_available():

View File

@ -537,7 +537,9 @@ class PrepareForLaunch:
):
# Prepare the environment for torch.distributed
os.environ["LOCAL_RANK"] = str(index)
os.environ["RANK"] = str(index)
nproc = int(os.environ.get("NPROC", 1))
node_rank = int(os.environ.get("NODE_RANK", 0))
os.environ["RANK"] = str(nproc * node_rank + index)
os.environ["FORK_LAUNCHED"] = str(1)
self.launcher(*args)

View File

@ -35,6 +35,10 @@ from .offload import load_offloaded_weight, offload_weight, save_offload_index
from .tqdm import is_tqdm_available, tqdm
if is_npu_available(check_device=False):
import torch_npu # noqa: F401
if is_safetensors_available():
from safetensors import safe_open
from safetensors.torch import load_file as safe_load_file
@ -766,9 +770,6 @@ def get_balanced_memory(
user_not_set_max_memory = max_memory is None
max_memory = get_max_memory(max_memory)
if not (torch.cuda.is_available() or is_xpu_available()) or is_mps_available():
return max_memory
if not is_xpu_available():
num_devices = len([d for d in max_memory if torch.device(d).type == "cuda" and max_memory[d] > 0])
else:
@ -784,6 +785,9 @@ def get_balanced_memory(
]
)
if num_devices == 0:
return max_memory
if num_devices == 1:
# We cannot do low_zero on just one GPU, but we will still reserve some memory for the buffer
low_zero = False
@ -856,6 +860,24 @@ def get_balanced_memory(
return max_memory
def calculate_maximum_sizes(model: torch.nn.Module):
"Computes the total size of the model and its largest layer"
sizes = compute_module_sizes(model)
# `transformers` models store this information for us
no_split_modules = getattr(model, "_no_split_modules", None)
if no_split_modules is None:
no_split_modules = []
modules_to_treat = (
list(model.named_parameters(recurse=False))
+ list(model.named_children())
+ list(model.named_buffers(recurse=False))
)
largest_layer = get_max_layer_size(modules_to_treat, sizes, no_split_modules)
total_size = sizes[""]
return total_size, largest_layer
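A small local sanity check of the helper on a toy module (the expected byte counts assume default float32 parameters):

```python
import torch

from accelerate.utils import calculate_maximum_sizes, convert_bytes

model = torch.nn.Linear(10, 10)  # 110 float32 parameters -> 440 bytes in total
total_size, largest_layer = calculate_maximum_sizes(model)
print(convert_bytes(total_size))        # "440 bytes"
print(convert_bytes(largest_layer[0]))  # size of the single largest layer
```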
def infer_auto_device_map(
model: nn.Module,
max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
@ -1429,14 +1451,12 @@ def get_mixed_precision_context_manager(native_amp: bool = False, autocast_kwarg
autocast_kwargs = autocast_kwargs.to_kwargs()
if native_amp:
if state.mixed_precision == "fp16":
if is_npu_available():
return torch.npu.amp.autocast(dtype=torch.float16, **autocast_kwargs)
else:
return torch.autocast(device_type=state.device.type, dtype=torch.float16, **autocast_kwargs)
return torch.autocast(device_type=state.device.type, dtype=torch.float16, **autocast_kwargs)
elif state.mixed_precision == "bf16" and state.distributed_type in [
DistributedType.NO,
DistributedType.MULTI_CPU,
DistributedType.MULTI_GPU,
DistributedType.MULTI_NPU,
DistributedType.MULTI_XPU,
]:
return torch.autocast(device_type=state.device.type, dtype=torch.bfloat16, **autocast_kwargs)

View File

@ -547,7 +547,7 @@ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
@verify_operation
def reduce(tensor, reduction="mean"):
def reduce(tensor, reduction="mean", scale=1.0):
"""
Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes by the
mean of a given operation.
@ -557,25 +557,29 @@ def reduce(tensor, reduction="mean"):
The data to reduce.
reduction (`str`, *optional*, defaults to `"mean"`):
A reduction method. Can be of "mean", "sum", or "none"
scale (`float`, *optional*, defaults to 1.0):
A default scaling value to be applied after the reduce, only valid on XLA.
Returns:
The same data structure as `data` with all the tensors reduced.
"""
def _reduce_across_processes(tensor, reduction="mean"):
def _reduce_across_processes(tensor, reduction="mean", scale=1.0):
state = PartialState()
cloned_tensor = tensor.clone()
if state.distributed_type == DistributedType.NO:
return cloned_tensor
if state.distributed_type == DistributedType.TPU:
xm.all_reduce("sum", cloned_tensor)
xm.all_reduce("sum", cloned_tensor, scale)
elif state.distributed_type.value in TORCH_DISTRIBUTED_OPERATION_TYPES:
torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)
if reduction == "mean":
cloned_tensor /= state.num_processes
return cloned_tensor
return recursively_apply(_reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction)
return recursively_apply(
_reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction, scale=scale
)
def convert_to_fp32(tensor):

View File

@ -23,17 +23,17 @@ from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .constants import FSDP_PYTORCH_VERSION
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .imports import is_deepspeed_available, is_safetensors_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
if is_safetensors_available():
from safetensors.torch import save_file as safe_save_file
def is_compiled_module(module):
"""
@ -65,6 +65,8 @@ def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
model = model._orig_mod
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
options += (DeepSpeedEngine,)
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
@ -107,18 +109,22 @@ def wait_for_everyone():
PartialState().wait_for_everyone()
def save(obj, f):
def save(obj, f, safe_serialization=False):
"""
Save the data to disk. Use in place of `torch.save()`.
Args:
obj: The data to save
f: The file (or file-like object) to use to save the data
safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors`
"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(obj, f)
elif PartialState().local_process_index == 0:
torch.save(obj, f)
if safe_serialization:
safe_save_file(obj, f, metadata={"format": "pt"})
else:
torch.save(obj, f)
@contextmanager
@ -172,14 +178,22 @@ def patch_environment(**kwargs):
>>> print(os.environ["FOO"]) # raises KeyError
```
"""
existing_vars = {}
for key, value in kwargs.items():
os.environ[key.upper()] = str(value)
key = key.upper()
if key in os.environ:
existing_vars[key] = os.environ[key]
os.environ[key] = str(value)
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
key = key.upper()
if key in existing_vars:
# restore previous value
os.environ[key] = existing_vars[key]
else:
os.environ.pop(key, None)
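The fixed behaviour in miniature: a pre-existing variable is restored instead of being deleted when the context exits:

```python
import os

from accelerate.utils import patch_environment

os.environ["MASTER_PORT"] = "12345"
with patch_environment(master_port="29500"):
    assert os.environ["MASTER_PORT"] == "29500"
assert os.environ["MASTER_PORT"] == "12345"  # previously this key was dropped entirely
```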
def get_pretty_name(obj):
@ -222,3 +236,13 @@ def is_port_in_use(port: int = None) -> bool:
port = 29500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(("localhost", port)) == 0
def convert_bytes(size):
"Converts `size` from bytes to the largest possible unit"
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if size < 1024.0:
return f"{round(size, 2)} {x}"
size /= 1024.0
return f"{round(size, 2)} PB"

View File

@ -31,7 +31,6 @@ from transformers.utils import is_torch_bf16_available
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.scheduler import AcceleratedScheduler
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
@ -332,7 +331,8 @@ class DeepSpeedConfigIntegration(AccelerateTestCase):
model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
self.assertTrue(
"You cannot create a `DummyScheduler` without specifying a scheduler in the config file."
"Either specify a scheduler in the config file or "
"pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`."
in str(cm.exception)
)
@ -352,7 +352,7 @@ class DeepSpeedConfigIntegration(AccelerateTestCase):
self.assertTrue(accelerator.deepspeed_config["train_batch_size"], 16)
self.assertEqual(type(model), DeepSpeedEngine)
self.assertEqual(type(optimizer), DeepSpeedOptimizerWrapper)
self.assertEqual(type(lr_scheduler), AcceleratedScheduler)
self.assertEqual(type(lr_scheduler), DeepSpeedSchedulerWrapper)
self.assertEqual(type(accelerator.deepspeed_engine_wrapped), DeepSpeedEngineWrapper)
elif optim_type == DS_OPTIMIZER and scheduler_type == DS_SCHEDULER:
@ -483,6 +483,31 @@ class DeepSpeedConfigIntegration(AccelerateTestCase):
in str(cm.exception)
)
# passing `DummyScheduler` without `lr_scheduler_callable` should fail
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
self.assertTrue(
"Either specify a scheduler in the config file or "
"pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`."
in str(cm.exception)
)
# passing `lr_scheduler_callable` to DummyScheduler should enable DS Optim + Custom Scheduler
def _lr_scheduler_callable(optimizer):
return get_scheduler(
name="linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=1000,
)
dummy_lr_scheduler = DummyScheduler(dummy_optimizer, lr_scheduler_callable=_lr_scheduler_callable)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
def test_save_checkpoints(self):
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=self.ds_config_file[ZERO3],
@ -599,7 +624,7 @@ class DeepSpeedConfigIntegration(AccelerateTestCase):
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=ds_config,
zero3_init_flag=True,
gradient_accumulation_steps=1,
gradient_accumulation_steps=2,
gradient_clipping=1.0,
zero_stage=2,
offload_optimizer_device="cpu",
@ -611,7 +636,7 @@ class DeepSpeedConfigIntegration(AccelerateTestCase):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype)
deepspeed_plugin = accelerator.state.deepspeed_plugin
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_clipping"], 1.0)
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 1)
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 2)
self.assertEqual(deepspeed_plugin.deepspeed_config["zero_optimization"]["stage"], 2)
self.assertEqual(
deepspeed_plugin.deepspeed_config["zero_optimization"]["offload_optimizer"]["device"], "cpu"
@ -632,6 +657,42 @@ class DeepSpeedConfigIntegration(AccelerateTestCase):
in str(cm.exception)
)
# base case of passing in `gradient_accumulation_steps` to `DeepSpeedPlugin`
AcceleratorState._reset_state(True)
deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=4)
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype)
deepspeed_plugin = accelerator.state.deepspeed_plugin
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 4)
# filling the `auto` gradient_accumulation_steps via Accelerator's value
AcceleratorState._reset_state(True)
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=ds_config,
zero3_init_flag=True,
gradient_clipping=1.0,
zero_stage=2,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
)
with mockenv_context(**self.dist_env):
accelerator = Accelerator(
deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype, gradient_accumulation_steps=8
)
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False)
model = AutoModelForCausalLM.from_pretrained("gpt2")
dummy_optimizer = DummyOptim(params=model.parameters(), lr=5e-5, weight_decay=1e-4)
dummy_lr_scheduler = DummyScheduler(dummy_optimizer, warmup_num_steps=10, total_num_steps=1000)
model, _, train_dataloader, eval_dataloader, _ = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
deepspeed_plugin = accelerator.state.deepspeed_plugin
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 8)
def test_ds_config_assertions(self):
ambiguous_env = self.dist_env.copy()
ambiguous_env[

View File

@ -10,9 +10,10 @@ from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils import require_bnb, require_multi_gpu, require_safetensors, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
from accelerate.utils.modeling import load_checkpoint_in_model
def create_components():
@ -114,6 +115,30 @@ class AcceleratorTester(AccelerateTestCase):
accelerator.load_state(tmpdirname)
self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
def test_save_model_pytorch(self):
accelerator = Accelerator()
model = torch.nn.Linear(10, 10)
model_signature = get_signature(model)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_model(model, tmpdirname, safe_serialization=False)
# make sure loaded weights match
load_checkpoint_in_model(model, tmpdirname)
self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
@require_safetensors
def test_save_model_safetensors(self):
accelerator = Accelerator()
model = torch.nn.Linear(10, 10)
model_signature = get_signature(model)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_model(model, tmpdirname, safe_serialization=True)
# make sure loaded weights match
load_checkpoint_in_model(model, tmpdirname)
self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
def test_save_load_model_with_hooks(self):
accelerator = Accelerator()
model, optimizer, scheduler, train_dl, valid_dl = create_components()

View File

@ -444,6 +444,18 @@ class BigModelingTester(unittest.TestCase):
output = model(x)
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
@require_cuda
def test_dispatch_model_force_hooks(self):
model = ModelForTest()
device_map = {"": 0}
x = torch.randn(2, 3)
expected = model(x)
dispatch_model(model, device_map, force_hooks=True)
output = model(x)
self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))
@require_cuda
def test_load_checkpoint_and_dispatch(self):
model = ModelForTest()
@ -638,22 +650,16 @@ class BigModelingTester(unittest.TestCase):
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
# TODO: @younesbelkada remove the positional arg on the next `transformers` release
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
# TODO: @younesbelkada remove this block on the next `transformers` release
for p in model.parameters():
p.requires_grad = False
model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin")
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
# device_map="auto",
device_map="balanced",
)
@ -674,16 +680,11 @@ class BigModelingTester(unittest.TestCase):
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
# TODO: @younesbelkada remove the positional arg on the next `transformers` release
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
# TODO: @younesbelkada remove this block on the next `transformers` release
for p in model.parameters():
p.requires_grad = False
model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin")
# test with auto
@@ -699,14 +700,10 @@ class BigModelingTester(unittest.TestCase):
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
# TODO: @younesbelkada remove the positional arg on the next `transformers` release
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
for p in model.parameters():
p.requires_grad = False
# test with str device map
model = load_checkpoint_and_dispatch(
model,
@@ -720,15 +717,10 @@ class BigModelingTester(unittest.TestCase):
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
# TODO: @younesbelkada remove the positional arg on the next `transformers` release
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
# TODO: @younesbelkada remove this block on the next `transformers` release
for p in model.parameters():
p.requires_grad = False
# test with torch.device device map
model = load_checkpoint_and_dispatch(
model,
@@ -741,7 +733,6 @@ class BigModelingTester(unittest.TestCase):
@slow
@require_bnb
@unittest.skip("Un-skip in the next transformers release")
def test_dispatch_model_fp4_simple(self):
"""Tests that `dispatch_model` quantizes fp4 layers"""
from huggingface_hub import hf_hub_download

View File

@@ -18,10 +18,17 @@ import unittest
from pathlib import Path
import torch
from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
import accelerate
from accelerate.commands.estimate import estimate_command, estimate_command_parser, gather_data
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
from accelerate.test_utils.testing import (
require_timm,
require_transformers,
run_command,
)
from accelerate.utils import patch_environment
class AccelerateLauncherTester(unittest.TestCase):
@@ -60,10 +67,22 @@ class AccelerateLauncherTester(unittest.TestCase):
def test_config_compatibility(self):
for config in sorted(self.test_config_path.glob("**/*.yaml")):
with self.subTest(config_file=config):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
)
if "invalid" not in str(config):
with self.subTest(config_file=config):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
)
def test_invalid_keys(self):
with self.assertRaises(
RuntimeError,
msg="The config file at 'invalid_keys.yaml' had unknown keys ('another_invalid_key', 'invalid_key')",
):
execute_subprocess_async(
self.base_cmd
+ ["--config_file", str(self.test_config_path / "invalid_keys.yaml"), self.test_file_path],
env=os.environ.copy(),
)
def test_accelerate_test(self):
execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
@@ -211,3 +230,137 @@
f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
output,
)
class ModelEstimatorTester(unittest.TestCase):
"""
Test case for checking the output of `accelerate estimate-memory` is correct.
- Uses `estimate_command` when trying to catch raised errors
- Uses `gather_data` when just verifying the calculations are correct
"""
parser = estimate_command_parser()
def test_invalid_model_name(self):
with self.assertRaises(
RepositoryNotFoundError, msg="Repo for model `somebrokenname` does not exist on the Hub"
):
args = self.parser.parse_args(["somebrokenname"])
estimate_command(args)
@require_timm
def test_invalid_model_name_timm(self):
with self.assertRaises(RuntimeError, msg="Tried to load `muellerzr/dummy` with `timm` but"):
args = self.parser.parse_args(["muellerzr/dummy", "--library_name", "timm"])
estimate_command(args)
@require_transformers
def test_invalid_model_name_transformers(self):
with self.assertRaises(RuntimeError, msg="Tried to load `muellerzr/dummy` with `transformers` but"):
args = self.parser.parse_args(["muellerzr/dummy", "--library_name", "transformers"])
estimate_command(args)
def test_no_metadata(self):
with self.assertRaises(
ValueError, msg="Model `muellerzr/dummy` does not have any library metadata on the Hub"
):
args = self.parser.parse_args(["muellerzr/dummy"])
estimate_command(args)
def test_gated(self):
with self.assertRaises(GatedRepoError, msg="Repo for model `meta-llama/Llama-2-7b` is gated"):
args = self.parser.parse_args(["meta-llama/Llama-2-7b"])
with patch_environment(hf_hub_disable_implicit_token="1"):
estimate_command(args)
@require_transformers
def test_remote_code(self):
# Also tests that custom `Auto` classes work
args = self.parser.parse_args(["hf-internal-testing/test_dynamic_model"])
with self.assertRaises(ValueError, msg="--trust_remote_code"):
gather_data(args)
# Verify it works with the flag
args = self.parser.parse_args(["hf-internal-testing/test_dynamic_model", "--trust_remote_code"])
gather_data(args)
@require_transformers
def test_explicit_dtypes(self):
args = self.parser.parse_args(["bert-base-cased", "--dtypes", "float32", "float16"])
output = gather_data(args)
# The largest layer and total size of the model in bytes
largest_layer, total_size = 89075712, 433249280
# Check that the float32 and float16 estimates are calculated correctly
self.assertEqual(len(output), 2, f"Output was missing a precision, expected 2 but received {len(output)}")
for i, factor in enumerate([1, 2]):
precision = 32 // factor
precision_str = f"float{precision}"
largest_layer_estimate = largest_layer / factor
total_size_estimate = total_size / factor
total_training_size_estimate = total_size_estimate * 4
self.assertEqual(precision_str, output[i][0], f"Output is missing precision `{precision_str}`")
self.assertEqual(
largest_layer_estimate,
output[i][1],
f"Calculation for largest layer size in `{precision_str}` is incorrect.",
)
self.assertEqual(
total_size_estimate,
output[i][2],
msg=f"Calculation for total size in `{precision_str}` is incorrect.",
)
self.assertEqual(
total_training_size_estimate,
output[i][3],
msg=f"Calculation for total training size in `{precision_str}` is incorrect.",
)
@require_transformers
def test_transformers_model(self):
args = self.parser.parse_args(["bert-base-cased", "--dtypes", "float32"])
output = gather_data(args)
# The largest layer and total size of the model in bytes
largest_layer, total_size = 89075712, 433249280
self.assertEqual(
largest_layer,
output[0][1],
f"Calculation for largest layer size in `fp32` is incorrect, expected {largest_layer} but received {output[0][1]}",
)
self.assertEqual(
total_size,
output[0][2],
f"Calculation for total size in `fp32` is incorrect, expected {total_size} but received {output[0][2]}",
)
@require_transformers
def test_no_split_modules(self):
# idefics-80b-instruct has ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"]
args = self.parser.parse_args(["HuggingFaceM4/idefics-80b-instruct", "--dtypes", "float32"])
output = gather_data(args)
# without factoring in `no_split` modules, the largest layer is 721420288 bytes
self.assertNotEqual(
output[0][1], 721420288, "Largest layer calculation incorrect, did not factor in `no_split` modules."
)
# the real answer is 3240165632 bytes
self.assertEqual(output[0][1], 3240165632)
@require_timm
def test_timm_model(self):
args = self.parser.parse_args(["timm/resnet50.a1_in1k", "--library_name", "timm"])
output = gather_data(args)
# The largest layer and total size of the model in bytes
largest_layer, total_size = 9437184, 102441032
self.assertEqual(
largest_layer,
output[0][1],
f"Calculation for largest layer size in `fp32` is incorrect, expected {largest_layer} but received {output[0][1]}",
)
self.assertEqual(
total_size,
output[0][2],
f"Calculation for total size in `fp32` is incorrect, expected {total_size} but received {output[0][2]}",
)
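As a usage note on the estimator tests above, here is a short sketch of driving `accelerate estimate-memory` programmatically the same way the tests do. The meaning of the columns (dtype, largest layer, total size, total training size, all in bytes) is inferred from the assertions above rather than from any documented return type.

from accelerate.commands.estimate import estimate_command_parser, gather_data

parser = estimate_command_parser()
args = parser.parse_args(["bert-base-cased", "--dtypes", "float32", "float16"])

for row in gather_data(args):
    # inferred layout: row[0]=dtype, row[1]=largest layer, row[2]=total size, row[3]=total training size
    print(f"{row[0]}: largest layer {row[1]} B, total {row[2]} B, training ~{row[3]} B")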

View File

@@ -0,0 +1,15 @@
compute_environment: LOCAL_MACHINE
deepspeed_config: {}
distributed_type: 'NO'
downcast_bf16: 'no'
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: 'no'
num_machines: 1
num_processes: 1
use_cpu: false
invalid_key: "invalid_value"
another_invalid_key: "another_invalid_value"

View File

@@ -41,6 +41,7 @@ EXCLUDE_EXAMPLES = [
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
"early_stopping.py",
]
@@ -222,3 +223,7 @@ class FeatureExamplesTests(TempDirTestCase):
def test_local_sgd(self):
testargs = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs)
def test_early_stopping(self):
testargs = ["examples/by_feature/early_stopping.py"]
run_command(self._launch_args + testargs)

View File

@@ -547,6 +547,10 @@ class ModelingUtilsTester(unittest.TestCase):
max_memory = get_balanced_memory(model, max_memory={0: 0, 1: 300, 2: 300})
self.assertDictEqual({0: 0, 1: 215, 2: 300}, max_memory)
# A device whose max memory is set to 0 is left out of the balancing.
max_memory = get_balanced_memory(model, max_memory={0: 0, "cpu": 100})
self.assertDictEqual({0: 0, "cpu": 100}, max_memory)
@require_cuda
@require_safetensors
def test_load_state_dict(self):

View File

@@ -21,7 +21,7 @@ import torch
import accelerate
from accelerate import Accelerator
from accelerate.big_modeling import dispatch_model
from accelerate.test_utils import assert_exception, execute_subprocess_async, require_multi_gpu
from accelerate.test_utils import assert_exception, execute_subprocess_async, require_multi_gpu, skip
from accelerate.utils import patch_environment
@@ -66,6 +66,8 @@ class MultiGPUTester(unittest.TestCase):
with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
execute_subprocess_async(cmd, env=os.environ.copy())
# Need to see why this test raises forking issues when run as part of the suite
@skip
@require_multi_gpu
def test_notebook_launcher(self):
"""

View File

@@ -103,6 +103,25 @@ class UtilsTester(unittest.TestCase):
self.assertNotIn("AA", os.environ)
self.assertNotIn("BB", os.environ)
def test_patch_environment_key_exists(self):
# check that patch_environment correctly restores pre-existing env vars
with patch_environment(aa=1, BB=2):
self.assertEqual(os.environ.get("AA"), "1")
self.assertEqual(os.environ.get("BB"), "2")
with patch_environment(Aa=10, bb="20", cC=30):
self.assertEqual(os.environ.get("AA"), "10")
self.assertEqual(os.environ.get("BB"), "20")
self.assertEqual(os.environ.get("CC"), "30")
self.assertEqual(os.environ.get("AA"), "1")
self.assertEqual(os.environ.get("BB"), "2")
self.assertNotIn("CC", os.environ)
self.assertNotIn("AA", os.environ)
self.assertNotIn("BB", os.environ)
self.assertNotIn("CC", os.environ)
def test_can_undo_convert_outputs(self):
model = RegressionModel()
model._original_forward = model.forward