Mirror of https://github.com/huggingface/accelerate.git (synced 2025-11-19 17:14:29 +08:00)

Compare commits: dataloader ... v0.14.0
1 commit (SHA 4e2c5117d7)
@ -15,14 +15,10 @@
|
||||
"remoteEnv": {
|
||||
"PYTHONPATH": "${containerEnv:PATH}:${containerWorkspaceFolder}"
|
||||
},
|
||||
"customizations": {
|
||||
"vscode": {
|
||||
"extensions": [
|
||||
// Ensure we have IntelliSense in VSCode when running inside container
|
||||
"ms-python.python"
|
||||
]
|
||||
}
|
||||
},
|
||||
"extensions": [
|
||||
// Ensure we have IntelliSense in VSCode when running inside container
|
||||
"ms-python.python"
|
||||
],
|
||||
"workspaceFolder": "/workspaces/accelerate",
|
||||
// Need git for VSCode to color code modifications. Only runs when building environment.
|
||||
"onCreateCommand": "apt-get update && apt-get install -y git && pip install -e '.[dev]'"
1 .github/ISSUE_TEMPLATE/bug-report.yml (vendored)
@ -55,3 +55,4 @@ body:
|
||||
attributes:
|
||||
label: Expected behavior
|
||||
description: "A clear and concise description of what you would expect to happen."
|
||||
render: Shell
5 .github/workflows/build_pr_documentation.yml (vendored)
@ -9,8 +9,11 @@ concurrency:
jobs:
|
||||
build:
|
||||
uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
|
||||
uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@use_hf_hub
|
||||
with:
|
||||
commit_sha: ${{ github.event.pull_request.head.sha }}
|
||||
pr_number: ${{ github.event.number }}
|
||||
package: accelerate
|
||||
secrets:
|
||||
token: ${{ secrets.HF_DOC_PUSH }}
|
||||
comment_bot_token: ${{ secrets.HUGGINGFACE_PUSH }}
5 .github/workflows/delete_doc_comment.yml (vendored)
@ -7,7 +7,10 @@ on:
jobs:
|
||||
delete:
|
||||
uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main
|
||||
uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@use_hf_hub
|
||||
with:
|
||||
pr_number: ${{ github.event.number }}
|
||||
package: accelerate
|
||||
secrets:
|
||||
token: ${{ secrets.HF_DOC_PUSH }}
|
||||
comment_bot_token: ${{ secrets.HUGGINGFACE_PUSH }}
6 .github/workflows/nightly.yml (vendored)
@ -8,15 +8,12 @@ on:
|
||||
env:
|
||||
RUN_SLOW: "yes"
|
||||
IS_GITHUB_CI: "1"
|
||||
SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}
jobs:
|
||||
run_all_tests_single_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
TEST_TYPE: "single_gpu"
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
options: --gpus all --shm-size "16gb"
|
||||
@ -46,14 +43,12 @@ jobs:
|
||||
- name: Generate Report
|
||||
if: always()
|
||||
run: |
|
||||
pip install slack_sdk
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
run_all_tests_multi_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0,1"
|
||||
TEST_TYPE: "multi_gpu"
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
options: --gpus all --shm-size "16gb"
|
||||
@ -90,5 +85,4 @@ jobs:
|
||||
- name: Generate Report
|
||||
if: always()
|
||||
run: |
|
||||
pip install slack_sdk
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
4 .github/workflows/run_merge_tests.yml (vendored)
@ -26,7 +26,7 @@ jobs:
|
||||
source activate accelerate
|
||||
git config --global --add safe.directory '*'
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
pip install -e .[testing,test_trackers] -U
|
||||
pip install -e .[testing,test_trackers]
|
||||
pip install pytest-reportlog
- name: Run CLI tests
|
||||
@ -64,7 +64,7 @@ jobs:
|
||||
source activate accelerate
|
||||
git config --global --add safe.directory '*'
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
pip install -e .[testing,test_trackers] -U
|
||||
pip install -e .[testing,test_trackers]
|
||||
pip install pytest-reportlog
- name: Run CLI tests
5 .gitignore (vendored)
@ -138,7 +138,4 @@ dmypy.json
|
||||
.DS_Store
# More test things
|
||||
wandb
# ruff
|
||||
.ruff_cache
|
||||
wandb
|
||||
@ -152,7 +152,7 @@ Follow these steps to start contributing:
|
||||
$ make test
|
||||
```
|
||||
|
||||
`accelerate` relies on `black` and `ruff` to format its source code
|
||||
`accelerate` relies on `black` and `isort` to format its source code
|
||||
consistently. After you make changes, apply automatic style corrections and code verifications
|
||||
that can't be automated in one go with:
|
||||
|
||||
@ -165,7 +165,7 @@ Follow these steps to start contributing:
|
||||
$ make style
|
||||
```
|
||||
|
||||
`accelerate` also uses a few custom scripts to check for coding mistakes. Quality
|
||||
`accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality
|
||||
control runs in CI, however you can also run the same checks with:
```bash
11 Makefile
@ -8,19 +8,20 @@ extra_quality_checks:
|
||||
python utils/check_copies.py
|
||||
python utils/check_dummies.py
|
||||
python utils/check_repo.py
|
||||
doc-builder style src/accelerate docs/source --max_len 119
|
||||
python utils/style_doc.py src/accelerate docs/source --max_len 119
|
||||
|
||||
# this target runs checks on all files
|
||||
quality:
|
||||
black --check $(check_dirs)
|
||||
ruff $(check_dirs)
|
||||
doc-builder style src/accelerate docs/source --max_len 119 --check_only
|
||||
isort --check-only $(check_dirs)
|
||||
flake8 $(check_dirs)
|
||||
python utils/style_doc.py src/accelerate docs/source --max_len 119 --check_only
|
||||
|
||||
# Format source code automatically and check is there are any problems left that need manual fixing
|
||||
style:
|
||||
black $(check_dirs)
|
||||
ruff $(check_dirs) --fix
|
||||
doc-builder style src/accelerate docs/source --max_len 119
|
||||
isort $(check_dirs)
|
||||
python utils/style_doc.py src/accelerate docs/source --max_len 119
|
||||
|
||||
# Run tests for the library
|
||||
test:
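For reference, the Makefile targets in this diff are typically invoked as follows (a usage sketch based on the targets shown above):

```bash
# Check formatting and run the repository's custom checks without modifying files
make quality

# Apply black/ruff/doc-builder formatting in place
make style

# Run the library's test suite
make test
```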
|
||||
|
||||
16 README.md
@ -16,7 +16,7 @@ limitations under the License.
|
||||
|
||||
<p align="center">
|
||||
<br>
|
||||
<img src="https://raw.githubusercontent.com/huggingface/accelerate/main/docs/source/imgs/accelerate_logo.png" width="400"/>
|
||||
<img src="docs/source/imgs/accelerate_logo.png" width="400"/>
|
||||
<br>
|
||||
<p>
|
||||
|
||||
@ -136,7 +136,7 @@ Want to learn more? Check out the [documentation](https://huggingface.co/docs/ac
|
||||
|
||||
## Launching script
|
||||
|
||||
🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.run` or to write a specific launcher for TPU training!
|
||||
🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.launch` or to write a specific launcher for TPU training!
|
||||
On your machine(s) just run:
|
||||
|
||||
```bash
|
||||
@ -155,7 +155,7 @@ For instance, here is how you would run the GLUE example on the MRPC task (from
|
||||
accelerate launch examples/nlp_example.py
|
||||
```
|
||||
|
||||
This CLI tool is **optional**, and you can still use `python my_script.py` or `python -m torchrun my_script.py` at your convenience.
This CLI tool is **optional**, and you can still use `python my_script.py` or `python -m torch.distributed.launch my_script.py` at your convenience.
|
||||
|
||||
## Launching multi-CPU run using MPI
|
||||
|
||||
@ -171,12 +171,12 @@ mpirun -np 2 python examples/nlp_example.py
|
||||
🤗 Accelerate supports training on single/multiple GPUs using DeepSpeed. To use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you desire to tweak your DeepSpeed related args from your python script, we provide you the `DeepSpeedPlugin`.
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator, DeepSpeedPlugin
|
||||
from accelerator import Accelerator, DeepSpeedPlugin
|
||||
|
||||
# deepspeed needs to know your gradient accumulation steps before hand, so don't forget to pass it
|
||||
# Remember you still need to do gradient accumulation by yourself, just like you would have done without deepspeed
|
||||
deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)
|
||||
accelerator = Accelerator(mixed_precision='fp16', deepspeed_plugin=deepspeed_plugin)
|
||||
accelerator = Accelerator(fp16=True, deepspeed_plugin=deepspeed_plugin)
|
||||
|
||||
# How to save your 🤗 Transformer?
|
||||
accelerator.wait_for_everyone()
|
||||
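To put the README snippet above in context, here is a minimal hedged sketch of how the plugin is typically wired into a training step (the tiny model, optimizer, and dataloader are placeholders, not part of the README):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator, DeepSpeedPlugin

# DeepSpeed needs to know the gradient accumulation steps beforehand
deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)
accelerator = Accelerator(mixed_precision="fp16", deepspeed_plugin=deepspeed_plugin)

# Tiny placeholder model and data, just to show the wiring
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,)))
dataloader = DataLoader(dataset, batch_size=8)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for step, (inputs, labels) in enumerate(dataloader):
    loss = torch.nn.functional.cross_entropy(model(inputs), labels)
    # Gradient accumulation is still done manually, as the comment above notes
    accelerator.backward(loss / 2)
    if (step + 1) % 2 == 0:
        optimizer.step()
        optimizer.zero_grad()
```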
@ -208,17 +208,13 @@ You shouldn't use 🤗 Accelerate if you don't want to write a training loop you
|
||||
|
||||
## Frameworks using 🤗 Accelerate
|
||||
|
||||
If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around its capabilities, some frameworks and libraries that are built on top of 🤗 Accelerate are listed below:
|
||||
If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around your training loop, some frameworks that are built on top of 🤗 Accelerate are listed below:
|
||||
|
||||
* [Animus](https://github.com/Scitator/animus) is a minimalistic framework to run machine learning experiments. Animus highlights common "breakpoints" in ML experiments and provides a unified interface for them within [IExperiment](https://github.com/Scitator/animus/blob/main/animus/core.py#L76).
|
||||
* [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model train, and inference logic.
|
||||
* [fastai](https://github.com/fastai/fastai#installing) is a PyTorch framework for Deep Learning that simplifies training fast and accurate neural nets using modern best practices. fastai provides a [Learner](https://docs.fast.ai/learner.html#Learner) to handle the training, fine-tuning, and inference of deep learning algorithms.
|
||||
* [Finetuner](https://github.com/jina-ai/finetuner) is a service that enables models to create higher-quality embeddings for semantic search, visual similarity search, cross-modal text<->image search, recommendation systems, clustering, duplication detection, anomaly detection, or other uses.
|
||||
* [InvokeAI](https://github.com/invoke-ai/InvokeAI) is a creative engine for Stable Diffusion models, offering industry-leading WebUI, terminal usage support, and serves as the foundation for many commercial products.
|
||||
* [Kornia](https://kornia.readthedocs.io/en/latest/get-started/introduction.html) is a differentiable library that allows classical computer vision to be integrated into deep learning models. Kornia provides a [Trainer](https://kornia.readthedocs.io/en/latest/x.html#kornia.x.Trainer) with the specific purpose to train and fine-tune the supported deep learning algorithms within the library.
|
||||
* [Open Assistant](https://projects.laion.ai/Open-Assistant/) is a chat-based assistant that understands tasks, can interact with their party systems, and retrieve information dynamically to do so.
|
||||
* [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centred around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency; enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves!
|
||||
* [Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) is an open-source browser-based easy-to-use interface based on the Gradio library for Stable Diffusion.
|
||||
|
||||
|
||||
## Installation
|
||||
|
||||
@ -16,12 +16,12 @@ import argparse
|
||||
import time
|
||||
|
||||
import torch
|
||||
|
||||
import transformers
|
||||
from accelerate.utils import compute_module_sizes
|
||||
from measures_util import end_measure, log_measures, start_measure
|
||||
from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
|
||||
|
||||
from accelerate.utils import compute_module_sizes
|
||||
|
||||
|
||||
DEFAULT_MODELS = {
|
||||
"gpt-j-6b": {"is_causal": True, "model": "sgugger/sharded-gpt-j-6B", "tokenizer": "EleutherAI/gpt-j-6B"},
|
||||
|
||||
@ -2,9 +2,10 @@ import gc
|
||||
import threading
|
||||
import time
|
||||
|
||||
import psutil
|
||||
import torch
|
||||
|
||||
import psutil
|
||||
|
||||
|
||||
class PeakCPUMemory:
|
||||
def __init__(self):
|
||||
|
||||
@ -4,7 +4,7 @@
|
||||
# Use base conda image to reduce time
|
||||
FROM continuumio/miniconda3:latest AS compile-image
|
||||
# Specify py version
|
||||
ENV PYTHON_VERSION=3.8
|
||||
ENV PYTHON_VERSION=3.7.3
|
||||
# Install apt libs
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl git wget && \
|
||||
@ -23,9 +23,7 @@ SHELL ["/bin/bash", "-c"]
|
||||
RUN source activate accelerate && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \
|
||||
--extra-index-url https://download.pytorch.org/whl/cu117
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir bitsandbytes
|
||||
--extra-index-url https://download.pytorch.org/whl/cu113
|
||||
|
||||
# Stage 2
|
||||
FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04 AS build-image
|
||||
|
||||
267 docs/README.md
@ -1,267 +0,0 @@
|
||||
<!---
|
||||
Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
|
||||
# Generating the documentation
|
||||
|
||||
To generate the documentation, you first have to build it. Several packages are necessary to build the doc,
|
||||
you can install them with the following command, at the root of the code repository:
|
||||
|
||||
```bash
|
||||
pip install -e ".[docs]"
|
||||
```
|
||||
|
||||
Then you need to install our special tool that builds the documentation:
|
||||
|
||||
```bash
|
||||
pip install git+https://github.com/huggingface/doc-builder
|
||||
```
|
||||
|
||||
---
|
||||
**NOTE**
|
||||
|
||||
You only need to generate the documentation to inspect it locally (if you're planning changes and want to
|
||||
check how they look before committing for instance). You don't have to commit the built documentation.
|
||||
|
||||
---
|
||||
|
||||
## Building the documentation
|
||||
|
||||
Once you have setup the `doc-builder` and additional packages, you can generate the documentation by
|
||||
typing the following command:
|
||||
|
||||
```bash
|
||||
doc-builder build accelerate docs/source/ --build_dir ~/tmp/test-build
|
||||
```
|
||||
|
||||
You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
|
||||
the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
|
||||
Markdown editor.
|
||||
|
||||
## Previewing the documentation
|
||||
|
||||
To preview the docs, first install the `watchdog` module with:
|
||||
|
||||
```bash
|
||||
pip install watchdog
|
||||
```
|
||||
|
||||
Then run the following command:
|
||||
|
||||
```bash
|
||||
doc-builder preview {package_name} {path_to_docs}
|
||||
```
|
||||
|
||||
For example:
|
||||
|
||||
```bash
|
||||
doc-builder preview accelerate docs/source/
|
||||
```
|
||||
|
||||
The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives.
|
||||
|
||||
---
|
||||
**NOTE**
|
||||
|
||||
The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again).
|
||||
|
||||
---
|
||||
|
||||
## Adding a new element to the navigation bar
|
||||
|
||||
Accepted files are Markdown (.md or .mdx).
|
||||
|
||||
Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
|
||||
the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/accelerate/blob/main/docs/source/_toctree.yml) file.
|
||||
|
||||
## Renaming section headers and moving sections
|
||||
|
||||
It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media and it'd make for a much better user experience if users reading those months later could still easily navigate to the originally intended information.
|
||||
|
||||
Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
|
||||
|
||||
So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:
|
||||
|
||||
```
|
||||
Sections that were moved:
|
||||
|
||||
[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
|
||||
```
|
||||
and of course, if you moved it to another file, then:
|
||||
|
||||
```
|
||||
Sections that were moved:
|
||||
|
||||
[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
|
||||
```
|
||||
|
||||
Use the relative style to link to the new file so that the versioned docs continue to work.
|
||||
|
||||
|
||||
## Writing Documentation - Specification
|
||||
|
||||
The `huggingface/accelerate` documentation follows the
|
||||
[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings,
|
||||
although we can write them directly in Markdown.
|
||||
|
||||
### Adding a new tutorial
|
||||
|
||||
Adding a new tutorial or section is done in two steps:
|
||||
|
||||
- Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md).
|
||||
- Link that file in `./source/_toctree.yml` on the correct toc-tree.
|
||||
|
||||
Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so
|
||||
depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or
|
||||
four.
|
||||
|
||||
### Writing source documentation
|
||||
|
||||
Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names
|
||||
and objects like True, None, or any strings should usually be put in `code`.
|
||||
|
||||
When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool
|
||||
adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or
|
||||
function to be in the main package.
|
||||
|
||||
If you want to create a link to some internal class or function, you need to
|
||||
provide its path. For instance: \[\`utils.gather\`\]. This will be converted into a link with
|
||||
`utils.gather` in the description. To get rid of the path and only keep the name of the object you are
|
||||
linking to in the description, add a ~: \[\`~utils.gather\`\] will generate a link with `gather` in the description.
|
||||
|
||||
The same works for methods so you can either use \[\`XXXClass.method\`\] or \[\`~XXXClass.method\`\].
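For instance, a hypothetical docstring sentence combining these forms (the referenced objects are only examples picked from elsewhere in these docs) could read:

```
Calls [`~utils.gather`] under the hood; see also [`Accelerator.gather_for_metrics`] and [`~Accelerator.print`].
```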
|
||||
|
||||
#### Defining arguments in a method
|
||||
|
||||
Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and
|
||||
an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its
|
||||
description:
|
||||
|
||||
```
|
||||
Args:
|
||||
n_layers (`int`): The number of layers of the model.
|
||||
```
|
||||
|
||||
If the description is too long to fit in one line (more than 119 characters in total), another indentation is necessary
|
||||
before writing the description after the argument.
|
||||
|
||||
Finally, to maintain uniformity if any *one* description is too long to fit on one line, the
|
||||
rest of the parameters should follow suit and have an indentation before their description.
|
||||
|
||||
Here's an example showcasing everything so far:
|
||||
|
||||
```
|
||||
Args:
|
||||
gradient_accumulation_steps (`int`, *optional*, default to 1):
|
||||
The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with `Accelerator.accumulate`.
|
||||
cpu (`bool`, *optional*):
|
||||
Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force the execution on one process only.
|
||||
```
|
||||
|
||||
For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the
|
||||
following signature:
|
||||
|
||||
```
|
||||
def my_function(x: str = None, a: float = 1):
|
||||
```
|
||||
|
||||
then its documentation should look like this:
|
||||
|
||||
```
|
||||
Args:
|
||||
x (`str`, *optional*):
|
||||
This argument controls ... and has a description longer than 119 chars.
|
||||
a (`float`, *optional*, defaults to 1):
|
||||
This argument is used to ... and has a description longer than 119 chars.
|
||||
```
|
||||
|
||||
Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even
|
||||
if the first line describing your argument type and its default gets long, you can't break it on several lines. You can
|
||||
however write as many lines as you want in the indented description (see the examples above).
|
||||
|
||||
#### Writing a multi-line code block
|
||||
|
||||
Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:
|
||||
|
||||
|
||||
````
|
||||
```python
|
||||
# first line of code
|
||||
# second line
|
||||
# etc
|
||||
```
|
||||
````
|
||||
|
||||
#### Writing a return block
|
||||
|
||||
The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
|
||||
The first line should be the type of the return, followed by a line return. No need to indent further for the elements
|
||||
building the return.
|
||||
|
||||
Here's an example of a single value return:
|
||||
|
||||
```
|
||||
Returns:
|
||||
`List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
|
||||
```
|
||||
|
||||
Here's an example of a tuple return, comprising several objects:
|
||||
|
||||
```
|
||||
Returns:
|
||||
`tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
|
||||
- **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
|
||||
Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
|
||||
- **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
|
||||
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
||||
```
|
||||
|
||||
## Styling the docstring
|
||||
|
||||
We have an automatic script running with the `make style` command that will make sure that:
|
||||
- the docstrings fully take advantage of the line width
|
||||
- all code examples are formatted using black, like the code of the Transformers library
|
||||
|
||||
This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's
|
||||
recommended to commit your changes before running `make style`, so you can revert the changes done by that script
|
||||
easily.
|
||||
|
||||
## Writing documentation examples
|
||||
|
||||
The syntax for Example docstrings can look as follows:
|
||||
|
||||
```
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> import time
|
||||
>>> from accelerate import Accelerator
|
||||
>>> accelerator = Accelerator()
|
||||
>>> if accelerator.is_main_process:
|
||||
... time.sleep(2)
|
||||
... else:
|
||||
... print("I'm waiting for the main process to finish its sleep...")
|
||||
>>> accelerator.wait_for_everyone()
|
||||
>>> # Should print on every process at the same time
|
||||
>>> print("Everyone is here")
|
||||
```
|
||||
```
|
||||
|
||||
The docstring should give a minimal, clear example of how the respective function
|
||||
is to be used in inference and also include the expected (ideally sensible)
|
||||
output.
|
||||
Often, readers will try out the example before even going through the function
|
||||
or class definitions. Therefore, it is of utmost importance that the example
|
||||
works as expected.
|
||||
@ -17,40 +17,36 @@
|
||||
title: Launching distributed training from Jupyter Notebooks
|
||||
title: Tutorials
|
||||
- sections:
|
||||
- local: usage_guides/explore
|
||||
title: Start Here!
|
||||
- local: usage_guides/training_zoo
|
||||
title: Example Zoo
|
||||
- local: usage_guides/big_modeling
|
||||
title: How to perform inference on large models with small resources
|
||||
- local: usage_guides/gradient_accumulation
|
||||
title: Performing gradient accumulation
|
||||
- local: usage_guides/fsdp
|
||||
title: Fully Sharded Data Parallelism
|
||||
- local: usage_guides/checkpoint
|
||||
title: Saving and loading training states
|
||||
- local: usage_guides/tracking
|
||||
title: Using experiment trackers
|
||||
- local: usage_guides/memory
|
||||
title: How to avoid CUDA Out-of-Memory
|
||||
- local: usage_guides/mps
|
||||
title: How to use Apple Silicon M1 GPUs
|
||||
- local: usage_guides/deepspeed
|
||||
title: How to use DeepSpeed
|
||||
- local: usage_guides/fsdp
|
||||
title: How to use Fully Sharded Data Parallelism
|
||||
- local: usage_guides/tracking
|
||||
title: Using experiment trackers
|
||||
- local: usage_guides/big_modeling
|
||||
title: How to use large models with small resources
|
||||
- local: usage_guides/memory
|
||||
title: How to avoid CUDA Out-of-Memory
|
||||
- local: usage_guides/sagemaker
|
||||
title: Using 🤗 Accelerate on SageMaker
|
||||
- local: usage_guides/mps
|
||||
title: How to use Apple Silicon M1 GPUs
|
||||
- local: usage_guides/megatron_lm
|
||||
title: How to use Megatron-LM
|
||||
- local: usage_guides/sagemaker
|
||||
title: How to use 🤗 Accelerate with SageMaker
|
||||
- local: usage_guides/ipex
|
||||
title: How to use 🤗 Accelerate with Intel® Extension for PyTorch for cpu
|
||||
- local: usage_guides/training_zoo
|
||||
title: 🤗 Accelerate Example Zoo
|
||||
title: How-To Guides
|
||||
- sections:
|
||||
- local: concept_guides/performance
|
||||
title: Comparing performance across distributed setups
|
||||
- local: concept_guides/deferring_execution
|
||||
title: Executing and deferring jobs
|
||||
- local: concept_guides/gradient_synchronization
|
||||
title: Gradient synchronization
|
||||
- local: concept_guides/deferring_execution
|
||||
title: Executing and deferring jobs
|
||||
- local: concept_guides/training_tpu
|
||||
title: TPU best practices
|
||||
title: Concepts and fundamentals
|
||||
|
||||
@ -10,7 +10,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Launching Multi-GPU Training from a Jupyter Environment
|
||||
# Launching Multi-Node Training from a Jupyter Environment
|
||||
|
||||
This tutorial teaches you how to fine tune a computer vision model with 🤗 Accelerate from a Jupyter Notebook on a distributed system.
|
||||
You will also learn how to setup a few requirements needed for ensuring your environment is configured properly, your data has been prepared properly, and finally how to launch training.
|
||||
@ -35,7 +35,7 @@ The following code will restart Jupyter after writing the configuration, as CUDA
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
CUDA can't be initialized more than once on a multi-GPU system. It's fine to debug in the notebook and have calls to CUDA, but in order to finally train, a full cleanup and restart will need to be performed.
CUDA can't be initialized more than once on a multi-node system. It's fine to debug in the notebook and have calls to CUDA, but in order to finally train, a full cleanup and restart will need to be performed.
|
||||
|
||||
</Tip>
|
||||
|
||||
@ -153,7 +153,7 @@ def get_dataloaders(batch_size: int = 64):
|
||||
random_perm = np.random.permutation(len(fnames))
|
||||
cut = int(0.8 * len(fnames))
|
||||
train_split = random_perm[:cut]
|
||||
eval_split = random_perm[cut:]
|
||||
eval_split = random_perm[:cut]
|
||||
|
||||
# For training a simple RandomResizedCrop will be used
|
||||
train_tfm = Compose([RandomResizedCrop((224, 224), scale=(0.5, 1.0)), ToTensor()])
|
||||
@ -337,7 +337,7 @@ def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64):
|
||||
mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None]
|
||||
std = torch.tensor(model.default_cfg["std"])[None, :, None, None]
|
||||
|
||||
# To make these constants available on the active device, set it to the accelerator device
|
||||
# To make this constant available on the active device, set it to the accelerator device
|
||||
mean = mean.to(accelerator.device)
|
||||
std = std.to(accelerator.device)
|
||||
|
||||
@ -426,4 +426,4 @@ This notebook showed how to perform distributed training from inside of a Jupyte
|
||||
|
||||
- Make sure to save any code that use CUDA (or CUDA imports) for the function passed to [`notebook_launcher`]
|
||||
- Set `num_processes` to the number of devices used for training (such as the number of GPUs, CPUs, or TPUs), as shown in the sketch below
|
||||
- If using the TPU, declare your model outside the training loop function
|
||||
- If using the TPU, declare your model outside the training loop function
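Tying the checklist above together, a minimal sketch (assuming a `training_loop(mixed_precision, seed, batch_size)` function like the one defined earlier in this tutorial) looks like:

```python
from accelerate import notebook_launcher

args = ("fp16", 42, 64)  # mixed_precision, seed, batch_size
# num_processes should match the number of devices available on this machine
notebook_launcher(training_loop, args, num_processes=2)
```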
|
||||
@ -116,38 +116,4 @@ for batch in dataloader:
|
||||
accelerator.backward(loss)
|
||||
```
|
||||
|
||||
As a result, you should either use *`accelerator.accumulate` or `accelerator.no_sync`* when it comes to API choice.
|
||||
|
||||
## Just how much of a slowdown is there, and easy mistakes you can make
|
||||
|
||||
To setup a realistic example, consider the following setup:
|
||||
|
||||
* Two single-GPU T4 nodes and one node with two GPUs
|
||||
* Each GPU is a T4, and are hosted on GCP
|
||||
* The script used is a modification of the [NLP Example](https://github.com/muellerzr/timing_experiments/blob/main/baseline.py) script
|
||||
* Batch size per GPU is 16, and gradients are accumulated every 4 steps
|
||||
|
||||
All scripts are available in [this repository](https://github.com/muellerzr/timing_experiments).
|
||||
|
||||
If you are not careful about gradient synchronization and GPU communication, a *large* amount of time can be wasted when these GPUs communicate with each other during unnecessary periods.
|
||||
|
||||
By how much?
|
||||
|
||||
Reference:
|
||||
- Baseline: uses no synchronization practices discussed here
|
||||
- `no_sync` improperly: `no_sync` only around the `backward` call, not the `forward`
|
||||
- `no_sync`: using the `no_sync` pattern properly
|
||||
- `accumulate`: using [`~Accelerator.accumulate`] properly
|
||||
|
||||
Below are the average seconds per batch iterating over 29 batches of data for each setup on both a single node and on the dual-node setup:
|
||||
|
||||
| | Baseline | `no_sync` improperly | `no_sync` | `accumulate`|
|
||||
| :---------: | :-------: | :------------------: | :-------: | :---------: |
|
||||
| Multi-Node | 2±0.01s | 2.13±0.08s | **0.91±0.11s** | **0.91±0.11s** |
|
||||
| Single Node | 0.50±0.01s | 0.50±0.01s | **0.41±0.015s** | **0.41±0.015s** |
|
||||
|
||||
As you can see, if you are not careful about how you set up your gradient synchronization, you can incur more than a 2x slowdown during training!
|
||||
|
||||
If you are worried about making sure everything is done properly, we highly recommend utilizing the [`~Accelerator.accumulate`] function and passing in
|
||||
`gradient_accumulation_steps` to the [`Accelerator`] object so Accelerate can handle this for you.
|
||||
As a result, you should either use *`accelerator.accumulate` or `accelerator.no_sync`* when it comes to API choice.
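Concretely, the two recommended patterns look roughly like the sketch below (hedged: `model`, `optimizer`, `dataloader`, and `loss_fn` are placeholders assumed to have been set up beforehand and passed through `accelerator.prepare`):

```python
import contextlib

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

# Pattern 1: let Accelerate decide when gradients need to be synchronized
for batch in dataloader:
    with accelerator.accumulate(model):
        loss = loss_fn(model(batch))
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()

# Pattern 2: manage synchronization yourself, with no_sync around forward *and* backward
for index, batch in enumerate(dataloader):
    is_sync_step = (index + 1) % 4 == 0
    ctx = contextlib.nullcontext() if is_sync_step else accelerator.no_sync(model)
    with ctx:
        loss = loss_fn(model(batch))
        accelerator.backward(loss)
    if is_sync_step:
        optimizer.step()
        optimizer.zero_grad()
```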
|
||||
@ -29,7 +29,7 @@ There's three reasons for this that this tutorial will cover:
|
||||
While this issue has not come up as much, make sure to use [`utils.set_seed`] to fully set the seed in all distributed cases so training will be reproducible:
|
||||
|
||||
```python
|
||||
from accelerate.utils import set_seed
|
||||
from accelerate import set_seed
|
||||
|
||||
set_seed(42)
|
||||
```
|
||||
@ -89,12 +89,3 @@ learning_rate *= accelerator.num_processes
|
||||
optimizer = AdamW(params=model.parameters(), lr=learning_rate)
|
||||
```
|
||||
|
||||
You will also find that `accelerate` will step the learning rate based on the number of processes being trained on. This is because
|
||||
of the observed batch size noted earlier. So in a case of 2 GPUs, the learning rate will be stepped twice as often as a single GPU
|
||||
to account for the batch size being twice as large (if no changes to the batch size on the single GPU instance are made).
|
||||
|
||||
## Gradient Accumulation and Mixed Precision
|
||||
|
||||
When using gradient accumulation and mixed precision, due to how gradient averaging works (accumulation) and the precision loss (mixed precision),
|
||||
some degradation in performance is expected. This will be explicitly seen when comparing the batch-wise loss between different compute
|
||||
setups. However, the overall loss, metric, and general performance at the end of training should be _roughly_ the same.
|
||||
@ -55,7 +55,7 @@ accelerate launch {my_script.py}
|
||||
><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div>
|
||||
<p class="text-gray-700">Learn the basics and become familiar with using 🤗 Accelerate. Start here if you are using 🤗 Accelerate for the first time!</p>
|
||||
</a>
|
||||
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./usage_guides/explore"
|
||||
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./usage_guides/gradient_accumulation"
|
||||
><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
|
||||
<p class="text-gray-700">Practical guides to help you achieve a specific goal. Take a look at these guides to learn how to use 🤗 Accelerate to solve real-world problems.</p>
|
||||
</a>
|
||||
|
||||
@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License.
|
||||
# Accelerator
|
||||
|
||||
The [`Accelerator`] is the main class provided by 🤗 Accelerate.
|
||||
It serves as the main entry point for the API.
It serves as the main entrypoint for the API.
|
||||
|
||||
## Quick adaptation of your code
|
||||
|
||||
@ -45,7 +45,7 @@ you should search for and replace by the corresponding methods of your `accelera
|
||||
|
||||
### Printing
|
||||
|
||||
`print` statements should be replaced by [`~Accelerator.print`] to be printed once per process:
|
||||
`print` statements should be replaced by [`~Accelerator.print`] to be printed once per process
|
||||
|
||||
```diff
|
||||
- print("My thing I want to print!")
|
||||
@ -113,7 +113,7 @@ def do_my_thing():
|
||||
|
||||
### Synchronicity control
|
||||
|
||||
Use [`~Accelerator.wait_for_everyone`] to make sure all processes join that point before continuing. (Useful before a model save for instance).
|
||||
Use [`~Accelerator.wait_for_everyone`] to make sure all processes join that point before continuing. (Useful before a model save for instance)
|
||||
|
||||
### Saving and loading
|
||||
|
||||
@ -160,4 +160,4 @@ multi-device training, check if the step should actually be performed, and auto-
|
||||
|
||||
## Overall API documentation:
|
||||
|
||||
[[autodoc]] Accelerator
|
||||
[[autodoc]] Accelerator
|
||||
@ -35,50 +35,6 @@ accelerate config [arguments]
|
||||
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
|
||||
* `-h`, `--help` (`bool`) -- Show a help message and exit
|
||||
|
||||
## accelerate config default
|
||||
|
||||
**Command**:
|
||||
|
||||
`accelerate config default` or `accelerate-config default`
|
||||
|
||||
Create a default config file for Accelerate with only a few flags set.
|
||||
|
||||
**Usage**:
|
||||
|
||||
```bash
|
||||
accelerate config default [arguments]
|
||||
```
|
||||
|
||||
**Optional Arguments**:
|
||||
* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
|
||||
of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
|
||||
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
|
||||
|
||||
* `-h`, `--help` (`bool`) -- Show a help message and exit
|
||||
* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.
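As a usage sketch, the flags above can be combined like this (the chosen values are only examples):

```bash
accelerate config default --mixed_precision fp16
# or write the file somewhere other than the default cache location
accelerate config default --config_file ./my_default_config.yaml --mixed_precision bf16
```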
|
||||
|
||||
## accelerate config update
|
||||
|
||||
**Command**:
|
||||
|
||||
`accelerate config update` or `accelerate-config update`
|
||||
|
||||
Update an existing config file with the latest defaults while maintaining the old configuration.
|
||||
|
||||
**Usage**:
|
||||
|
||||
```bash
|
||||
accelerate config update [arguments]
|
||||
```
|
||||
|
||||
**Optional Arguments**:
|
||||
* `--config_file CONFIG_FILE` (`str`) -- The path to the config file to update. Will default to a file named default_config.yaml in the cache location, which is the content
|
||||
of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
|
||||
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
|
||||
|
||||
* `-h`, `--help` (`bool`) -- Show a help message and exit
|
||||
|
||||
|
||||
## accelerate env
|
||||
|
||||
**Command**:
|
||||
@ -125,8 +81,6 @@ accelerate launch [arguments] {training_script} --{training_script-argument-1} -
|
||||
* `-m`, `--module` (`bool`) -- Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.
|
||||
* `--no_python` (`bool`) -- Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.
|
||||
* `--debug` (`bool`) -- Whether to print out the torch.distributed stack trace when something fails.
|
||||
* `-q`, `--quiet` (`bool`) -- Silence subprocess errors from the launch stack trace to only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations).
|
||||
|
||||
|
||||
The rest of these arguments are configured through `accelerate config` and are read in from the specified `--config_file` (or default configuration) for their
|
||||
values. They can also be passed in manually.
|
||||
@ -135,6 +89,7 @@ values. They can also be passed in manually.
|
||||
|
||||
* `--cpu` (`bool`) -- Whether or not to force the training on the CPU.
|
||||
* `--multi_gpu` (`bool`) -- Whether or not this should launch a distributed GPU training.
|
||||
* `--mps` (`bool`) -- Whether or not this should use MPS-enabled GPU device on MacOS machines.
|
||||
* `--tpu` (`bool`) -- Whether or not this should launch a TPU training.
|
||||
|
||||
**Resource Selection Arguments**:
|
||||
|
||||
@ -21,14 +21,4 @@ To utilize this replace cases of `logging` with `accelerate.logging`:
|
||||
+ logger = get_logger(__name__)
|
||||
```
|
||||
|
||||
## Setting the log level
|
||||
|
||||
The log level can be set with the `ACCELERATE_LOG_LEVEL` environment variable or by passing
|
||||
`log_level` to `get_logger`:
|
||||
```python
|
||||
from accelerate.logging import get_logger
|
||||
|
||||
logger = get_logger(__name__, log_level="INFO")
|
||||
```
|
||||
|
||||
[[autodoc]] logging.get_logger
|
||||
@ -18,8 +18,6 @@ instances share the same state, which is initialized on the first instantiation.
|
||||
These classes are immutable and store information about certain configurations or
|
||||
states.
|
||||
|
||||
[[autodoc]] state.PartialState
|
||||
|
||||
[[autodoc]] state.AcceleratorState
|
||||
|
||||
[[autodoc]] state.GradientState
|
||||
@ -24,7 +24,3 @@ specific language governing permissions and limitations under the License.
|
||||
- __init__
|
||||
[[autodoc]] tracking.CometMLTracker
|
||||
- __init__
|
||||
[[autodoc]] tracking.AimTracker
|
||||
- __init__
|
||||
[[autodoc]] tracking.MLflowTracker
|
||||
- __init__
|
||||
|
||||
@ -24,8 +24,6 @@ These are basic dataclasses used throughout 🤗 Accelerate and they can be pass
|
||||
|
||||
[[autodoc]] utils.PrecisionType
|
||||
|
||||
[[autodoc]] utils.ProjectConfiguration
|
||||
|
||||
## Data Manipulation and Operations
|
||||
|
||||
These include data operations that mimic the same `torch` ops but can be used on distributed processes.
|
||||
@ -95,10 +93,3 @@ These utilities relate to setting and synchronizing of all the random states.
|
||||
[[autodoc]] utils.synchronize_rng_state
|
||||
|
||||
[[autodoc]] utils.synchronize_rng_states
|
||||
|
||||
|
||||
## PyTorch XLA
|
||||
|
||||
These include utilities that are useful while using PyTorch with XLA.
|
||||
|
||||
[[autodoc]] utils.install_xla
|
||||
|
||||
@ -67,9 +67,9 @@ use `shuffle=True` or any kind of random sampler).
|
||||
|
||||
</Tip>
|
||||
|
||||
Alternatively, you can use the option `split_batches=True` when creating and initializing your
|
||||
[`Accelerator`], in which case the batch size will always stay the same, whether you run your
|
||||
script on 1, 2, 4, or 64 GPUs.
|
||||
Alternatively, you can use the option `split_batches=True` when creating initializing your
|
||||
[`Accelerator`], in which case the batch size will always stay the same, whether your run your
|
||||
script on 1, 2, 4 or 64 GPUs.
|
||||
|
||||
You should execute this instruction as soon as all objects for training are created, before starting your actual
|
||||
training loop.
|
||||
@ -164,8 +164,9 @@ should be calculated through the [`~Accelerator.gather_for_metrics`] method to a
|
||||
|
||||
## Launching your distributed script
|
||||
|
||||
You can use the regular commands to launch your distributed training (like `torch.distributed.run` for
|
||||
PyTorch), they are fully compatible with 🤗 Accelerate.
|
||||
You can use the regular commands to launch your distributed training (like `torch.distributed.launch` for
|
||||
PyTorch), they are fully compatible with 🤗 Accelerate. The only caveat here is that 🤗 Accelerate uses the environment
|
||||
to determine all useful information, so `torch.distributed.launch` should be used with the flag `--use_env`.
|
||||
|
||||
🤗 Accelerate also provides a CLI tool that unifies all launchers, so you only have to remember one command. To use it,
|
||||
just run:
|
||||
@ -205,7 +206,7 @@ Now that this is done, you can run your script with the following command:
|
||||
accelerate launch path_to_script.py --args_for_the_script
|
||||
```
|
||||
|
||||
If you stored the config file in a non-default location, you can indicate it to the launcher like this:
|
||||
If you stored the config file in a non-default location, you can indicate it to the launcher like his:
|
||||
|
||||
```bash
|
||||
accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script
|
||||
@ -369,11 +370,7 @@ Note that since all the model parameters are references to tensors, this will lo
|
||||
## Saving/loading entire states
|
||||
|
||||
When training your model, you may want to save the current state of the model, optimizer, random generators, and potentially LR schedulers to be restored in the _same script_.
|
||||
You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so.
|
||||
|
||||
To further customize where and how states saved through [`~Accelerator.save_state`] the [`~utils.ProjectConfiguration`] class can be used. For example
|
||||
if `automatic_checkpoint_naming` is enabled each saved checkpoint will be located then at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.
|
||||
|
||||
You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so, just by simply passing in a save location.
|
||||
If you have registered any other stateful items to be stored through [`~Accelerator.register_for_checkpointing`] they will also be saved and/or loaded.
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -10,7 +10,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Handling big models for inference
|
||||
# Handling big models
|
||||
|
||||
When loading a pretrained model in PyTorch, the usual workflow looks like this:
|
||||
|
||||
@ -102,7 +102,7 @@ Here is how we can use this to load the [GPT-J-6B](https://huggingface.co/Eleuth
|
||||
git clone https://huggingface.co/sgugger/sharded-gpt-j-6B
|
||||
cd sharded-gpt-j-6B
|
||||
git-lfs install
|
||||
git lfs pull
|
||||
git pull
|
||||
```
|
||||
|
||||
then we can initialize the model with
|
||||
@ -118,15 +118,7 @@ with init_empty_weights():
|
||||
model = AutoModelForCausalLM.from_config(config)
|
||||
```
|
||||
|
||||
Note that loading the model with `from_config` in Transformers does not tie the weights, which may cause issues when
|
||||
loading a checkpoint that does not contain duplicate keys for the tied weights. So you should tie the weights before
|
||||
loading the checkpoint.
|
||||
|
||||
```py
|
||||
model.tie_weights()
|
||||
```
|
||||
|
||||
Then load the checkpoint we just downloaded with:
|
||||
and load the checkpoint we just downloaded with:
|
||||
|
||||
```py
|
||||
from accelerate import load_checkpoint_and_dispatch
|
||||
@ -299,4 +291,4 @@ We are aware of the current limitations in the API:
|
||||
- [`load_checkpoint_and_dispatch`] and [`load_checkpoint_in_model`] do not perform any check on the correctness of your state dict compared to your model at the moment (this will be fixed in a future version), so you may get some weird errors if trying to load a checkpoint with mismatched or missing keys.
|
||||
- The model parallelism used when your model is split on several GPUs is naive and not optimized, meaning that only one GPU works at a given time and the other sits idle.
|
||||
- When weights are offloaded on the CPU/hard drive, there is no pre-fetching (yet, we will work on this for future versions) which means the weights are put on the GPU when they are needed and not before.
|
||||
- Hard-drive offloading might be very slow if the hardware you run on does not have fast communication between disk and CPU (like NVMes).
|
||||
- Hard-drive offloading might be very slow if the hardware you run on does not have fast communication between disk and CPU (like NVMes).
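Putting the pieces of this section together, a minimal end-to-end sketch could look like this (the `device_map` and `no_split_module_classes` values are assumptions for GPT-J, not taken verbatim from this page):

```python
from accelerate import init_empty_weights, load_checkpoint_and_dispatch
from transformers import AutoConfig, AutoModelForCausalLM

checkpoint = "EleutherAI/gpt-j-6B"
config = AutoConfig.from_pretrained(checkpoint)

# Build the model skeleton without allocating real weights
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)

# from_config does not tie the weights, so tie them before loading the checkpoint
model.tie_weights()

# Load the sharded checkpoint cloned above and spread it over the available devices
model = load_checkpoint_and_dispatch(
    model, "sharded-gpt-j-6B", device_map="auto", no_split_module_classes=["GPTJBlock"]
)
```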
|
||||
@ -17,31 +17,27 @@ saving and loading the model, optimizer, RNG generators, and the GradScaler. Ins
|
||||
- Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location
|
||||
- Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state`
|
||||
|
||||
To further customize where and how states saved through [`~Accelerator.save_state`] the [`~utils.ProjectConfiguration`] class can be used. For example
|
||||
if `automatic_checkpoint_naming` is enabled each saved checkpoint will be located then at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.
|
||||
|
||||
It should be noted that the expectation is that those states come from the same training script, they should not be from two separate scripts.
|
||||
|
||||
- By using [`~Accelerator.register_for_checkpointing`], you can register custom objects to be automatically stored or loaded from the two prior functions,
|
||||
so long as the object has a `state_dict` **and** a `load_state_dict` functionality. This could include objects such as a learning rate scheduler.
|
||||
|
||||
|
||||
Below is a brief example using checkpointing to save and reload a state during training:
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
import torch
|
||||
|
||||
accelerator = Accelerator(project_dir="my/save/path")
|
||||
accelerator = Accelerator()
|
||||
|
||||
my_scheduler = torch.optim.lr_scheduler.StepLR(my_optimizer, step_size=1, gamma=0.99)
|
||||
my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)
|
||||
my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader)
|
||||
|
||||
# Register the LR scheduler
|
||||
accelerator.register_for_checkpointing(my_scheduler)
|
||||
accelerate.register_for_checkpointing(my_scheduler)
|
||||
|
||||
# Save the starting state
|
||||
accelerator.save_state()
|
||||
accelerate.save_state("my/save/path")
|
||||
|
||||
device = accelerator.device
|
||||
my_model.to(device)
|
||||
@ -60,22 +56,5 @@ for epoch in range(num_epochs):
|
||||
my_scheduler.step()
|
||||
|
||||
# Restore previous state
|
||||
accelerator.load_state("my/save/path/checkpointing/checkpoint_0")
|
||||
accelerate.load_state("my/save/path")
|
||||
```
|
||||
|
||||
## Restoring the state of the DataLoader
|
||||
|
||||
After resuming from a checkpoint, it may also be desirable to resume from a particular point in the active `DataLoader` if
|
||||
the state was saved during the middle of an epoch. You can use [`~Accelerator.skip_first_batches`] to do so.
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
|
||||
accelerator = Accelerator(project_dir="my/save/path")
|
||||
|
||||
train_dataloader = accelerator.prepare(train_dataloader)
|
||||
accelerator.load_state("my_state")
|
||||
|
||||
# Assume the checkpoint was saved 100 steps into the epoch
|
||||
accelerator.skip_first_batches(train_dataloader, 100)
|
||||
```
|
||||
@ -395,196 +395,6 @@ We will look at the changes needed in the code when using these.
|
||||
based on the model, dataloaders, dummy optimizer, and dummy schedulers provided to the `prepare` method.
Only the `auto` fields specified in the above examples are handled by the `prepare` method; the rest have to be explicitly specified by the user.
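For instance, a hedged sketch of the dummy objects mentioned above (the `model`, `dataloader`, and `accelerator` variables are placeholders, and the argument names are assumptions based on `accelerate.utils`):

```python
from accelerate.utils import DummyOptim, DummyScheduler

# The real optimizer/scheduler settings live in the DeepSpeed config file marked "auto";
# the dummies only carry the values until `prepare` resolves them.
optimizer = DummyOptim(model.parameters(), lr=3e-5)
scheduler = DummyScheduler(optimizer, total_num_steps=1000, warmup_num_steps=100)

model, optimizer, dataloader, scheduler = accelerator.prepare(model, optimizer, dataloader, scheduler)
```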
|
||||
|
||||
**Things to note when using DeepSpeed Config File**
|
||||
|
||||
Below is a sample script using `deepspeed_config_file` in different scenarios.
|
||||
|
||||
Code `test.py`:
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
from accelerate.state import AcceleratorState
|
||||
|
||||
|
||||
def main():
|
||||
accelerator = Accelerator()
|
||||
accelerator.print(f"{AcceleratorState()}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
```
|
||||
|
||||
**Scenario 1**: A manually edited `accelerate` config file that has `deepspeed_config_file` along with other DeepSpeed entries.
|
||||
|
||||
1. Content of the `accelerate` config:
|
||||
|
||||
```yaml
|
||||
command_file: null
|
||||
commands: null
|
||||
compute_environment: LOCAL_MACHINE
|
||||
deepspeed_config:
|
||||
gradient_accumulation_steps: 1
|
||||
gradient_clipping: 1.0
|
||||
offload_optimizer_device: 'cpu'
|
||||
offload_param_device: 'cpu'
|
||||
zero3_init_flag: true
|
||||
zero3_save_16bit_model: true
|
||||
zero_stage: 3
|
||||
deepspeed_config_file: 'ds_config.json'
|
||||
distributed_type: DEEPSPEED
|
||||
downcast_bf16: 'no'
|
||||
dynamo_backend: 'NO'
|
||||
fsdp_config: {}
|
||||
gpu_ids: null
|
||||
machine_rank: 0
|
||||
main_process_ip: null
|
||||
main_process_port: null
|
||||
main_training_function: main
|
||||
megatron_lm_config: {}
|
||||
num_machines: 1
|
||||
num_processes: 2
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_name: null
|
||||
tpu_zone: null
|
||||
use_cpu: false
|
||||
```
|
||||
|
||||
2. `ds_config.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"bf16": {
|
||||
"enabled": true
|
||||
},
|
||||
"zero_optimization": {
|
||||
"stage": 3,
|
||||
"stage3_gather_16bit_weights_on_model_save": false,
|
||||
"offload_optimizer": {
|
||||
"device": "none"
|
||||
},
|
||||
"offload_param": {
|
||||
"device": "none"
|
||||
}
|
||||
},
|
||||
"gradient_clipping": 1.0,
|
||||
"train_batch_size": "auto",
|
||||
"train_micro_batch_size_per_gpu": "auto",
|
||||
"gradient_accumulation_steps": 10,
|
||||
"steps_per_print": 2000000
|
||||
}
|
||||
```
|
||||
|
||||
3. Output of `accelerate launch test.py`:
|
||||
|
||||
```bash
|
||||
ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored:
|
||||
['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device',
|
||||
'zero3_save_16bit_model', 'mixed_precision'].
|
||||
Please specify them appropriately in the DeepSpeed config file.
|
||||
If you are using an accelerate config file, remove others config variables mentioned in the above specified list.
|
||||
The easiest method is to create a new config following the questionnaire via `accelerate config`.
|
||||
It will only ask for the necessary config variables when using `deepspeed_config_file`.
|
||||
```
|
||||
|
||||
**Scenario 2**: Use the solution suggested by the error to create a new `accelerate` config and check that no ambiguity error is thrown.
|
||||
|
||||
1. Run `accelerate config`:
|
||||
|
||||
```bash
|
||||
$ accelerate config
|
||||
-------------------------------------------------------------------------------------------------------------------------------
|
||||
In which compute environment are you running?
|
||||
This machine
|
||||
-------------------------------------------------------------------------------------------------------------------------------
|
||||
Which type of machine are you using?
|
||||
multi-GPU
|
||||
How many different machines will you use (use more than 1 for multi-node training)? [1]:
|
||||
Do you wish to optimize your script with torch dynamo?[yes/NO]:
|
||||
Do you want to use DeepSpeed? [yes/NO]: yes
|
||||
Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes
|
||||
Please enter the path to the json DeepSpeed config file: ds_config.json
|
||||
Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes
|
||||
How many GPU(s) should be used for distributed training? [1]:4
|
||||
accelerate configuration saved at ds_config_sample.yaml
|
||||
```
|
||||
|
||||
2. Content of the `accelerate` config:
|
||||
|
||||
```yaml
|
||||
compute_environment: LOCAL_MACHINE
|
||||
deepspeed_config:
|
||||
deepspeed_config_file: ds_config.json
|
||||
zero3_init_flag: true
|
||||
distributed_type: DEEPSPEED
|
||||
downcast_bf16: 'no'
|
||||
dynamo_backend: 'NO'
|
||||
fsdp_config: {}
|
||||
machine_rank: 0
|
||||
main_training_function: main
|
||||
megatron_lm_config: {}
|
||||
num_machines: 1
|
||||
num_processes: 4
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
use_cpu: false
|
||||
```
|
||||
|
||||
3. Output of `accelerate launch test.py`:
|
||||
|
||||
```bash
|
||||
Distributed environment: DEEPSPEED Backend: nccl
|
||||
Num processes: 4
|
||||
Process index: 0
|
||||
Local process index: 0
|
||||
Device: cuda:0
|
||||
Mixed precision type: bf16
|
||||
ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}
|
||||
```
|
||||
|
||||
**Scenario 3**: Setting the `accelerate launch` command arguments related to DeepSpeed as `"auto"` in the DeepSpeed` configuration file and check that things work as expected.
|
||||
|
||||
1. New `ds_config.json` with `"auto"` for the `accelerate launch` DeepSpeed command arguments:
|
||||
|
||||
```json
|
||||
{
|
||||
"bf16": {
|
||||
"enabled": "auto"
|
||||
},
|
||||
"zero_optimization": {
|
||||
"stage": "auto",
|
||||
"stage3_gather_16bit_weights_on_model_save": "auto",
|
||||
"offload_optimizer": {
|
||||
"device": "auto"
|
||||
},
|
||||
"offload_param": {
|
||||
"device": "auto"
|
||||
}
|
||||
},
|
||||
"gradient_clipping": "auto",
|
||||
"train_batch_size": "auto",
|
||||
"train_micro_batch_size_per_gpu": "auto",
|
||||
"gradient_accumulation_steps": "auto",
|
||||
"steps_per_print": 2000000
|
||||
}
|
||||
```
|
||||
|
||||
2. Output of `accelerate launch --mixed_precision="fp16" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device="cpu" --offload_optimizer_device="nvme" --zero3_save_16bit_model="true" test.py`:
|
||||
|
||||
```bash
|
||||
Distributed environment: DEEPSPEED Backend: nccl
|
||||
Num processes: 4
|
||||
Process index: 0
|
||||
Local process index: 0
|
||||
Device: cuda:0
|
||||
Mixed precision type: fp16
|
||||
ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}}
|
||||
```
|
||||
|
||||
**Note**: Remaining `"auto"` values are handled in `accelerator.prepare()` call as explained in point 2 of
|
||||
`Important code changes when using DeepSpeed Config File`.
|
||||
|
||||
## Saving and loading
|
||||
|
||||
1. Saving and loading of models is unchanged for ZeRO Stage-1 and Stage-2.
|
||||
|
||||
@ -1,48 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Learning how to incorporate 🤗 Accelerate features quickly!
|
||||
|
||||
Please use the interactive tool below to help you get started with learning about a particular
|
||||
feature of 🤗 Accelerate and how to utilize it! It will provide you with a code diff, an explaination
|
||||
towards what is going on, as well as provide you with some useful links to explore more within
|
||||
the documentation!
|
||||
|
||||
Most code examples start from the following python code before integrating 🤗 Accelerate in some way:
|
||||
|
||||
```python
|
||||
for batch in dataloader:
|
||||
optimizer.zero_grad()
|
||||
inputs, targets = batch
|
||||
inputs = inputs.to(device)
|
||||
targets = targets.to(device)
|
||||
outputs = model(inputs)
|
||||
loss = loss_function(outputs, targets)
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
scheduler.step()
|
||||
```
|
||||
|
||||
<div class="block dark:hidden">
|
||||
<iframe
|
||||
src="https://muellerzr-accelerate-examples.hf.space?__theme=light"
|
||||
width="850"
|
||||
height="1600"
|
||||
></iframe>
|
||||
</div>
|
||||
<div class="hidden dark:block">
|
||||
<iframe
|
||||
src="https://muellerzr-accelerate-examples.hf.space?__theme=dark"
|
||||
width="850"
|
||||
height="1600"
|
||||
></iframe>
|
||||
</div>
|
||||
@ -67,43 +67,12 @@ Currently, `Accelerate` supports the following config through the CLI:
|
||||
`Sharding Strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD
|
||||
`Offload Params`: Decides Whether to offload parameters and gradients to CPU
|
||||
`Auto Wrap Policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP
|
||||
`Transformer Layer Class to Wrap`: When using `TRANSFORMER_BASED_WRAP`, user specifies comma-separated string of transformer layer class names (case-sensitive) to wrap ,e.g,
|
||||
`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput`...
|
||||
`Transformer Layer Class to Wrap`: When using `TRANSFORMER_BASED_WRAP`, user specifies transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block`...
|
||||
`Min Num Params`: minimum number of parameters when using `SIZE_BASED_WRAP`
|
||||
`Backward Prefetch`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH
|
||||
`State Dict Type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT
|
||||
```
|
||||
|
||||
## Saving and loading
|
||||
|
||||
1. When using transformers `save_pretrained`, pass `state_dict=accelerator.get_state_dict(model)` to save the model state dict.
|
||||
Below is an example:
|
||||
|
||||
```diff
|
||||
unwrapped_model.save_pretrained(
|
||||
args.output_dir,
|
||||
is_main_process=accelerator.is_main_process,
|
||||
save_function=accelerator.save,
|
||||
+ state_dict=accelerator.get_state_dict(model),
|
||||
)
|
||||
```
|
||||
|
||||
### State Dict
|
||||
|
||||
`accelerator.get_state_dict` will call the underlying `model.state_dict` implementation. With a model wrapped by FSDP, the default behavior of `state_dict` is to gather all of the state in the rank 0 device. This can cause CUDA out of memory errors if the parameters don't fit on a single GPU.
|
||||
|
||||
To avoid this, PyTorch provides a context manager that adjusts the behavior of `state_dict`. To offload some of the state dict onto CPU, you can use the following code:
|
||||
|
||||
```
|
||||
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
|
||||
|
||||
full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
|
||||
with FSDP.state_dict_type(unwrapped_model, StateDictType.FULL_STATE_DICT, full_state_dict_config):
|
||||
state = accelerator.get_state_dict(unwrapped_model)
|
||||
```
|
||||
|
||||
You can then pass `state` into the `save_pretrained` method. There are several modes for `StateDictType` and `FullStateDictConfig` that you can use to control the behavior of `state_dict`. For more information, see the [PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html).
|
||||
|
||||
## A few caveats to be aware of
|
||||
|
||||
- PyTorch FSDP auto wraps sub-modules, flattens the parameters and shards the parameters in place.
|
||||
|
||||
@ -72,7 +72,7 @@ First the code shown earlier will be converted to utilize 🤗 Accelerate withou
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the [Concepts tutorial](../concept_guides/gradient_synchronization)!
|
||||
In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the [Concepts tutorial](concept_guides/gradient_synchronization)!
|
||||
|
||||
</Tip>
|
||||
|
||||
@ -111,13 +111,6 @@ You can remove all the special checks for the step number and the loss adjustmen
|
||||
|
||||
As you can see the [`Accelerator`] is able to keep track of the batch number you are on and it will automatically know whether to step through the prepared optimizer and how to adjust the loss.
|
||||
|
||||
<Tip>
|
||||
|
||||
Typically with gradient accumulation, you would need to adjust the number of steps to reflect the change in total batches you are
|
||||
training on. 🤗 Accelerate will automatically do this for you, so long as you pass `adjust_scheduler_to_accumulation` to the [`Accelerator`] object's `__init__`.
|
||||
|
||||
</Tip>
|
||||
|
||||
## The finished code
|
||||
|
||||
Below is the finished implementation for performing gradient accumulation with 🤗 Accelerate
|
||||
@ -134,4 +127,4 @@ for batch in training_dataloader:
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
To learn more about what magic this wraps around, read the [Gradient Synchronization concept guide](../concept_guides/gradient_synchronization)
|
||||
To learn more about what magic this wraps around, read the [Gradient Synchronization concept guide](/concept_guides/gradient_synchronization)
|
||||
@ -1,171 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Intel® Extension for PyTorch
|
||||
|
||||
[IPEX](https://github.com/intel/intel-extension-for-pytorch) is optimized for CPUs with AVX-512 or above, and functionally works for CPUs with only AVX2. So, it is expected to bring performance benefit for Intel CPU generations with AVX-512 or above while CPUs with only AVX2 (e.g., AMD CPUs or older Intel CPUs) might result in a better performance under IPEX, but not guaranteed. IPEX provides performance optimizations for CPU training with both Float32 and BFloat16. The usage of BFloat16 is the main focus of the following sections.
|
||||
|
||||
Low precision data type BFloat16 has been natively supported on the 3rd Generation Xeon® Scalable Processors (aka Cooper Lake) with AVX512 instruction set and will be supported on the next generation of Intel® Xeon® Scalable Processors with Intel® Advanced Matrix Extensions (Intel® AMX) instruction set with further boosted performance. The Auto Mixed Precision for CPU backend has been enabled since PyTorch-1.10. At the same time, the support of Auto Mixed Precision with BFloat16 for CPU and BFloat16 optimization of operators has been massively enabled in Intel® Extension for PyTorch, and partially upstreamed to PyTorch master branch. Users can get better performance and user experience with IPEX Auto Mixed Precision.
|
||||
|
||||
## IPEX installation:
|
||||
|
||||
IPEX release is following PyTorch, to install via pip:
|
||||
|
||||
| PyTorch Version | IPEX version |
|
||||
| :---------------: | :----------: |
|
||||
| 2.0 | 2.0.0 |
|
||||
| 1.13 | 1.13.0 |
|
||||
| 1.12 | 1.12.300 |
|
||||
| 1.11 | 1.11.200 |
|
||||
| 1.10 | 1.10.100 |
|
||||
|
||||
```
|
||||
pip install intel_extension_for_pytorch==<version_name> -f https://developer.intel.com/ipex-whl-stable-cpu
|
||||
```
|
||||
|
||||
Check more approaches for [IPEX installation](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/installation.html).
|
||||
|
||||
|
||||
## How It Works For Training optimization in CPU
|
||||
|
||||
🤗 Accelerate has integrated [IPEX](https://github.com/intel/intel-extension-for-pytorch), all you need to do is enabling it through the config.
|
||||
|
||||
**Scenario 1**: Acceleration of No distributed CPU training
|
||||
|
||||
Run <u>accelerate config</u> on your machine:
|
||||
|
||||
```bash
|
||||
$ accelerate config
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
In which compute environment are you running?
|
||||
This machine
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
Which type of machine are you using?
|
||||
No distributed training
|
||||
Do you want to run your training on CPU only (even if a GPU / Apple Silicon device is available)? [yes/NO]:yes
|
||||
Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:yes
|
||||
Do you wish to optimize your script with torch dynamo?[yes/NO]:NO
|
||||
Do you want to use DeepSpeed? [yes/NO]: NO
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
Do you wish to use FP16 or BF16 (mixed precision)?
|
||||
bf16
|
||||
```
|
||||
This will generate a config file that will be used automatically to properly set the
|
||||
default options when doing
|
||||
|
||||
```bash
|
||||
accelerate launch my_script.py --args_to_my_script
|
||||
```
|
||||
|
||||
For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with IPEX enabled.
|
||||
default_config.yaml that is generated after `accelerate config`
|
||||
|
||||
```bash
|
||||
compute_environment: LOCAL_MACHINE
|
||||
distributed_type: 'NO'
|
||||
downcast_bf16: 'no'
|
||||
ipex_config:
|
||||
ipex_enabled: true
|
||||
machine_rank: 0
|
||||
main_training_function: main
|
||||
mixed_precision: bf16
|
||||
num_machines: 1
|
||||
num_processes: 1
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_env: []
|
||||
tpu_use_cluster: false
|
||||
tpu_use_sudo: false
|
||||
use_cpu: true
|
||||
```
|
||||
```bash
|
||||
accelerate launch examples/nlp_example.py
|
||||
```
|
||||
|
||||
**Scenario 2**: Acceleration of distributed CPU training
|
||||
we use Intel oneCCL for communication, combined with Intel® MPI library to deliver flexible, efficient, scalable cluster messaging on Intel® architecture. you could refer the [here](https://huggingface.co/docs/transformers/perf_train_cpu_many) for the installation guide
|
||||
|
||||
Run <u>accelerate config</u> on your machine(node0):
|
||||
|
||||
```bash
|
||||
$ accelerate config
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
In which compute environment are you running?
|
||||
This machine
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
Which type of machine are you using?
|
||||
multi-CPU
|
||||
How many different machines will you use (use more than 1 for multi-node training)? [1]: 4
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
What is the rank of this machine?
|
||||
0
|
||||
What is the IP address of the machine that will host the main process? 36.112.23.24
|
||||
What is the port you will use to communicate with the main process? 29500
|
||||
Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: yes
|
||||
Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:yes
|
||||
Do you wish to optimize your script with torch dynamo?[yes/NO]:NO
|
||||
How many CPU(s) should be used for distributed training? [1]:16
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
Do you wish to use FP16 or BF16 (mixed precision)?
|
||||
bf16
|
||||
```
|
||||
For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with IPEX enabled for distributed CPU training.
|
||||
|
||||
default_config.yaml that is generated after `accelerate config`
|
||||
```bash
|
||||
compute_environment: LOCAL_MACHINE
|
||||
distributed_type: MULTI_CPU
|
||||
downcast_bf16: 'no'
|
||||
ipex_config:
|
||||
ipex_enabled: true
|
||||
machine_rank: 0
|
||||
main_process_ip: 36.112.23.24
|
||||
main_process_port: 29500
|
||||
main_training_function: main
|
||||
mixed_precision: bf16
|
||||
num_machines: 4
|
||||
num_processes: 16
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_env: []
|
||||
tpu_use_cluster: false
|
||||
tpu_use_sudo: false
|
||||
use_cpu: true
|
||||
```
|
||||
|
||||
Set following env and using intel MPI to launch the training
|
||||
|
||||
In node0, you need to create a configuration file which contains the IP addresses of each node (for example hostfile) and pass that configuration file path as an argument.
|
||||
```bash
|
||||
$ cat hostfile
|
||||
xxx.xxx.xxx.xxx #node0 ip
|
||||
xxx.xxx.xxx.xxx #node1 ip
|
||||
xxx.xxx.xxx.xxx #node2 ip
|
||||
xxx.xxx.xxx.xxx #node3 ip
|
||||
```
|
||||
Now, run the following command in node0 and **16DDP** will be enabled in node0,node1,node2,node3 with BF16 mixed precision:
|
||||
```bash
|
||||
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
|
||||
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
|
||||
export CCL_WORKER_COUNT=1
|
||||
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
|
||||
export CCL_ATL_TRANSPORT=ofi
|
||||
mpirun -f hostfile -n 16 -ppn 4 accelerate launch examples/nlp_example.py
|
||||
```
|
||||
|
||||
## Related Resources
|
||||
|
||||
- [Project's github](https://github.com/intel/intel-extension-for-pytorch)
|
||||
- [API docs](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/api_doc.html)
|
||||
- [Tuning guide](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/tuning_guide.html)
|
||||
- [Blogs & Publications](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/blogs_publications.html)
|
||||
|
||||
@ -115,7 +115,7 @@ An example of thr corresponding questions for using Megatron-LM features is show
|
||||
```bash
|
||||
:~$ accelerate config --config_file "megatron_gpt_config.yaml"
|
||||
In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 0
|
||||
Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU): 2
|
||||
Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU [4] MPS): 2
|
||||
How many different machines will you use (use more than 1 for multi-node training)? [1]:
|
||||
Do you want to use DeepSpeed? [yes/NO]:
|
||||
Do you want to use FullyShardedDataParallel? [yes/NO]:
|
||||
@ -290,7 +290,6 @@ You will implement the `accelerate.utils.AbstractTrainStep` or inherit from thei
|
||||
```python
|
||||
from accelerate.utils import MegatronLMDummyScheduler, GPTTrainStep, avg_losses_across_data_parallel_group
|
||||
|
||||
|
||||
# Custom loss function for the Megatron model
|
||||
class GPTTrainStepWithCustomLoss(GPTTrainStep):
|
||||
def __init__(self, megatron_args, **kwargs):
|
||||
|
||||
@ -31,10 +31,41 @@ please follow this nice medium article [GPU-Acceleration Comes to PyTorch on M1
|
||||
|
||||
|
||||
## How it works out of the box
|
||||
It is enabled by default on MacOs machines with MPS enabled Apple Silicon GPUs.
|
||||
To disable it, pass `--cpu` flag to `accelerate launch` command or answer the corresponding question when answering the `accelerate config` questionnaire.
|
||||
|
||||
You can directly run the following script to test it out on MPS enabled Apple Silicon machines:
|
||||
On your machine(s) just run:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
and answer the questions asked, specifically choose `MPS` for the query:
|
||||
|
||||
```
|
||||
Which type of machine are you using?.
|
||||
```
|
||||
|
||||
This will generate a config file that will be used automatically to properly set
|
||||
the default options when doing `accelerate launch`, such as the one shown below:
|
||||
|
||||
```bash
|
||||
compute_environment: LOCAL_MACHINE
|
||||
deepspeed_config: {}
|
||||
distributed_type: MPS
|
||||
downcast_bf16: 'no'
|
||||
fsdp_config: {}
|
||||
machine_rank: 0
|
||||
main_process_ip: null
|
||||
main_process_port: null
|
||||
main_training_function: main
|
||||
mixed_precision: 'no'
|
||||
num_machines: 1
|
||||
num_processes: 1
|
||||
use_cpu: false
|
||||
```
|
||||
|
||||
After this configuration has been made, here is how you run the CV example
|
||||
(from the root of the repo) with MPS enabled:
|
||||
|
||||
```bash
|
||||
accelerate launch /examples/cv_example.py --data_dir images
|
||||
```
|
||||
|
||||
@ -160,43 +160,10 @@ use_cpu: false
|
||||
want to use different/other Python packages you can do this by adding them to the `requirements.txt`. These packages
|
||||
will be installed before your training script is started.
|
||||
|
||||
### Local Training: SageMaker Local mode
|
||||
### Remote scripts: Use scripts located on Github
|
||||
|
||||
The local mode in the SageMaker SDK allows you to run your training script locally inside the HuggingFace DLC (Deep Learning container)
|
||||
or using your custom container image. This is useful for debugging and testing your training script inside the final container environment.
|
||||
Local mode uses Docker compose (*Note: Docker Compose V2 is not supported yet*). The SDK will handle the authentication against ECR
|
||||
to pull the DLC to your local environment. You can emulate CPU (single and multi-instance) and GPU (single instance) SageMaker training jobs.
|
||||
|
||||
To use local mode, you need to set your `ec2_instance_type` to `local`.
|
||||
|
||||
```yaml
|
||||
ec2_instance_type: local
|
||||
```
|
||||
|
||||
### Advanced configuration
|
||||
|
||||
The configuration allows you to override parameters for the [Estimator](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html).
|
||||
These settings have to be applied in the config file and are not part of `accelerate config`. You can control many additional aspects of the training job, e.g. use Spot instances, enable network isolation and many more.
|
||||
|
||||
```yaml
|
||||
additional_args:
|
||||
# enable network isolation to restrict internet access for containers
|
||||
enable_network_isolation: True
|
||||
```
|
||||
|
||||
You can find all available configuration [here](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html).
|
||||
*undecided if feature is needed. Contact us if you would like this feature.*
|
||||
|
||||
### Use Spot Instances
|
||||
|
||||
You can use Spot Instances e.g. using (see [Advanced configuration](#advanced-configuration)):
|
||||
```yaml
|
||||
additional_args:
|
||||
use_spot_instances: True
|
||||
max_wait: 86400
|
||||
```
|
||||
|
||||
*Note: Spot Instances are subject to be terminated and training to be continued from a checkpoint. This is not handled in 🤗 Accelerate out of the box. Contact us if you would like this feature.*
|
||||
|
||||
### Remote scripts: Use scripts located on Github
|
||||
|
||||
*undecided if feature is needed. Contact us if you would like this feature.*
|
||||
*undecided if feature is needed. Contact us if you would like this feature.*
|
||||
|
||||
@ -83,12 +83,6 @@ for iteration in config["num_iterations"]:
|
||||
accelerator.end_training()
|
||||
```
|
||||
|
||||
If a tracker requires a directory to save data to such as `TensorBoard` then a `logging_dir` or `project_dir` can be passed in. `project_dir` is useful
|
||||
if there are other further configurations such as those which can be combined with the [`~utils.ProjectConfiguration`] dataclass.
|
||||
|
||||
```python
|
||||
accelerator = Accelerator(log_with="tensorboard", logging_dir=".")
|
||||
```
|
||||
|
||||
## Implementing Custom Trackers
|
||||
|
||||
@ -111,12 +105,9 @@ Every tracker must implement three functions and have three properties:
|
||||
- This should be implemented as a `@property` function
|
||||
- Should return the internal tracking mechanism the library uses, such as the `run` object for `wandb`.
|
||||
|
||||
Each method should also utilize the [`state.PartialState`] class if the logger should only be executed on the main process for instance.
|
||||
|
||||
A brief example can be seen below with an integration with Weights and Biases, containing only the relevant information and logging just on
|
||||
the main process:
|
||||
A brief example can be seen below with an integration with Weights and Biases, containing only the relevant information:
|
||||
```python
|
||||
from accelerate.tracking import GeneralTracker, on_main_process
|
||||
from accelerate.tracking import GeneralTracker
|
||||
from typing import Optional
|
||||
|
||||
import wandb
|
||||
@ -126,7 +117,6 @@ class MyCustomTracker(GeneralTracker):
|
||||
name = "wandb"
|
||||
requires_logging_directory = False
|
||||
|
||||
@on_main_process
|
||||
def __init__(self, run_name: str):
|
||||
self.run_name = run_name
|
||||
run = wandb.init(self.run_name)
|
||||
@ -135,11 +125,9 @@ class MyCustomTracker(GeneralTracker):
|
||||
def tracker(self):
|
||||
return self.run.run
|
||||
|
||||
@on_main_process
|
||||
def store_init_configuration(self, values: dict):
|
||||
wandb.config(values)
|
||||
|
||||
@on_main_process
|
||||
def log(self, values: dict, step: Optional[int] = None):
|
||||
wandb.log(values, step=step)
|
||||
```
|
||||
@ -173,26 +161,16 @@ wandb_tracker = accelerator.get_tracker("wandb")
|
||||
|
||||
From there you can interact with `wandb`'s `run` object like normal:
|
||||
|
||||
```python
|
||||
wandb_run.log_artifact(some_artifact_to_log)
|
||||
```
|
||||
|
||||
<Tip>
|
||||
Trackers built in Accelerate will automatically execute on the correct process,
|
||||
so if a tracker is only meant to be ran on the main process it will do so
|
||||
automatically.
|
||||
<Tip warning={true}>
|
||||
Make sure to only interact with trackers on the main process!
|
||||
</Tip>
|
||||
|
||||
If you want to truly remove Accelerate's wrapping entirely, you can
|
||||
achieve the same outcome with:
|
||||
|
||||
```python
|
||||
wandb_tracker = accelerator.get_tracker("wandb", unwrap=True)
|
||||
with accelerator.on_main_process:
|
||||
wandb_tracker.log_artifact(some_artifact_to_log)
|
||||
if accelerator.is_main_process:
|
||||
wandb_run.log_artifact(some_artifact_to_log)
|
||||
```
|
||||
|
||||
|
||||
## When a wrapper cannot work
|
||||
|
||||
If a library has an API that does not follow a strict `.log` with an overall dictionary such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement:
|
||||
|
||||
@ -64,9 +64,9 @@ To run it in each of these various modes, use the following commands:
|
||||
accelerate config # This will create a config file on your server
|
||||
accelerate launch ./nlp_example.py # This will run the script on your server
|
||||
```
|
||||
* With traditional PyTorch launcher (`torch.distributed.launch` can be used with older versions of PyTorch)
|
||||
* With traditional PyTorch launcher
|
||||
```bash
|
||||
python -m torchrun --nproc_per_node 2 --use_env ./nlp_example.py
|
||||
python -m torch.distributed.launch --nproc_per_node 2 --use_env ./nlp_example.py
|
||||
```
|
||||
- multi GPUs, multi node (several machines, using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher, on each machine:
|
||||
@ -74,14 +74,14 @@ To run it in each of these various modes, use the following commands:
|
||||
accelerate config # This will create a config file on each server
|
||||
accelerate launch ./nlp_example.py # This will run the script on each server
|
||||
```
|
||||
* With PyTorch launcher only (`torch.distributed.launch` can be used in older versions of PyTorch)
|
||||
* With PyTorch launcher only
|
||||
```bash
|
||||
python -m torchrun --nproc_per_node 2 \
|
||||
python -m torch.distributed.launch --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 0 \
|
||||
--master_addr master_node_ip_address \
|
||||
./nlp_example.py # On the first server
|
||||
python -m torchrun --nproc_per_node 2 \
|
||||
python -m torch.distributed.launch --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 1 \
|
||||
--master_addr master_node_ip_address \
|
||||
@ -152,9 +152,9 @@ To run it in each of these various modes, use the following commands:
|
||||
accelerate config # This will create a config file on your server
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on your server
|
||||
```
|
||||
* With traditional PyTorch launcher (`torch.distributed.launch` can be used with older versions of PyTorch)
|
||||
* With traditional PyTorch launcher
|
||||
```bash
|
||||
python -m torchrun --nproc_per_node 2 --use_env ./cv_example.py --data_dir path_to_data
|
||||
python -m torch.distributed.launch --nproc_per_node 2 --use_env ./cv_example.py --data_dir path_to_data
|
||||
```
|
||||
- multi GPUs, multi node (several machines, using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher, on each machine:
|
||||
@ -162,14 +162,14 @@ To run it in each of these various modes, use the following commands:
|
||||
accelerate config # This will create a config file on each server
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
```
|
||||
* With PyTorch launcher only (`torch.distributed.launch` can be used with older versions of PyTorch)
|
||||
* With PyTorch launcher only
|
||||
```bash
|
||||
python -m torchrun --nproc_per_node 2 \
|
||||
python -m torch.distributed.launch --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 0 \
|
||||
--master_addr master_node_ip_address \
|
||||
./cv_example.py --data_dir path_to_data # On the first server
|
||||
python -m torchrun --nproc_per_node 2 \
|
||||
python -m torch.distributed.launch --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 1 \
|
||||
--master_addr master_node_ip_address \
|
||||
@ -190,22 +190,7 @@ To run it in each of these various modes, use the following commands:
|
||||
|
||||
### Using AWS SageMaker integration
|
||||
- [Examples showcasing AWS SageMaker integration of 🤗 Accelerate.](https://github.com/pacman100/accelerate-aws-sagemaker)
|
||||
|
||||
|
||||
## Simple Multi-GPU Hardware Launcher
|
||||
|
||||
[multigpu_remote_launcher.py](./multigpu_remote_launcher.py) is a minimal script that demonstrates launching accelerate
|
||||
on multiple remote GPUs, and with automatic hardware environment and dependency setup for reproducibility. You can
|
||||
easily customize the training function used, training arguments, hyperparameters, and type of compute hardware, and then
|
||||
run the script to automatically launch multi GPU training on remote hardware.
|
||||
|
||||
This script uses [Runhouse](https://github.com/run-house/runhouse) to launch on self-hosted hardware (e.g. in your own
|
||||
cloud account or on-premise cluster) but there are other options for running remotely as well. Runhouse can be installed
|
||||
with `pip install runhouse`, and you can refer to
|
||||
[hardware setup](https://runhouse-docs.readthedocs-hosted.com/en/main/rh_primitives/cluster.html#hardware-setup)
|
||||
for hardware setup instructions, or this
|
||||
[Colab tutorial](https://colab.research.google.com/drive/1qVwYyLTCPYPSdz9ZX7BZl9Qm0A3j7RJe) for a more in-depth walkthrough.
|
||||
|
||||
|
||||
## Finer Examples
|
||||
|
||||
While the first two scripts are extremely barebones when it comes to what you can do with accelerate, more advanced features are documented in two other locations.
|
||||
|
||||
@ -19,7 +19,7 @@ Adjustments to each script from the base `nlp_example.py` file can be found quic
|
||||
|
||||
All following scripts also accept these arguments in addition to their added ones.
|
||||
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.run`), such as:
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:
|
||||
|
||||
```bash
|
||||
accelerate launch ../nlp_example.py --mixed_precision fp16 --cpu 0
|
||||
@ -34,7 +34,7 @@ accelerate launch ../nlp_example.py --mixed_precision fp16 --cpu 0
|
||||
- `output_dir`, where saved state folders should be saved to, default is current working directory
|
||||
- `resume_from_checkpoint`, what checkpoint folder to resume from. ("epoch_0", "step_22", ...)
|
||||
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as:
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:
|
||||
|
||||
(Note, `resume_from_checkpoint` assumes that we've ran the script for one epoch with the `--checkpointing_steps epoch` flag)
|
||||
|
||||
@ -48,7 +48,7 @@ accelerate launch ./checkpointing.py --checkpointing_steps epoch output_dir "che
|
||||
- Arguments available:
|
||||
- `num_folds`, the number of folds the training dataset should be split into.
|
||||
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as:
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:
|
||||
|
||||
```bash
|
||||
accelerate launch ./cross_validation.py --num_folds 2
|
||||
@ -61,7 +61,7 @@ accelerate launch ./cross_validation.py --num_folds 2
|
||||
- Arguments available:
|
||||
- `with_tracking`, whether to load in all available experiment trackers from the environment.
|
||||
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as:
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:
|
||||
|
||||
```bash
|
||||
accelerate launch ./tracking.py --with_tracking
|
||||
@ -73,7 +73,7 @@ accelerate launch ./tracking.py --with_tracking
|
||||
- Arguments available:
|
||||
- `gradient_accumulation_steps`, the number of steps to perform before the gradients are accumulated and the optimizer and scheduler are stepped + zero_grad
|
||||
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as:
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:
|
||||
|
||||
```bash
|
||||
accelerate launch ./gradient_accumulation.py --gradient_accumulation_steps 5
|
||||
|
||||
@ -14,16 +14,16 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
# New Code #
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator
|
||||
# New Code #
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.utils import find_executable_batch_size
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -84,20 +84,10 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
|
||||
|
||||
def collate_fn(examples):
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -224,8 +214,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -15,14 +15,14 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -86,22 +86,9 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -216,12 +203,13 @@ def training_function(config, args):
|
||||
# Now we train the model
|
||||
for epoch in range(starting_epoch, num_epochs):
|
||||
model.train()
|
||||
# New Code #
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
|
||||
# We need to skip steps until we reach the resumed step
|
||||
train_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
|
||||
overall_step += resume_step
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
# New Code #
|
||||
# We need to skip steps until we reach the resumed step during the first epoch
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch:
|
||||
if resume_step is not None and step < resume_step:
|
||||
overall_step += 1
|
||||
continue
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
outputs = model(**batch)
|
||||
@ -281,8 +269,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -15,20 +15,20 @@
|
||||
import argparse
|
||||
from typing import List
|
||||
|
||||
import evaluate
|
||||
import numpy as np
|
||||
import torch
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import DatasetDict, load_dataset
|
||||
|
||||
# New Code #
|
||||
# We'll be using StratifiedKFold for this example
|
||||
from sklearn.model_selection import StratifiedKFold
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate,
|
||||
@ -106,22 +106,9 @@ def get_fold_dataloaders(
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -263,8 +250,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -31,12 +31,16 @@ import random
|
||||
from itertools import chain
|
||||
from pathlib import Path
|
||||
|
||||
import datasets
|
||||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import datasets
|
||||
import transformers
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.logging import get_logger
|
||||
from accelerate.utils import DummyOptim, DummyScheduler, set_seed
|
||||
from datasets import load_dataset
|
||||
from huggingface_hub import Repository
|
||||
from torch.utils.data import DataLoader
|
||||
from tqdm.auto import tqdm
|
||||
from transformers import (
|
||||
CONFIG_MAPPING,
|
||||
@ -51,10 +55,6 @@ from transformers import (
|
||||
from transformers.utils import get_full_repo_name
|
||||
from transformers.utils.versions import require_version
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.logging import get_logger
|
||||
from accelerate.utils import DummyOptim, DummyScheduler, set_seed
|
||||
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
@ -285,9 +285,10 @@ def evaluate(args, model, eval_dataloader, accelerator, eval_dataset):
|
||||
outputs = model(**batch)
|
||||
|
||||
loss = outputs.loss
|
||||
losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))
|
||||
losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))
|
||||
|
||||
losses = torch.cat(losses)
|
||||
losses = losses[: len(eval_dataset)]
|
||||
try:
|
||||
eval_loss = torch.mean(losses)
|
||||
perplexity = math.exp(eval_loss)
|
||||
@ -642,7 +643,7 @@ def main():
|
||||
total_loss += loss.detach().float()
|
||||
loss = loss / args.gradient_accumulation_steps
|
||||
accelerator.backward(loss)
|
||||
if (step + 1) % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
|
||||
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
@ -16,13 +16,13 @@ import argparse
|
||||
import gc
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -147,22 +147,9 @@ def training_function(config, args):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -343,8 +330,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -15,14 +15,14 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -81,22 +81,9 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -205,8 +192,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -31,12 +31,16 @@ import random
|
||||
from itertools import chain
|
||||
from pathlib import Path
|
||||
|
||||
import datasets
|
||||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import datasets
|
||||
import transformers
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.logging import get_logger
|
||||
from accelerate.utils import MegatronLMDummyScheduler, set_seed
|
||||
from datasets import load_dataset
|
||||
from huggingface_hub import Repository
|
||||
from torch.utils.data import DataLoader
|
||||
from tqdm.auto import tqdm
|
||||
from transformers import (
|
||||
CONFIG_MAPPING,
|
||||
@ -51,10 +55,6 @@ from transformers import (
|
||||
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
|
||||
from transformers.utils.versions import require_version
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.logging import get_logger
|
||||
from accelerate.utils import MegatronLMDummyScheduler, set_seed
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
|
||||
@ -14,16 +14,16 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
# New Code #
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
# New Code #
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.utils import find_executable_batch_size
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -86,22 +86,9 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -217,8 +204,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -15,14 +15,14 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -88,22 +88,9 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -222,8 +209,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -15,14 +15,14 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -86,22 +86,9 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -249,8 +236,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -17,14 +17,14 @@ import os
|
||||
import re
|
||||
|
||||
import numpy as np
|
||||
import PIL
|
||||
import torch
|
||||
from timm import create_model
|
||||
from torch.optim.lr_scheduler import OneCycleLR
|
||||
from torch.utils.data import DataLoader, Dataset
|
||||
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
|
||||
|
||||
import PIL
|
||||
from accelerate import Accelerator
|
||||
from timm import create_model
|
||||
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -173,7 +173,7 @@ def training_function(config, args):
)
# We need to keep track of how many total steps we have iterated over
overall_step = 0
# We also need to keep track of the starting epoch so files are named properly
# We also need to keep track of the stating epoch so files are named properly
starting_epoch = 0

# Potentially load in the weights and states from a previous save
@ -203,11 +203,12 @@ def training_function(config, args):
model.train()
if args.with_tracking:
total_loss = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
for step, batch in enumerate(train_dataloader):
# We need to skip steps until we reach the resumed step
train_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
overall_step += resume_step
for batch in train_dataloader:
if args.resume_from_checkpoint and epoch == starting_epoch:
if resume_step is not None and step < resume_step:
overall_step += 1
continue
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch = {k: v.to(accelerator.device) for k, v in batch.items()}
inputs = (batch["image"] - mean) / std
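Aside: the hunk above replaces the old per-step `continue` loop with `accelerator.skip_first_batches`, which fast-forwards the dataloader once before iteration starts. A hedged sketch of the resulting resume pattern, with the model, optimizer and checkpoint bookkeeping left abstract:

def train_one_epoch(accelerator, model, optimizer, train_dataloader, epoch,
                    starting_epoch=0, resume_step=None):
    model.train()
    overall_step = 0
    active_dataloader = train_dataloader
    if resume_step is not None and epoch == starting_epoch:
        # Skip the already-seen batches in a single call instead of iterating past them.
        active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
        overall_step += resume_step
    for batch in active_dataloader:
        outputs = model(**batch)
        loss = outputs.loss
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        overall_step += 1
    return overall_step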
@ -229,7 +230,6 @@ def training_function(config, args):
accelerator.save_state(output_dir)
model.eval()
accurate = 0
num_elems = 0
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch = {k: v.to(accelerator.device) for k, v in batch.items()}
@ -239,10 +239,9 @@ def training_function(config, args):
predictions = outputs.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
accurate_preds = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()

eval_metric = accurate.item() / num_elems
eval_metric = accurate.item() / accelerator.gradient_state.samples_seen
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
if args.with_tracking:
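Aside: the evaluation hunk above normalizes accuracy by the number of gathered elements because `gather_for_metrics` already drops the duplicate samples that even sharding adds on the last batch, whereas the older code divided by `gradient_state.samples_seen`. A hedged sketch of the new pattern, with field names mirroring the image-classification example above:

import torch

@torch.no_grad()
def evaluate(accelerator, model, eval_dataloader, mean, std):
    model.eval()
    accurate = 0
    num_elems = 0
    for batch in eval_dataloader:
        batch = {k: v.to(accelerator.device) for k, v in batch.items()}
        inputs = (batch["image"] - mean) / std
        predictions = model(inputs).argmax(dim=-1)
        # Gather across processes; duplicated samples from the last batch are dropped automatically.
        predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
        accurate_preds = predictions == references
        num_elems += accurate_preds.shape[0]
        accurate += accurate_preds.long().sum()
    return accurate.item() / num_elems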
@ -271,8 +270,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -15,14 +15,14 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -109,22 +109,9 @@ def training_function(config, args):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -193,11 +180,12 @@ def training_function(config, args):
|
||||
model.train()
|
||||
if args.with_tracking:
|
||||
total_loss = 0
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
|
||||
# We need to skip steps until we reach the resumed step
|
||||
train_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
|
||||
overall_step += resume_step
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
# We need to skip steps until we reach the resumed step
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch:
|
||||
if resume_step is not None and step < resume_step:
|
||||
overall_step += 1
|
||||
continue
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
outputs = model(**batch)
|
||||
@ -263,8 +251,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -17,14 +17,14 @@ import os
|
||||
import re
|
||||
|
||||
import numpy as np
|
||||
import PIL
|
||||
import torch
|
||||
from timm import create_model
|
||||
from torch.optim.lr_scheduler import OneCycleLR
|
||||
from torch.utils.data import DataLoader, Dataset
|
||||
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
|
||||
|
||||
import PIL
|
||||
from accelerate import Accelerator
|
||||
from timm import create_model
|
||||
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -189,8 +189,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -1,55 +0,0 @@
|
||||
import argparse
|
||||
|
||||
import runhouse as rh
|
||||
import torch
|
||||
from nlp_example import training_function
|
||||
|
||||
from accelerate.utils import PrepareForLaunch, patch_environment
|
||||
|
||||
|
||||
def launch_train(*args):
|
||||
num_processes = torch.cuda.device_count()
|
||||
print(f"Device count: {num_processes}")
|
||||
with patch_environment(
|
||||
world_size=num_processes, master_addr="127.0.01", master_port="29500", mixed_precision=args[1].mixed_precision
|
||||
):
|
||||
launcher = PrepareForLaunch(training_function, distributed_type="MULTI_GPU")
|
||||
torch.multiprocessing.start_processes(launcher, args=args, nprocs=num_processes, start_method="spawn")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/main/rh_primitives/cluster.html#hardware-setup
|
||||
# for cloud access setup instructions (if using on-demand hardware), and for API specifications.
|
||||
|
||||
# on-demand GPU
|
||||
# gpu = rh.cluster(name='rh-cluster', instance_type='V100:1', provider='cheapest', use_spot=False) # single GPU
|
||||
gpu = rh.cluster(name="rh-cluster", instance_type="V100:4", provider="cheapest", use_spot=False) # multi GPU
|
||||
gpu.up_if_not()
|
||||
|
||||
# on-prem GPU
|
||||
# gpu = rh.cluster(
|
||||
# ips=["ip_addr"], ssh_creds={ssh_user:"<username>", ssh_private_key:"<key_path>"}, name="rh-cluster"
|
||||
# )
|
||||
|
||||
# Set up remote function
|
||||
reqs = [
|
||||
"pip:./",
|
||||
"transformers",
|
||||
"datasets",
|
||||
"evaluate",
|
||||
"tqdm",
|
||||
"scipy",
|
||||
"scikit-learn",
|
||||
"tensorboard",
|
||||
"torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117",
|
||||
]
|
||||
launch_train_gpu = rh.function(fn=launch_train, system=gpu, reqs=reqs, name="train_bert_glue")
|
||||
|
||||
# Define train args/config, run train function
|
||||
train_args = argparse.Namespace(cpu=False, mixed_precision="fp16")
|
||||
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
|
||||
launch_train_gpu(config, train_args, stream_logs=True)
|
||||
|
||||
# Alternatively, we can just run as instructed in the README (but only because there's already a wrapper CLI):
|
||||
# gpu.install_packages(reqs)
|
||||
# gpu.run(['accelerate launch --multi_gpu accelerate/examples/nlp_example.py'])
|
||||
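Aside: the deleted Runhouse launcher above spawns one process per GPU by wiring up `PrepareForLaunch`, `patch_environment` and `torch.multiprocessing.start_processes` by hand. `accelerate.notebook_launcher` wraps roughly the same pattern; a hedged equivalent of `launch_train` using it (the config and argument objects are the ones built at the bottom of the deleted file):

import torch

from accelerate import notebook_launcher
from nlp_example import training_function

def launch_train(config, train_args):
    num_processes = torch.cuda.device_count()
    print(f"Device count: {num_processes}")
    notebook_launcher(
        training_function,
        args=(config, train_args),
        num_processes=num_processes,
        mixed_precision=train_args.mixed_precision,
    )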
@ -14,14 +14,14 @@
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -79,33 +79,16 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
|
||||
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
|
||||
)
|
||||
eval_dataloader = DataLoader(
|
||||
tokenized_datasets["validation"],
|
||||
shuffle=False,
|
||||
collate_fn=collate_fn,
|
||||
batch_size=EVAL_BATCH_SIZE,
|
||||
drop_last=(accelerator.mixed_precision == "fp8"),
|
||||
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
|
||||
)
|
||||
|
||||
return train_dataloader, eval_dataloader
|
||||
@ -137,6 +120,7 @@ def training_function(config, args):
|
||||
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
|
||||
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
|
||||
model = model.to(accelerator.device)
|
||||
|
||||
# Instantiate optimizer
|
||||
optimizer = AdamW(params=model.parameters(), lr=lr)
|
||||
|
||||
@ -150,7 +134,6 @@ def training_function(config, args):
|
||||
# Prepare everything
|
||||
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
|
||||
# prepare method.
|
||||
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
@ -193,8 +176,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -1,17 +1,3 @@
|
||||
[tool.black]
|
||||
line-length = 119
|
||||
target-version = ['py37']
|
||||
|
||||
[tool.ruff]
|
||||
# Never enforce `E501` (line length violations).
|
||||
ignore = ["E501", "E741", "W605"]
|
||||
select = ["E", "F", "I", "W"]
|
||||
line-length = 119
|
||||
|
||||
# Ignore import violations in all `__init__.py` files.
|
||||
[tool.ruff.per-file-ignores]
|
||||
"__init__.py" = ["E402", "F401", "F403", "F811"]
|
||||
|
||||
[tool.ruff.isort]
|
||||
lines-after-imports = 2
|
||||
known-first-party = ["accelerate"]
|
||||
target-version = ['py36']
|
||||
|
||||
@ -4,6 +4,11 @@ ensure_newline_before_comments = True
|
||||
force_grid_wrap = 0
|
||||
include_trailing_comma = True
|
||||
known_first_party = accelerate
|
||||
known_third_party =
|
||||
numpy
|
||||
torch
|
||||
torch_xla
|
||||
|
||||
line_length = 119
|
||||
lines_after_imports = 2
|
||||
multi_line_output = 3
|
||||
|
||||
setup.py
@ -16,10 +16,10 @@ from setuptools import setup
|
||||
from setuptools import find_packages
|
||||
|
||||
extras = {}
|
||||
extras["quality"] = ["black ~= 23.1", "ruff >= 0.0.241", "hf-doc-builder >= 0.3.0"]
|
||||
extras["quality"] = ["black ~= 22.0", "isort >= 5.5.4", "flake8 >= 3.8.3", "hf-doc-builder >= 0.3.0"]
|
||||
extras["docs"] = []
|
||||
extras["test_prod"] = ["pytest", "pytest-xdist", "pytest-subtests", "parameterized"]
|
||||
extras["test_dev"] = ["datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed", "tqdm"]
|
||||
extras["test_dev"] = ["datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed<0.7.0", "tqdm"]
|
||||
extras["testing"] = extras["test_prod"] + extras["test_dev"]
|
||||
extras["rich"] = ["rich"]
|
||||
|
||||
@ -32,7 +32,7 @@ extras["sagemaker"] = [
|
||||
|
||||
setup(
|
||||
name="accelerate",
|
||||
version="0.19.0.dev0",
|
||||
version="0.14.0",
|
||||
description="Accelerate",
|
||||
long_description=open("README.md", "r", encoding="utf-8").read(),
|
||||
long_description_content_type="text/markdown",
|
||||
|
||||
@ -1,18 +1,12 @@
__version__ = "0.19.0.dev0"
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

__version__ = "0.14.0"

from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .big_modeling import cpu_offload, disk_offload, dispatch_model, init_empty_weights, load_checkpoint_and_dispatch
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
File diff suppressed because it is too large
@ -19,24 +19,15 @@ from typing import Dict, List, Optional, Union
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
from .hooks import (
|
||||
AlignDevicesHook,
|
||||
CpuOffload,
|
||||
UserCpuOffloadHook,
|
||||
add_hook_to_module,
|
||||
attach_align_device_hook,
|
||||
attach_align_device_hook_on_blocks,
|
||||
)
|
||||
from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks
|
||||
from .utils import (
|
||||
OffloadedWeightsLoader,
|
||||
check_device_map,
|
||||
extract_submodules_state_dict,
|
||||
find_tied_parameters,
|
||||
get_balanced_memory,
|
||||
infer_auto_device_map,
|
||||
load_checkpoint_in_model,
|
||||
offload_state_dict,
|
||||
retie_parameters,
|
||||
)
|
||||
from .utils.versions import is_torch_version
|
||||
|
||||
@ -71,31 +62,6 @@ def init_empty_weights(include_buffers: bool = False):
|
||||
"""
|
||||
if not is_torch_version(">=", "1.9.0"):
|
||||
raise NotImplementedError("Initializing empty weights to a meta device requires torch >= 1.9.0")
|
||||
with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
|
||||
yield f


@contextmanager
def init_on_device(device: torch.device, include_buffers: bool = False):
"""
A context manager under which models are initialized with all parameters on the specified device.

Args:
device (`torch.device`):
Device to initialize all parameters on.
include_buffers (`bool`, *optional*, defaults to `False`):
Whether or not to also put all buffers on the meta device while initializing.

Example:

```python
import torch.nn as nn
from accelerate import init_on_device

with init_on_device(device=torch.device("cuda")):
tst = nn.Linear(100, 100) # on `cuda` device
```
"""
old_register_parameter = nn.Module.register_parameter
if include_buffers:
old_register_buffer = nn.Module.register_buffer
@ -105,12 +71,12 @@ def init_on_device(device: torch.device, include_buffers: bool = False):
|
||||
if param is not None:
|
||||
param_cls = type(module._parameters[name])
|
||||
kwargs = module._parameters[name].__dict__
|
||||
module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
|
||||
module._parameters[name] = param_cls(module._parameters[name].to(torch.device("meta")), **kwargs)
|
||||
|
||||
def register_empty_buffer(module, name, buffer):
|
||||
old_register_buffer(module, name, buffer)
|
||||
if buffer is not None:
|
||||
module._buffers[name] = module._buffers[name].to(device)
|
||||
module._buffers[name] = module._buffers[name].to(torch.device("meta"))
|
||||
|
||||
# Patch tensor creation
|
||||
if include_buffers:
|
||||
@ -123,7 +89,7 @@ def init_on_device(device: torch.device, include_buffers: bool = False):
|
||||
|
||||
def patch_tensor_constructor(fn):
|
||||
def wrapper(*args, **kwargs):
|
||||
kwargs["device"] = device
|
||||
kwargs["device"] = torch.device("meta")
|
||||
return fn(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
@ -191,50 +157,6 @@ def cpu_offload(
|
||||
return model


def cpu_offload_with_hook(
model: torch.nn.Module,
execution_device: Optional[Union[int, str, torch.device]] = None,
prev_module_hook: Optional[UserCpuOffloadHook] = None,
):
"""
Offloads a model on the CPU and puts it back to an execution device when executed. The difference with
[`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when
the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop.

Args:
model (`torch.nn.Module`):
The model to offload.
execution_device(`str`, `int` or `torch.device`, *optional*):
The device on which the model should be executed. Will default to the MPS device if it's available, then
GPU 0 if there is a GPU, and finally to the CPU.
prev_module_hook (`UserCpuOffloadHook`, *optional*):
The hook sent back by this function for a previous model in the pipeline you are running. If passed, its
offload method will be called just before the forward of the model to which this hook is attached.

Example:

```py
model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device)
model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1)
model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2)

hid_1 = model_1(input)
for i in range(50):
# model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
hid_2 = model_2(hid_1)
# model2 is offloaded to the CPU just before this forward.
hid_3 = model_3(hid_2)

# For model3, you need to manually call the hook offload method.
hook_3.offload()
```
"""
hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook)
add_hook_to_module(model, hook, append=True)
user_hook = UserCpuOffloadHook(model, hook)
return model, user_hook

def disk_offload(
|
||||
model: nn.Module,
|
||||
offload_dir: Union[str, os.PathLike],
|
||||
@ -288,8 +210,7 @@ def dispatch_model(
|
||||
device_map: Dict[str, Union[str, int, torch.device]],
|
||||
main_device: Optional[torch.device] = None,
|
||||
state_dict: Optional[Dict[str, torch.Tensor]] = None,
|
||||
offload_dir: Optional[Union[str, os.PathLike]] = None,
|
||||
offload_index: Optional[Dict[str, str]] = None,
|
||||
offload_dir: Union[str, os.PathLike] = None,
|
||||
offload_buffers: bool = False,
|
||||
preload_module_classes: Optional[List[str]] = None,
|
||||
):
|
||||
@ -310,9 +231,6 @@ def dispatch_model(
|
||||
The state dict of the part of the model that will be kept on CPU.
|
||||
offload_dir (`str` or `os.PathLike`):
|
||||
The folder in which to offload the model weights (or where the model weights are already offloaded).
|
||||
offload_index (`Dict`, *optional*):
|
||||
A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default
|
||||
to the index saved in `save_folder`.
|
||||
offload_buffers (`bool`, *optional*, defaults to `False`):
|
||||
Whether or not to offload the buffers with the model parameters.
|
||||
preload_module_classes (`List[str]`, *optional*):
|
||||
@ -338,15 +256,13 @@ def dispatch_model(
|
||||
state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)
|
||||
|
||||
disk_modules = [name for name, device in device_map.items() if device == "disk"]
|
||||
if offload_dir is None and offload_index is None and len(disk_modules) > 0:
|
||||
if offload_dir is None and len(disk_modules) > 0:
|
||||
raise ValueError(
|
||||
"We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules "
|
||||
f"need to be offloaded: {', '.join(disk_modules)}."
|
||||
)
|
||||
if (
|
||||
len(disk_modules) > 0
|
||||
and offload_index is None
|
||||
and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")))
|
||||
if len(disk_modules) > 0 and (
|
||||
not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json"))
|
||||
):
|
||||
disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
|
||||
offload_state_dict(offload_dir, disk_state_dict)
|
||||
@ -354,19 +270,14 @@ def dispatch_model(
|
||||
execution_device = {
|
||||
name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items()
|
||||
}
|
||||
execution_device[""] = main_device
|
||||
offloaded_devices = ["disk"] if main_device == "cpu" else ["cpu", "disk"]
|
||||
offload = {name: device in offloaded_devices for name, device in device_map.items()}
|
||||
save_folder = offload_dir if len(disk_modules) > 0 else None
|
||||
if state_dict is not None or save_folder is not None or offload_index is not None:
|
||||
device = main_device if offload_index is not None else None
|
||||
weights_map = OffloadedWeightsLoader(
|
||||
state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device
|
||||
)
|
||||
if state_dict is not None or save_folder is not None:
|
||||
weights_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=save_folder)
|
||||
else:
|
||||
weights_map = None
|
||||
|
||||
tied_params = find_tied_parameters(model)
|
||||
attach_align_device_hook_on_blocks(
|
||||
model,
|
||||
execution_device=execution_device,
|
||||
@ -375,8 +286,6 @@ def dispatch_model(
|
||||
weights_map=weights_map,
|
||||
preload_module_classes=preload_module_classes,
|
||||
)
|
||||
# Attaching the hook may break tied weights, so we retie them
|
||||
retie_parameters(model, tied_params)
|
||||
model.hf_device_map = device_map
|
||||
return model
|
||||
|
||||
@ -432,28 +341,6 @@ def load_checkpoint_and_dispatch(
|
||||
of the forward. This should only be used for classes that have submodules which are registered but not
|
||||
called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
|
||||
`dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch
|
||||
>>> from huggingface_hub import hf_hub_download
|
||||
>>> from transformers import AutoConfig, AutoModelForCausalLM
|
||||
|
||||
>>> # Download the Weights
|
||||
>>> checkpoint = "EleutherAI/gpt-j-6B"
|
||||
>>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin")
|
||||
|
||||
>>> # Create a model and initialize it with empty weights
|
||||
>>> config = AutoConfig.from_pretrained(checkpoint)
|
||||
>>> with init_empty_weights():
|
||||
... model = AutoModelForCausalLM.from_config(config)
|
||||
|
||||
>>> # Load the checkpoint and dispatch it to the right devices
|
||||
>>> model = load_checkpoint_and_dispatch(
|
||||
... model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"]
|
||||
... )
|
||||
```
|
||||
"""
|
||||
if not is_torch_version(">=", "1.9.0"):
|
||||
raise NotImplementedError("Loading and dispatching requires torch >= 1.9.0")
|
||||
@ -474,7 +361,7 @@ def load_checkpoint_and_dispatch(
|
||||
device_map = infer_auto_device_map(
|
||||
model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype
|
||||
)
|
||||
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
|
||||
if offload_state_dict is None and "disk" in device_map.values():
|
||||
offload_state_dict = True
|
||||
load_checkpoint_in_model(
|
||||
model,
|
||||
@ -483,7 +370,6 @@ def load_checkpoint_and_dispatch(
|
||||
offload_folder=offload_folder,
|
||||
dtype=dtype,
|
||||
offload_state_dict=offload_state_dict,
|
||||
offload_buffers=offload_buffers,
|
||||
)
|
||||
if device_map is None:
|
||||
return model
|
||||
|
||||
@ -37,7 +37,6 @@ if is_tpu_available(check_device=False):
|
||||
import torch_xla.core.xla_model as xm
|
||||
|
||||
from .logging import get_logger
|
||||
from .state import PartialState
|
||||
|
||||
|
||||
logger = get_logger(__name__)
|
||||
@ -110,16 +109,7 @@ def save_accelerator_state(
|
||||
return output_dir
|
||||
|
||||
|
||||
def load_accelerator_state(
|
||||
input_dir,
|
||||
models,
|
||||
optimizers,
|
||||
schedulers,
|
||||
process_index,
|
||||
scaler=None,
|
||||
map_location=None,
|
||||
**load_model_func_kwargs,
|
||||
):
|
||||
def load_accelerator_state(input_dir, models, optimizers, schedulers, process_index, scaler=None):
|
||||
"""
|
||||
Loads states of the models, optimizers, scaler, and RNG generators from a given directory.
|
||||
|
||||
@ -136,32 +126,19 @@ def load_accelerator_state(
|
||||
The current process index in the Accelerator state
|
||||
scaler (`torch.cuda.amp.GradScaler`, *optional*):
|
||||
An optional *GradScaler* instance to load
|
||||
map_location (`str`, *optional*):
|
||||
What device to load the optimizer state onto. Should be one of either "cpu" or "on_device".
|
||||
load_model_func_kwargs (`dict`, *optional*):
|
||||
Additional arguments that can be passed to the model's `load_state_dict` method.
|
||||
"""
|
||||
if map_location not in [None, "cpu", "on_device"]:
|
||||
raise TypeError(
|
||||
"Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`"
|
||||
)
|
||||
if map_location is None:
|
||||
map_location = "cpu"
|
||||
elif map_location == "on_device":
|
||||
map_location = PartialState().device
|
||||
# Model states
|
||||
for i, model in enumerate(models):
|
||||
weights_name = f"{MODEL_NAME}.bin" if i == 0 else f"{MODEL_NAME}_{i}.bin"
|
||||
input_model_file = os.path.join(input_dir, weights_name)
|
||||
models[i].load_state_dict(torch.load(input_model_file, map_location=map_location), **load_model_func_kwargs)
|
||||
models[i].load_state_dict(torch.load(input_model_file, map_location="cpu"))
|
||||
logger.info("All model weights loaded successfully")
|
||||
|
||||
# Optimizer states
|
||||
for i, opt in enumerate(optimizers):
|
||||
optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
|
||||
input_optimizer_file = os.path.join(input_dir, optimizer_name)
|
||||
optimizer_state = torch.load(input_optimizer_file)
|
||||
optimizers[i].load_state_dict(optimizer_state)
|
||||
optimizers[i].load_state_dict(torch.load(input_optimizer_file, map_location="cpu"))
|
||||
logger.info("All optimizer states loaded successfully")
|
||||
|
||||
# Scheduler states
|
||||
@ -188,7 +165,7 @@ def load_accelerator_state(
|
||||
if is_tpu_available():
|
||||
xm.set_rng_state(states["xm_seed"])
|
||||
logger.info("All random states loaded successfully")
|
||||
except Exception:
|
||||
except:
|
||||
logger.info("Could not load random states")
|
||||
|
||||
|
||||
@ -208,4 +185,4 @@ def load_custom_state(obj, path, index: int = 0):
|
||||
"""
|
||||
load_location = f"{path}/custom_checkpoint_{index}.pkl"
|
||||
logger.info(f"Loading the state of {get_pretty_name(obj)} from {load_location}")
|
||||
obj.load_state_dict(torch.load(load_location, map_location="cpu"))
|
||||
obj.load_state_dict(torch.load(load_location))
|
||||
|
||||
@ -16,7 +16,7 @@
|
||||
|
||||
from argparse import ArgumentParser
|
||||
|
||||
from accelerate.commands.config import get_config_parser
|
||||
from accelerate.commands.config import config_command_parser
|
||||
from accelerate.commands.env import env_command_parser
|
||||
from accelerate.commands.launch import launch_command_parser
|
||||
from accelerate.commands.test import test_command_parser
|
||||
@ -24,11 +24,11 @@ from accelerate.commands.tpu import tpu_command_parser
|
||||
|
||||
|
||||
def main():
|
||||
parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
|
||||
parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]")
|
||||
subparsers = parser.add_subparsers(help="accelerate command helpers")
|
||||
|
||||
# Register commands
|
||||
get_config_parser(subparsers=subparsers)
|
||||
config_command_parser(subparsers=subparsers)
|
||||
env_command_parser(subparsers=subparsers)
|
||||
launch_command_parser(subparsers=subparsers)
|
||||
tpu_command_parser(subparsers=subparsers)
|
||||
|
||||
@ -15,37 +15,70 @@
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import os
|
||||
|
||||
from .config import config_command_parser
|
||||
from .config_args import default_config_file, load_config_from_file # noqa: F401
|
||||
from .default import default_command_parser
|
||||
from .update import update_command_parser
|
||||
from accelerate.utils import ComputeEnvironment
|
||||
|
||||
from .cluster import get_cluster_input
|
||||
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
|
||||
from .config_utils import _ask_field, _convert_compute_environment
|
||||
from .sagemaker import get_sagemaker_input
|
||||
|
||||
|
||||
def get_config_parser(subparsers=None):
|
||||
parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
|
||||
# The main config parser
|
||||
config_parser = config_command_parser(subparsers)
|
||||
# The subparser to add commands to
|
||||
subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
|
||||
def get_user_input():
|
||||
compute_environment = _ask_field(
|
||||
"In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): ",
|
||||
_convert_compute_environment,
|
||||
error_message="Please enter 0 or 1",
|
||||
)
|
||||
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
|
||||
config = get_sagemaker_input()
|
||||
else:
|
||||
config = get_cluster_input()
|
||||
return config
|
||||
|
||||
# Then add other parsers with the parent parser
|
||||
default_command_parser(subcommands, parents=[parent_parser])
|
||||
update_command_parser(subcommands, parents=[parent_parser])
|
||||
|
||||
return config_parser
|
||||
def config_command_parser(subparsers=None):
|
||||
if subparsers is not None:
|
||||
parser = subparsers.add_parser("config")
|
||||
else:
|
||||
parser = argparse.ArgumentParser("Accelerate config command")
|
||||
|
||||
parser.add_argument(
|
||||
"--config_file",
|
||||
default=None,
|
||||
help=(
|
||||
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
|
||||
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
|
||||
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
|
||||
"with 'huggingface'."
|
||||
),
|
||||
)
|
||||
|
||||
if subparsers is not None:
|
||||
parser.set_defaults(func=config_command)
|
||||
return parser
|
||||
|
||||
|
||||
def config_command(args):
|
||||
config = get_user_input()
|
||||
if args.config_file is not None:
|
||||
config_file = args.config_file
|
||||
else:
|
||||
if not os.path.isdir(cache_dir):
|
||||
os.makedirs(cache_dir)
|
||||
config_file = default_yaml_config_file
|
||||
|
||||
if config_file.endswith(".json"):
|
||||
config.to_json_file(config_file)
|
||||
else:
|
||||
config.to_yaml_file(config_file)
|
||||
|
||||
|
||||
def main():
|
||||
config_parser = get_config_parser()
|
||||
args = config_parser.parse_args()
|
||||
|
||||
if not hasattr(args, "func"):
|
||||
config_parser.print_help()
|
||||
exit(1)
|
||||
|
||||
# Run
|
||||
args.func(args)
|
||||
parser = config_command_parser()
|
||||
args = parser.parse_args()
|
||||
config_command(args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@ -16,38 +16,23 @@
|
||||
|
||||
import os
|
||||
|
||||
from ...utils import (
|
||||
ComputeEnvironment,
|
||||
DistributedType,
|
||||
is_deepspeed_available,
|
||||
is_mps_available,
|
||||
is_transformers_available,
|
||||
)
|
||||
from ...utils import ComputeEnvironment, DistributedType, is_deepspeed_available, is_transformers_available
|
||||
from ...utils.constants import (
|
||||
DEEPSPEED_MULTINODE_LAUNCHERS,
|
||||
FSDP_AUTO_WRAP_POLICY,
|
||||
FSDP_BACKWARD_PREFETCH,
|
||||
FSDP_SHARDING_STRATEGY,
|
||||
FSDP_STATE_DICT_TYPE,
|
||||
TORCH_DYNAMO_MODES,
|
||||
)
|
||||
from .config_args import ClusterConfig
|
||||
from .config_utils import (
|
||||
DYNAMO_BACKENDS,
|
||||
_ask_field,
|
||||
_ask_options,
|
||||
_convert_distributed_mode,
|
||||
_convert_dynamo_backend,
|
||||
_convert_mixed_precision,
|
||||
_convert_yes_no_to_bool,
|
||||
)
|
||||
from .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool
|
||||
|
||||
|
||||
def get_cluster_input():
|
||||
distributed_type = _ask_options(
|
||||
"Which type of machine are you using?",
|
||||
["No distributed training", "multi-CPU", "multi-GPU", "TPU"],
|
||||
distributed_type = _ask_field(
|
||||
"Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU [4] MPS): ",
|
||||
_convert_distributed_mode,
|
||||
error_message="Please enter 0, 1, 2, 3 or 4.",
|
||||
)
|
||||
|
||||
machine_rank = 0
|
||||
@ -58,25 +43,28 @@ def get_cluster_input():
|
||||
main_process_port = None
|
||||
rdzv_backend = "static"
|
||||
same_network = True
|
||||
|
||||
tpu_name = None
|
||||
tpu_zone = None
|
||||
commands = None
|
||||
command_file = None
|
||||
if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_CPU]:
|
||||
num_machines = _ask_field(
|
||||
"How many different machines will you use (use more than 1 for multi-node training)? [1]: ",
|
||||
int,
|
||||
lambda x: int(x),
|
||||
default=1,
|
||||
)
|
||||
if num_machines > 1:
|
||||
machine_rank = _ask_options(
|
||||
"What is the rank of this machine?",
|
||||
list(range(num_machines)),
|
||||
int,
|
||||
machine_rank = _ask_field(
|
||||
"What is the rank of this machine (from 0 to the number of machines - 1 )? [0]: ",
|
||||
lambda x: int(x),
|
||||
default=0,
|
||||
)
|
||||
main_process_ip = _ask_field(
|
||||
"What is the IP address of the machine that will host the main process? ",
|
||||
)
|
||||
main_process_port = _ask_field(
|
||||
"What is the port you will use to communicate with the main process? ",
|
||||
int,
|
||||
lambda x: int(x),
|
||||
)
|
||||
same_network = _ask_field(
|
||||
"Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ",
|
||||
@ -91,7 +79,7 @@ def get_cluster_input():
|
||||
|
||||
if distributed_type == DistributedType.NO:
|
||||
use_cpu = _ask_field(
|
||||
"Do you want to run your training on CPU only (even if a GPU / Apple Silicon device is available)? [yes/NO]:",
|
||||
"Do you want to run your training on CPU only (even if a GPU is available)? [yes/NO]:",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
@ -101,60 +89,8 @@ def get_cluster_input():
|
||||
else:
|
||||
use_cpu = False
|
||||
|
||||
ipex_config = {}
|
||||
if use_cpu:
|
||||
ipex_config["ipex_enabled"] = _ask_field(
|
||||
"Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
dynamo_config = {}
|
||||
use_dynamo = _ask_field(
|
||||
"Do you wish to optimize your script with torch dynamo?[yes/NO]:",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if use_dynamo:
|
||||
prefix = "dynamo_"
|
||||
dynamo_config[prefix + "backend"] = _ask_options(
|
||||
"Which dynamo backend would you like to use?",
|
||||
[x.lower() for x in DYNAMO_BACKENDS],
|
||||
_convert_dynamo_backend,
|
||||
default=2,
|
||||
)
|
||||
use_custom_options = _ask_field(
|
||||
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
if use_custom_options:
|
||||
dynamo_config[prefix + "mode"] = _ask_options(
|
||||
"Which mode do you want to use?",
|
||||
TORCH_DYNAMO_MODES,
|
||||
lambda x: TORCH_DYNAMO_MODES[int(x)],
|
||||
default=0,
|
||||
)
|
||||
dynamo_config[prefix + "use_fullgraph"] = _ask_field(
|
||||
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
dynamo_config[prefix + "use_dynamic"] = _ask_field(
|
||||
"Do you want to enable dynamic shape tracing? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
use_mps = not use_cpu and is_mps_available()
|
||||
deepspeed_config = {}
|
||||
if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO] and not use_mps:
|
||||
if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO]:
|
||||
use_deepspeed = _ask_field(
|
||||
"Do you want to use DeepSpeed? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
@ -177,28 +113,30 @@ def get_cluster_input():
|
||||
if use_deepspeed_config:
|
||||
deepspeed_config["deepspeed_config_file"] = _ask_field(
|
||||
"Please enter the path to the json DeepSpeed config file: ",
|
||||
str,
|
||||
lambda x: str(x),
|
||||
default="none",
|
||||
)
|
||||
else:
|
||||
deepspeed_config["zero_stage"] = _ask_options(
|
||||
"What should be your DeepSpeed's ZeRO optimization stage?",
|
||||
[0, 1, 2, 3],
|
||||
int,
|
||||
deepspeed_config["zero_stage"] = _ask_field(
|
||||
"What should be your DeepSpeed's ZeRO optimization stage (0, 1, 2, 3)? [2]: ",
|
||||
lambda x: int(x),
|
||||
default=2,
|
||||
)
|
||||
|
||||
deepspeed_devices = ["none", "cpu", "nvme"]
|
||||
if deepspeed_config["zero_stage"] >= 2:
|
||||
deepspeed_config["offload_optimizer_device"] = _ask_options(
|
||||
"Where to offload optimizer states?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
|
||||
deepspeed_config["offload_optimizer_device"] = _ask_field(
|
||||
"Where to offload optimizer states? [none/cpu/nvme]: ",
|
||||
lambda x: str(x),
|
||||
default="none",
|
||||
)
|
||||
deepspeed_config["offload_param_device"] = _ask_options(
|
||||
"Where to offload parameters?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
|
||||
deepspeed_config["offload_param_device"] = _ask_field(
|
||||
"Where to offload parameters? [none/cpu/nvme]: ",
|
||||
lambda x: str(x),
|
||||
default="none",
|
||||
)
|
||||
deepspeed_config["gradient_accumulation_steps"] = _ask_field(
|
||||
"How many gradient accumulation steps you're passing in your script? [1]: ",
|
||||
int,
|
||||
lambda x: int(x),
|
||||
default=1,
|
||||
)
|
||||
use_gradient_clipping = _ask_field(
|
||||
@ -210,7 +148,7 @@ def get_cluster_input():
|
||||
if use_gradient_clipping:
|
||||
deepspeed_config["gradient_clipping"] = _ask_field(
|
||||
"What is the gradient clipping value? [1.0]: ",
|
||||
float,
|
||||
lambda x: float(x),
|
||||
default=1.0,
|
||||
)
|
||||
if deepspeed_config["zero_stage"] == 3:
|
||||
@ -234,11 +172,14 @@ def get_cluster_input():
|
||||
)
|
||||
|
||||
if num_machines > 1:
|
||||
launcher_query = "Which Type of launcher do you want to use?"
|
||||
deepspeed_config["deepspeed_multinode_launcher"] = _ask_options(
|
||||
launcher_query = "Which Type of launcher do you want to use "
|
||||
for i, launcher in enumerate(DEEPSPEED_MULTINODE_LAUNCHERS):
|
||||
launcher_query += f"[{i}] {launcher}, "
|
||||
launcher_query = launcher_query[:-2] + ")? [0]: "
|
||||
deepspeed_config["deepspeed_multinode_launcher"] = _ask_field(
|
||||
launcher_query,
|
||||
DEEPSPEED_MULTINODE_LAUNCHERS,
|
||||
lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)],
|
||||
default=DEEPSPEED_MULTINODE_LAUNCHERS[0],
|
||||
)
|
||||
|
||||
if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
|
||||
@ -248,7 +189,7 @@ def get_cluster_input():
|
||||
"for more information please refer official [documentation]"
|
||||
"(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). "
|
||||
"Please specify the location of hostfile: ",
|
||||
str,
|
||||
lambda x: str(x),
|
||||
)
|
||||
|
||||
is_exclusion_filter = _ask_field(
|
||||
@ -260,7 +201,7 @@ def get_cluster_input():
|
||||
if is_exclusion_filter:
|
||||
deepspeed_config["deepspeed_exclusion_filter"] = _ask_field(
|
||||
"DeepSpeed exclusion filter string: ",
|
||||
str,
|
||||
lambda x: str(x),
|
||||
)
|
||||
|
||||
is_inclusion_filter = _ask_field(
|
||||
@ -272,7 +213,7 @@ def get_cluster_input():
|
||||
if is_inclusion_filter:
|
||||
deepspeed_config["deepspeed_inclusion_filter"] = _ask_field(
|
||||
"DeepSpeed inclusion filter string: ",
|
||||
str,
|
||||
lambda x: str(x),
|
||||
)
|
||||
|
||||
fsdp_config = {}
|
||||
@ -286,11 +227,13 @@ def get_cluster_input():
|
||||
if use_fsdp:
|
||||
distributed_type = DistributedType.FSDP
|
||||
if distributed_type == DistributedType.FSDP:
|
||||
sharding_strategy_query = "What should be your sharding strategy?"
|
||||
fsdp_config["fsdp_sharding_strategy"] = _ask_options(
|
||||
sharding_strategy_query = "What should be your sharding strategy ("
|
||||
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
|
||||
sharding_strategy_query += f"[{i+1}] {strategy}, "
|
||||
sharding_strategy_query = sharding_strategy_query[:-2] + ")? [1]: "
|
||||
fsdp_config["fsdp_sharding_strategy"] = _ask_field(
|
||||
sharding_strategy_query,
|
||||
FSDP_SHARDING_STRATEGY,
|
||||
lambda x: int(x) + 1,
|
||||
lambda x: int(x),
|
||||
default=1,
|
||||
)
|
||||
fsdp_config["fsdp_offload_params"] = _ask_field(
|
||||
@ -299,35 +242,43 @@ def get_cluster_input():
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
fsdp_wrap_query = "What should be your auto wrap policy?"
|
||||
fsdp_config["fsdp_auto_wrap_policy"] = _ask_options(
|
||||
fsdp_wrap_query = "What should be your auto wrap policy ("
|
||||
for i, wrap_policy in enumerate(FSDP_AUTO_WRAP_POLICY):
|
||||
fsdp_wrap_query += f"[{i}] {wrap_policy}, "
|
||||
fsdp_wrap_query = fsdp_wrap_query[:-2] + ")? [0]: "
|
||||
fsdp_config["fsdp_auto_wrap_policy"] = _ask_field(
|
||||
fsdp_wrap_query,
|
||||
FSDP_AUTO_WRAP_POLICY,
|
||||
lambda x: FSDP_AUTO_WRAP_POLICY[int(x)],
|
||||
default="TRANSFORMER_BASED_WRAP",
|
||||
)
|
||||
if fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[0]:
|
||||
fsdp_config["fsdp_transformer_layer_cls_to_wrap"] = _ask_field(
|
||||
"Specify the comma-separated list of transformer layer class names (case-sensitive) to wrap ,e.g, :"
|
||||
"`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput` ...? : ",
|
||||
str,
|
||||
"What is the transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` ...? : ",
|
||||
lambda x: str(x),
|
||||
)
|
||||
elif fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[1]:
|
||||
fsdp_config["fsdp_min_num_params"] = _ask_field(
|
||||
"What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: ",
|
||||
int,
|
||||
lambda x: int(x),
|
||||
default=1e8,
|
||||
)
|
||||
fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?"
|
||||
fsdp_config["fsdp_backward_prefetch_policy"] = _ask_options(
|
||||
fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy ("
|
||||
for i, backward_prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
|
||||
fsdp_backward_prefetch_query += f"[{i}] {backward_prefetch_policy}, "
|
||||
fsdp_backward_prefetch_query = fsdp_backward_prefetch_query[:-2] + ")? [0]: "
|
||||
fsdp_config["fsdp_backward_prefetch_policy"] = _ask_field(
|
||||
fsdp_backward_prefetch_query,
|
||||
FSDP_BACKWARD_PREFETCH,
|
||||
lambda x: FSDP_BACKWARD_PREFETCH[int(x)],
|
||||
default="BACKWARD_PRE",
|
||||
)
|
||||
fsdp_state_dict_type_query = "What should be your FSDP's state dict type?"
|
||||
fsdp_config["fsdp_state_dict_type"] = _ask_options(
|
||||
fsdp_state_dict_type_query = "What should be your FSDP's state dict type ("
|
||||
for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
|
||||
fsdp_state_dict_type_query += f"[{i}] {state_dict_type}, "
|
||||
fsdp_state_dict_type_query = fsdp_state_dict_type_query[:-2] + ")? [0]: "
|
||||
fsdp_config["fsdp_state_dict_type"] = _ask_field(
|
||||
fsdp_state_dict_type_query,
|
||||
FSDP_STATE_DICT_TYPE,
|
||||
lambda x: FSDP_STATE_DICT_TYPE[int(x)],
|
||||
default="FULL_STATE_DICT",
|
||||
)
|
||||
|
||||
megatron_lm_config = {}
|
||||
@ -344,7 +295,7 @@ def get_cluster_input():
|
||||
prefix = "megatron_lm_"
|
||||
megatron_lm_config[prefix + "tp_degree"] = _ask_field(
|
||||
"What is the Tensor Parallelism degree/size? [1]:",
|
||||
int,
|
||||
lambda x: int(x),
|
||||
default=1,
|
||||
error_message="Please enter an integer.",
|
||||
)
|
||||
@ -358,14 +309,14 @@ def get_cluster_input():
|
||||
|
||||
megatron_lm_config[prefix + "pp_degree"] = _ask_field(
|
||||
"What is the Pipeline Parallelism degree/size? [1]:",
|
||||
int,
|
||||
lambda x: int(x),
|
||||
default=1,
|
||||
error_message="Please enter an integer.",
|
||||
)
|
||||
if megatron_lm_config[prefix + "pp_degree"] > 1:
|
||||
megatron_lm_config[prefix + "num_micro_batches"] = _ask_field(
|
||||
"What is the number of micro-batches? [1]:",
|
||||
int,
|
||||
lambda x: int(x),
|
||||
default=1,
|
||||
error_message="Please enter an integer.",
|
||||
)
|
||||
@ -387,61 +338,22 @@ def get_cluster_input():
|
||||
|
||||
megatron_lm_config[prefix + "gradient_clipping"] = _ask_field(
|
||||
"What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: ",
|
||||
float,
|
||||
lambda x: float(x),
|
||||
default=1.0,
|
||||
)
|
||||
# TPU specific defaults
|
||||
tpu_commands = None
|
||||
tpu_command_file = None
|
||||
tpu_downcast_bf16 = "no"
|
||||
tpu_env = []
|
||||
tpu_name = None
|
||||
tpu_vm = None
|
||||
tpu_zone = None
|
||||
tpu_use_sudo = False
|
||||
tpu_use_cluster = False
|
||||
|
||||
if distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_GPU, DistributedType.TPU]:
|
||||
machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "")
|
||||
if machine_type == "TPU":
|
||||
machine_type += " cores"
|
||||
else:
|
||||
machine_type += "(s)"
|
||||
num_processes = _ask_field(
|
||||
f"How many {machine_type} should be used for distributed training? [1]:",
|
||||
int,
|
||||
default=1,
|
||||
error_message="Please enter an integer.",
|
||||
)
|
||||
elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
|
||||
num_processes = _ask_field(
|
||||
"How many GPU(s) should be used for distributed training? [1]:",
|
||||
int,
|
||||
default=1,
|
||||
error_message="Please enter an integer.",
|
||||
)
|
||||
else:
|
||||
num_processes = 1
|
||||
|
||||
if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO] and not use_cpu and not use_mps:
|
||||
gpu_ids = _ask_field(
|
||||
"What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]:",
|
||||
default="all",
|
||||
)
|
||||
|
||||
if distributed_type == DistributedType.TPU:
|
||||
mixed_precision = "no"
|
||||
main_training_function = _ask_field(
|
||||
"What is the name of the function in your script that should be launched in all parallel scripts? [main]: ",
|
||||
default="main",
|
||||
)
|
||||
tpu_use_cluster = _ask_field(
|
||||
use_cluster = _ask_field(
|
||||
"Are you using a TPU cluster? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if tpu_use_cluster:
|
||||
if use_cluster:
|
||||
tpu_name = _ask_field(
|
||||
"What is the name of your TPU cluster? ",
|
||||
default=None,
|
||||
@ -452,11 +364,6 @@ def get_cluster_input():
|
||||
default=None,
|
||||
error_message="Please enter the zone of your TPU cluster.",
|
||||
)
|
||||
tpu_use_sudo = _ask_field(
|
||||
"To run a python script in a TPU pod, should `sudo` be used? [yes/NO]: ",
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
run_commands = _ask_field(
|
||||
"Do you have code you wish to run on startup in each pod? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
@ -471,18 +378,18 @@ def get_cluster_input():
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if use_command_file:
|
||||
tpu_command_file = _ask_field(
|
||||
command_file = _ask_field(
|
||||
"What is the path to your bash script? ",
|
||||
default=None,
|
||||
error_message="Please enter the path to your bash script.",
|
||||
)
|
||||
tpu_command_file = os.path.abspath(tpu_command_file)
|
||||
command_file = os.path.abspath(command_file)
|
||||
else:
|
||||
print("Please enter each command seperately you wish to run on startup in each pod.")
|
||||
tpu_commands = []
|
||||
commands = []
|
||||
another_command = True
|
||||
while another_command:
|
||||
tpu_commands.append(
|
||||
commands.append(
|
||||
_ask_field(
|
||||
"Please enter a single command to be ran ",
|
||||
default=None,
|
||||
@ -495,33 +402,53 @@ def get_cluster_input():
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
tpu_vm = _ask_field(
|
||||
"If not using an instance group, what are the names of the Compute VM instances to be used, seperated by a comma: ",
|
||||
default="",
|
||||
).split(",")
|
||||
tpu_env = _ask_field(
|
||||
"What environment variables do you wish to set in each pod, seperated by a comma: ",
|
||||
default="",
|
||||
).split(",")
|
||||
|
||||
else:
|
||||
main_training_function = "main"
|
||||
if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:
|
||||
mixed_precision = None
|
||||
else:
|
||||
mixed_precision = _ask_options(
|
||||
"Do you wish to use FP16 or BF16 (mixed precision)?",
|
||||
["no", "fp16", "bf16", "fp8"],
|
||||
_convert_mixed_precision,
|
||||
)
|
||||
|
||||
if use_dynamo and mixed_precision == "no" and not use_cpu:
|
||||
print(
|
||||
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
|
||||
if distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_GPU, DistributedType.TPU]:
|
||||
machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "")
|
||||
if machine_type == "TPU":
|
||||
machine_type += " cores"
|
||||
else:
|
||||
machine_type += "(s)"
|
||||
num_processes = _ask_field(
|
||||
f"How many {machine_type} should be used for distributed training? [1]:",
|
||||
lambda x: int(x),
|
||||
default=1,
|
||||
error_message="Please enter an integer.",
|
||||
)
|
||||
elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
|
||||
num_processes = _ask_field(
|
||||
"How many GPU(s) should be used for distributed training? [1]:",
|
||||
lambda x: int(x),
|
||||
default=1,
|
||||
error_message="Please enter an integer.",
|
||||
)
|
||||
else:
|
||||
num_processes = 1
|
||||
|
||||
if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO] and not use_cpu:
|
||||
gpu_ids = _ask_field(
|
||||
"What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]:",
|
||||
default="all",
|
||||
)
|
||||
|
||||
if distributed_type != DistributedType.TPU:
|
||||
if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:
|
||||
mixed_precision = "no"
|
||||
else:
|
||||
mixed_precision = _ask_field(
|
||||
"Do you wish to use FP16 or BF16 (mixed precision)? [NO/fp16/bf16]: ",
|
||||
lambda x: str(x).lower(),
|
||||
default="no",
|
||||
)
|
||||
else:
|
||||
mixed_precision = "no"
|
||||
|
||||
downcast_bf16 = "no"
|
||||
if distributed_type == DistributedType.TPU and mixed_precision == "bf16":
|
||||
tpu_downcast_bf16 = _ask_field(
|
||||
downcast_bf16 = _ask_field(
|
||||
"Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no"
|
||||
)
|
||||
|
||||
@ -531,7 +458,7 @@ def get_cluster_input():
|
||||
num_processes=num_processes,
|
||||
gpu_ids=gpu_ids,
|
||||
mixed_precision=mixed_precision,
|
||||
downcast_bf16=tpu_downcast_bf16,
|
||||
downcast_bf16=downcast_bf16,
|
||||
machine_rank=machine_rank,
|
||||
num_machines=num_machines,
|
||||
main_process_ip=main_process_ip,
|
||||
@ -540,17 +467,11 @@ def get_cluster_input():
|
||||
deepspeed_config=deepspeed_config,
|
||||
fsdp_config=fsdp_config,
|
||||
megatron_lm_config=megatron_lm_config,
|
||||
ipex_config=ipex_config,
|
||||
use_cpu=use_cpu,
|
||||
rdzv_backend=rdzv_backend,
|
||||
same_network=same_network,
|
||||
commands=tpu_commands,
|
||||
command_file=tpu_command_file,
|
||||
tpu_env=tpu_env,
|
||||
tpu_name=tpu_name,
|
||||
tpu_vm=tpu_vm,
|
||||
tpu_zone=tpu_zone,
|
||||
tpu_use_sudo=tpu_use_sudo,
|
||||
tpu_use_cluster=tpu_use_cluster,
|
||||
dynamo_config=dynamo_config,
|
||||
commands=commands,
|
||||
command_file=command_file,
|
||||
)
|
||||
|
||||
@ -1,89 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import os
|
||||
|
||||
from accelerate.utils import ComputeEnvironment
|
||||
|
||||
from .cluster import get_cluster_input
|
||||
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
|
||||
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
|
||||
from .sagemaker import get_sagemaker_input
|
||||
|
||||
|
||||
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
|
||||
|
||||
|
||||
def get_user_input():
|
||||
compute_environment = _ask_options(
|
||||
"In which compute environment are you running?",
|
||||
["This machine", "AWS (Amazon SageMaker)"],
|
||||
_convert_compute_environment,
|
||||
)
|
||||
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
|
||||
config = get_sagemaker_input()
|
||||
else:
|
||||
config = get_cluster_input()
|
||||
return config
|
||||
|
||||
|
||||
def config_command_parser(subparsers=None):
|
||||
if subparsers is not None:
|
||||
parser = subparsers.add_parser("config", description=description)
|
||||
else:
|
||||
parser = argparse.ArgumentParser("Accelerate config command", description=description)
|
||||
|
||||
parser.add_argument(
|
||||
"--config_file",
|
||||
default=None,
|
||||
help=(
|
||||
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
|
||||
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
|
||||
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
|
||||
"with 'huggingface'."
|
||||
),
|
||||
)
|
||||
|
||||
if subparsers is not None:
|
||||
parser.set_defaults(func=config_command)
|
||||
return parser
|
||||
|
||||
|
||||
def config_command(args):
|
||||
config = get_user_input()
|
||||
if args.config_file is not None:
|
||||
config_file = args.config_file
|
||||
else:
|
||||
if not os.path.isdir(cache_dir):
|
||||
os.makedirs(cache_dir)
|
||||
config_file = default_yaml_config_file
|
||||
|
||||
if config_file.endswith(".json"):
|
||||
config.to_json_file(config_file)
|
||||
else:
|
||||
config.to_yaml_file(config_file)
|
||||
print(f"accelerate configuration saved at {config_file}")
|
||||
|
||||
|
||||
def main():
|
||||
parser = config_command_parser()
|
||||
args = parser.parse_args()
|
||||
config_command(args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@ -41,16 +41,8 @@ else:
|
||||
|
||||
|
||||
def load_config_from_file(config_file):
|
||||
if config_file is not None:
|
||||
if not os.path.isfile(config_file):
|
||||
raise FileNotFoundError(
|
||||
f"The passed configuration file `{config_file}` does not exist. "
|
||||
"Please pass an existing file to `accelerate launch`, or use the the default one "
|
||||
"created through `accelerate config` and run `accelerate launch` "
|
||||
"without the `--config_file` argument."
|
||||
)
|
||||
else:
|
||||
config_file = default_config_file
|
||||
config_file_exists = config_file is not None and os.path.isfile(config_file)
|
||||
config_file = config_file if config_file_exists else default_config_file
|
||||
with open(config_file, "r", encoding="utf-8") as f:
|
||||
if config_file.endswith(".json"):
|
||||
if (
|
||||
@ -85,9 +77,6 @@ class BaseConfig:
|
||||
for key, value in result.items():
|
||||
if isinstance(value, Enum):
|
||||
result[key] = value.value
|
||||
if isinstance(value, dict) and not bool(value):
|
||||
result[key] = None
|
||||
result = {k: v for k, v in result.items() if v is not None}
|
||||
return result
|
||||
|
||||
@classmethod
|
||||
@ -98,12 +87,9 @@ class BaseConfig:
|
||||
if "compute_environment" not in config_dict:
|
||||
config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
|
||||
if "mixed_precision" not in config_dict:
|
||||
config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
|
||||
config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else "no"
|
||||
if "fp16" in config_dict: # Convert the config to the new format.
|
||||
del config_dict["fp16"]
|
||||
if "dynamo_backend" in config_dict: # Convert the config to the new format.
|
||||
dynamo_backend = config_dict.pop("dynamo_backend")
|
||||
config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
|
||||
if "use_cpu" not in config_dict:
|
||||
config_dict["use_cpu"] = False
|
||||
return cls(**config_dict)
|
||||
@ -122,14 +108,12 @@ class BaseConfig:
|
||||
config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
|
||||
|
||||
if "mixed_precision" not in config_dict:
|
||||
config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
|
||||
config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else "no"
|
||||
if "fp16" in config_dict: # Convert the config to the new format.
|
||||
del config_dict["fp16"]
|
||||
if "dynamo_backend" in config_dict: # Convert the config to the new format.
|
||||
dynamo_backend = config_dict.pop("dynamo_backend")
|
||||
config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
|
||||
if "use_cpu" not in config_dict:
|
||||
config_dict["use_cpu"] = False
|
||||
|
||||
return cls(**config_dict)
|
||||
|
||||
def to_yaml_file(self, yaml_file):
|
||||
@ -144,8 +128,6 @@ class BaseConfig:
|
||||
self.distributed_type = SageMakerDistributedType(self.distributed_type)
|
||||
else:
|
||||
self.distributed_type = DistributedType(self.distributed_type)
|
||||
if self.dynamo_config is None:
|
||||
self.dynamo_config = {}
|
||||
|
||||
|
||||
@dataclass
|
||||
@ -166,23 +148,14 @@ class ClusterConfig(BaseConfig):
|
||||
fsdp_config: dict = None
|
||||
# args for megatron_lm
|
||||
megatron_lm_config: dict = None
|
||||
# args for ipex
|
||||
ipex_config: dict = None
|
||||
# args for TPU
|
||||
downcast_bf16: bool = False
|
||||
|
||||
# args for TPU pods
|
||||
tpu_name: str = None
|
||||
tpu_zone: str = None
|
||||
tpu_use_cluster: bool = False
|
||||
tpu_use_sudo: bool = False
|
||||
command_file: str = None
|
||||
commands: List[str] = None
|
||||
tpu_vm: List[str] = None
|
||||
tpu_env: List[str] = None
|
||||
|
||||
# args for dynamo
|
||||
dynamo_config: dict = None
|
||||
|
||||
def __post_init__(self):
|
||||
if self.deepspeed_config is None:
|
||||
@ -191,8 +164,6 @@ class ClusterConfig(BaseConfig):
|
||||
self.fsdp_config = {}
|
||||
if self.megatron_lm_config is None:
|
||||
self.megatron_lm_config = {}
|
||||
if self.ipex_config is None:
|
||||
self.ipex_config = {}
|
||||
return super().__post_init__()
|
||||
|
||||
|
||||
@ -200,7 +171,7 @@ class ClusterConfig(BaseConfig):
|
||||
class SageMakerConfig(BaseConfig):
|
||||
ec2_instance_type: str
|
||||
iam_role_name: str
|
||||
image_uri: Optional[str] = None
|
||||
image_uri: str
|
||||
profile: Optional[str] = None
|
||||
region: str = "us-east-1"
|
||||
num_machines: int = 1
|
||||
@ -211,5 +182,3 @@ class SageMakerConfig(BaseConfig):
|
||||
py_version: str = SAGEMAKER_PYTHON_VERSION
|
||||
sagemaker_inputs_file: str = None
|
||||
sagemaker_metrics_file: str = None
|
||||
additional_args: dict = None
|
||||
dynamo_config: dict = None
|
||||
|
||||
@ -14,30 +14,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
|
||||
from ...utils.dataclasses import (
|
||||
ComputeEnvironment,
|
||||
DistributedType,
|
||||
DynamoBackend,
|
||||
PrecisionType,
|
||||
SageMakerDistributedType,
|
||||
)
|
||||
from ..menu import BulletMenu
|
||||
|
||||
|
||||
DYNAMO_BACKENDS = [
|
||||
"EAGER",
|
||||
"AOT_EAGER",
|
||||
"INDUCTOR",
|
||||
"NVFUSER",
|
||||
"AOT_NVFUSER",
|
||||
"AOT_CUDAGRAPHS",
|
||||
"OFI",
|
||||
"FX2TRT",
|
||||
"ONNXRT",
|
||||
"IPEX",
|
||||
]
|
||||
from ...utils.dataclasses import ComputeEnvironment, DistributedType, SageMakerDistributedType
|
||||
|
||||
|
||||
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
|
||||
@ -48,17 +25,11 @@ def _ask_field(input_text, convert_value=None, default=None, error_message=None)
|
||||
if default is not None and len(result) == 0:
|
||||
return default
|
||||
return convert_value(result) if convert_value is not None else result
|
||||
except Exception:
|
||||
except:
|
||||
if error_message is not None:
|
||||
print(error_message)
|
||||
|
||||
|
||||
def _ask_options(input_text, options=[], convert_value=None, default=0):
|
||||
menu = BulletMenu(input_text, options)
|
||||
result = menu.run(default_choice=default)
|
||||
return convert_value(result) if convert_value is not None else result
|
||||
|
||||
|
||||
def _convert_compute_environment(value):
|
||||
value = int(value)
|
||||
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
|
||||
@ -66,17 +37,7 @@ def _convert_compute_environment(value):
|
||||
|
||||
def _convert_distributed_mode(value):
|
||||
value = int(value)
|
||||
return DistributedType(["NO", "MULTI_CPU", "MULTI_GPU", "TPU"][value])
|
||||
|
||||
|
||||
def _convert_dynamo_backend(value):
|
||||
value = int(value)
|
||||
return DynamoBackend(DYNAMO_BACKENDS[value]).value
|
||||
|
||||
|
||||
def _convert_mixed_precision(value):
|
||||
value = int(value)
|
||||
return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
|
||||
return DistributedType(["NO", "MULTI_CPU", "MULTI_GPU", "TPU", "MPS"][value])
|
||||
|
||||
|
||||
def _convert_sagemaker_distributed_mode(value):
|
||||
@ -86,14 +47,3 @@ def _convert_sagemaker_distributed_mode(value):
|
||||
|
||||
def _convert_yes_no_to_bool(value):
|
||||
return {"yes": True, "no": False}[value.lower()]
|
||||
|
||||
|
||||
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
|
||||
"""
|
||||
A custom formatter that will remove the usage line from the help message for subcommands.
|
||||
"""
|
||||
|
||||
def _format_usage(self, usage, actions, groups, prefix):
|
||||
usage = super()._format_usage(usage, actions, groups, prefix)
|
||||
usage = usage.replace("<command> [<args>] ", "")
|
||||
return usage
|
||||
|
||||
@ -1,105 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
|
||||
from .config_args import ClusterConfig, default_json_config_file
|
||||
from .config_utils import SubcommandHelpFormatter
|
||||
|
||||
|
||||
description = "Create a default config file for Accelerate with only a few flags set."
|
||||
|
||||
|
||||
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, dynamo_backend="no"):
|
||||
"""
|
||||
Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also
|
||||
set CPU if it is a CPU-only machine.
|
||||
|
||||
Args:
|
||||
mixed_precision (`str`, *optional*, defaults to "no"):
|
||||
Mixed Precision to use. Should be one of "no", "fp16", or "bf16"
|
||||
save_location (`str`, *optional*, defaults to `default_json_config_file`):
|
||||
Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default
|
||||
location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overriden by setting
|
||||
the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`.
|
||||
"""
|
||||
path = Path(save_location)
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
if path.exists():
|
||||
print(
|
||||
f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
|
||||
)
|
||||
return False
|
||||
mixed_precision = mixed_precision.lower()
|
||||
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
|
||||
raise ValueError(
|
||||
f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
|
||||
)
|
||||
config = {
|
||||
"compute_environment": "LOCAL_MACHINE",
|
||||
"mixed_precision": mixed_precision,
|
||||
}
|
||||
if torch.cuda.is_available():
|
||||
num_gpus = torch.cuda.device_count()
|
||||
config["num_processes"] = num_gpus
|
||||
config["use_cpu"] = False
|
||||
if num_gpus > 1:
|
||||
config["distributed_type"] = "MULTI_GPU"
|
||||
else:
|
||||
config["distributed_type"] = "NO"
|
||||
else:
|
||||
num_gpus = 0
|
||||
config["use_cpu"] = True
|
||||
config["num_processes"] = 1
|
||||
config["distributed_type"] = "NO"
|
||||
config = ClusterConfig(**config)
|
||||
config.to_json_file(path)
|
||||
return path
|
||||
|
||||
|
||||
def default_command_parser(parser, parents):
|
||||
parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
|
||||
parser.add_argument(
|
||||
"--config_file",
|
||||
default=default_json_config_file,
|
||||
help=(
|
||||
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
|
||||
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
|
||||
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
|
||||
"with 'huggingface'."
|
||||
),
|
||||
dest="save_location",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
type=str,
|
||||
help="Whether or not to use mixed precision training. "
|
||||
"Choose between FP16 and BF16 (bfloat16) training. "
|
||||
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
|
||||
default="no",
|
||||
)
|
||||
parser.set_defaults(func=default_config_command)
|
||||
return parser
|
||||
|
||||
|
||||
def default_config_command(args):
|
||||
config_file = write_basic_config(args.mixed_precision, args.save_location)
|
||||
if config_file:
|
||||
print(f"accelerate configuration saved at {config_file}")
|
||||
@ -16,19 +16,11 @@
|
||||
import json
|
||||
import os
|
||||
|
||||
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
|
||||
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES
|
||||
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
|
||||
from ...utils.imports import is_boto3_available
|
||||
from .config_args import SageMakerConfig
|
||||
from .config_utils import (
|
||||
DYNAMO_BACKENDS,
|
||||
_ask_field,
|
||||
_ask_options,
|
||||
_convert_dynamo_backend,
|
||||
_convert_mixed_precision,
|
||||
_convert_sagemaker_distributed_mode,
|
||||
_convert_yes_no_to_bool,
|
||||
)
|
||||
from .config_utils import _ask_field, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool
|
||||
|
||||
|
||||
if is_boto3_available():
|
||||
@ -95,10 +87,9 @@ def _get_iam_role_arn(role_name):
|
||||
|
||||
|
||||
def get_sagemaker_input():
|
||||
credentials_configuration = _ask_options(
|
||||
"How do you want to authorize?",
|
||||
["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
|
||||
int,
|
||||
credentials_configuration = _ask_field(
|
||||
"How do you want to authorize? ([0] AWS Profile, [1] Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)): ",
|
||||
lambda x: int(x),
|
||||
)
|
||||
aws_profile = None
|
||||
if credentials_configuration == 0:
|
||||
@ -118,10 +109,9 @@ def get_sagemaker_input():
|
||||
aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
|
||||
os.environ["AWS_DEFAULT_REGION"] = aws_region
|
||||
|
||||
role_management = _ask_options(
|
||||
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
|
||||
["Provide IAM Role name", "Create new IAM role using credentials"],
|
||||
int,
|
||||
role_management = _ask_field(
|
||||
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs? ([0] provide IAM Role name, [1] create new IAM role using credentials: ",
|
||||
lambda x: int(x),
|
||||
)
|
||||
if role_management == 0:
|
||||
iam_role_name = _ask_field("Enter your IAM role name: ")
|
||||
@ -166,86 +156,45 @@ def get_sagemaker_input():
|
||||
lambda x: str(x).lower(),
|
||||
)
|
||||
|
||||
distributed_type = _ask_options(
|
||||
"What is the distributed mode?",
|
||||
["No distributed training", "Data parallelism"],
|
||||
distributed_type = _ask_field(
|
||||
"What is the distributed mode? ([0] No distributed training, [1] data parallelism): ",
|
||||
_convert_sagemaker_distributed_mode,
|
||||
error_message="Please enter 0 or 1",
|
||||
)
|
||||
dynamo_config = {}
|
||||
use_dynamo = _ask_field(
|
||||
"Do you wish to optimize your script with torch dynamo?[yes/NO]:",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if use_dynamo:
|
||||
prefix = "dynamo_"
|
||||
dynamo_config[prefix + "backend"] = _ask_options(
|
||||
"Which dynamo backend would you like to use?",
|
||||
[x.lower() for x in DYNAMO_BACKENDS],
|
||||
_convert_dynamo_backend,
|
||||
default=2,
|
||||
)
|
||||
use_custom_options = _ask_field(
|
||||
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
if use_custom_options:
|
||||
dynamo_config[prefix + "mode"] = _ask_options(
|
||||
"Which mode do you want to use?",
|
||||
TORCH_DYNAMO_MODES,
|
||||
lambda x: TORCH_DYNAMO_MODES[int(x)],
|
||||
default="default",
|
||||
)
|
||||
dynamo_config[prefix + "use_fullgraph"] = _ask_field(
|
||||
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
dynamo_config[prefix + "use_dynamic"] = _ask_field(
|
||||
"Do you want to enable dynamic shape tracing? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
ec2_instance_query = "Which EC2 instance type you want to use for your training?"
|
||||
ec2_instance_query = "Which EC2 instance type you want to use for your training "
|
||||
if distributed_type != SageMakerDistributedType.NO:
|
||||
ec2_instance_type = _ask_options(
|
||||
ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
|
||||
)
|
||||
ec2_instance_query += "("
|
||||
for i, instance_type in enumerate(SAGEMAKER_PARALLEL_EC2_INSTANCES):
|
||||
ec2_instance_query += f"[{i}] {instance_type}, "
|
||||
ec2_instance_query = ec2_instance_query[:-2] + ")? [0]: "
|
||||
ec2_instance_type = _ask_field(ec2_instance_query, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)])
|
||||
else:
|
||||
ec2_instance_query += "? [ml.p3.2xlarge]:"
|
||||
ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
|
||||
|
||||
num_machines = 1
|
||||
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
|
||||
if (
|
||||
distributed_type == SageMakerDistributedType.DATA_PARALLEL
|
||||
or distributed_type == SageMakerDistributedType.MODEL_PARALLEL
|
||||
):
|
||||
num_machines = _ask_field(
|
||||
"How many machines do you want use? [1]: ",
|
||||
int,
|
||||
lambda x: int(x),
|
||||
default=1,
|
||||
)
|
||||
|
||||
mixed_precision = _ask_options(
|
||||
"Do you wish to use FP16 or BF16 (mixed precision)?",
|
||||
["no", "fp16", "bf16", "fp8"],
|
||||
_convert_mixed_precision,
|
||||
mixed_precision = _ask_field(
|
||||
"Do you wish to use FP16 or BF16 (mixed precision)? [No/FP16/BF16]: ",
|
||||
lambda x: str(x),
|
||||
default="No",
|
||||
)
|
||||
|
||||
if use_dynamo and mixed_precision == "no":
|
||||
print(
|
||||
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
|
||||
)
|
||||
|
||||
return SageMakerConfig(
|
||||
image_uri=docker_image,
|
||||
compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
|
||||
distributed_type=distributed_type,
|
||||
use_cpu=False,
|
||||
dynamo_config=dynamo_config,
|
||||
ec2_instance_type=ec2_instance_type,
|
||||
profile=aws_profile,
|
||||
region=aws_region,
|
||||
|
||||
@ -1,63 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from .config_args import default_config_file, load_config_from_file
|
||||
from .config_utils import SubcommandHelpFormatter
|
||||
|
||||
|
||||
description = "Update an existing config file with the latest defaults while maintaining the old configuration."
|
||||
|
||||
|
||||
def update_config(args):
|
||||
"""
|
||||
Update an existing config file with the latest defaults while maintaining the old configuration.
|
||||
"""
|
||||
config_file = args.config_file
|
||||
if config_file is None and Path(default_config_file).exists():
|
||||
config_file = default_config_file
|
||||
elif not Path(config_file).exists():
|
||||
raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
|
||||
config = load_config_from_file(config_file)
|
||||
|
||||
if config_file.endswith(".json"):
|
||||
config.to_json_file(config_file)
|
||||
else:
|
||||
config.to_yaml_file(config_file)
|
||||
return config_file
|
||||
|
||||
|
||||
def update_command_parser(parser, parents):
|
||||
parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
|
||||
parser.add_argument(
|
||||
"--config_file",
|
||||
default=None,
|
||||
help=(
|
||||
"The path to the config file to update. Will default to a file named default_config.yaml in the cache "
|
||||
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
|
||||
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
|
||||
"with 'huggingface'."
|
||||
),
|
||||
)
|
||||
|
||||
parser.set_defaults(func=update_config_command)
|
||||
return parser
|
||||
|
||||
|
||||
def update_config_command(args):
|
||||
config_file = update_config(args)
|
||||
print(f"Sucessfully updated the configuration file at {config_file}.")
|
||||
@ -20,18 +20,21 @@ import logging
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import warnings
|
||||
from ast import literal_eval
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
|
||||
import psutil
|
||||
import torch
|
||||
|
||||
import psutil
|
||||
from accelerate.commands.config import default_config_file, load_config_from_file
|
||||
from accelerate.commands.config.config_args import SageMakerConfig
|
||||
from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
|
||||
from accelerate.state import get_int_from_env
|
||||
from accelerate.utils import (
|
||||
ComputeEnvironment,
|
||||
DistributedType,
|
||||
PrecisionType,
|
||||
PrepareForLaunch,
|
||||
_filter_args,
|
||||
is_deepspeed_available,
|
||||
@ -39,13 +42,10 @@ from accelerate.utils import (
|
||||
is_sagemaker_available,
|
||||
is_torch_version,
|
||||
patch_environment,
|
||||
prepare_deepspeed_cmd_env,
|
||||
prepare_multi_gpu_env,
|
||||
prepare_sagemager_args_inputs,
|
||||
prepare_simple_launcher_cmd_env,
|
||||
prepare_tpu,
|
||||
)
|
||||
from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES
|
||||
from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS
|
||||
from accelerate.utils.dataclasses import SageMakerDistributedType
|
||||
from accelerate.utils.launch import env_var_path_add
|
||||
|
||||
|
||||
if is_rich_available():
|
||||
@ -61,6 +61,8 @@ logger = logging.getLogger(__name__)
|
||||
options_to_group = {
|
||||
"--multi-gpu": "Distributed GPUs",
|
||||
"--tpu": "TPU",
|
||||
"--mps": "MPS",
|
||||
"--use_mps_device": "MPS",
|
||||
"--use_deepspeed": "DeepSpeed Arguments",
|
||||
"--use_fsdp": "FSDP Arguments",
|
||||
"--use_megatron_lm": "Megatron-LM Arguments",
|
||||
@ -123,9 +125,9 @@ class _CustomHelpAction(argparse._HelpAction):
|
||||
|
||||
def launch_command_parser(subparsers=None):
|
||||
if subparsers is not None:
|
||||
parser = subparsers.add_parser("launch", add_help=False, allow_abbrev=False)
|
||||
parser = subparsers.add_parser("launch", add_help=False)
|
||||
else:
|
||||
parser = argparse.ArgumentParser("Accelerate launch command", add_help=False, allow_abbrev=False)
|
||||
parser = argparse.ArgumentParser("Accelerate launch command", add_help=False)
|
||||
|
||||
parser.register("action", "help", _CustomHelpAction)
|
||||
parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
|
||||
@ -133,12 +135,6 @@ def launch_command_parser(subparsers=None):
|
||||
parser.add_argument(
|
||||
"--config_file", default=None, help="The config file to use for the default values in the launching script."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--quiet",
|
||||
"-q",
|
||||
action="store_true",
|
||||
help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)",
|
||||
)
|
||||
# Hardware selection arguments
|
||||
hardware_args = parser.add_argument_group(
|
||||
"Hardware Selection Arguments", "Arguments for selecting the hardware to be used."
|
||||
@ -146,6 +142,12 @@ def launch_command_parser(subparsers=None):
|
||||
hardware_args.add_argument(
|
||||
"--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU."
|
||||
)
|
||||
hardware_args.add_argument(
|
||||
"--mps",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="Whether or not this should use MPS-enabled GPU device on MacOS machines.",
|
||||
)
|
||||
hardware_args.add_argument(
|
||||
"--multi_gpu",
|
||||
default=False,
|
||||
@ -155,6 +157,12 @@ def launch_command_parser(subparsers=None):
|
||||
hardware_args.add_argument(
|
||||
"--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training."
|
||||
)
|
||||
hardware_args.add_argument(
|
||||
"--use_mps_device",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="This argument is deprecated, use `--mps` instead.",
|
||||
)
|
||||
|
||||
# Resource selection arguments
|
||||
resource_args = parser.add_argument_group(
|
||||
@ -163,11 +171,17 @@ def launch_command_parser(subparsers=None):
|
||||
resource_args.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="Whether or not to use mixed precision training. "
|
||||
"Choose between FP16 and BF16 (bfloat16) training. "
|
||||
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
|
||||
)
|
||||
resource_args.add_argument(
|
||||
"--fp16",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="This argument is deprecated, use `--mixed_precision fp16` instead.",
|
||||
)
|
||||
resource_args.add_argument(
|
||||
"--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel."
|
||||
)
|
||||
@ -181,34 +195,6 @@ def launch_command_parser(subparsers=None):
|
||||
help="The number of CPU threads per process. Can be tuned for optimal performance.",
|
||||
)
|
||||
|
||||
# Dynamo arguments
|
||||
resource_args.add_argument(
|
||||
"--dynamo_backend",
|
||||
type=str,
|
||||
choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS],
|
||||
help="Choose a backend to optimize your training with dynamo, see more at "
|
||||
"https://github.com/pytorch/torchdynamo.",
|
||||
)
|
||||
resource_args.add_argument(
|
||||
"--dynamo_mode",
|
||||
type=str,
|
||||
default="default",
|
||||
choices=TORCH_DYNAMO_MODES,
|
||||
help="Choose a mode to optimize your training with dynamo.",
|
||||
)
|
||||
resource_args.add_argument(
|
||||
"--dynamo_use_fullgraph",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs",
|
||||
)
|
||||
resource_args.add_argument(
|
||||
"--dynamo_use_dynamic",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="Whether to enable dynamic shape tracing.",
|
||||
)
|
||||
|
||||
# Training Paradigm arguments
|
||||
paradigm_args = parser.add_argument_group(
|
||||
"Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used."
|
||||
@ -257,19 +243,6 @@ def launch_command_parser(subparsers=None):
|
||||
default=None,
|
||||
help="The port to use to communicate with the machine of rank 0.",
|
||||
)
|
||||
distributed_args.add_argument(
|
||||
"-t",
|
||||
"--tee",
|
||||
default="0",
|
||||
type=str,
|
||||
help="Tee std streams into a log file and also to console.",
|
||||
)
|
||||
distributed_args.add_argument(
|
||||
"--role",
|
||||
type=str,
|
||||
default="default",
|
||||
help="User-defined role for the workers.",
|
||||
)
|
||||
# Rendezvous related arguments
|
||||
distributed_args.add_argument(
|
||||
"--rdzv_conf",
|
||||
@ -301,40 +274,8 @@ def launch_command_parser(subparsers=None):
|
||||
help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.",
|
||||
)
|
||||
|
||||
# TPU arguments
|
||||
# tpu arguments
|
||||
tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.")
|
||||
tpu_args.add_argument(
|
||||
"--tpu_cluster",
|
||||
action="store_true",
|
||||
dest="tpu_use_cluster",
|
||||
help="Whether to use a GCP TPU pod for training.",
|
||||
)
|
||||
tpu_args.add_argument(
|
||||
"--no_tpu_cluster",
|
||||
action="store_false",
|
||||
dest="tpu_use_cluster",
|
||||
help="Should not be passed explicitly, this is for internal use only.",
|
||||
)
|
||||
tpu_args.add_argument(
|
||||
"--tpu_use_sudo",
|
||||
action="store_true",
|
||||
help="Whether to use `sudo` when running the TPU training script in each pod.",
|
||||
)
|
||||
tpu_args.add_argument(
|
||||
"--vm",
|
||||
type=str,
|
||||
action="append",
|
||||
help=(
|
||||
"List of single Compute VM instance names. "
|
||||
"If not provided we assume usage of instance groups. For TPU pods."
|
||||
),
|
||||
)
|
||||
tpu_args.add_argument(
|
||||
"--env",
|
||||
type=str,
|
||||
action="append",
|
||||
help="List of environment variables to set on the Compute VM instances. For TPU pods.",
|
||||
)
|
||||
tpu_args.add_argument(
|
||||
"--main_training_function",
|
||||
type=str,
|
||||
@ -359,50 +300,45 @@ def launch_command_parser(subparsers=None):
|
||||
"--zero_stage",
|
||||
default=None,
|
||||
type=int,
|
||||
help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). "
|
||||
"If unspecified, will default to `2`.",
|
||||
help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed).",
|
||||
)
|
||||
deepspeed_args.add_argument(
|
||||
"--offload_optimizer_device",
|
||||
default=None,
|
||||
type=str,
|
||||
help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
|
||||
"If unspecified, will default to 'none'.",
|
||||
help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).",
|
||||
)
|
||||
deepspeed_args.add_argument(
|
||||
"--offload_param_device",
|
||||
default=None,
|
||||
type=str,
|
||||
help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). "
|
||||
"If unspecified, will default to 'none'.",
|
||||
help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed).",
|
||||
)
|
||||
deepspeed_args.add_argument(
|
||||
"--gradient_accumulation_steps",
|
||||
default=None,
|
||||
type=int,
|
||||
help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). "
|
||||
"If unspecified, will default to `1`.",
|
||||
help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed).",
|
||||
)
|
||||
deepspeed_args.add_argument(
|
||||
"--gradient_clipping",
|
||||
default=None,
|
||||
type=float,
|
||||
help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). "
|
||||
"If unspecified, will default to `1.0`.",
|
||||
help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed).",
|
||||
)
|
||||
deepspeed_args.add_argument(
|
||||
"--zero3_init_flag",
|
||||
default=None,
|
||||
type=str,
|
||||
help="Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. "
|
||||
"Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.",
|
||||
"Only applicable with DeepSpeed ZeRO Stage-3.",
|
||||
)
|
||||
deepspeed_args.add_argument(
|
||||
"--zero3_save_16bit_model",
|
||||
default=None,
|
||||
type=str,
|
||||
help="Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. "
|
||||
"Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.",
|
||||
"Only applicable with DeepSpeed ZeRO Stage-3.",
|
||||
)
|
||||
deepspeed_args.add_argument(
|
||||
"--deepspeed_hostfile",
|
||||
@ -426,7 +362,7 @@ def launch_command_parser(subparsers=None):
|
||||
"--deepspeed_multinode_launcher",
|
||||
default=None,
|
||||
type=str,
|
||||
help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.",
|
||||
help="DeepSpeed multi-node launcher to use.",
|
||||
)
|
||||
|
||||
# fsdp arguments
|
||||
@ -553,15 +489,6 @@ def launch_command_parser(subparsers=None):
|
||||
),
|
||||
)
|
||||
|
||||
# ipex args
|
||||
ipex_args = parser.add_argument_group("IPEX Arguments", "Arguments related to IPEX.")
|
||||
ipex_args.add_argument(
|
||||
"--ipex_enabled",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="Whether to use Intel PyTorch Extension (IPEX) to speed up training on CPU?",
|
||||
)
|
||||
|
||||
# Other arguments of the training scripts
|
||||
parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")
|
||||
|
||||
@ -571,53 +498,244 @@ def launch_command_parser(subparsers=None):
|
||||
|
||||
|
||||
def simple_launcher(args):
|
||||
cmd, current_env = prepare_simple_launcher_cmd_env(args)
|
||||
cmd = []
|
||||
if args.no_python and args.module:
|
||||
raise ValueError("--module and --no_python cannot be used together")
|
||||
if not args.no_python:
|
||||
cmd.append(sys.executable)
|
||||
if args.module:
|
||||
cmd.append("-m")
|
||||
cmd.append(args.training_script)
|
||||
cmd.extend(args.training_script_args)
|
||||
|
||||
current_env = os.environ.copy()
|
||||
current_env["USE_CPU"] = str(args.cpu or args.use_cpu)
|
||||
if args.use_mps_device:
|
||||
warnings.warn(
|
||||
'`use_mps_device` flag is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use "--mps" instead.',
|
||||
FutureWarning,
|
||||
)
|
||||
args.mps = True
|
||||
current_env["USE_MPS_DEVICE"] = str(args.mps)
|
||||
if args.mps:
|
||||
current_env["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
|
||||
elif args.gpu_ids != "all" and args.gpu_ids is not None:
|
||||
current_env["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
|
||||
if args.num_machines > 1:
|
||||
current_env["MASTER_ADDR"] = args.main_process_ip
|
||||
current_env["MASTER_PORT"] = str(args.main_process_port)
|
||||
elif args.num_processes > 1:
|
||||
current_env["MASTER_ADDR"] = args.main_process_ip if args.main_process_ip is not None else "127.0.0.1"
|
||||
current_env["MASTER_PORT"] = str(args.main_process_port) if args.main_process_port is not None else "29500"
|
||||
|
||||
try:
|
||||
mixed_precision = PrecisionType(args.mixed_precision.lower())
|
||||
except ValueError:
|
||||
raise ValueError(
|
||||
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
|
||||
)
|
||||
|
||||
if args.fp16:
|
||||
warnings.warn(
|
||||
"`fp16` is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use `mixed_precision fp16` instead.",
|
||||
FutureWarning,
|
||||
)
|
||||
mixed_precision = "fp16"
|
||||
|
||||
current_env["MIXED_PRECISION"] = str(mixed_precision)
|
||||
current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process)
|
||||
|
||||
process = subprocess.Popen(cmd, env=current_env)
|
||||
process.wait()
|
||||
if process.returncode != 0:
|
||||
if not args.quiet:
|
||||
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
|
||||
else:
|
||||
sys.exit(1)
|
||||
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
|
||||
|
||||
|
||||
def multi_gpu_launcher(args):
|
||||
if is_torch_version(">=", "1.9.1"):
|
||||
if is_torch_version(">=", "1.9.0"):
|
||||
import torch.distributed.run as distrib_run
|
||||
num_processes = getattr(args, "num_processes")
|
||||
num_machines = getattr(args, "num_machines")
|
||||
main_process_ip = getattr(args, "main_process_ip")
|
||||
main_process_port = getattr(args, "main_process_port")
|
||||
if num_machines > 1:
|
||||
setattr(args, "nproc_per_node", str(num_processes // num_machines))
|
||||
setattr(args, "nnodes", str(num_machines))
|
||||
setattr(args, "node_rank", int(args.machine_rank))
|
||||
if getattr(args, "same_network", False):
|
||||
setattr(args, "master_addr", str(main_process_ip))
|
||||
setattr(args, "master_port", str(main_process_port))
|
||||
else:
|
||||
setattr(args, "rdzv_endpoint", f"{main_process_ip}:{main_process_port}")
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
"Native multi-GPU training through `accelerate launch` requires pytorch>=1.9.1. "
|
||||
"Please call `torch.distributed.launch` directly instead."
|
||||
)
|
||||
setattr(args, "nproc_per_node", str(num_processes))
|
||||
if main_process_port is not None:
|
||||
setattr(args, "master_port", str(main_process_port))
|
||||
|
||||
current_env = prepare_multi_gpu_env(args)
|
||||
if args.module and args.no_python:
|
||||
raise ValueError("--module and --no_python cannot be used together")
|
||||
elif args.module:
|
||||
setattr(args, "module", True)
|
||||
elif args.no_python:
|
||||
setattr(args, "no_python", True)
|
||||
|
||||
current_env = os.environ.copy()
|
||||
gpu_ids = getattr(args, "gpu_ids", "all")
|
||||
if gpu_ids != "all" and args.gpu_ids is not None:
|
||||
current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids
|
||||
mixed_precision = args.mixed_precision.lower()
|
||||
try:
|
||||
mixed_precision = PrecisionType(mixed_precision)
|
||||
except ValueError:
|
||||
raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.")
|
||||
|
||||
if args.fp16:
|
||||
warnings.warn(
|
||||
"`fp16` is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use `mixed_precision fp16` instead.",
|
||||
FutureWarning,
|
||||
)
|
||||
mixed_precision = "fp16"
|
||||
|
||||
current_env["MIXED_PRECISION"] = str(mixed_precision)
|
||||
if args.use_fsdp:
|
||||
current_env["USE_FSDP"] = "true"
|
||||
current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy)
|
||||
current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower()
|
||||
current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params)
|
||||
if args.fsdp_auto_wrap_policy is not None:
|
||||
current_env["FSDP_AUTO_WRAP_POLICY"] = str(args.fsdp_auto_wrap_policy)
|
||||
if args.fsdp_transformer_layer_cls_to_wrap is not None:
|
||||
current_env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = str(args.fsdp_transformer_layer_cls_to_wrap)
|
||||
if args.fsdp_backward_prefetch_policy is not None:
|
||||
current_env["FSDP_BACKWARD_PREFETCH"] = str(args.fsdp_backward_prefetch_policy)
|
||||
if args.fsdp_state_dict_type is not None:
|
||||
current_env["FSDP_STATE_DICT_TYPE"] = str(args.fsdp_state_dict_type)
|
||||
|
||||
if args.use_megatron_lm:
|
||||
prefix = "MEGATRON_LM_"
|
||||
current_env["USE_MEGATRON_LM"] = "true"
|
||||
current_env[prefix + "TP_DEGREE"] = str(args.megatron_lm_tp_degree)
|
||||
current_env[prefix + "PP_DEGREE"] = str(args.megatron_lm_pp_degree)
|
||||
current_env[prefix + "GRADIENT_CLIPPING"] = str(args.megatron_lm_gradient_clipping)
|
||||
if args.megatron_lm_num_micro_batches is not None:
|
||||
current_env[prefix + "NUM_MICRO_BATCHES"] = str(args.megatron_lm_num_micro_batches)
|
||||
if args.megatron_lm_sequence_parallelism is not None:
|
||||
current_env[prefix + "SEQUENCE_PARALLELISM"] = str(args.megatron_lm_sequence_parallelism)
|
||||
if args.megatron_lm_recompute_activations is not None:
|
||||
current_env[prefix + "RECOMPUTE_ACTIVATIONS"] = str(args.megatron_lm_recompute_activations)
|
||||
if args.megatron_lm_use_distributed_optimizer is not None:
|
||||
current_env[prefix + "USE_DISTRIBUTED_OPTIMIZER"] = str(args.megatron_lm_use_distributed_optimizer)
|
||||
|
||||
current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process)
|
||||
if is_torch_version("<", "1.9.0"):
|
||||
raise NotImplementedError("Multi-node training requires pytorch>=1.9.0")
|
||||
|
||||
debug = getattr(args, "debug", False)
|
||||
args = _filter_args(
|
||||
args,
|
||||
distrib_run.get_args_parser(),
|
||||
["--training_script", args.training_script, "--training_script_args", args.training_script_args],
|
||||
)
|
||||
args = _filter_args(args)
|
||||
with patch_environment(**current_env):
|
||||
try:
|
||||
distrib_run.run(args)
|
||||
except Exception:
|
||||
except:
|
||||
if is_rich_available() and debug:
|
||||
console = get_console()
|
||||
console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
|
||||
console.print_exception(suppress=[__file__], show_locals=False)
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
def deepspeed_launcher(args):
|
||||
if is_torch_version(">=", "1.9.1"):
|
||||
if is_torch_version(">=", "1.9.0"):
|
||||
import torch.distributed.run as distrib_run
|
||||
if not is_deepspeed_available():
|
||||
raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")
|
||||
num_processes = getattr(args, "num_processes")
|
||||
num_machines = getattr(args, "num_machines")
|
||||
main_process_ip = getattr(args, "main_process_ip")
|
||||
main_process_port = getattr(args, "main_process_port")
|
||||
|
||||
cmd, current_env = prepare_deepspeed_cmd_env(args)
|
||||
# make sure launcher is not None
|
||||
if args.deepspeed_multinode_launcher is None:
|
||||
# set to default pdsh
|
||||
setattr(args, "deepspeed_multinode_launcher", DEEPSPEED_MULTINODE_LAUNCHERS[0])
|
||||
|
||||
if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
|
||||
cmd = ["deepspeed", "--no_local_rank"]
|
||||
cmd.extend(["--hostfile", str(args.deepspeed_hostfile), "--launcher", str(args.deepspeed_multinode_launcher)])
|
||||
if args.deepspeed_exclusion_filter is not None:
|
||||
cmd.extend(
|
||||
[
|
||||
"--exclude",
|
||||
str(args.deepspeed_exclusion_filter),
|
||||
]
|
||||
)
|
||||
elif args.deepspeed_inclusion_filter is not None:
|
||||
cmd.extend(
|
||||
[
|
||||
"--include",
|
||||
str(args.deepspeed_inclusion_filter),
|
||||
]
|
||||
)
|
||||
else:
|
||||
cmd.extend(["--num_gpus", str(args.num_processes // args.num_machines)])
|
||||
|
||||
if args.module and args.no_python:
|
||||
raise ValueError("--module and --no_python cannot be used together")
|
||||
elif args.module:
|
||||
cmd.append("--module")
|
||||
elif args.no_python:
|
||||
cmd.append("--no_python")
|
||||
cmd.append(args.training_script)
|
||||
cmd.extend(args.training_script_args)
|
||||
elif num_machines > 1 and args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]:
|
||||
setattr(args, "nproc_per_node", str(num_processes // num_machines))
|
||||
setattr(args, "nnodes", str(num_machines))
|
||||
setattr(args, "node_rank", int(args.machine_rank))
|
||||
if getattr(args, "same_network", False):
|
||||
setattr(args, "master_addr", str(main_process_ip))
|
||||
setattr(args, "master_port", str(main_process_port))
|
||||
else:
|
||||
setattr(args, "rdzv_endpoint", f"{main_process_ip}:{main_process_port}")
|
||||
else:
|
||||
setattr(args, "nproc_per_node", str(num_processes))
|
||||
if main_process_port is not None:
|
||||
setattr(args, "master_port", str(main_process_port))
|
||||
|
||||
if args.module and args.no_python:
|
||||
raise ValueError("--module and --no_python cannot be used together")
|
||||
elif args.module:
|
||||
setattr(args, "module", True)
|
||||
elif args.no_python:
|
||||
setattr(args, "no_python", True)
|
||||
|
||||
current_env = os.environ.copy()
|
||||
gpu_ids = getattr(args, "gpu_ids", "all")
|
||||
if gpu_ids != "all" and args.gpu_ids is not None:
|
||||
current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids
|
||||
try:
|
||||
mixed_precision = PrecisionType(args.mixed_precision.lower())
|
||||
except ValueError:
|
||||
raise ValueError(
|
||||
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
|
||||
)
|
||||
|
||||
if args.fp16:
|
||||
warnings.warn(
|
||||
'--fp16 flag is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use "--mixed_precision fp16" instead.',
|
||||
FutureWarning,
|
||||
)
|
||||
mixed_precision = "fp16"
|
||||
|
||||
current_env["PYTHONPATH"] = env_var_path_add("PYTHONPATH", os.path.abspath("."))
|
||||
current_env["MIXED_PRECISION"] = str(mixed_precision)
|
||||
current_env["USE_DEEPSPEED"] = "true"
|
||||
current_env["DEEPSPEED_ZERO_STAGE"] = str(args.zero_stage)
|
||||
current_env["GRADIENT_ACCUMULATION_STEPS"] = str(args.gradient_accumulation_steps)
|
||||
current_env["GRADIENT_CLIPPING"] = str(args.gradient_clipping).lower()
|
||||
current_env["DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE"] = str(args.offload_optimizer_device).lower()
|
||||
current_env["DEEPSPEED_OFFLOAD_PARAM_DEVICE"] = str(args.offload_param_device).lower()
|
||||
current_env["DEEPSPEED_ZERO3_INIT"] = str(args.zero3_init_flag).lower()
|
||||
current_env["DEEPSPEED_ZERO3_SAVE_16BIT_MODEL"] = str(args.zero3_save_16bit_model).lower()
|
||||
if args.deepspeed_config_file is not None:
|
||||
current_env["DEEPSPEED_CONFIG_FILE"] = str(args.deepspeed_config_file)
|
||||
|
||||
if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
|
||||
with open(".deepspeed_env", "a") as f:
|
||||
@ -629,39 +747,38 @@ def deepspeed_launcher(args):
|
||||
process = subprocess.Popen(cmd, env=current_env)
|
||||
process.wait()
|
||||
if process.returncode != 0:
|
||||
if not args.quiet:
|
||||
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
|
||||
else:
|
||||
sys.exit(1)
|
||||
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
|
||||
else:
|
||||
if is_torch_version("<", "1.9.1"):
|
||||
raise NotImplementedError("Multi-node training requires pytorch>=1.9.1")
|
||||
if is_torch_version("<", "1.9.0"):
|
||||
raise NotImplementedError("Multi-node training requires pytorch>=1.9.0")
|
||||
|
||||
debug = getattr(args, "debug", False)
|
||||
args = _filter_args(
|
||||
args,
|
||||
distrib_run.get_args_parser(),
|
||||
["--training_script", args.training_script, "--training_script_args", args.training_script_args],
|
||||
)
|
||||
args = _filter_args(args)
|
||||
with patch_environment(**current_env):
|
||||
try:
|
||||
distrib_run.run(args)
|
||||
except Exception:
|
||||
except:
|
||||
if is_rich_available() and debug:
|
||||
console = get_console()
|
||||
console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
|
||||
console.print_exception(suppress=[__file__], show_locals=False)
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
def tpu_launcher(args):
|
||||
import torch_xla.distributed.xla_multiprocessing as xmp
|
||||
|
||||
current_env = {}
|
||||
|
||||
if args.no_python:
|
||||
raise ValueError("--no_python cannot be used with TPU launcher")
|
||||
|
||||
args, current_env = prepare_tpu(args, {})
|
||||
if args.mixed_precision == "bf16":
|
||||
if args.downcast_bf16:
|
||||
current_env["XLA_USE_BF16"] = "0"
|
||||
current_env["XLA_DOWNCAST_BF16"] = "1"
|
||||
else:
|
||||
current_env["XLA_USE_BF16"] = "1"
|
||||
current_env["XLA_DOWNCAST_BF16"] = "0"
|
||||
|
||||
if args.module:
|
||||
mod_name = args.training_script
|
||||
@ -686,63 +803,46 @@ def tpu_launcher(args):
|
||||
xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)
|
||||
|
||||
|
||||
def tpu_pod_launcher(args):
|
||||
from torch_xla.distributed import xla_dist
|
||||
def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:
|
||||
if len(nargs) < 0:
|
||||
return {}
|
||||
# helper function to infer type for argsparser
|
||||
|
||||
current_env = {}
|
||||
args, current_env = prepare_tpu(args, current_env, True)
|
||||
debug = getattr(args, "debug", False)
|
||||
def _infer_type(s):
|
||||
try:
|
||||
s = float(s)
|
||||
|
||||
training_script = args.training_script
|
||||
training_script_args = args.training_script_args
|
||||
new_args = _filter_args(
|
||||
args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"]
|
||||
)
|
||||
if s // 1 == s:
|
||||
return int(s)
|
||||
return s
|
||||
except ValueError:
|
||||
return s
|
||||
|
||||
if args.tpu_use_sudo:
|
||||
new_cmd = ["sudo"]
|
||||
else:
|
||||
new_cmd = []
|
||||
parser = argparse.ArgumentParser()
|
||||
_, unknown = parser.parse_known_args(nargs)
|
||||
for index, argument in enumerate(unknown):
|
||||
if argument.startswith(("-", "--")):
|
||||
action = None
|
||||
if index + 1 < len(unknown): # checks if next index would be in list
|
||||
if unknown[index + 1].startswith(("-", "--")): # checks if next element is an key
|
||||
# raise an error if element is store_true or store_false
|
||||
raise ValueError(
|
||||
"SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types"
|
||||
)
|
||||
else: # raise an error if last element is store_true or store_false
|
||||
raise ValueError(
|
||||
"SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types"
|
||||
)
|
||||
# adds argument to parser based on action_store true
|
||||
if action is None:
|
||||
parser.add_argument(argument, type=_infer_type)
|
||||
else:
|
||||
parser.add_argument(argument, action=action)
|
||||
|
||||
new_cmd += [
|
||||
"accelerate-launch",
|
||||
"--tpu",
|
||||
"--no_tpu_cluster",
|
||||
"--num_machines",
|
||||
str(1),
|
||||
"--mixed_precision",
|
||||
"no",
|
||||
"--dynamo_backend",
|
||||
"no",
|
||||
"--num_processes",
|
||||
str(args.num_processes),
|
||||
"--main_training_function",
|
||||
str(args.main_training_function),
|
||||
training_script,
|
||||
] + training_script_args
|
||||
|
||||
new_args.positional = new_cmd
|
||||
bad_flags = ""
|
||||
for arg in vars(new_args):
|
||||
if arg.startswith("docker_"):
|
||||
value = getattr(new_args, arg)
|
||||
if value != "" and value is not None:
|
||||
bad_flags += f'{arg}="{value}"\n'
|
||||
if bad_flags != "":
|
||||
raise ValueError(
|
||||
f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}"
|
||||
)
|
||||
new_args.env = [f"{k}={v}" for k, v in current_env.items()]
|
||||
new_args.env.append("ACCELERATE_IN_TPU_POD=1")
|
||||
try:
|
||||
xla_dist.resolve_and_execute(new_args)
|
||||
except Exception:
|
||||
if is_rich_available() and debug:
|
||||
console = get_console()
|
||||
console.print("\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]")
|
||||
console.print_exception(suppress=[__file__], show_locals=False)
|
||||
else:
|
||||
raise
|
||||
return {
|
||||
key: (literal_eval(value) if value == "True" or value == "False" else value)
|
||||
for key, value in parser.parse_args(nargs).__dict__.items()
|
||||
}
|
||||
|
||||
|
||||
def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
|
||||
@ -757,33 +857,122 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
|
||||
|
||||
from sagemaker.huggingface import HuggingFace
|
||||
|
||||
args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args)
|
||||
# configure environment
|
||||
print("Configuring Amazon SageMaker environment")
|
||||
os.environ["AWS_DEFAULT_REGION"] = sagemaker_config.region
|
||||
|
||||
huggingface_estimator = HuggingFace(**args)
|
||||
# configure credentials
|
||||
if sagemaker_config.profile is not None:
|
||||
os.environ["AWS_PROFILE"] = sagemaker_config.profile
|
||||
elif args.aws_access_key_id is not None and args.aws_secret_access_key is not None:
|
||||
os.environ["AWS_ACCESS_KEY_ID"] = args.aws_access_key_id
|
||||
os.environ["AWS_SECRET_ACCESS_KEY"] = args.aws_secret_access_key
|
||||
else:
|
||||
raise EnvironmentError(
|
||||
"You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile"
|
||||
)
|
||||
|
||||
# extract needed arguments
|
||||
source_dir = os.path.dirname(args.training_script)
|
||||
if not source_dir: # checks if string is empty
|
||||
source_dir = "."
|
||||
entry_point = os.path.basename(args.training_script)
|
||||
if not entry_point.endswith(".py"):
|
||||
raise ValueError(f'Your training script should be a python script and not "{entry_point}"')
|
||||
|
||||
print("Converting Arguments to Hyperparameters")
|
||||
hyperparameters = _convert_nargs_to_dict(args.training_script_args)
|
||||
|
||||
try:
|
||||
mixed_precision = PrecisionType(args.mixed_precision.lower())
|
||||
except ValueError:
|
||||
raise ValueError(
|
||||
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
|
||||
)
|
||||
|
||||
if args.fp16:
|
||||
warnings.warn('--fp16 flag is deprecated. Use "--mixed_precision fp16" instead.', FutureWarning)
|
||||
mixed_precision = "fp16"
|
||||
|
||||
# Environment variables to be set for use during training job
|
||||
environment = {
|
||||
"USE_SAGEMAKER": "true",
|
||||
"MIXED_PRECISION": str(mixed_precision),
|
||||
"SAGEMAKER_DISTRIBUTED_TYPE": sagemaker_config.distributed_type.value,
|
||||
}
|
||||
# configure distribution set up
|
||||
distribution = None
|
||||
if sagemaker_config.distributed_type == SageMakerDistributedType.DATA_PARALLEL:
|
||||
distribution = {"smdistributed": {"dataparallel": {"enabled": True}}}
|
||||
|
||||
# configure sagemaker inputs
|
||||
sagemaker_inputs = None
|
||||
if sagemaker_config.sagemaker_inputs_file is not None:
|
||||
print(f"Loading SageMaker Inputs from {sagemaker_config.sagemaker_inputs_file} file")
|
||||
sagemaker_inputs = {}
|
||||
with open(sagemaker_config.sagemaker_inputs_file) as file:
|
||||
for i, line in enumerate(file):
|
||||
if i == 0:
|
||||
continue
|
||||
l = line.split("\t")
|
||||
sagemaker_inputs[l[0]] = l[1].strip()
|
||||
print(f"Loaded SageMaker Inputs: {sagemaker_inputs}")
|
||||
|
||||
# configure sagemaker metrics
|
||||
sagemaker_metrics = None
|
||||
if sagemaker_config.sagemaker_metrics_file is not None:
|
||||
print(f"Loading SageMaker Metrics from {sagemaker_config.sagemaker_metrics_file} file")
|
||||
sagemaker_metrics = []
|
||||
with open(sagemaker_config.sagemaker_metrics_file) as file:
|
||||
for i, line in enumerate(file):
|
||||
if i == 0:
|
||||
continue
|
||||
l = line.split("\t")
|
||||
metric_dict = {
|
||||
"Name": l[0],
|
||||
"Regex": l[1].strip(),
|
||||
}
|
||||
sagemaker_metrics.append(metric_dict)
|
||||
print(f"Loaded SageMaker Metrics: {sagemaker_metrics}")
|
||||
|
||||
# configure session
|
||||
print("Creating Estimator")
|
||||
huggingface_estimator = HuggingFace(
|
||||
image_uri=sagemaker_config.image_uri,
|
||||
entry_point=entry_point,
|
||||
source_dir=source_dir,
|
||||
role=sagemaker_config.iam_role_name,
|
||||
transformers_version=sagemaker_config.transformers_version,
|
||||
pytorch_version=sagemaker_config.pytorch_version,
|
||||
py_version=sagemaker_config.py_version,
|
||||
base_job_name=sagemaker_config.base_job_name,
|
||||
instance_count=sagemaker_config.num_machines,
|
||||
instance_type=sagemaker_config.ec2_instance_type,
|
||||
debugger_hook_config=False,
|
||||
distribution=distribution,
|
||||
hyperparameters=hyperparameters,
|
||||
environment=environment,
|
||||
metric_definitions=sagemaker_metrics,
|
||||
)
|
||||
|
||||
huggingface_estimator.fit(inputs=sagemaker_inputs)
|
||||
print(f"You can find your model data at: {huggingface_estimator.model_data}")
|
||||
|
||||
|
||||
def _validate_launch_command(args):
|
||||
def launch_command(args):
|
||||
# Sanity checks
|
||||
if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:
|
||||
raise ValueError(
|
||||
"You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time."
|
||||
)
|
||||
if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):
|
||||
raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.")
|
||||
if sum([args.multi_gpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:
|
||||
raise ValueError("You can only pick one between `--multi_gpu`, `--use_deepspeed`, `--tpu`, `--use_fsdp`.")
|
||||
|
||||
defaults = None
|
||||
warned = []
|
||||
mp_from_config_flag = False
|
||||
# Get the default from the config file.
|
||||
if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:
|
||||
defaults = load_config_from_file(args.config_file)
|
||||
if (
|
||||
not args.multi_gpu
|
||||
and not args.tpu
|
||||
and not args.tpu_use_cluster
|
||||
and not args.mps
|
||||
and not args.use_deepspeed
|
||||
and not args.use_fsdp
|
||||
and not args.use_megatron_lm
|
||||
@ -792,28 +981,23 @@ def _validate_launch_command(args):
|
||||
args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU
|
||||
args.tpu = defaults.distributed_type == DistributedType.TPU
|
||||
args.use_fsdp = defaults.distributed_type == DistributedType.FSDP
|
||||
args.mps = defaults.distributed_type == DistributedType.MPS
|
||||
args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM
|
||||
args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False
|
||||
if args.gpu_ids is None:
|
||||
if defaults.gpu_ids is not None:
|
||||
args.gpu_ids = defaults.gpu_ids
|
||||
else:
|
||||
args.gpu_ids = "all"
|
||||
|
||||
if args.multi_gpu and args.num_machines is None:
|
||||
args.num_machines = defaults.num_machines
|
||||
|
||||
if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1:
|
||||
raise ValueError(
|
||||
"Less than two GPU ids were configured and tried to run on on multiple GPUs. "
|
||||
"Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`."
|
||||
)
|
||||
if not args.mps:
|
||||
if args.gpu_ids is None:
|
||||
if defaults.gpu_ids is not None:
|
||||
args.gpu_ids = defaults.gpu_ids
|
||||
else:
|
||||
args.gpu_ids = "all"
|
||||
if len(args.gpu_ids.split(",")) < 2 and args.multi_gpu and (args.gpu_ids != "all"):
|
||||
args.multi_gpu = False
|
||||
if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:
|
||||
# Update args with the defaults
|
||||
for name, attr in defaults.__dict__.items():
|
||||
if isinstance(attr, dict):
|
||||
for k in defaults.deepspeed_config:
|
||||
setattr(args, k, defaults.deepspeed_config[k])
|
||||
if getattr(args, k) is None:
|
||||
setattr(args, k, defaults.deepspeed_config[k])
|
||||
for k in defaults.fsdp_config:
|
||||
arg_to_set = k
|
||||
if "fsdp" not in arg_to_set:
|
||||
@ -821,38 +1005,23 @@ def _validate_launch_command(args):
|
||||
setattr(args, arg_to_set, defaults.fsdp_config[k])
|
||||
for k in defaults.megatron_lm_config:
|
||||
setattr(args, k, defaults.megatron_lm_config[k])
|
||||
for k in defaults.dynamo_config:
|
||||
setattr(args, k, defaults.dynamo_config[k])
|
||||
for k in defaults.ipex_config:
|
||||
setattr(args, k, defaults.ipex_config[k])
|
||||
continue
|
||||
|
||||
# Those args are handled separately
|
||||
if (
|
||||
name not in ["compute_environment", "mixed_precision", "distributed_type"]
|
||||
name not in ["compute_environment", "fp16", "mixed_precision", "distributed_type"]
|
||||
and getattr(args, name, None) is None
|
||||
):
|
||||
setattr(args, name, attr)
|
||||
if not args.mixed_precision:
|
||||
if defaults.mixed_precision is None:
|
||||
args.mixed_precision = "no"
|
||||
if args.fp16:
|
||||
args.mixed_precision = "fp16"
|
||||
else:
|
||||
args.mixed_precision = defaults.mixed_precision
|
||||
mp_from_config_flag = True
|
||||
|
||||
# Silently set the default here
|
||||
if args.dynamo_backend is None:
|
||||
args.dynamo_backend = "no"
|
||||
else:
|
||||
if args.num_processes is None:
|
||||
args.num_processes = torch.cuda.device_count()
|
||||
args.num_processes = torch.cuda.device_count() if args.multi_gpu else 1
|
||||
warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`")
|
||||
if torch.cuda.device_count() > 1 and not args.multi_gpu:
|
||||
warned.append(
|
||||
"\t\tMore than one GPU was found, enabling multi-GPU training.\n"
|
||||
"\t\tIf this was unintended please pass in `--num_processes=1`."
|
||||
)
|
||||
args.multi_gpu = True
|
||||
if args.num_machines is None:
|
||||
warned.append("\t`--num_machines` was set to a value of `1`")
|
||||
args.num_machines = 1
|
||||
@ -861,21 +1030,15 @@ def _validate_launch_command(args):
|
||||
args.mixed_precision = "no"
|
||||
if not hasattr(args, "use_cpu"):
|
||||
args.use_cpu = args.cpu
|
||||
if args.dynamo_backend is None:
|
||||
warned.append("\t`--dynamo_backend` was set to a value of `'no'`")
|
||||
args.dynamo_backend = "no"
|
||||
|
||||
is_aws_env_disabled = defaults is None or (
|
||||
defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER
|
||||
)
|
||||
if is_aws_env_disabled and args.num_cpu_threads_per_process is None:
|
||||
if args.num_cpu_threads_per_process is None:
|
||||
args.num_cpu_threads_per_process = 1
|
||||
if args.use_cpu and args.num_processes >= 1:
|
||||
if args.use_cpu and args.num_processes > 1:
|
||||
local_size = get_int_from_env(
|
||||
["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1
|
||||
)
|
||||
threads_per_process = int(psutil.cpu_count(logical=False) / local_size)
|
||||
if threads_per_process > 1:
|
||||
if args.num_cpu_threads_per_process > 1:
|
||||
args.num_cpu_threads_per_process = threads_per_process
|
||||
warned.append(
|
||||
f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs"
|
||||
@ -887,19 +1050,10 @@ def _validate_launch_command(args):
|
||||
message += (
|
||||
"\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`."
|
||||
)
|
||||
logger.warning(message)
|
||||
return args, defaults, mp_from_config_flag
|
||||
|
||||
|
||||
def launch_command(args):
|
||||
args, defaults, mp_from_config_flag = _validate_launch_command(args)
|
||||
logger.warn(message)
|
||||
|
||||
# Use the proper launcher
|
||||
if args.use_deepspeed and not args.cpu:
|
||||
args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else []
|
||||
if mp_from_config_flag:
|
||||
args.deepspeed_fields_from_accelerate_config.append("mixed_precision")
|
||||
args.deepspeed_fields_from_accelerate_config = ",".join(args.deepspeed_fields_from_accelerate_config)
|
||||
deepspeed_launcher(args)
|
||||
elif args.use_fsdp and not args.cpu:
|
||||
multi_gpu_launcher(args)
|
||||
@ -908,10 +1062,7 @@ def launch_command(args):
|
||||
elif args.multi_gpu and not args.cpu:
|
||||
multi_gpu_launcher(args)
|
||||
elif args.tpu and not args.cpu:
|
||||
if args.tpu_use_cluster:
|
||||
tpu_pod_launcher(args)
|
||||
else:
|
||||
tpu_launcher(args)
|
||||
tpu_launcher(args)
|
||||
elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
|
||||
sagemaker_launcher(defaults, args)
|
||||
else:
|
||||
|
||||
@ -1 +0,0 @@
|
||||
from .selection_menu import BulletMenu
|
||||
@ -1,65 +0,0 @@
|
||||
# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
from contextlib import contextmanager
|
||||
|
||||
|
||||
# Windows only
|
||||
if os.name == "nt":
|
||||
import ctypes
|
||||
import msvcrt # noqa
|
||||
|
||||
class CursorInfo(ctypes.Structure):
|
||||
# _fields is a specific attr expected by ctypes
|
||||
_fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
|
||||
|
||||
|
||||
def hide_cursor():
|
||||
if os.name == "nt":
|
||||
ci = CursorInfo()
|
||||
handle = ctypes.windll.kernel32.GetStdHandle(-11)
|
||||
ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
|
||||
ci.visible = False
|
||||
ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
|
||||
elif os.name == "posix":
|
||||
sys.stdout.write("\033[?25l")
|
||||
sys.stdout.flush()
|
||||
|
||||
|
||||
def show_cursor():
|
||||
if os.name == "nt":
|
||||
ci = CursorInfo()
|
||||
handle = ctypes.windll.kernel32.GetStdHandle(-11)
|
||||
ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
|
||||
ci.visible = True
|
||||
ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
|
||||
elif os.name == "posix":
|
||||
sys.stdout.write("\033[?25h")
|
||||
sys.stdout.flush()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def hide():
|
||||
"Context manager to hide the terminal cursor"
|
||||
try:
|
||||
hide_cursor()
|
||||
yield
|
||||
finally:
|
||||
show_cursor()
|
||||
@ -1,59 +0,0 @@
|
||||
# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
A variety of helper functions and constants when dealing with terminal menu choices, based on
|
||||
https://github.com/bchao1/bullet
|
||||
"""
|
||||
|
||||
import enum
|
||||
import shutil
|
||||
import sys
|
||||
|
||||
|
||||
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
|
||||
|
||||
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
|
||||
|
||||
|
||||
class Direction(enum.Enum):
|
||||
UP = 0
|
||||
DOWN = 1
|
||||
|
||||
|
||||
def forceWrite(content, end=""):
|
||||
sys.stdout.write(str(content) + end)
|
||||
sys.stdout.flush()
|
||||
|
||||
|
||||
def writeColor(content, color, end=""):
|
||||
forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
|
||||
|
||||
|
||||
def reset_cursor():
|
||||
forceWrite("\r")
|
||||
|
||||
|
||||
def move_cursor(num_lines: int, direction: str):
|
||||
forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
|
||||
|
||||
|
||||
def clear_line():
|
||||
forceWrite(" " * TERMINAL_WIDTH)
|
||||
reset_cursor()
|
||||
|
||||
|
||||
def linebreak():
|
||||
reset_cursor()
|
||||
forceWrite("-" * TERMINAL_WIDTH)
|
||||
@ -1,86 +0,0 @@
|
||||
# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
This file contains utilities for handling input from the user and registering specific keys to specific functions,
|
||||
based on https://github.com/bchao1/bullet
|
||||
"""
|
||||
|
||||
from typing import List
|
||||
|
||||
from .keymap import KEYMAP, get_character
|
||||
|
||||
|
||||
def mark(key: str):
|
||||
"""
|
||||
Mark the function with the key code so it can be handled in the register
|
||||
"""
|
||||
|
||||
def decorator(func):
|
||||
handle = getattr(func, "handle_key", [])
|
||||
handle += [key]
|
||||
setattr(func, "handle_key", handle)
|
||||
return func
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
def mark_multiple(*keys: List[str]):
|
||||
"""
|
||||
Mark the function with the key codes so it can be handled in the register
|
||||
"""
|
||||
|
||||
def decorator(func):
|
||||
handle = getattr(func, "handle_key", [])
|
||||
handle += keys
|
||||
setattr(func, "handle_key", handle)
|
||||
return func
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
class KeyHandler(type):
|
||||
"""
|
||||
Metaclass that adds the key handlers to the class
|
||||
"""
|
||||
|
||||
def __new__(cls, name, bases, attrs):
|
||||
new_cls = super().__new__(cls, name, bases, attrs)
|
||||
if not hasattr(new_cls, "key_handler"):
|
||||
setattr(new_cls, "key_handler", {})
|
||||
setattr(new_cls, "handle_input", KeyHandler.handle_input)
|
||||
|
||||
for value in attrs.values():
|
||||
handled_keys = getattr(value, "handle_key", [])
|
||||
for key in handled_keys:
|
||||
new_cls.key_handler[key] = value
|
||||
return new_cls
|
||||
|
||||
@staticmethod
|
||||
def handle_input(cls):
|
||||
"Finds and returns the selected character if it exists in the handler"
|
||||
char = get_character()
|
||||
if char != KEYMAP["undefined"]:
|
||||
char = ord(char)
|
||||
handler = cls.key_handler.get(char)
|
||||
if handler:
|
||||
cls.current_selection = char
|
||||
return handler(cls)
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def register(cls):
|
||||
"""Adds KeyHandler metaclass to the class"""
|
||||
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
|
||||
@ -1,134 +0,0 @@
|
||||
# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet
|
||||
"""
|
||||
|
||||
|
||||
import os
|
||||
import string
|
||||
import sys
|
||||
|
||||
|
||||
ARROW_KEY_FLAG = 1 << 8
|
||||
|
||||
KEYMAP = {
|
||||
"tab": ord("\t"),
|
||||
"newline": ord("\r"),
|
||||
"esc": 27,
|
||||
"up": 65 + ARROW_KEY_FLAG,
|
||||
"down": 66 + ARROW_KEY_FLAG,
|
||||
"right": 67 + ARROW_KEY_FLAG,
|
||||
"left": 68 + ARROW_KEY_FLAG,
|
||||
"mod_int": 91,
|
||||
"undefined": sys.maxsize,
|
||||
"interrupt": 3,
|
||||
"insert": 50,
|
||||
"delete": 51,
|
||||
"pg_up": 53,
|
||||
"pg_down": 54,
|
||||
}
|
||||
|
||||
KEYMAP["arrow_begin"] = KEYMAP["up"]
|
||||
KEYMAP["arrow_end"] = KEYMAP["left"]
|
||||
|
||||
if sys.platform == "win32":
|
||||
WIN_CH_BUFFER = []
|
||||
WIN_KEYMAP = {
|
||||
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
|
||||
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
|
||||
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
|
||||
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
|
||||
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
|
||||
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
|
||||
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
|
||||
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
|
||||
}
|
||||
|
||||
for i in range(10):
|
||||
KEYMAP[str(i)] = ord(str(i))
|
||||
|
||||
|
||||
def get_raw_chars():
|
||||
"Gets raw characters from inputs"
|
||||
if os.name == "nt":
|
||||
import msvcrt
|
||||
|
||||
encoding = "mbcs"
|
||||
# Flush the keyboard buffer
|
||||
while msvcrt.kbhit():
|
||||
msvcrt.getch()
|
||||
if len(WIN_CH_BUFFER) == 0:
|
||||
# Read the keystroke
|
||||
ch = msvcrt.getch()
|
||||
|
||||
# If it is a prefix char, get second part
|
||||
if ch in (b"\x00", b"\xe0"):
|
||||
ch2 = ch + msvcrt.getch()
|
||||
# Translate actual Win chars to bullet char types
|
||||
try:
|
||||
chx = chr(WIN_KEYMAP[ch2])
|
||||
WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
|
||||
WIN_CH_BUFFER.append(chx)
|
||||
if ord(chx) in (
|
||||
KEYMAP["insert"] - 1 << 9,
|
||||
KEYMAP["delete"] - 1 << 9,
|
||||
KEYMAP["pg_up"] - 1 << 9,
|
||||
KEYMAP["pg_down"] - 1 << 9,
|
||||
):
|
||||
WIN_CH_BUFFER.append(chr(126))
|
||||
ch = chr(KEYMAP["esc"])
|
||||
except KeyError:
|
||||
ch = ch2[1]
|
||||
else:
|
||||
ch = ch.decode(encoding)
|
||||
else:
|
||||
ch = WIN_CH_BUFFER.pop(0)
|
||||
elif os.name == "posix":
|
||||
import termios
|
||||
import tty
|
||||
|
||||
fd = sys.stdin.fileno()
|
||||
old_settings = termios.tcgetattr(fd)
|
||||
try:
|
||||
tty.setraw(fd)
|
||||
ch = sys.stdin.read(1)
|
||||
finally:
|
||||
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
|
||||
return ch
|
||||
|
||||
|
||||
def get_character():
|
||||
"Gets a character from the keyboard and returns the key code"
|
||||
char = get_raw_chars()
|
||||
if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
|
||||
return char
|
||||
|
||||
elif ord(char) == KEYMAP["esc"]:
|
||||
combo = get_raw_chars()
|
||||
if ord(combo) == KEYMAP["mod_int"]:
|
||||
key = get_raw_chars()
|
||||
if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
|
||||
return chr(ord(key) + ARROW_KEY_FLAG)
|
||||
else:
|
||||
return KEYMAP["undefined"]
|
||||
else:
|
||||
return get_raw_chars()
|
||||
|
||||
else:
|
||||
if char in string.printable:
|
||||
return char
|
||||
else:
|
||||
return KEYMAP["undefined"]
|
||||
@ -1,125 +0,0 @@
|
||||
# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Main driver for the selection menu, based on https://github.com/bchao1/bullet
|
||||
"""
|
||||
import sys
|
||||
|
||||
from . import cursor, input
|
||||
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
|
||||
from .keymap import KEYMAP
|
||||
|
||||
|
||||
@input.register
|
||||
class BulletMenu:
|
||||
"""
|
||||
A CLI menu to select a choice from a list of choices using the keyboard.
|
||||
"""
|
||||
|
||||
def __init__(self, prompt: str = None, choices: list = []):
|
||||
self.position = 0
|
||||
self.choices = choices
|
||||
self.prompt = prompt
|
||||
if sys.platform == "win32":
|
||||
self.arrow_char = "*"
|
||||
else:
|
||||
self.arrow_char = "➔ "
|
||||
|
||||
def write_choice(self, index, end: str = ""):
|
||||
if sys.platform != "win32":
|
||||
writeColor(self.choices[index], 32, end)
|
||||
else:
|
||||
forceWrite(self.choices[index], end)
|
||||
|
||||
def print_choice(self, index: int):
|
||||
"Prints the choice at the given index"
|
||||
if index == self.position:
|
||||
forceWrite(f" {self.arrow_char} ")
|
||||
self.write_choice(index)
|
||||
else:
|
||||
forceWrite(f" {self.choices[index]}")
|
||||
reset_cursor()
|
||||
|
||||
def move_direction(self, direction: Direction, num_spaces: int = 1):
|
||||
"Should not be directly called, used to move a direction of either up or down"
|
||||
old_position = self.position
|
||||
if direction == Direction.DOWN:
|
||||
if self.position + 1 >= len(self.choices):
|
||||
return
|
||||
self.position += num_spaces
|
||||
else:
|
||||
if self.position - 1 < 0:
|
||||
return
|
||||
self.position -= num_spaces
|
||||
clear_line()
|
||||
self.print_choice(old_position)
|
||||
move_cursor(num_spaces, direction.name)
|
||||
self.print_choice(self.position)
|
||||
|
||||
@input.mark(KEYMAP["up"])
|
||||
def move_up(self):
|
||||
self.move_direction(Direction.UP)
|
||||
|
||||
@input.mark(KEYMAP["down"])
|
||||
def move_down(self):
|
||||
self.move_direction(Direction.DOWN)
|
||||
|
||||
@input.mark(KEYMAP["newline"])
|
||||
def select(self):
|
||||
move_cursor(len(self.choices) - self.position, "DOWN")
|
||||
return self.position
|
||||
|
||||
@input.mark(KEYMAP["interrupt"])
|
||||
def interrupt(self):
|
||||
move_cursor(len(self.choices) - self.position, "DOWN")
|
||||
raise KeyboardInterrupt
|
||||
|
||||
@input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
|
||||
def select_row(self):
|
||||
index = int(chr(self.current_selection))
|
||||
movement = index - self.position
|
||||
if index == self.position:
|
||||
return
|
||||
if index < len(self.choices):
|
||||
if self.position > index:
|
||||
self.move_direction(Direction.UP, -movement)
|
||||
elif self.position < index:
|
||||
self.move_direction(Direction.DOWN, movement)
|
||||
else:
|
||||
return
|
||||
else:
|
||||
return
|
||||
|
||||
def run(self, default_choice: int = 0):
|
||||
"Start the menu and return the selected choice"
|
||||
if self.prompt:
|
||||
linebreak()
|
||||
forceWrite(self.prompt, "\n")
|
||||
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
|
||||
self.position = default_choice
|
||||
for i in range(len(self.choices)):
|
||||
self.print_choice(i)
|
||||
forceWrite("\n")
|
||||
move_cursor(len(self.choices) - self.position, "UP")
|
||||
with cursor.hide():
|
||||
while True:
|
||||
choice = self.handle_input()
|
||||
if choice is not None:
|
||||
reset_cursor()
|
||||
for _ in range(len(self.choices) + 1):
|
||||
move_cursor(1, "UP")
|
||||
clear_line()
|
||||
self.write_choice(choice, "\n")
|
||||
return choice
|
||||
@ -45,12 +45,10 @@ def test_command_parser(subparsers=None):
|
||||
def test_command(args):
|
||||
script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
|
||||
|
||||
if args.config_file is None:
|
||||
test_args = script_name
|
||||
else:
|
||||
test_args = f"--config_file={args.config_file} {script_name}"
|
||||
|
||||
cmd = ["accelerate-launch"] + test_args.split()
|
||||
test_args = f"""
|
||||
--config_file={args.config_file} {script_name}
|
||||
""".split()
|
||||
cmd = ["accelerate-launch"] + test_args
|
||||
result = execute_subprocess_async(cmd, env=os.environ.copy())
|
||||
if result.returncode == 0:
|
||||
print("Test is a success! You are ready for your distributed training!")
|
||||
|
||||
@ -18,9 +18,8 @@ import argparse
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
from packaging.version import Version, parse
|
||||
|
||||
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
|
||||
from packaging.version import Version, parse
|
||||
|
||||
|
||||
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
|
||||
@ -52,11 +51,6 @@ def tpu_command_parser(subparsers=None):
|
||||
help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
|
||||
)
|
||||
pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
|
||||
pod_args.add_argument(
|
||||
"--use_alpha",
|
||||
action="store_true",
|
||||
help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
|
||||
)
|
||||
pod_args.add_argument(
|
||||
"--command_file",
|
||||
default=None,
|
||||
@ -127,10 +121,8 @@ def tpu_command_launcher(args):
|
||||
|
||||
# Then send it to gcloud
|
||||
# Eventually try to use google-api-core to do this instead of subprocess
|
||||
cmd = ["gcloud"]
|
||||
if args.use_alpha:
|
||||
cmd += ["alpha"]
|
||||
cmd += [
|
||||
cmd = [
|
||||
"gcloud",
|
||||
"compute",
|
||||
"tpus",
|
||||
"tpu-vm",
|
||||
|
||||
@ -13,7 +13,6 @@
|
||||
# limitations under the License.
|
||||
|
||||
import math
|
||||
from contextlib import suppress
|
||||
from typing import List, Optional, Union
|
||||
|
||||
import torch
|
||||
@ -340,10 +339,8 @@ class DataLoaderShard(DataLoader):
|
||||
- `"cuda"`: the CUDA random number generator (GPU only)
|
||||
- `"xla"`: the XLA random number generator (TPU only)
|
||||
- `"generator"`: an optional `torch.Generator`
|
||||
synchronized_generator (`torch.Generator`, *optional*):
|
||||
generator (`torch.Generator`, *optional*):
|
||||
A random number generator to keep synchronized across processes.
|
||||
split_batches (`int`, *optional*, defaults to 0):
|
||||
The number of batches to skip at the beginning.
|
||||
kwargs:
|
||||
All other keyword arguments to pass to the regular `DataLoader` initialization.
|
||||
|
||||
@ -356,44 +353,40 @@ class DataLoaderShard(DataLoader):
|
||||
- **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
|
||||
"""
|
||||
|
||||
def __init__(self, dataset, device=None, rng_types=None, synchronized_generator=None, skip_batches=0, **kwargs):
|
||||
def __init__(self, dataset, device=None, rng_types=None, synchronized_generator=None, **kwargs):
|
||||
super().__init__(dataset, **kwargs)
|
||||
self.device = device
|
||||
self.rng_types = rng_types
|
||||
self.synchronized_generator = synchronized_generator
|
||||
self.skip_batches = skip_batches
|
||||
self.gradient_state = GradientState()
|
||||
|
||||
def __iter__(self):
|
||||
if self.rng_types is not None:
|
||||
synchronize_rng_states(self.rng_types, self.synchronized_generator)
|
||||
self.gradient_state._add_dataloader(self)
|
||||
# We can safely pass because the default is -1
|
||||
with suppress(Exception):
|
||||
self.gradient_state._set_end_of_dataloader(False)
|
||||
try:
|
||||
length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
|
||||
self.gradient_state._set_remainder(length % self.total_batch_size)
|
||||
except Exception:
|
||||
# We can safely pass because the default is -1
|
||||
pass
|
||||
dataloader_iter = super().__iter__()
|
||||
# We iterate one batch ahead to check when we are at the end
|
||||
try:
|
||||
current_batch = next(dataloader_iter)
|
||||
except StopIteration:
|
||||
yield
|
||||
|
||||
batch_index = 0
|
||||
while True:
|
||||
try:
|
||||
# But we still move it to the device so it is done before `StopIteration` is reached
|
||||
if self.device is not None:
|
||||
current_batch = send_to_device(current_batch, self.device)
|
||||
next_batch = next(dataloader_iter)
|
||||
if batch_index >= self.skip_batches:
|
||||
yield current_batch
|
||||
batch_index += 1
|
||||
yield current_batch
|
||||
current_batch = next_batch
|
||||
except StopIteration:
|
||||
self.gradient_state._remove_dataloader(self)
|
||||
if batch_index >= self.skip_batches:
|
||||
yield current_batch
|
||||
self.gradient_state._set_end_of_dataloader(True)
|
||||
yield current_batch
|
||||
break
|
||||
|
||||
@property
|
||||
@ -401,13 +394,13 @@ class DataLoaderShard(DataLoader):
|
||||
batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler
|
||||
return (
|
||||
batch_sampler.batch_size
|
||||
if getattr(batch_sampler, "split_batches", False)
|
||||
else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1))
|
||||
if batch_sampler.split_batches
|
||||
else (batch_sampler.batch_size * batch_sampler.num_processes)
|
||||
)
|
||||
|
||||
@property
|
||||
def total_dataset_length(self):
|
||||
if hasattr(self.dataset, "total_length"):
|
||||
if hasattr("total_length", self.dataset):
|
||||
return self.dataset.total_length
|
||||
else:
|
||||
return len(self.dataset)
|
||||
@ -415,10 +408,9 @@ class DataLoaderShard(DataLoader):
|
||||
|
||||
class DataLoaderDispatcher(DataLoader):
|
||||
"""
|
||||
Args:
|
||||
Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each
|
||||
process their part of the batch.
|
||||
|
||||
Args:
|
||||
split_batches (`bool`, *optional*, defaults to `False`):
|
||||
Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
|
||||
yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of
|
||||
@ -426,8 +418,6 @@ class DataLoaderDispatcher(DataLoader):
|
||||
the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial
|
||||
`dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch
|
||||
size of the `dataloader` is a round multiple of `batch_size`.
|
||||
skip_batches (`int`, *optional*, defaults to 0):
|
||||
The number of batches to skip at the beginning of an iteration.
|
||||
|
||||
**Available attributes:**
|
||||
|
||||
@ -438,7 +428,7 @@ class DataLoaderDispatcher(DataLoader):
|
||||
- **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
|
||||
"""
|
||||
|
||||
def __init__(self, dataset, split_batches: bool = False, skip_batches=0, _drop_last: bool = False, **kwargs):
|
||||
def __init__(self, dataset, split_batches: bool = False, _drop_last: bool = False, **kwargs):
|
||||
shuffle = False
|
||||
if is_torch_version(">=", "1.11.0"):
|
||||
from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe
|
||||
@ -458,24 +448,21 @@ class DataLoaderDispatcher(DataLoader):
|
||||
self.gradient_state = GradientState()
|
||||
self.state = AcceleratorState()
|
||||
self._drop_last = _drop_last
|
||||
self.skip_batches = skip_batches
|
||||
# We can safely pass because the default is -1
|
||||
with suppress(Exception):
|
||||
try:
|
||||
length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
|
||||
self.gradient_state._set_remainder(length % self.total_batch_size)
|
||||
except Exception:
|
||||
# We can safely pass because the default is -1
|
||||
pass
|
||||
|
||||
def _fetch_batches(self, iterator):
|
||||
batches, batch = None, None
|
||||
# On process 0, we gather the batch to dispatch.
|
||||
print("Starting to dispatch")
|
||||
if self.state.process_index == 0:
|
||||
print("In process zero")
|
||||
try:
|
||||
if self.split_batches:
|
||||
# One batch of the main iterator is dispatched and split.
|
||||
print("Getting next batch")
|
||||
batch = next(iterator)
|
||||
print(f'Batch: {batch}')
|
||||
else:
|
||||
# num_processes batches of the main iterator are concatenated then dispatched and split.
|
||||
# We add the batches one by one so we have the remainder available when drop_last=False.
|
||||
@ -486,18 +473,12 @@ class DataLoaderDispatcher(DataLoader):
|
||||
# In both cases, we need to get the structure of the batch that we will broadcast on other
|
||||
# processes to initialize the tensors with the right shape.
|
||||
# data_structure, stop_iteration
|
||||
print("getting batch info")
|
||||
batch_info = [get_data_structure(batch), False]
|
||||
print(f'Batch info: {batch_info}')
|
||||
except StopIteration:
|
||||
print("Hit stop iteration")
|
||||
batch_info = [None, True]
|
||||
else:
|
||||
batch_info = [None, self._stop_iteration]
|
||||
# This is inplace, so after this instruction, every process has the same `batch_info` as process 0.
|
||||
print(f'Batch info on process {AcceleratorState().process_index}: {batch_info}')
|
||||
from accelerate.utils import wait_for_everyone
|
||||
wait_for_everyone()
|
||||
broadcast_object_list(batch_info)
|
||||
self._stop_iteration = batch_info[1]
|
||||
if self._stop_iteration:
|
||||
@ -512,7 +493,7 @@ class DataLoaderDispatcher(DataLoader):
|
||||
return batch, batch_info
|
||||
|
||||
def __iter__(self):
|
||||
self.gradient_state._add_dataloader(self)
|
||||
self.gradient_state._set_end_of_dataloader(False)
|
||||
main_iterator = None
|
||||
if self.state.process_index == 0:
|
||||
# We only iterate through the DataLoader on process 0.
|
||||
@ -521,7 +502,6 @@ class DataLoaderDispatcher(DataLoader):
|
||||
self._stop_iteration = False
|
||||
first_batch = None
|
||||
next_batch, next_batch_info = self._fetch_batches(main_iterator)
|
||||
batch_index = 0
|
||||
while not stop_iteration:
|
||||
batch, batch_info = next_batch, next_batch_info
|
||||
|
||||
@ -558,11 +538,9 @@ class DataLoaderDispatcher(DataLoader):
|
||||
batch = slice_tensors(batch, data_slice)
|
||||
|
||||
if stop_iteration:
|
||||
self.gradient_state._remove_dataloader(self)
|
||||
self.gradient_state._set_remainder(observed_batch_size)
|
||||
if batch_index >= self.skip_batches:
|
||||
yield batch
|
||||
batch_index += 1
|
||||
self.gradient_state._set_end_of_dataloader(True)
|
||||
yield batch
|
||||
|
||||
def __len__(self):
|
||||
whole_length = super().__len__()
|
||||
@ -736,9 +714,7 @@ def prepare_data_loader(
|
||||
# Need to provide batch_size as batch_sampler is None for Iterable dataset
|
||||
if new_batch_sampler is None:
|
||||
kwargs["drop_last"] = dataloader.drop_last
|
||||
kwargs["batch_size"] = (
|
||||
dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size
|
||||
)
|
||||
kwargs["batch_size"] = dataloader.batch_size // num_processes if split_batches else dataloader.batch_size
|
||||
|
||||
if dispatch_batches:
|
||||
kwargs.pop("generator")
|
||||
@ -772,118 +748,3 @@ def prepare_data_loader(
|
||||
if state.distributed_type == DistributedType.TPU:
|
||||
return MpDeviceLoaderWrapper(dataloader, device)
|
||||
return dataloader
|
||||
|
||||
|
||||
class SkipBatchSampler(BatchSampler):
|
||||
"""
|
||||
A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`.
|
||||
"""
|
||||
|
||||
def __init__(self, batch_sampler, skip_batches=0):
|
||||
self.batch_sampler = batch_sampler
|
||||
self.skip_batches = skip_batches
|
||||
|
||||
def __iter__(self):
|
||||
for index, samples in enumerate(self.batch_sampler):
|
||||
if index >= self.skip_batches:
|
||||
yield samples
|
||||
|
||||
@property
|
||||
def total_length(self):
|
||||
return len(self.batch_sampler)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.batch_sampler) - self.skip_batches
|
||||
|
||||
|
||||
class SkipDataLoader(DataLoader):
|
||||
"""
|
||||
Subclass of a PyTorch `DataLoader` that will skip the first batches.
|
||||
|
||||
Args:
|
||||
dataset (`torch.utils.data.dataset.Dataset`):
|
||||
The dataset to use to build this datalaoder.
|
||||
skip_batches (`int`, *optional*, defaults to 0):
|
||||
The number of batches to skip at the beginning.
|
||||
kwargs:
|
||||
All other keyword arguments to pass to the regular `DataLoader` initialization.
|
||||
"""
|
||||
|
||||
def __init__(self, dataset, skip_batches=0, **kwargs):
|
||||
super().__init__(dataset, **kwargs)
|
||||
self.skip_batches = skip_batches
|
||||
|
||||
def __iter__(self):
|
||||
for index, batch in enumerate(super().__iter__()):
|
||||
if index >= self.skip_batches:
|
||||
yield batch
|
||||
|
||||
|
||||
def skip_first_batches(dataloader, num_batches=0):
|
||||
"""
|
||||
Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.
|
||||
"""
|
||||
dataset = dataloader.dataset
|
||||
sampler_is_batch_sampler = False
|
||||
if isinstance(dataset, IterableDataset):
|
||||
new_batch_sampler = None
|
||||
else:
|
||||
sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
|
||||
batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
|
||||
new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches)
|
||||
|
||||
# We ignore all of those since they are all dealt with by our new_batch_sampler
|
||||
ignore_kwargs = [
|
||||
"batch_size",
|
||||
"shuffle",
|
||||
"sampler",
|
||||
"batch_sampler",
|
||||
"drop_last",
|
||||
]
|
||||
|
||||
kwargs = {
|
||||
k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
|
||||
for k in _PYTORCH_DATALOADER_KWARGS
|
||||
if k not in ignore_kwargs
|
||||
}
|
||||
|
||||
# Need to provide batch_size as batch_sampler is None for Iterable dataset
|
||||
if new_batch_sampler is None:
|
||||
kwargs["drop_last"] = dataloader.drop_last
|
||||
kwargs["batch_size"] = dataloader.batch_size
|
||||
|
||||
if isinstance(dataloader, DataLoaderDispatcher):
|
||||
if new_batch_sampler is None:
|
||||
# Need to manually skip batches in the dataloader
|
||||
kwargs["skip_batches"] = num_batches
|
||||
dataloader = DataLoaderDispatcher(
|
||||
dataset,
|
||||
split_batches=dataloader.split_batches,
|
||||
batch_sampler=new_batch_sampler,
|
||||
_drop_last=dataloader._drop_last,
|
||||
**kwargs,
|
||||
)
|
||||
elif isinstance(dataloader, DataLoaderShard):
|
||||
if new_batch_sampler is None:
|
||||
# Need to manually skip batches in the dataloader
|
||||
kwargs["skip_batches"] = num_batches
|
||||
elif sampler_is_batch_sampler:
|
||||
kwargs["sampler"] = new_batch_sampler
|
||||
kwargs["batch_size"] = dataloader.batch_size
|
||||
else:
|
||||
kwargs["batch_sampler"] = new_batch_sampler
|
||||
dataloader = DataLoaderShard(
|
||||
dataset,
|
||||
device=dataloader.device,
|
||||
rng_types=dataloader.rng_types,
|
||||
synchronized_generator=dataloader.synchronized_generator,
|
||||
**kwargs,
|
||||
)
|
||||
else:
|
||||
if new_batch_sampler is None:
|
||||
# Need to manually skip batches in the dataloader
|
||||
dataloader = SkipDataLoader(dataset, skip_batches=num_batches, **kwargs)
|
||||
else:
|
||||
dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs)
|
||||
|
||||
return dataloader
|
||||
|
||||
@ -18,14 +18,7 @@ from typing import Dict, List, Mapping, Optional, Union
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
from .state import PartialState
|
||||
from .utils import (
|
||||
PrefixedDataset,
|
||||
find_device,
|
||||
named_module_tensors,
|
||||
send_to_device,
|
||||
set_module_tensor_to_device,
|
||||
)
|
||||
from .utils import PrefixedDataset, find_device, named_module_tensors, send_to_device, set_module_tensor_to_device
|
||||
|
||||
|
||||
class ModelHook:
|
||||
@ -128,10 +121,8 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False)
|
||||
</Tip>
|
||||
|
||||
Args:
|
||||
module (`torch.nn.Module`):
|
||||
The module to attach a hook to.
|
||||
hook (`ModelHook`):
|
||||
The hook to attach.
|
||||
module (`torch.nn.Module`): The module to attach a hook to.
|
||||
hook (`ModelHook`): The hook to attach.
|
||||
append (`bool`, *optional*, defaults to `False`):
|
||||
Whether the hook should be chained with an existing one (if module already contains a hook) or not.
|
||||
|
||||
@ -238,13 +229,6 @@ class AlignDevicesHook(ModelHook):
|
||||
self.param_original_devices = {}
|
||||
self.buffer_original_devices = {}
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
f"AlignDeviceHook(execution_device={self.execution_device}, offload={self.offload}, "
|
||||
f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, "
|
||||
f"place_submodules={self.place_submodules})"
|
||||
)
|
||||
|
||||
def init_hook(self, module):
|
||||
if not self.offload and self.execution_device is not None:
|
||||
for name, _ in named_module_tensors(module, recurse=self.place_submodules):
|
||||
@ -468,7 +452,7 @@ def attach_align_device_hook_on_blocks(
|
||||
if not isinstance(offload, Mapping):
|
||||
offload = {key: offload for key in execution_device.keys()}
|
||||
|
||||
if module_name in execution_device and module_name in offload and not offload[module_name]:
|
||||
if module_name in execution_device and not offload[module_name]:
|
||||
hook = AlignDevicesHook(
|
||||
execution_device=execution_device[module_name],
|
||||
offload_buffers=offload_buffers,
|
||||
@ -477,7 +461,7 @@ def attach_align_device_hook_on_blocks(
|
||||
)
|
||||
add_hook_to_module(module, hook)
|
||||
attach_execution_device_hook(module, execution_device[module_name])
|
||||
elif module_name in execution_device and module_name in offload:
|
||||
elif module_name in execution_device:
|
||||
attach_align_device_hook(
|
||||
module,
|
||||
execution_device=execution_device[module_name],
|
||||
@ -494,7 +478,7 @@ def attach_align_device_hook_on_blocks(
|
||||
module, execution_device[module_name], preload_module_classes=preload_module_classes
|
||||
)
|
||||
elif module_name == "":
|
||||
hook = AlignDevicesHook(execution_device=execution_device.get(""), io_same_device=True)
|
||||
hook = AlignDevicesHook(io_same_device=True)
|
||||
add_hook_to_module(module, hook)
|
||||
|
||||
for child_name, child in module.named_children():
|
||||
@ -508,54 +492,3 @@ def attach_align_device_hook_on_blocks(
|
||||
module_name=child_name,
|
||||
preload_module_classes=preload_module_classes,
|
||||
)
|
||||
|
||||
|
||||
class CpuOffload(ModelHook):
|
||||
"""
|
||||
Offloads a model on the CPU until its forward pass is called. The model will not be offloaded back to the CPU after
|
||||
the forward, the user needs to call the `init_hook` method again for this.
|
||||
|
||||
Args:
|
||||
execution_device(`str`, `int` or `torch.device`, *optional*):
|
||||
The device on which the model should be executed. Will default to the MPS device if it's available, then
|
||||
GPU 0 if there is a GPU, and finally to the CPU.
|
||||
prev_module_hook (`UserCpuOffloadHook`, *optional*):
|
||||
The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. If
|
||||
passed, its offload method will be called just before the forward of the model to which this hook is
|
||||
attached.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
execution_device: Optional[Union[str, int, torch.device]] = None,
|
||||
prev_module_hook: Optional["UserCpuOffloadHook"] = None,
|
||||
):
|
||||
self.prev_module_hook = prev_module_hook
|
||||
|
||||
self.execution_device = execution_device if execution_device is not None else PartialState().default_device
|
||||
|
||||
def init_hook(self, module):
|
||||
return module.to("cpu")
|
||||
|
||||
def pre_forward(self, module, *args, **kwargs):
|
||||
if self.prev_module_hook is not None:
|
||||
self.prev_module_hook.offload()
|
||||
module.to(self.execution_device)
|
||||
return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device)
|
||||
|
||||
|
||||
class UserCpuOffloadHook:
|
||||
"""
|
||||
A simple hook grouping a model and a `ModelHook`, which provides easy APIs for to call the init method of the hook
|
||||
or remove it entirely.
|
||||
"""
|
||||
|
||||
def __init__(self, model, hook):
|
||||
self.model = model
|
||||
self.hook = hook
|
||||
|
||||
def offload(self):
|
||||
self.hook.init_hook(self.model)
|
||||
|
||||
def remove(self):
|
||||
remove_hook_from_module(self.model)
|
||||
|
||||
@ -15,25 +15,19 @@
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import warnings
|
||||
|
||||
import torch
|
||||
|
||||
from .state import AcceleratorState
|
||||
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
|
||||
from .utils import PrecisionType, PrepareForLaunch, patch_environment
|
||||
|
||||
|
||||
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
|
||||
def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mixed_precision="no", use_port="29500"):
|
||||
"""
|
||||
Launches a training function, using several processes if it's possible in the current environment (TPU with
|
||||
multiple cores for instance).
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
To use this function absolutely zero calls to a CUDA device must be made in the notebook session before calling. If
|
||||
any have been made, you will need to restart the notebook and make sure no cells use any CUDA capability.
|
||||
|
||||
</Tip>
|
||||
|
||||
Args:
|
||||
function (`Callable`):
|
||||
The training function to execute. If it accepts arguments, the first argument should be the index of the
|
||||
@ -47,21 +41,6 @@ def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no
|
||||
If `fp16` or `bf16`, will use mixed precision training on multi-GPU.
|
||||
use_port (`str`, *optional*, defaults to `"29500"`):
|
||||
The port to use to communicate between processes when launching a multi-GPU training.
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
# Assume this is defined in a Jupyter Notebook on an instance with two GPUs
|
||||
from accelerate import notebook_launcher
|
||||
|
||||
|
||||
def train(*args):
|
||||
# Your training function here
|
||||
...
|
||||
|
||||
|
||||
notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16")
|
||||
```
|
||||
"""
|
||||
# Are we in a google colab or a Kaggle Kernel?
|
||||
in_colab = False
|
||||
@ -110,7 +89,6 @@ def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no
|
||||
if num_processes > 1:
|
||||
# Multi-GPU launch
|
||||
from torch.multiprocessing import start_processes
|
||||
from torch.multiprocessing.spawn import ProcessRaisedException
|
||||
|
||||
if len(AcceleratorState._shared_state) > 0:
|
||||
raise ValueError(
|
||||
@ -126,34 +104,35 @@ def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no
|
||||
"function."
|
||||
)
|
||||
|
||||
if use_fp16:
|
||||
warnings.warn(
|
||||
"`fp16=True` is deprecated and will be removed in version 0.15.0 of 🤗 Accelerate. Use `mixed_precision='fp16'` instead.",
|
||||
FutureWarning,
|
||||
)
|
||||
mixed_precision = "fp16"
|
||||
|
||||
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
|
||||
# process here (the other ones will be set be the launcher).
|
||||
with patch_environment(
|
||||
world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
|
||||
):
|
||||
launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
|
||||
|
||||
print(f"Launching training on {num_processes} GPUs.")
|
||||
try:
|
||||
start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
|
||||
except ProcessRaisedException as e:
|
||||
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
|
||||
raise RuntimeError(
|
||||
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
|
||||
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
|
||||
"Please review your imports and test them when running the `notebook_launcher()` to identify "
|
||||
"which one is problematic."
|
||||
) from e
|
||||
start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
|
||||
|
||||
else:
|
||||
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
|
||||
if is_mps_available():
|
||||
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
|
||||
use_mps_device = "false"
|
||||
if torch.backends.mps.is_available():
|
||||
print("Launching training on MPS.")
|
||||
use_mps_device = "true"
|
||||
elif torch.cuda.is_available():
|
||||
print("Launching training on one GPU.")
|
||||
else:
|
||||
print("Launching training on CPU.")
|
||||
function(*args)
|
||||
with patch_environment(use_mps_device=use_mps_device):
|
||||
function(*args)
|
||||
|
||||
|
||||
def debug_launcher(function, args=(), num_processes=2):
|
||||
@ -184,9 +163,9 @@ def debug_launcher(function, args=(), num_processes=2):
|
||||
world_size=num_processes,
|
||||
master_addr="127.0.01",
|
||||
master_port="29500",
|
||||
accelerate_mixed_precision="no",
|
||||
mixed_precision="no",
|
||||
accelerate_debug_rdv_file=tmp_file.name,
|
||||
accelerate_use_cpu="yes",
|
||||
use_cpu="yes",
|
||||
):
|
||||
launcher = PrepareForLaunch(function, debug=True)
|
||||
start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
|
||||
|
||||
@ -13,9 +13,9 @@
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
from .state import PartialState
|
||||
from .state import AcceleratorState
|
||||
from .utils import DistributedType
|
||||
|
||||
|
||||
class MultiProcessAdapter(logging.LoggerAdapter):
|
||||
@ -24,15 +24,17 @@ class MultiProcessAdapter(logging.LoggerAdapter):
|
||||
|
||||
`log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes
|
||||
or only the main executed one. Default is `main_process_only=True`.
|
||||
|
||||
Does not require an `Accelerator` object to be created first.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def _should_log(main_process_only):
|
||||
"Check if log should be performed"
|
||||
state = PartialState()
|
||||
return not main_process_only or (main_process_only and state.is_main_process)
|
||||
state = AcceleratorState()
|
||||
if state.distributed_type != DistributedType.MEGATRON_LM:
|
||||
process_index_flag = state.local_process_index == 0
|
||||
else:
|
||||
process_index_flag = state.process_index == state.num_processes - 1
|
||||
return not main_process_only or (main_process_only and process_index_flag)
|
||||
|
||||
def log(self, level, msg, *args, **kwargs):
|
||||
"""
|
||||
@ -47,7 +49,7 @@ class MultiProcessAdapter(logging.LoggerAdapter):
|
||||
self.logger.log(level, msg, *args, **kwargs)
|
||||
|
||||
|
||||
def get_logger(name: str, log_level: str = None):
|
||||
def get_logger(name: str):
|
||||
"""
|
||||
Returns a `logging.Logger` for `name` that can handle multiprocessing.
|
||||
|
||||
@ -56,8 +58,6 @@ def get_logger(name: str, log_level: str = None):
|
||||
Args:
|
||||
name (`str`):
|
||||
The name for the logger, such as `__file__`
|
||||
log_level (`str`, *optional*):
|
||||
The log level to use. If not passed, will default to the `LOG_LEVEL` environment variable, or `INFO` if not
|
||||
|
||||
Example:
|
||||
|
||||
@ -68,15 +68,7 @@ def get_logger(name: str, log_level: str = None):
|
||||
|
||||
>>> logger.info("My log", main_process_only=False)
|
||||
>>> logger.debug("My log", main_process_only=True)
|
||||
|
||||
>>> logger = get_logger(__name__, log_level="DEBUG")
|
||||
>>> logger.info("My log")
|
||||
>>> logger.debug("My second log")
|
||||
```
|
||||
"""
|
||||
if log_level is None:
|
||||
log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
|
||||
logger = logging.getLogger(name)
|
||||
if log_level is not None:
|
||||
logger.setLevel(log_level.upper())
|
||||
return MultiProcessAdapter(logger, {})
|
||||
|
||||
@ -12,6 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all


import warnings


@ -20,3 +25,5 @@ warnings.warn(
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)

from .utils.memory import find_executable_batch_size

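This shim only re-exports `find_executable_batch_size` with a `FutureWarning`; the decorator itself retries the wrapped function with a halved batch size on CUDA OOM. A minimal, self-contained sketch of the recommended import path (the tiny model and data are illustrative only):

```python
# Minimal sketch of the decorator re-exported by the shim above.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator, find_executable_batch_size


def train():
    accelerator = Accelerator()
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    @find_executable_batch_size(starting_batch_size=1024)
    def inner_loop(batch_size):
        # On CUDA OOM the decorator halves `batch_size` and calls this function again.
        loader = DataLoader(TensorDataset(torch.randn(batch_size, 4)), batch_size=batch_size)
        prepared_model, prepared_optimizer, loader = accelerator.prepare(model, optimizer, loader)
        for (batch,) in loader:
            prepared_optimizer.zero_grad()
            loss = prepared_model(batch).sum()
            accelerator.backward(loss)
            prepared_optimizer.step()

    inner_loop()
```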
@ -16,7 +16,7 @@

import warnings

from .state import AcceleratorState, GradientState
from .state import AcceleratorState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
@ -49,7 +49,6 @@ class AcceleratedScheduler:
self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
self.split_batches = split_batches
self.step_with_optimizer = step_with_optimizer
self.gradient_state = GradientState()

def step(self, *args, **kwargs):
if not self.step_with_optimizer:
@ -58,11 +57,6 @@ class AcceleratedScheduler:
return

# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return

for opt in self.optimizers:
if opt.step_was_skipped:
return
@ -75,9 +69,8 @@ class AcceleratedScheduler:
num_processes = AcceleratorState().num_processes
for _ in range(num_processes):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler, "total_steps"):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*args, **kwargs)
if hasattr(self.scheduler, "total_steps") and self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*args, **kwargs)
else:
self.scheduler.step(*args, **kwargs)


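The hunks above show the two versions of `AcceleratedScheduler.step`: the newer code skips (or only bumps `_step_count`, when `adjust_scheduler` is set) while gradients are still being accumulated, and otherwise steps the wrapped scheduler once per process, with a guard for `OneCycleLR`'s `total_steps`. Users normally never build this class directly; it is what `Accelerator.prepare` returns for a scheduler. A minimal sketch assuming a single optimizer:

```python
# Minimal sketch: Accelerator.prepare wraps a torch scheduler in AcceleratedScheduler.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: 1.0 / (step + 1))

model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)

for _ in range(3):
    optimizer.zero_grad()
    loss = model(torch.randn(8, 4)).sum()
    accelerator.backward(loss)
    optimizer.step()
    scheduler.step()  # steps `num_processes` times internally unless split_batches=True
```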
@ -14,22 +14,15 @@

import os
import warnings
from contextlib import contextmanager
from functools import partial
from typing import Any, Callable, Optional

import torch

from .utils import (
DistributedType,
DynamoBackend,
GradientAccumulationPlugin,
get_ccl_version,
get_int_from_env,
is_ccl_available,
is_deepspeed_available,
is_fp8_available,
is_mps_available,
is_tpu_available,
parse_choice_from_env,
parse_flag_from_env,
@ -41,25 +34,10 @@ if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm


def is_initialized() -> bool:
"""
Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`,
but works as a module method.
"""
return AcceleratorState._shared_state != {}


# Lambda function that does nothing
def do_nothing(*args, **kwargs):
return None


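The module-level `is_initialized` helper added in the hunk above only inspects the shared singleton state, so it can be called before any `Accelerator` exists. A minimal sketch, assuming the import path `accelerate.state.is_initialized` introduced here:

```python
# Minimal sketch of the module-level helper added above.
from accelerate import Accelerator
from accelerate.state import is_initialized

print(is_initialized())  # False before any Accelerator/AcceleratorState is created
accelerator = Accelerator()
print(is_initialized())  # True once the shared state exists
```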
# Inspired by Alex Martelli's 'Borg'.
class PartialState:
class AcceleratorState:
"""
Singleton class that has information about the current training environment and functions to help with process
control. Designed to be used when only process control and device execution states are needed. Does *not* need to
be initialized from `Accelerator`.
Singleton class that has information about the current training environment.

**Available attributes:**

@ -71,26 +49,42 @@ class PartialState:
of mixed precision being performed.
- **num_processes** (`int`) -- The number of processes currently launched in parallel.
- **process_index** (`int`) -- The index of the current process.
- **is_last_process** (`bool`) -- Whether or not the current process is the last one.
- **is_main_process** (`bool`) -- Whether or not the current process is the main one.
- **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
"""

_shared_state = {}

def __init__(self, cpu: bool = False, **kwargs):
|
||||
def __init__(
|
||||
self,
|
||||
mixed_precision: str = None,
|
||||
cpu: bool = False,
|
||||
deepspeed_plugin=None,
|
||||
fsdp_plugin=None,
|
||||
megatron_lm_plugin=None,
|
||||
_from_accelerator: bool = False,
|
||||
**kwargs,
|
||||
):
|
||||
self.__dict__ = self._shared_state
|
||||
if not self.initialized:
|
||||
self._cpu = cpu
|
||||
if parse_flag_from_env("USE_CPU"):
|
||||
cpu = True
|
||||
self._check_initialized(mixed_precision, cpu)
|
||||
self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0)
|
||||
if not getattr(self, "initialized", False):
|
||||
self.backend = None
|
||||
env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None)
|
||||
self.device = torch.device(env_device) if env_device is not None else None
|
||||
self.deepspeed_plugin = None
|
||||
mixed_precision = (
|
||||
parse_choice_from_env("MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision.lower()
|
||||
)
|
||||
if not _from_accelerator:
|
||||
raise ValueError(
|
||||
"Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` "
|
||||
"before using any functionality from the `accelerate` library."
|
||||
)
|
||||
if (
|
||||
os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true"
|
||||
and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO
|
||||
os.environ.get("USE_SAGEMAKER", "false") == "true"
|
||||
and os.environ.get("SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO
|
||||
and not cpu
|
||||
):
|
||||
if os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL:
|
||||
if os.environ.get("SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL:
|
||||
self.distributed_type = DistributedType.MULTI_GPU
|
||||
import smdistributed.dataparallel.torch.torch_smddp # noqa
|
||||
|
||||
@ -100,50 +94,68 @@ class PartialState:
|
||||
self.num_processes = torch.distributed.get_world_size()
|
||||
self.process_index = torch.distributed.get_rank()
|
||||
self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
|
||||
if self.device is None:
|
||||
self.device = torch.device("cuda", self.local_process_index)
|
||||
self.device = torch.device("cuda", self.local_process_index)
|
||||
torch.cuda.set_device(self.device)
|
||||
self.mixed_precision = mixed_precision
|
||||
elif is_tpu_available() and not cpu:
|
||||
self.distributed_type = DistributedType.TPU
|
||||
self.num_processes = xm.xrt_world_size()
|
||||
self.process_index = xm.get_ordinal()
|
||||
self.local_process_index = xm.get_local_ordinal()
|
||||
self.device = xm.xla_device()
|
||||
elif (
|
||||
os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true"
|
||||
and int(os.environ.get("LOCAL_RANK", -1)) != -1
|
||||
and not cpu
|
||||
):
|
||||
if mixed_precision == "bf16":
|
||||
if os.environ.get("DOWNCAST_BF16"):
|
||||
os.environ["XLA_USE_BF16"] = str(0)
|
||||
os.environ["XLA_DOWNCAST_BF16"] = str(1)
|
||||
self.downcast_bfloat = True
|
||||
else:
|
||||
os.environ["XLA_USE_BF16"] = str(1)
|
||||
os.environ["XLA_DOWNCAST_BF16"] = str(0)
|
||||
self.downcast_bfloat = False
|
||||
self.mixed_precision = mixed_precision
|
||||
elif os.environ.get("USE_DEEPSPEED", "false") == "true" and not cpu:
|
||||
assert (
|
||||
is_deepspeed_available()
|
||||
), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source"
|
||||
self.distributed_type = DistributedType.DEEPSPEED
|
||||
if not torch.distributed.is_initialized():
|
||||
from deepspeed import comm as dist
|
||||
from .utils import compare_versions
|
||||
|
||||
# DeepSpeed always uses nccl
|
||||
kwargs.pop("backend", None)
|
||||
self.backend = "nccl"
|
||||
dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
|
||||
if compare_versions("deepspeed", ">", "0.6.5"):
|
||||
from deepspeed import comm as dist
|
||||
|
||||
dist.init_distributed(dist_backend=self.backend)
|
||||
else:
|
||||
torch.distributed.init_process_group(backend="nccl", **kwargs)
|
||||
|
||||
self.num_processes = torch.distributed.get_world_size()
|
||||
self.process_index = torch.distributed.get_rank()
|
||||
self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
|
||||
if self.device is None:
|
||||
self.device = torch.device("cuda", self.local_process_index)
|
||||
self.device = torch.device("cuda", self.local_process_index)
|
||||
torch.cuda.set_device(self.device)
|
||||
self._mixed_precision = "no" # deepspeed handles mixed_precision using deepspeed_config
|
||||
self.mixed_precision = "no" # deepspeed handles mixed_precision using deepspeed_config
|
||||
self.deepspeed_plugin = deepspeed_plugin
|
||||
elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu:
|
||||
self.distributed_type = DistributedType.MULTI_GPU
|
||||
if not torch.distributed.is_initialized():
|
||||
self.backend = kwargs.pop("backend", "nccl")
|
||||
torch.distributed.init_process_group(backend=self.backend, **kwargs)
|
||||
torch.distributed.init_process_group(backend="nccl", **kwargs)
|
||||
self.backend = "nccl"
|
||||
self.num_processes = torch.distributed.get_world_size()
|
||||
self.process_index = torch.distributed.get_rank()
|
||||
self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
|
||||
if self.device is None:
|
||||
self.device = torch.device("cuda", self.local_process_index)
|
||||
self.device = torch.device("cuda", self.local_process_index)
|
||||
torch.cuda.set_device(self.device)
|
||||
self.mixed_precision = mixed_precision
|
||||
if os.environ.get("USE_FSDP", "false") == "true":
|
||||
self.distributed_type = DistributedType.FSDP
|
||||
if self.mixed_precision != "no":
|
||||
fsdp_plugin.set_mixed_precision(self.mixed_precision)
|
||||
self.fsdp_plugin = fsdp_plugin
|
||||
if os.environ.get("USE_MEGATRON_LM", "false") == "true":
|
||||
self.distributed_type = DistributedType.MEGATRON_LM
|
||||
megatron_lm_plugin.set_mixed_precision(self.mixed_precision)
|
||||
self.megatron_lm_plugin = megatron_lm_plugin
|
||||
elif get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1:
|
||||
self.distributed_type = DistributedType.MULTI_CPU
|
||||
if is_ccl_available() and get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0:
|
||||
@ -178,528 +190,82 @@ class PartialState:
|
||||
"please try exporting rank 0's hostname as MASTER_ADDR"
|
||||
)
|
||||
if not torch.distributed.is_initialized():
|
||||
# Backend is not set by the user, we set it here
|
||||
kwargs.pop("nccl_backend", None)
|
||||
torch.distributed.init_process_group(backend, rank=rank, world_size=size, **kwargs)
|
||||
self.backend = backend
|
||||
torch.distributed.init_process_group(self.backend, rank=rank, world_size=size, **kwargs)
|
||||
self.num_processes = torch.distributed.get_world_size()
|
||||
self.process_index = torch.distributed.get_rank()
|
||||
self.local_process_index = local_rank
|
||||
if self.device is None:
|
||||
self.device = torch.device("cpu")
|
||||
self.device = torch.device("cpu")
|
||||
self.mixed_precision = mixed_precision
|
||||
else:
|
||||
self.distributed_type = DistributedType.NO
|
||||
self.num_processes = 1
|
||||
self.process_index = self.local_process_index = 0
|
||||
if parse_flag_from_env("USE_MPS_DEVICE") and not cpu:
|
||||
if not torch.backends.mps.is_available():
|
||||
if not torch.backends.mps.is_built():
|
||||
raise AssertionError(
|
||||
"MPS not available because the current PyTorch install was not "
|
||||
"built with MPS enabled. Please install torch version >=1.12.0 on "
|
||||
"your Apple silicon Mac running macOS 12.3 or later with a native "
|
||||
"version (arm64) of Python"
|
||||
)
|
||||
else:
|
||||
raise AssertionError(
|
||||
"MPS not available because the current MacOS version is not 12.3+ "
|
||||
"and/or you do not have an MPS-enabled device on this machine."
|
||||
)
|
||||
else:
|
||||
from .utils import is_torch_version
|
||||
|
||||
if self.device is None:
|
||||
self.device = torch.device("cpu") if cpu else self.default_device
|
||||
self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0)
|
||||
if not is_torch_version(">", "1.12.0"):
|
||||
warnings.warn(
|
||||
"We strongly recommend to install PyTorch >= 1.13 (nightly version at the time of writing) on your MacOS machine. "
|
||||
"It has major fixes related to model correctness and performance improvements for transformer based models. "
|
||||
"Please refer to https://github.com/pytorch/pytorch/issues/82707 for more details."
|
||||
)
|
||||
self.device = torch.device("mps")
|
||||
elif cpu or not torch.cuda.is_available():
|
||||
self.device = torch.device("cpu")
|
||||
else:
|
||||
self.device = torch.device("cuda")
|
||||
self.mixed_precision = mixed_precision
|
||||
self.initialized = True
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return (
|
||||
def __repr__(self):
|
||||
mixed_precision = self.mixed_precision
|
||||
|
||||
repr = (
|
||||
f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n"
|
||||
f"Num processes: {self.num_processes}\n"
|
||||
f"Process index: {self.process_index}\n"
|
||||
f"Local process index: {self.local_process_index}\n"
|
||||
f"Device: {self.device}\n"
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _reset_state():
|
||||
"Resets `_shared_state`, is used internally and should not be called"
|
||||
PartialState._shared_state = {}
|
||||
|
||||
@property
|
||||
def initialized(self) -> bool:
|
||||
"Returns whether the `PartialState` has been initialized"
|
||||
return self._shared_state != {}
|
||||
|
||||
@property
|
||||
def use_distributed(self):
|
||||
"""
|
||||
Whether the Accelerator is configured for distributed training
|
||||
"""
|
||||
return self.distributed_type != DistributedType.NO and self.num_processes > 1
|
||||
|
||||
@property
|
||||
def is_last_process(self) -> bool:
|
||||
"Returns whether the current process is the last one"
|
||||
return self.process_index == self.num_processes - 1
|
||||
|
||||
@property
|
||||
def is_main_process(self) -> bool:
|
||||
"Returns whether the current process is the main process"
|
||||
return (
|
||||
self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process
|
||||
)
|
||||
|
||||
@property
|
||||
def is_local_main_process(self) -> bool:
|
||||
"Returns whether the current process is the main process on the local node"
|
||||
return (
|
||||
self.local_process_index == 0
|
||||
if self.distributed_type != DistributedType.MEGATRON_LM
|
||||
else self.is_last_process
|
||||
)
|
||||
|
||||
def wait_for_everyone(self):
|
||||
"""
|
||||
Will stop the execution of the current process until every other process has reached that point (so this does
|
||||
nothing when the script is only run in one process). Useful to do before saving a model.
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> # Assuming two GPU processes
|
||||
>>> import time
|
||||
>>> from accelerate.state import PartialState
|
||||
|
||||
>>> state = PartialState()
|
||||
>>> if state.is_main_process:
|
||||
... time.sleep(2)
|
||||
>>> else:
|
||||
... print("I'm waiting for the main process to finish its sleep...")
|
||||
>>> state.wait_for_everyone()
|
||||
>>> # Should print on every process at the same time
|
||||
>>> print("Everyone is here")
|
||||
```
|
||||
"""
|
||||
if self.distributed_type in (
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_CPU,
|
||||
DistributedType.DEEPSPEED,
|
||||
DistributedType.FSDP,
|
||||
):
|
||||
torch.distributed.barrier()
|
||||
elif self.distributed_type == DistributedType.TPU:
|
||||
xm.rendezvous("accelerate.utils.wait_for_everyone")
|
||||
|
||||
def _goes_first(self, is_main: bool):
|
||||
if not is_main:
|
||||
self.wait_for_everyone()
|
||||
|
||||
yield
|
||||
|
||||
if is_main:
|
||||
self.wait_for_everyone()
|
||||
|
||||
@contextmanager
|
||||
def main_process_first(self):
|
||||
"""
|
||||
Lets the main process go first inside a with block.
|
||||
|
||||
The other processes will enter the with block after the main process exits.
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> from accelerate import Accelerator
|
||||
|
||||
>>> accelerator = Accelerator()
|
||||
>>> with accelerator.main_process_first():
|
||||
... # This will be printed first by process 0 then in a seemingly
|
||||
... # random order by the other processes.
|
||||
... print(f"This will be printed by process {accelerator.process_index}")
|
||||
```
|
||||
"""
|
||||
yield from self._goes_first(self.is_main_process)
|
||||
|
||||
@contextmanager
|
||||
def local_main_process_first(self):
|
||||
"""
|
||||
Lets the local main process go inside a with block.
|
||||
|
||||
The other processes will enter the with block after the main process exits.
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> from accelerate.state import PartialState
|
||||
|
||||
>>> state = PartialState()
|
||||
>>> with state.local_main_process_first():
|
||||
... # This will be printed first by local process 0 then in a seemingly
|
||||
... # random order by the other processes.
|
||||
... print(f"This will be printed by process {state.local_process_index}")
|
||||
```
|
||||
"""
|
||||
yield from self._goes_first(self.is_local_main_process)
|
||||
|
||||
def on_main_process(self, function: Callable[..., Any] = None):
|
||||
"""
|
||||
Decorator that only runs the decorated function on the main process.
|
||||
|
||||
Args:
|
||||
function (`Callable`): The function to decorate.
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> from accelerate.state import PartialState
|
||||
|
||||
>>> state = PartialState()
|
||||
|
||||
|
||||
>>> @state.on_main_process
|
||||
... def print_something():
|
||||
... print("This will be printed by process 0 only.")
|
||||
|
||||
|
||||
>>> print_something()
|
||||
"This will be printed by process 0 only"
|
||||
```
|
||||
"""
|
||||
if not self.initialized:
|
||||
raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.")
|
||||
if self.is_main_process or not self.use_distributed:
|
||||
return function
|
||||
return do_nothing
|
||||
|
||||
def on_local_main_process(self, function: Callable[..., Any] = None):
|
||||
"""
|
||||
Decorator that only runs the decorated function on the local main process.
|
||||
|
||||
Args:
|
||||
function (`Callable`): The function to decorate.
|
||||
|
||||
Example:
|
||||
```python
|
||||
# Assume we have 2 servers with 4 processes each.
|
||||
from accelerate.state import PartialState
|
||||
|
||||
state = PartialState()
|
||||
|
||||
|
||||
@state.on_local_main_process
|
||||
def print_something():
|
||||
print("This will be printed by process 0 only on each server.")
|
||||
|
||||
|
||||
print_something()
|
||||
# On server 1:
|
||||
"This will be printed by process 0 only"
|
||||
# On server 2:
|
||||
"This will be printed by process 0 only"
|
||||
```
|
||||
"""
|
||||
if self.is_local_main_process or not self.use_distributed:
|
||||
return function
|
||||
return do_nothing
|
||||
|
||||
def on_last_process(self, function: Callable[..., Any]):
|
||||
"""
|
||||
Decorator that only runs the decorated function on the last process.
|
||||
|
||||
Args:
|
||||
function (`Callable`): The function to decorate.
|
||||
|
||||
Example:
|
||||
```python
|
||||
# Assume we have 4 processes.
|
||||
from accelerate.state import PartialState
|
||||
|
||||
state = PartialState()
|
||||
|
||||
|
||||
@state.on_last_process
|
||||
def print_something():
|
||||
print(f"Printed on process {state.process_index}")
|
||||
|
||||
|
||||
print_something()
|
||||
"Printed on process 3"
|
||||
```
|
||||
"""
|
||||
if self.is_last_process or not self.use_distributed:
|
||||
return function
|
||||
return do_nothing
|
||||
|
||||
def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
|
||||
"""
|
||||
Decorator that only runs the decorated function on the process with the given index.
|
||||
|
||||
Args:
|
||||
function (`Callable`, `optional`):
|
||||
The function to decorate.
|
||||
process_index (`int`, `optional`):
|
||||
The index of the process on which to run the function.
|
||||
|
||||
Example:
|
||||
```python
|
||||
# Assume we have 4 processes.
|
||||
from accelerate.state import PartialState
|
||||
|
||||
state = PartialState()
|
||||
|
||||
|
||||
@state.on_process(process_index=2)
|
||||
def print_something():
|
||||
print(f"Printed on process {state.process_index}")
|
||||
|
||||
|
||||
print_something()
|
||||
"Printed on process 2"
|
||||
```
|
||||
"""
|
||||
if function is None:
|
||||
return partial(self.on_process, process_index=process_index)
|
||||
if (self.process_index == process_index) or (not self.use_distributed):
|
||||
return function
|
||||
return do_nothing
|
||||
|
||||
def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
|
||||
"""
|
||||
Decorator that only runs the decorated function on the process with the given index on the current node.
|
||||
|
||||
Args:
|
||||
function (`Callable`, *optional*):
|
||||
The function to decorate.
|
||||
local_process_index (`int`, *optional*):
|
||||
The index of the local process on which to run the function.
|
||||
|
||||
Example:
|
||||
```python
|
||||
# Assume we have 2 servers with 4 processes each.
|
||||
from accelerate import Accelerator
|
||||
|
||||
accelerator = Accelerator()
|
||||
|
||||
|
||||
@accelerator.on_local_process(local_process_index=2)
|
||||
def print_something():
|
||||
print(f"Printed on process {accelerator.local_process_index}")
|
||||
|
||||
|
||||
print_something()
|
||||
# On server 1:
|
||||
"Printed on process 2"
|
||||
# On server 2:
|
||||
"Printed on process 2"
|
||||
```
|
||||
"""
|
||||
if function is None:
|
||||
return partial(self.on_local_process, local_process_index=local_process_index)
|
||||
if (self.local_process_index == local_process_index) or (not self.use_distributed):
|
||||
return function
|
||||
return do_nothing
|
||||
|
||||
def print(self, *args, **kwargs):
|
||||
if self.is_local_main_process:
|
||||
print(*args, **kwargs)
|
||||
|
||||
@property
|
||||
def default_device(self) -> torch.device:
|
||||
"""
|
||||
Returns the default device which is:
|
||||
- MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True.
|
||||
- CUDA if `torch.cuda.is_available()`
|
||||
- CPU otherwise
|
||||
"""
|
||||
if is_mps_available():
|
||||
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
|
||||
return torch.device("mps")
|
||||
elif torch.cuda.is_available():
|
||||
return torch.device("cuda")
|
||||
else:
|
||||
return torch.device("cpu")
|
||||
|
||||
|
||||
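The `PartialState` methods introduced above (`wait_for_everyone`, `main_process_first`, the `on_*` decorators, `print`, `default_device`) provide process control without constructing a full `Accelerator`. A minimal sketch combining a few of them, based on the APIs shown in these hunks:

```python
# Minimal sketch of PartialState-only process control.
from accelerate.state import PartialState

state = PartialState()
state.print(f"Running on {state.device} with {state.num_processes} process(es)")

with state.main_process_first():
    # e.g. download/cache a dataset once before the other ranks read it
    pass


@state.on_last_process
def announce_done():
    print(f"Process {state.process_index} (the last one) finished")


state.wait_for_everyone()
announce_done()  # no-op on every process except the last one
```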
class AcceleratorState:
|
||||
"""
|
||||
Singleton class that has information about the current training environment.
|
||||
|
||||
**Available attributes:**
|
||||
|
||||
- **device** (`torch.device`) -- The device to use.
|
||||
- **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
|
||||
in use.
|
||||
- **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`.
|
||||
- **local_process_index** (`int`) -- The index of the current process on the current server.
|
||||
- **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
|
||||
of mixed precision being performed.
|
||||
- **num_processes** (`int`) -- The number of processes currently launched in parallel.
|
||||
- **process_index** (`int`) -- The index of the current process.
|
||||
- **is_last_process** (`bool`) -- Whether or not the current process is the last one.
|
||||
- **is_main_process** (`bool`) -- Whether or not the current process is the main one.
|
||||
- **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
|
||||
"""
|
||||
|
||||
_shared_state = {}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
mixed_precision: str = None,
|
||||
cpu: bool = False,
|
||||
dynamo_plugin=None,
|
||||
deepspeed_plugin=None,
|
||||
fsdp_plugin=None,
|
||||
megatron_lm_plugin=None,
|
||||
ipex_plugin=None,
|
||||
_from_accelerator: bool = False,
|
||||
**kwargs,
|
||||
):
|
||||
self.__dict__ = self._shared_state
|
||||
if parse_flag_from_env("ACCELERATE_USE_CPU"):
|
||||
cpu = True
|
||||
if PartialState._shared_state == {}:
|
||||
PartialState(cpu, **kwargs)
|
||||
self.__dict__.update(PartialState._shared_state)
|
||||
self._check_initialized(mixed_precision, cpu)
|
||||
if not self.initialized:
|
||||
self.deepspeed_plugin = None
|
||||
self.ipex_plugin = None
|
||||
mixed_precision = (
|
||||
parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no")
|
||||
if mixed_precision is None
|
||||
else mixed_precision.lower()
|
||||
)
|
||||
if mixed_precision == "fp8" and not is_fp8_available():
|
||||
raise ValueError("Using `fp8` precision requires `transformer_engine` to be installed.")
|
||||
self.dynamo_plugin = dynamo_plugin
|
||||
if not _from_accelerator:
|
||||
raise ValueError(
|
||||
"Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` "
|
||||
"before using any functionality from the `accelerate` library."
|
||||
)
|
||||
# deepspeed handles mixed_precision using deepspeed_config
|
||||
self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision
|
||||
if self.distributed_type == DistributedType.TPU:
|
||||
if mixed_precision == "bf16":
|
||||
if os.environ.get("ACCELERATE_DOWNCAST_BF16"):
|
||||
os.environ["XLA_USE_BF16"] = str(0)
|
||||
os.environ["XLA_DOWNCAST_BF16"] = str(1)
|
||||
self.downcast_bfloat = True
|
||||
else:
|
||||
os.environ["XLA_USE_BF16"] = str(1)
|
||||
os.environ["XLA_DOWNCAST_BF16"] = str(0)
|
||||
self.downcast_bfloat = False
|
||||
elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu:
|
||||
self.deepspeed_plugin = deepspeed_plugin
|
||||
elif self.distributed_type == DistributedType.MULTI_GPU:
|
||||
if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true":
|
||||
self.distributed_type = DistributedType.FSDP
|
||||
if self._mixed_precision != "no":
|
||||
fsdp_plugin.set_mixed_precision(self._mixed_precision)
|
||||
self.fsdp_plugin = fsdp_plugin
|
||||
if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true":
|
||||
self.distributed_type = DistributedType.MEGATRON_LM
|
||||
megatron_lm_plugin.set_mixed_precision(self._mixed_precision)
|
||||
self.megatron_lm_plugin = megatron_lm_plugin
|
||||
elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.NO]:
|
||||
if self.device.type == "cpu" and ipex_plugin is not None:
|
||||
self.ipex_plugin = ipex_plugin if ipex_plugin.use_ipex else None
|
||||
if self.ipex_plugin is not None:
|
||||
self.ipex_plugin.set_mixed_precision(mixed_precision)
|
||||
if (
|
||||
self.dynamo_plugin.backend != DynamoBackend.NO
|
||||
and self._mixed_precision == "no"
|
||||
and self.device.type == "cuda"
|
||||
):
|
||||
torch.backends.cuda.matmul.allow_tf32 = True
|
||||
PartialState._shared_state["distributed_type"] = self.distributed_type
|
||||
|
||||
@property
|
||||
def initialized(self) -> bool:
|
||||
return self._shared_state != PartialState._shared_state
|
||||
|
||||
def __repr__(self):
|
||||
repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n"
|
||||
if self.distributed_type == DistributedType.DEEPSPEED:
|
||||
repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n"
|
||||
else:
|
||||
repr += f"Mixed precision type: {mixed_precision}\n"
|
||||
return repr
|
||||
|
||||
def _check_initialized(self, mixed_precision=None, cpu=None):
|
||||
"Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized"
|
||||
if self.initialized:
|
||||
err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`."
|
||||
if cpu and self.device.type != "cpu":
|
||||
raise ValueError(err.format(flag="cpu=True"))
|
||||
if (
|
||||
mixed_precision is not None
|
||||
and mixed_precision != self._mixed_precision
|
||||
and self.distributed_type != DistributedType.DEEPSPEED
|
||||
):
|
||||
raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'"))
|
||||
|
||||
# For backward compatibility
|
||||
@property
|
||||
def use_fp16(self):
|
||||
warnings.warn(
|
||||
"The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use "
|
||||
"`AcceleratorState.mixed_precision == 'fp16'` instead.",
|
||||
FutureWarning,
|
||||
)
|
||||
return self._mixed_precision != "no"
|
||||
|
||||
@property
|
||||
def mixed_precision(self):
|
||||
if self.distributed_type == DistributedType.DEEPSPEED:
|
||||
config = self.deepspeed_plugin.deepspeed_config
|
||||
if config.get("fp16", {}).get("enabled", False):
|
||||
mixed_precision = "fp16"
|
||||
elif config.get("bf16", {}).get("enabled", False):
|
||||
mixed_precision = "bf16"
|
||||
else:
|
||||
mixed_precision = "no"
|
||||
else:
|
||||
mixed_precision = self._mixed_precision
|
||||
return mixed_precision
|
||||
return self.mixed_precision != "no"
|
||||
|
||||
@staticmethod
|
||||
def _reset_state(reset_partial_state: bool = False):
|
||||
def _reset_state():
|
||||
"Resets `_shared_state`, is used internally and should not be called"
|
||||
AcceleratorState._shared_state = {}
|
||||
if reset_partial_state:
|
||||
PartialState._reset_state()
|
||||
|
||||
@property
|
||||
def use_distributed(self):
|
||||
"""
|
||||
Whether the Accelerator is configured for distributed training
|
||||
"""
|
||||
return PartialState().use_distributed
|
||||
|
||||
@property
|
||||
def is_last_process(self) -> bool:
|
||||
"Returns whether the current process is the last one"
|
||||
return PartialState().is_last_process
|
||||
|
||||
@property
|
||||
def is_main_process(self) -> bool:
|
||||
"Returns whether the current process is the main process"
|
||||
return PartialState().is_main_process
|
||||
|
||||
@property
|
||||
def is_local_main_process(self) -> bool:
|
||||
"Returns whether the current process is the main process on the local node"
|
||||
return PartialState().is_local_main_process
|
||||
|
||||
def wait_for_everyone(self):
|
||||
PartialState().wait_for_everyone()
|
||||
|
||||
@contextmanager
|
||||
def main_process_first(self):
|
||||
"""
|
||||
Lets the main process go first inside a with block.
|
||||
|
||||
The other processes will enter the with block after the main process exits.
|
||||
"""
|
||||
with PartialState().main_process_first():
|
||||
yield
|
||||
|
||||
@contextmanager
|
||||
def local_main_process_first(self):
|
||||
"""
|
||||
Lets the local main process go inside a with block.
|
||||
|
||||
The other processes will enter the with block after the main process exits.
|
||||
"""
|
||||
with PartialState().local_main_process_first():
|
||||
yield
|
||||
|
||||
def print(self, *args, **kwargs):
|
||||
PartialState().print(*args, **kwargs)
|
||||
def _check_initialized(self, mixed_precision=None, cpu=None):
|
||||
"Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized"
|
||||
if getattr(self, "initialized", False):
|
||||
err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerate()`."
|
||||
if cpu and self.device.type != "cpu":
|
||||
raise ValueError(err.format(flag="cpu=True"))
|
||||
if mixed_precision is not None and mixed_precision != self.mixed_precision:
|
||||
raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'"))
|
||||
|
||||
|
||||
class GradientState:
@ -711,51 +277,23 @@ class GradientState:
- **end_of_dataloader** (`bool`) -- Whether we have reached the end of the current dataloader
- **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader
- **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices
- **active_dataloader** (`Optional[DataLoader]`) -- The dataloader that is currently being iterated over
- **dataloader_references** (`List[Optional[DataLoader]]`) -- A list of references to the dataloaders that are
being iterated over
- **num_steps** (`int`) -- The number of steps to accumulate over
- **adjust_scheduler** (`bool`) -- Whether the scheduler should be adjusted to account for the gradient
accumulation
"""

_shared_state = {}

def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None):
|
||||
def __init__(self):
|
||||
self.__dict__ = self._shared_state
|
||||
if not self.initialized:
|
||||
if not getattr(self, "initialized", False):
|
||||
self.sync_gradients = True
|
||||
self.end_of_dataloader = False
|
||||
self.remainder = -1
|
||||
self.active_dataloader = None
|
||||
self.dataloader_references = [None]
|
||||
self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs()
|
||||
|
||||
# Plugin args are different and can be updated
|
||||
if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs():
|
||||
self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs()
|
||||
|
||||
@property
|
||||
def num_steps(self) -> int:
|
||||
"Returns the number of steps to accumulate over"
|
||||
return self.plugin_kwargs.get("num_steps", 1)
|
||||
|
||||
@property
|
||||
def adjust_scheduler(self) -> bool:
|
||||
"Returns whether the scheduler should be adjusted"
|
||||
return self.plugin_kwargs.get("adjust_scheduler", False)
|
||||
|
||||
@property
|
||||
def initialized(self) -> bool:
|
||||
"Returns whether the `GradientState` has been initialized"
|
||||
return GradientState._shared_state != {}
|
||||
self.initialized = True
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
f"Sync Gradients: {self.sync_gradients}\n"
|
||||
f"At end of current dataloader: {self.end_of_dataloader}\n"
|
||||
f"Extra samples added: {self.remainder}\n"
|
||||
f"Gradient accumulation plugin: {self.plugin_kwargs}\n"
|
||||
f"Extra samples added: {self.remainder}"
|
||||
)
|
||||
|
||||
def _set_sync_gradients(self, sync_gradients):
|
||||
@ -769,25 +307,3 @@ class GradientState:
|
||||
def _set_remainder(self, remainder):
|
||||
"Private function that sets the number of remaining samples at the end of the dataloader. Users should not have to call this."
|
||||
self.remainder = remainder
|
||||
|
||||
def _add_dataloader(self, dataloader):
|
||||
"Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this."
|
||||
self.active_dataloader = dataloader
|
||||
self.dataloader_references.append(self.active_dataloader)
|
||||
self._set_end_of_dataloader(False)
|
||||
|
||||
def _remove_dataloader(self, dataloader):
|
||||
"Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this."
|
||||
self.dataloader_references.remove(dataloader)
|
||||
self.active_dataloader = self.dataloader_references[-1]
|
||||
self._set_end_of_dataloader(True)
|
||||
|
||||
@property
|
||||
def in_dataloader(self) -> bool:
|
||||
"Returns whether the current process is in a dataloader"
|
||||
return self.active_dataloader is not None
|
||||
|
||||
@staticmethod
|
||||
def _reset_state():
|
||||
"Resets `_shared_state`, is used internally and should not be called"
|
||||
GradientState._shared_state = {}
|
||||
|
||||
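`GradientState` above is the shared bookkeeping behind gradient accumulation: the newer code feeds it a `GradientAccumulationPlugin`, and `sync_gradients` / `num_steps` / `adjust_scheduler` decide when optimizers and schedulers actually step. It is usually only touched indirectly through `Accelerator`. A minimal sketch of the user-facing side, assuming `Accelerator` accepts the plugin directly as in later releases:

```python
# Minimal sketch: gradient accumulation driven by the state tracked in GradientState.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

plugin = GradientAccumulationPlugin(num_steps=4, adjust_scheduler=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = DataLoader(TensorDataset(torch.randn(64, 4)), batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for (batch,) in dataloader:
    with accelerator.accumulate(model):
        loss = model(batch).sum()
        accelerator.backward(loss)
        optimizer.step()      # only truly steps when accelerator.sync_gradients is True
        optimizer.zero_grad()
```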
@ -1,12 +1,14 @@
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_safetensors,
require_single_gpu,
require_torch_min_version,
require_tpu,
@ -16,4 +18,4 @@ from .testing import (
from .training import RegressionDataset, RegressionModel


from .scripts import test_script, test_sync, test_ops # isort: skip
from .scripts import test_script, test_sync # isort:skip

@ -16,15 +16,15 @@ import argparse
|
||||
import json
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
|
||||
MAX_GPU_BATCH_SIZE = 16
|
||||
|
||||
@ -13,32 +13,28 @@
|
||||
# limitations under the License.
|
||||
|
||||
import math
|
||||
import os
|
||||
from copy import deepcopy
|
||||
|
||||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import datasets
|
||||
import evaluate
|
||||
import torch
|
||||
import transformers
|
||||
from datasets import load_dataset
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate.test_utils import RegressionDataset, RegressionModel
|
||||
from accelerate.utils import is_tpu_available, set_seed
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
||||
|
||||
|
||||
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
|
||||
|
||||
|
||||
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
|
||||
def get_basic_setup(accelerator, num_samples=82):
|
||||
"Returns everything needed to perform basic training"
|
||||
set_seed(42)
|
||||
model = RegressionModel()
|
||||
ddp_model = deepcopy(model)
|
||||
dset = RegressionDataset(length=num_samples)
|
||||
dataloader = DataLoader(dset, batch_size=batch_size)
|
||||
dataloader = DataLoader(dset, batch_size=16)
|
||||
model.to(accelerator.device)
|
||||
ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
|
||||
return model, ddp_model, dataloader
|
||||
@ -88,17 +84,15 @@ def generate_predictions(model, dataloader, accelerator):
|
||||
logit, target = accelerator.gather_for_metrics((logit, target))
|
||||
logits_and_targets.append((logit, target))
|
||||
logits, targs = [], []
|
||||
for logit, targ in logits_and_targets:
|
||||
for (logit, targ) in logits_and_targets:
|
||||
logits.append(logit)
|
||||
targs.append(targ)
|
||||
logits, targs = torch.cat(logits), torch.cat(targs)
|
||||
return logits, targs
|
||||
|
||||
|
||||
def test_torch_metrics(
|
||||
accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
|
||||
):
|
||||
model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
|
||||
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False):
|
||||
model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples)
|
||||
logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
|
||||
assert (
|
||||
len(logits) == num_samples
|
||||
@ -165,11 +159,6 @@ def main():
|
||||
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
|
||||
test_torch_metrics(accelerator, 99)
|
||||
accelerator.state._reset_state()
|
||||
if accelerator.is_local_main_process:
|
||||
print("**Test last batch is not dropped when perfectly divisible**")
|
||||
accelerator = Accelerator()
|
||||
test_torch_metrics(accelerator, 512)
|
||||
accelerator.state._reset_state()
|
||||
|
||||
|
||||
def _mp_fn(index):
|
||||
|
||||
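The metric test above relies on `Accelerator.gather_for_metrics`, which gathers per-process tensors and drops the duplicated samples added by `even_batches` padding (the `remainder` tracked in `GradientState`), so the gathered length matches the dataset length. A minimal sketch of the pattern the test exercises (the toy model and data are illustrative only):

```python
# Minimal sketch of the gather_for_metrics pattern used in test_metrics.py above.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
dataset = TensorDataset(torch.randn(82, 4), torch.randint(0, 2, (82,)))
dataloader = DataLoader(dataset, batch_size=16)
model, dataloader = accelerator.prepare(model, dataloader)

all_logits, all_targets = [], []
model.eval()
with torch.no_grad():
    for inputs, targets in dataloader:
        logits = model(inputs)
        # Drops padded duplicates so the gathered count matches len(dataset).
        logits, targets = accelerator.gather_for_metrics((logits, targets))
        all_logits.append(logits)
        all_targets.append(targets)
print(torch.cat(all_logits).shape, torch.cat(all_targets).shape)
```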
@ -18,13 +18,13 @@ import json
|
||||
import os
|
||||
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
|
||||
MAX_GPU_BATCH_SIZE = 16
|
||||
|
||||
@ -16,15 +16,15 @@ import argparse
|
||||
import json
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
|
||||
MAX_GPU_BATCH_SIZE = 16
|
||||
|
||||
@ -14,25 +14,12 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
import warnings
|
||||
from typing import List
|
||||
from unittest.mock import Mock
|
||||
|
||||
import torch
|
||||
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
|
||||
from torch.utils.data import DataLoader, TensorDataset
|
||||
|
||||
from accelerate.accelerator import Accelerator
|
||||
from accelerate.utils.dataclasses import DistributedType
|
||||
|
||||
|
||||
class DummyIterableDataset(IterableDataset):
|
||||
def __init__(self, data):
|
||||
self.data = data
|
||||
|
||||
def __iter__(self):
|
||||
for element in self.data:
|
||||
yield element
|
||||
|
||||
|
||||
def create_accelerator(even_batches=True):
|
||||
@ -41,14 +28,11 @@ def create_accelerator(even_batches=True):
|
||||
return accelerator
|
||||
|
||||
|
||||
def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
|
||||
def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int):
|
||||
"""
|
||||
Create a simple DataLoader to use during the test cases
|
||||
"""
|
||||
if iterable:
|
||||
dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
|
||||
else:
|
||||
dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
|
||||
dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
|
||||
|
||||
dl = DataLoader(dataset, batch_size=batch_size)
|
||||
dl = accelerator.prepare(dl)
|
||||
@ -77,6 +61,7 @@ def verify_dataloader_batch_sizes(
|
||||
|
||||
|
||||
def test_default_ensures_even_batch_sizes():
|
||||
|
||||
accelerator = create_accelerator()
|
||||
|
||||
# without padding, we would expect a different number of batches
|
||||
@ -118,95 +103,7 @@ def test_can_disable_even_batches():
|
||||
)
|
||||
|
||||
|
||||
def test_can_join_uneven_inputs():
|
||||
accelerator = create_accelerator(even_batches=False)
|
||||
|
||||
model = torch.nn.Linear(1, 1)
|
||||
ddp_model = accelerator.prepare(model)
|
||||
|
||||
dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
|
||||
|
||||
batch_idxs = []
|
||||
with accelerator.join_uneven_inputs([ddp_model]):
|
||||
for batch_idx, batch in enumerate(dl):
|
||||
output = ddp_model(batch[0].float())
|
||||
loss = output.sum()
|
||||
loss.backward()
|
||||
batch_idxs.append(batch_idx)
|
||||
|
||||
accelerator.wait_for_everyone()
|
||||
|
||||
if accelerator.process_index == 0:
|
||||
assert batch_idxs == [0, 1]
|
||||
elif accelerator.process_index == 1:
|
||||
assert batch_idxs == [0]
|
||||
|
||||
|
||||
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
with accelerator.join_uneven_inputs([Mock()]):
|
||||
pass
|
||||
|
||||
assert issubclass(w[-1].category, UserWarning)
|
||||
assert "only supported for multi-GPU" in str(w[-1].message)
|
||||
|
||||
|
||||
def test_join_can_override_even_batches():
|
||||
default_even_batches = True
|
||||
overridden_even_batches = False
|
||||
accelerator = create_accelerator(even_batches=default_even_batches)
|
||||
model = torch.nn.Linear(1, 1)
|
||||
ddp_model = accelerator.prepare(model)
|
||||
train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
|
||||
valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
|
||||
|
||||
with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
|
||||
train_dl_overridden_value = train_dl.batch_sampler.even_batches
|
||||
valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
|
||||
|
||||
assert train_dl_overridden_value == overridden_even_batches
|
||||
assert valid_dl_overridden_value == overridden_even_batches
|
||||
assert train_dl.batch_sampler.even_batches == default_even_batches
|
||||
assert valid_dl.batch_sampler.even_batches == default_even_batches
|
||||
|
||||
|
||||
def test_join_can_override_for_mixed_type_dataloaders():
|
||||
default_even_batches = True
|
||||
overridden_even_batches = False
|
||||
accelerator = create_accelerator(even_batches=default_even_batches)
|
||||
model = torch.nn.Linear(1, 1)
|
||||
ddp_model = accelerator.prepare(model)
|
||||
create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
|
||||
batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings("ignore")
|
||||
try:
|
||||
with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
|
||||
batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
|
||||
except AttributeError:
|
||||
# ensure attribute error is not raised when processing iterable dl
|
||||
raise AssertionError
|
||||
|
||||
assert batch_dl_overridden_value == overridden_even_batches
|
||||
assert batch_dl.batch_sampler.even_batches == default_even_batches
|
||||
|
||||
|
||||
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
|
||||
accelerator = create_accelerator()
|
||||
model = torch.nn.Linear(1, 1)
|
||||
ddp_model = accelerator.prepare(model)
|
||||
create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
|
||||
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
|
||||
pass
|
||||
|
||||
assert issubclass(w[-1].category, UserWarning)
|
||||
assert "only supported for map-style datasets" in str(w[-1].message)
|
||||
|
||||
|
||||
def main():
|
||||
if __name__ == "__main__":
|
||||
accelerator = create_accelerator()
|
||||
|
||||
accelerator.print("Test that even_batches variable ensures uniform batches across processes")
|
||||
@ -214,25 +111,3 @@ def main():
|
||||
|
||||
accelerator.print("Run tests with even_batches disabled")
|
||||
test_can_disable_even_batches()
|
||||
|
||||
accelerator.print("Test joining uneven inputs")
|
||||
test_can_join_uneven_inputs()
|
||||
|
||||
accelerator.print("Test overriding even_batches when joining uneven inputs")
|
||||
test_join_can_override_even_batches()
|
||||
|
||||
accelerator.print("Test overriding even_batches for mixed dataloader types")
|
||||
test_join_can_override_for_mixed_type_dataloaders()
|
||||
|
||||
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
|
||||
test_join_raises_warning_for_iterable_when_overriding_even_batches()
|
||||
|
||||
accelerator.print("Test join with non DDP distributed raises warning")
|
||||
original_state = accelerator.state.distributed_type
|
||||
accelerator.state.distributed_type = DistributedType.FSDP
|
||||
test_join_raises_warning_for_non_ddp_distributed(accelerator)
|
||||
accelerator.state.distributed_type = original_state
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
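The test script above exercises `Accelerator.join_uneven_inputs`, which wraps PyTorch's `Join` context so that DDP ranks with fewer batches (when `even_batches=False`) can finish without deadlocking, and which can temporarily override `even_batches` on prepared dataloaders. A minimal sketch of the pattern being tested, assuming two processes:

```python
# Minimal sketch of the join_uneven_inputs pattern exercised by the test above.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(even_batches=False)
model = accelerator.prepare(torch.nn.Linear(1, 1))
# 3 samples across 2 processes -> one rank receives an extra batch.
dataloader = accelerator.prepare(
    DataLoader(TensorDataset(torch.arange(3, dtype=torch.float32)), batch_size=1)
)

with accelerator.join_uneven_inputs([model]):
    for (batch,) in dataloader:
        loss = model(batch.unsqueeze(-1)).sum()
        accelerator.backward(loss)
```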
@ -1,93 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import torch
|
||||
|
||||
from accelerate import PartialState
|
||||
from accelerate.utils.operations import broadcast, gather, pad_across_processes, reduce
|
||||
|
||||
|
||||
def create_tensor(state):
|
||||
return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
|
||||
|
||||
|
||||
def test_gather(state):
|
||||
tensor = create_tensor(state)
|
||||
gathered_tensor = gather(tensor)
|
||||
assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
|
||||
|
||||
|
||||
def test_broadcast(state):
|
||||
tensor = create_tensor(state)
|
||||
broadcasted_tensor = broadcast(tensor)
|
||||
assert broadcasted_tensor.shape == torch.Size([state.num_processes])
|
||||
assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
|
||||
|
||||
|
||||
def test_pad_across_processes(state):
|
||||
# We need to pad the tensor with one more element if we are the main process
|
||||
# to ensure that we can pad
|
||||
if state.is_main_process:
|
||||
tensor = torch.arange(state.num_processes + 1).to(state.device)
|
||||
else:
|
||||
tensor = torch.arange(state.num_processes).to(state.device)
|
||||
padded_tensor = pad_across_processes(tensor)
|
||||
assert padded_tensor.shape == torch.Size([state.num_processes + 1])
|
||||
if not state.is_main_process:
|
||||
assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
|
||||
|
||||
|
||||
def test_reduce_sum(state):
|
||||
# For now runs on only two processes
|
||||
if state.num_processes != 2:
|
||||
return
|
||||
tensor = create_tensor(state)
|
||||
reduced_tensor = reduce(tensor, "sum")
|
||||
truth_tensor = torch.tensor([4.0, 6]).to(state.device)
|
||||
assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
|
||||
|
||||
|
||||
def test_reduce_mean(state):
|
||||
# For now runs on only two processes
|
||||
if state.num_processes != 2:
|
||||
return
|
||||
tensor = create_tensor(state)
|
||||
reduced_tensor = reduce(tensor, "mean")
|
||||
truth_tensor = torch.tensor([2.0, 3]).to(state.device)
|
||||
assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
|
||||
|
||||
|
||||
def _mp_fn(index):
|
||||
# For xla_spawn (TPUs)
|
||||
main()
|
||||
|
||||
|
||||
def main():
|
||||
state = PartialState()
|
||||
state.print("testing gather")
|
||||
test_gather(state)
|
||||
state.print("testing broadcast")
|
||||
test_broadcast(state)
|
||||
state.print("testing pad_across_processes")
|
||||
test_pad_across_processes(state)
|
||||
state.print("testing reduce_sum")
|
||||
test_reduce_sum(state)
|
||||
state.print("testing reduce_mean")
|
||||
test_reduce_mean(state)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
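The new `test_ops.py` file above drives the low-level collective helpers (`gather`, `broadcast`, `pad_across_processes`, `reduce`) directly against a `PartialState`. A minimal sketch of using two of these utilities outside the test:

```python
# Minimal sketch of the collective utilities exercised by test_ops.py above.
import torch
from accelerate import PartialState
from accelerate.utils.operations import gather, reduce

state = PartialState()
tensor = torch.tensor([float(state.process_index + 1)], device=state.device)

state.print("gathered:", gather(tensor))       # one value per process
state.print("summed:", reduce(tensor, "sum"))  # same shape, summed across processes
```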
@ -14,11 +14,6 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import contextlib
|
||||
import io
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
@ -30,101 +25,12 @@ from accelerate.utils import (
|
||||
DistributedType,
|
||||
gather,
|
||||
is_bf16_available,
|
||||
is_ipex_available,
|
||||
is_torch_version,
|
||||
set_seed,
|
||||
synchronize_rng_states,
|
||||
)
|
||||
|
||||
|
||||
def print_main(state):
|
||||
print(f"Printing from the main process {state.process_index}")
|
||||
|
||||
|
||||
def print_local_main(state):
|
||||
print(f"Printing from the local main process {state.local_process_index}")
|
||||
|
||||
|
||||
def print_last(state):
|
||||
print(f"Printing from the last process {state.process_index}")
|
||||
|
||||
|
||||
def print_on(state, process_idx):
|
||||
print(f"Printing from process {process_idx}: {state.process_index}")
|
||||
|
||||
|
||||
def process_execution_check():
|
||||
accelerator = Accelerator()
|
||||
num_processes = accelerator.num_processes
|
||||
|
||||
# Test main_process_first context manager
|
||||
path = Path("check_main_process_first.txt")
|
||||
if path.exists():
|
||||
path.unlink()
|
||||
with accelerator.main_process_first():
|
||||
if accelerator.is_main_process:
|
||||
time.sleep(0.1) # ensure main process takes longest
|
||||
with open(path, "a+") as f:
|
||||
f.write("Currently in the main process\n")
|
||||
else:
|
||||
with open(path, "a+") as f:
|
||||
f.write("Now on another process\n")
|
||||
accelerator.wait_for_everyone()
|
||||
if accelerator.is_main_process:
|
||||
with open(path, "r") as f:
|
||||
text = "".join(f.readlines())
|
||||
try:
|
||||
assert text.startswith("Currently in the main process\n"), "Main process was not first"
|
||||
if num_processes > 1:
|
||||
assert text.endswith("Now on another process\n"), "Main process was not first"
|
||||
assert (
|
||||
text.count("Now on another process\n") == num_processes - 1
|
||||
), f"Only wrote to file {text.count('Now on another process') + 1} times, not {num_processes}"
|
||||
except AssertionError:
|
||||
path.unlink()
|
||||
raise
|
||||
|
||||
# Test the decorators
|
||||
f = io.StringIO()
|
||||
with contextlib.redirect_stdout(f):
|
||||
accelerator.on_main_process(print_main)(accelerator.state)
|
||||
result = f.getvalue().rstrip()
|
||||
if accelerator.is_main_process:
|
||||
assert result == "Printing from the main process 0", f"{result} != Printing from the main process 0"
|
||||
else:
|
||||
assert f.getvalue().rstrip() == "", f'{result} != ""'
|
||||
f.truncate(0)
|
||||
f.seek(0)
|
||||
|
||||
with contextlib.redirect_stdout(f):
|
||||
accelerator.on_local_main_process(print_local_main)(accelerator.state)
|
||||
if accelerator.is_local_main_process:
|
||||
assert f.getvalue().rstrip() == "Printing from the local main process 0"
|
||||
else:
|
||||
assert f.getvalue().rstrip() == ""
|
||||
f.truncate(0)
|
||||
f.seek(0)
|
||||
|
||||
with contextlib.redirect_stdout(f):
|
||||
accelerator.on_last_process(print_last)(accelerator.state)
|
||||
if accelerator.is_last_process:
|
||||
assert f.getvalue().rstrip() == f"Printing from the last process {accelerator.state.num_processes - 1}"
|
||||
else:
|
||||
assert f.getvalue().rstrip() == ""
|
||||
f.truncate(0)
|
||||
f.seek(0)
|
||||
|
||||
for process_idx in range(num_processes):
|
||||
with contextlib.redirect_stdout(f):
|
||||
accelerator.on_process(print_on, process_index=process_idx)(accelerator.state, process_idx)
|
||||
if accelerator.process_index == process_idx:
|
||||
assert f.getvalue().rstrip() == f"Printing from process {process_idx}: {accelerator.process_index}"
|
||||
else:
|
||||
assert f.getvalue().rstrip() == ""
|
||||
f.truncate(0)
|
||||
f.seek(0)
|
||||
|
||||
|
||||
def init_state_check():
|
||||
# Test we can instantiate this twice in a row.
|
||||
state = AcceleratorState()
|
||||
@@ -369,12 +275,10 @@ def training_check():
    assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
    assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."

    # BF16 support is only for CPU + TPU, and some GPU
    if is_bf16_available():
        # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16
        print("BF16 training check.")
    # TEST that previous fp16 flag still works
    print("Legacy FP16 training check.")
    AcceleratorState._reset_state()
    accelerator = Accelerator(mixed_precision="bf16")
    accelerator = Accelerator(fp16=True)
    train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
    model = RegressionModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
@@ -394,14 +298,12 @@ def training_check():
    assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
    assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."

    # IPEX support is only for CPU
    if is_ipex_available():
        print("ipex BF16 training check.")
        from accelerate.utils.dataclasses import IntelPyTorchExtensionPlugin

    # BF16 support is only for CPU + TPU, and some GPU
    if is_bf16_available():
        # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16
        print("BF16 training check.")
    AcceleratorState._reset_state()
        ipex_plugin = IntelPyTorchExtensionPlugin(use_ipex=True, dtype=torch.bfloat16)
        accelerator = Accelerator(mixed_precision="bf16", cpu=True, ipex_plugin=ipex_plugin)
    accelerator = Accelerator(mixed_precision="bf16")
    train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
    model = RegressionModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
@@ -428,9 +330,6 @@ def main():
    if state.local_process_index == 0:
        print("**Initialization**")
    init_state_check()
    if state.local_process_index == 0:
        print("\n**Test process execution**")
    process_execution_check()

    if state.local_process_index == 0:
        print("\n**Test random number generator synchronization**")
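The hunks above swap the legacy `fp16=True` flag for the `mixed_precision` argument. A rough sketch of the newer spelling, assuming a bf16-capable backend (the tiny model and data below are placeholders, not code from this diff):

```python
import torch

from accelerate import Accelerator
from accelerate.utils import is_bf16_available

# Prefer `mixed_precision="bf16"` over the deprecated `fp16=True` flag.
accelerator = Accelerator(mixed_precision="bf16" if is_bf16_available() else "no")

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

inputs = torch.randn(16, 8, device=accelerator.device)
with accelerator.autocast():
    loss = model(inputs).sum()
accelerator.backward(loss)
optimizer.step()
```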
@@ -20,10 +20,9 @@ from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
from accelerate.utils import DistributedType, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
@@ -152,7 +151,7 @@ def test_distributed_sync(accelerator):


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
        gradient_accumulation_steps=2, split_batches=split_batches, dispatch_batches=dispatch_batches
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
@@ -185,12 +184,11 @@ def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
        gradient_accumulation_steps=2, split_batches=split_batches, dispatch_batches=dispatch_batches
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
@@ -204,13 +202,11 @@ def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispa
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
@@ -228,41 +224,11 @@ def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispa
        check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
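These tests drive `Accelerator(gradient_accumulation_steps=...)` together with the `accelerator.accumulate(...)` context manager. A minimal, self-contained usage sketch (the toy model and data are placeholders, not code from this diff):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataset = TensorDataset(torch.randn(64, 4), torch.randn(64, 1))
dataloader = DataLoader(dataset, batch_size=8)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch, targets in dataloader:
    # Gradients are synchronized and the optimizer really steps only every
    # second batch; intermediate batches run without DDP all-reduce.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(batch), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```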
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    for iteration, _ in enumerate(first_dataloader):
        # Will be True except if we are on the last batch
        if iteration < len(first_dataloader) - 1:
            assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    if batch_num < len(second_dataloader) - 1:
                        assert id(accelerator.gradient_state.active_dataloader) == id(
                            second_dataloader
                        ), f"First dataloader: {id(first_dataloader)}\nSecond dataloader: {id(second_dataloader)}\nActive dataloader: {id(accelerator.gradient_state.active_dataloader)}\n"
                    else:
                        assert id(accelerator.gradient_state.active_dataloader) == id(
                            first_dataloader
                        ), f"First dataloader: {id(first_dataloader)}\nSecond dataloader: {id(second_dataloader)}\nActive dataloader: {id(accelerator.gradient_state.active_dataloader)}\n"
        else:
            assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
@@ -280,26 +246,23 @@ def main():
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
    if state.local_process_index == 0:
        print(
            "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
            "`split_batches=False`, `dispatch_batches=False`**",
        )
    test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
@@ -27,14 +27,12 @@ from unittest import mock

import torch

from ..state import AcceleratorState, PartialState
from ..state import AcceleratorState
from ..utils import (
    gather,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_mps_available,
    is_safetensors_available,
    is_tensorboard_available,
    is_torch_version,
    is_tpu_available,
@@ -89,14 +87,6 @@ def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_mps(test_case):
    """
    Decorator marking a test that requires MPS backend. These tests are skipped when torch doesn't support `mps`
    backend.
    """
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    """
    Decorator marking a test that requires transformers and datasets. These tests are skipped when they are not.
@@ -129,14 +119,6 @@ def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_safetensors(test_case):
    """
    Decorator marking a test that requires safetensors installed. These tests are skipped when safetensors isn't
    installed
    """
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    """
    Decorator marking a test that requires DeepSpeed installed. These tests are skipped when DeepSpeed isn't installed
@@ -232,20 +214,6 @@ class TempDirTestCase(unittest.TestCase):
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """
    A TestCase class that will reset the accelerator state at the end of every test. Every test that checks or utilizes
    the `AcceleratorState` class should inherit from this to avoid silent failures due to state being shared between
    tests.
    """

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """
    A TestCase class designed to dynamically add various mockers that should be used in every test, mimicking the
@@ -339,8 +307,8 @@ async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=Fals
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
@@ -348,6 +316,7 @@ async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=Fals


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:

    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
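These helpers are what the repository's own test suite builds on. A rough sketch of how the skip decorators and the async subprocess runner shown above are typically combined (the script path below is a made-up placeholder, not a file from this diff):

```python
import os
import unittest

from accelerate.test_utils.testing import execute_subprocess_async, require_multi_gpu


class ExampleMultiGPUTest(unittest.TestCase):
    # Placeholder path to a distributed test script.
    test_file_path = "tests/scripts/example_script.py"

    @require_multi_gpu
    def test_script_runs_on_two_gpus(self):
        # Skipped automatically when fewer than two CUDA devices are visible.
        cmd = ["torchrun", "--nproc_per_node=2", self.test_file_path]
        execute_subprocess_async(cmd, env=os.environ.copy())
```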
@ -18,13 +18,12 @@
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
from functools import wraps
|
||||
from abc import ABCMeta, abstractmethod, abstractproperty
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
import yaml
|
||||
|
||||
from .logging import get_logger
|
||||
from .state import PartialState
|
||||
from .utils import (
|
||||
LoggerType,
|
||||
is_aim_available,
|
||||
@ -38,10 +37,7 @@ from .utils import (
|
||||
_available_trackers = []
|
||||
|
||||
if is_tensorboard_available():
|
||||
try:
|
||||
from torch.utils import tensorboard
|
||||
except ModuleNotFoundError:
|
||||
import tensorboardX as tensorboard
|
||||
from torch.utils import tensorboard
|
||||
|
||||
_available_trackers.append(LoggerType.TENSORBOARD)
|
||||
|
||||
@ -68,71 +64,32 @@ if is_mlflow_available():
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
def on_main_process(function):
|
||||
"""
|
||||
Decorator to selectively run the decorated function on the main process only based on the `main_process_only`
|
||||
attribute in a class.
|
||||
|
||||
Checks at function execution rather than initialization time, not triggering the initialization of the
|
||||
`PartialState`.
|
||||
"""
|
||||
|
||||
@wraps(function)
|
||||
def execute_on_main_process(self, *args, **kwargs):
|
||||
if getattr(self, "main_process_only", False):
|
||||
return PartialState().on_main_process(function)(self, *args, **kwargs)
|
||||
else:
|
||||
return function(self, *args, **kwargs)
|
||||
|
||||
return execute_on_main_process
|
||||
|
||||
|
||||
def get_available_trackers():
|
||||
"Returns a list of all supported available trackers in the system"
|
||||
return _available_trackers
|
||||
|
||||
|
||||
class GeneralTracker:
|
||||
class GeneralTracker(object, metaclass=ABCMeta):
|
||||
"""
|
||||
A base Tracker class to be used for all logging integration implementations.
|
||||
|
||||
Each function should take in `**kwargs` that will automatically be passed in from a base dictionary provided to
|
||||
[`Accelerator`].
|
||||
|
||||
Should implement `name`, `requires_logging_directory`, and `tracker` properties such that:
|
||||
|
||||
`name` (`str`): String representation of the tracker class name, such as "TensorBoard" `requires_logging_directory`
|
||||
(`bool`): Whether the logger requires a directory to store their logs. `tracker` (`object`): Should return internal
|
||||
tracking mechanism used by a tracker class (such as the `run` for wandb)
|
||||
|
||||
Implementations can also include a `main_process_only` (`bool`) attribute to toggle if relevent logging, init, and
|
||||
other functions should occur on the main process or across all processes (by default will use `True`)
|
||||
[`Accelerator`]
|
||||
"""
|
||||
|
||||
main_process_only = True
|
||||
@abstractproperty
|
||||
def name(self):
|
||||
"String representation of the python class name"
|
||||
pass
|
||||
|
||||
def __init__(self, _blank=False):
|
||||
if not _blank:
|
||||
err = ""
|
||||
if not hasattr(self, "name"):
|
||||
err += "`name`"
|
||||
if not hasattr(self, "requires_logging_directory"):
|
||||
if len(err) > 0:
|
||||
err += ", "
|
||||
err += "`requires_logging_directory`"
|
||||
|
||||
# as tracker is a @property that relies on post-init
|
||||
if "tracker" not in dir(self):
|
||||
if len(err) > 0:
|
||||
err += ", "
|
||||
err += "`tracker`"
|
||||
if len(err) > 0:
|
||||
raise NotImplementedError(
|
||||
f"The implementation for this tracker class is missing the following "
|
||||
f"required attributes. Please define them in the class definition: "
|
||||
f"{err}"
|
||||
)
|
||||
@abstractproperty
|
||||
def requires_logging_directory(self):
|
||||
"""
|
||||
Whether the logger requires a directory to store their logs. Should either return `True` or `False`.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def store_init_configuration(self, values: dict):
|
||||
"""
|
||||
Logs `values` as hyperparameters for the run. Implementations should use the experiment configuration
|
||||
@ -145,6 +102,7 @@ class GeneralTracker:
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def log(self, values: dict, step: Optional[int], **kwargs):
|
||||
"""
|
||||
Logs `values` to the current run. Base `log` implementations of a tracking API should go in here, along with
|
||||
@ -165,6 +123,13 @@ class GeneralTracker:
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractproperty
|
||||
def tracker(self):
|
||||
"""
|
||||
Should return internal tracking mechanism used by a tracker class (such as the `run` for wandb)
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class TensorBoardTracker(GeneralTracker):
|
||||
"""
|
||||
@ -182,9 +147,7 @@ class TensorBoardTracker(GeneralTracker):
|
||||
name = "tensorboard"
|
||||
requires_logging_directory = True
|
||||
|
||||
@on_main_process
|
||||
def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike], **kwargs):
|
||||
super().__init__()
|
||||
def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]], **kwargs):
|
||||
self.run_name = run_name
|
||||
self.logging_dir = os.path.join(logging_dir, run_name)
|
||||
self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs)
|
||||
@ -197,7 +160,6 @@ class TensorBoardTracker(GeneralTracker):
|
||||
def tracker(self):
|
||||
return self.writer
|
||||
|
||||
@on_main_process
|
||||
def store_init_configuration(self, values: dict):
|
||||
"""
|
||||
Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the
|
||||
@ -221,7 +183,6 @@ class TensorBoardTracker(GeneralTracker):
|
||||
raise
|
||||
logger.debug("Stored initial configuration hyperparameters to TensorBoard and hparams yaml file")
|
||||
|
||||
@on_main_process
|
||||
def log(self, values: dict, step: Optional[int] = None, **kwargs):
|
||||
"""
|
||||
Logs `values` to the current run.
|
||||
@ -246,7 +207,6 @@ class TensorBoardTracker(GeneralTracker):
|
||||
self.writer.flush()
|
||||
logger.debug("Successfully logged to TensorBoard")
|
||||
|
||||
@on_main_process
|
||||
def finish(self):
|
||||
"""
|
||||
Closes `TensorBoard` writer
|
||||
@ -268,11 +228,8 @@ class WandBTracker(GeneralTracker):
|
||||
|
||||
name = "wandb"
|
||||
requires_logging_directory = False
|
||||
main_process_only = False
|
||||
|
||||
@on_main_process
|
||||
def __init__(self, run_name: str, **kwargs):
|
||||
super().__init__()
|
||||
self.run_name = run_name
|
||||
self.run = wandb.init(project=self.run_name, **kwargs)
|
||||
logger.debug(f"Initialized WandB project {self.run_name}")
|
||||
@ -284,7 +241,6 @@ class WandBTracker(GeneralTracker):
|
||||
def tracker(self):
|
||||
return self.run
|
||||
|
||||
@on_main_process
|
||||
def store_init_configuration(self, values: dict):
|
||||
"""
|
||||
Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
|
||||
@ -297,7 +253,6 @@ class WandBTracker(GeneralTracker):
|
||||
wandb.config.update(values)
|
||||
logger.debug("Stored initial configuration hyperparameters to WandB")
|
||||
|
||||
@on_main_process
|
||||
def log(self, values: dict, step: Optional[int] = None, **kwargs):
|
||||
"""
|
||||
Logs `values` to the current run.
|
||||
@ -314,7 +269,6 @@ class WandBTracker(GeneralTracker):
|
||||
self.run.log(values, step=step, **kwargs)
|
||||
logger.debug("Successfully logged to WandB")
|
||||
|
||||
@on_main_process
|
||||
def finish(self):
|
||||
"""
|
||||
Closes `wandb` writer
|
||||
@ -339,9 +293,7 @@ class CometMLTracker(GeneralTracker):
|
||||
name = "comet_ml"
|
||||
requires_logging_directory = False
|
||||
|
||||
@on_main_process
|
||||
def __init__(self, run_name: str, **kwargs):
|
||||
super().__init__()
|
||||
self.run_name = run_name
|
||||
self.writer = Experiment(project_name=run_name, **kwargs)
|
||||
logger.debug(f"Initialized CometML project {self.run_name}")
|
||||
@ -353,7 +305,6 @@ class CometMLTracker(GeneralTracker):
|
||||
def tracker(self):
|
||||
return self.writer
|
||||
|
||||
@on_main_process
|
||||
def store_init_configuration(self, values: dict):
|
||||
"""
|
||||
Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
|
||||
@ -366,7 +317,6 @@ class CometMLTracker(GeneralTracker):
|
||||
self.writer.log_parameters(values)
|
||||
logger.debug("Stored initial configuration hyperparameters to CometML")
|
||||
|
||||
@on_main_process
|
||||
def log(self, values: dict, step: Optional[int] = None, **kwargs):
|
||||
"""
|
||||
Logs `values` to the current run.
|
||||
@ -392,7 +342,6 @@ class CometMLTracker(GeneralTracker):
|
||||
self.writer.log_metrics(v, step=step, **kwargs)
|
||||
logger.debug("Successfully logged to CometML")
|
||||
|
||||
@on_main_process
|
||||
def finish(self):
|
||||
"""
|
||||
Closes `comet-ml` writer
|
||||
@ -415,7 +364,6 @@ class AimTracker(GeneralTracker):
|
||||
name = "aim"
|
||||
requires_logging_directory = True
|
||||
|
||||
@on_main_process
|
||||
def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = ".", **kwargs):
|
||||
self.run_name = run_name
|
||||
self.writer = Run(repo=logging_dir, **kwargs)
|
||||
@ -429,7 +377,6 @@ class AimTracker(GeneralTracker):
|
||||
def tracker(self):
|
||||
return self.writer
|
||||
|
||||
@on_main_process
|
||||
def store_init_configuration(self, values: dict):
|
||||
"""
|
||||
Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
|
||||
@ -440,7 +387,6 @@ class AimTracker(GeneralTracker):
|
||||
"""
|
||||
self.writer["hparams"] = values
|
||||
|
||||
@on_main_process
|
||||
def log(self, values: dict, step: Optional[int], **kwargs):
|
||||
"""
|
||||
Logs `values` to the current run.
|
||||
@ -457,7 +403,6 @@ class AimTracker(GeneralTracker):
|
||||
for key, value in values.items():
|
||||
self.writer.track(value, name=key, step=step, **kwargs)
|
||||
|
||||
@on_main_process
|
||||
def finish(self):
|
||||
"""
|
||||
Closes `aim` writer
|
||||
@ -496,7 +441,6 @@ class MLflowTracker(GeneralTracker):
|
||||
name = "mlflow"
|
||||
requires_logging_directory = True
|
||||
|
||||
@on_main_process
|
||||
def __init__(
|
||||
self,
|
||||
experiment_name: str = None,
|
||||
@ -507,6 +451,7 @@ class MLflowTracker(GeneralTracker):
|
||||
run_name: Optional[str] = None,
|
||||
description: Optional[str] = None,
|
||||
):
|
||||
|
||||
experiment_name = os.getenv("MLFLOW_EXPERIMENT_NAME", experiment_name)
|
||||
run_id = os.getenv("MLFLOW_RUN_ID", run_id)
|
||||
tags = os.getenv("MLFLOW_TAGS", tags)
|
||||
@ -539,7 +484,6 @@ class MLflowTracker(GeneralTracker):
|
||||
def tracker(self):
|
||||
return self.active_run
|
||||
|
||||
@on_main_process
|
||||
def store_init_configuration(self, values: dict):
|
||||
"""
|
||||
Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
|
||||
@ -566,7 +510,6 @@ class MLflowTracker(GeneralTracker):
|
||||
|
||||
logger.debug("Stored initial configuration hyperparameters to MLflow")
|
||||
|
||||
@on_main_process
|
||||
def log(self, values: dict, step: Optional[int]):
|
||||
"""
|
||||
Logs `values` to the current run.
|
||||
@ -590,7 +533,6 @@ class MLflowTracker(GeneralTracker):
|
||||
mlflow.log_metrics(metrics, step=step)
|
||||
logger.debug("Successfully logged to mlflow")
|
||||
|
||||
@on_main_process
|
||||
def finish(self):
|
||||
"""
|
||||
End the active MLflow run.
|
||||
@ -635,6 +577,7 @@ def filter_trackers(
|
||||
if log_with is not None:
|
||||
if not isinstance(log_with, (list, tuple)):
|
||||
log_with = [log_with]
|
||||
logger.debug(f"{log_with}")
|
||||
if "all" in log_with or LoggerType.ALL in log_with:
|
||||
loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()
|
||||
else:
|
||||
@ -651,7 +594,7 @@ def filter_trackers(
|
||||
if getattr(tracker_init, "requires_logging_directory"):
|
||||
if logging_dir is None:
|
||||
raise ValueError(
|
||||
f"Logging with `{log_type}` requires a `logging_dir` to be passed in."
|
||||
f"Logging with `{str(log_type)}` requires a `logging_dir` to be passed in."
|
||||
)
|
||||
loggers.append(log_type)
|
||||
else:
|
||||
|
||||
@ -1,43 +1,38 @@
|
||||
# flake8: noqa
|
||||
# There's no way to ignore "F401 '...' imported but unused" warnings in this
|
||||
# module, but to preserve other warnings. So, don't check this module at all
|
||||
|
||||
from .constants import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS
|
||||
from .dataclasses import (
|
||||
ComputeEnvironment,
|
||||
DeepSpeedPlugin,
|
||||
DistributedDataParallelKwargs,
|
||||
DistributedType,
|
||||
DynamoBackend,
|
||||
FP8RecipeKwargs,
|
||||
FullyShardedDataParallelPlugin,
|
||||
GradientAccumulationPlugin,
|
||||
GradScalerKwargs,
|
||||
InitProcessGroupKwargs,
|
||||
IntelPyTorchExtensionPlugin,
|
||||
KwargsHandler,
|
||||
LoggerType,
|
||||
MegatronLMPlugin,
|
||||
PrecisionType,
|
||||
ProjectConfiguration,
|
||||
RNGType,
|
||||
SageMakerDistributedType,
|
||||
TensorInformation,
|
||||
TorchDynamoPlugin,
|
||||
)
|
||||
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
|
||||
from .imports import (
|
||||
get_ccl_version,
|
||||
is_aim_available,
|
||||
is_apex_available,
|
||||
is_bf16_available,
|
||||
is_boto3_available,
|
||||
is_ccl_available,
|
||||
is_comet_ml_available,
|
||||
is_datasets_available,
|
||||
is_deepspeed_available,
|
||||
is_fp8_available,
|
||||
is_ipex_available,
|
||||
is_megatron_lm_available,
|
||||
is_mlflow_available,
|
||||
is_mps_available,
|
||||
is_rich_available,
|
||||
is_safetensors_available,
|
||||
is_sagemaker_available,
|
||||
is_tensorboard_available,
|
||||
is_tpu_available,
|
||||
@ -56,9 +51,7 @@ from .modeling import (
|
||||
infer_auto_device_map,
|
||||
load_checkpoint_in_model,
|
||||
load_offloaded_weights,
|
||||
load_state_dict,
|
||||
named_module_tensors,
|
||||
retie_parameters,
|
||||
set_module_tensor_to_device,
|
||||
)
|
||||
from .offload import (
|
||||
@ -83,7 +76,6 @@ from .operations import (
|
||||
get_data_structure,
|
||||
honor_type,
|
||||
initialize_tensors,
|
||||
is_namedtuple,
|
||||
is_tensor_information,
|
||||
is_torch_tensor,
|
||||
pad_across_processes,
|
||||
@ -105,16 +97,7 @@ if is_deepspeed_available():
|
||||
HfDeepSpeedConfig,
|
||||
)
|
||||
|
||||
from .launch import (
|
||||
PrepareForLaunch,
|
||||
_filter_args,
|
||||
get_launch_prefix,
|
||||
prepare_deepspeed_cmd_env,
|
||||
prepare_multi_gpu_env,
|
||||
prepare_sagemager_args_inputs,
|
||||
prepare_simple_launcher_cmd_env,
|
||||
prepare_tpu,
|
||||
)
|
||||
from .launch import PrepareForLaunch, _filter_args, get_launch_prefix
|
||||
from .megatron_lm import (
|
||||
AbstractTrainStep,
|
||||
BertTrainStep,
|
||||
@ -133,17 +116,14 @@ from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
|
||||
from .megatron_lm import prepare_model as megatron_lm_prepare_model
|
||||
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
|
||||
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
|
||||
from .memory import find_executable_batch_size, release_memory
|
||||
from .memory import find_executable_batch_size
|
||||
from .other import (
|
||||
extract_model_from_parallel,
|
||||
get_pretty_name,
|
||||
merge_dicts,
|
||||
patch_environment,
|
||||
save,
|
||||
wait_for_everyone,
|
||||
write_basic_config,
|
||||
)
|
||||
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
|
||||
from .torch_xla import install_xla
|
||||
from .tqdm import tqdm
|
||||
from .transformer_engine import convert_model, has_transformer_engine_layers
|
||||
|
||||
@@ -29,7 +29,6 @@ FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
@ -23,16 +23,14 @@ import functools
|
||||
import os
|
||||
import typing
|
||||
import warnings
|
||||
from contextlib import contextmanager
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import timedelta
|
||||
from distutils.util import strtobool
|
||||
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
|
||||
from typing import Any, Callable, Dict, Iterable, List, Optional
|
||||
|
||||
import torch
|
||||
|
||||
from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_STATE_DICT_TYPE, MODEL_NAME, OPTIMIZER_NAME
|
||||
from .versions import is_torch_version
|
||||
|
||||
|
||||
class KwargsHandler:
|
||||
@ -66,18 +64,7 @@ class DistributedDataParallelKwargs(KwargsHandler):
|
||||
|
||||
`static_graph` is only available in PyTorch 1.11.0 and later versions.
|
||||
|
||||
</Tip>
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import DistributedDataParallelKwargs
|
||||
|
||||
kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
|
||||
accelerator = Accelerator(kwargs_handlers=[kwargs])
|
||||
```
|
||||
"""
|
||||
</Tip>"""
|
||||
|
||||
dim: int = 0
|
||||
broadcast_buffers: bool = True
|
||||
@ -99,18 +86,7 @@ class GradScalerKwargs(KwargsHandler):
|
||||
|
||||
`GradScaler` is only available in PyTorch 1.5.0 and later versions.
|
||||
|
||||
</Tip>
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import GradScalerKwargs
|
||||
|
||||
kwargs = GradScalerKwargs(backoff_filter=0.25)
|
||||
accelerator = Accelerator(kwargs_handlers=[kwargs])
|
||||
```
|
||||
"""
|
||||
</Tip>"""
|
||||
|
||||
init_scale: float = 65536.0
|
||||
growth_factor: float = 2.0
|
||||
@ -126,54 +102,12 @@ class InitProcessGroupKwargs(KwargsHandler):
|
||||
to the documentation of this
|
||||
[method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more
|
||||
information on each argument.
|
||||
|
||||
```python
|
||||
from datetime import timedelta
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import InitProcessGroupKwargs
|
||||
|
||||
kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800))
|
||||
accelerator = Accelerator(kwargs_handlers=[kwargs])
|
||||
```
|
||||
"""
|
||||
|
||||
backend: Optional[str] = "nccl"
|
||||
init_method: Optional[str] = None
|
||||
timeout: timedelta = timedelta(seconds=1800)
|
||||
|
||||
|
||||
@dataclass
|
||||
class FP8RecipeKwargs(KwargsHandler):
|
||||
"""
|
||||
Use this object in your [`Accelerator`] to customize the initialization of the recipe for FP8 mixed precision
|
||||
training. Please refer to the documentation of this
|
||||
[class](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html#transformer_engine.common.recipe.DelayedScaling)
|
||||
for more information on each argument.
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import FP8RecipeKwargs
|
||||
|
||||
kwargs = FP8RecipeKwargs(fp8_format="HYBRID")
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[kwargs])
|
||||
```
|
||||
"""
|
||||
|
||||
margin: int = 0
|
||||
interval: int = 1
|
||||
fp8_format: str = "E4M3"
|
||||
amax_history_len: int = 1
|
||||
amax_compute_algo: str = "most_recent"
|
||||
override_linear_precision: Tuple[bool, bool, bool] = (False, False, False)
|
||||
|
||||
def __post_init__(self):
|
||||
self.fp8_format = self.fp8_format.upper()
|
||||
if self.fp8_format not in ["E4M3", "HYBRID"]:
|
||||
raise ValueError("`fp8_format` must be 'E4M3' or 'HYBRID'.")
|
||||
if self.amax_compute_algo not in ["max", "most_recent"]:
|
||||
raise ValueError("`amax_compute_algo` must be 'max' or 'most_recent'")
|
||||
|
||||
|
||||
class DistributedType(str, enum.Enum):
|
||||
"""
|
||||
Represents a type of distributed environment.
|
||||
@ -194,7 +128,7 @@ class DistributedType(str, enum.Enum):
|
||||
DEEPSPEED = "DEEPSPEED"
|
||||
FSDP = "FSDP"
|
||||
TPU = "TPU"
|
||||
MPS = "MPS" # here for backward compatibility. Remove in v0.18.0
|
||||
MPS = "MPS"
|
||||
MEGATRON_LM = "MEGATRON_LM"
|
||||
|
||||
|
||||
@ -230,50 +164,6 @@ class ComputeEnvironment(str, enum.Enum):
|
||||
AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER"
|
||||
|
||||
|
||||
class DynamoBackend(str, enum.Enum):
|
||||
"""
|
||||
Represents a dynamo backend (see https://github.com/pytorch/torchdynamo).
|
||||
|
||||
Values:
|
||||
|
||||
- **NO** -- Do not use torch dynamo.
|
||||
- **EAGER** -- Uses PyTorch to run the extracted GraphModule. This is quite useful in debugging TorchDynamo
|
||||
issues.
|
||||
- **AOT_EAGER** -- Uses AotAutograd with no compiler, i.e, just using PyTorch eager for the AotAutograd's
|
||||
extracted forward and backward graphs. This is useful for debugging, and unlikely to give speedups.
|
||||
- **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton
|
||||
kernels. [Read
|
||||
more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747)
|
||||
- **NVFUSER** -- nvFuser with TorchScript. [Read
|
||||
more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
|
||||
- **AOT_NVFUSER** -- nvFuser with AotAutograd. [Read
|
||||
more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
|
||||
- **AOT_CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read
|
||||
more](https://github.com/pytorch/torchdynamo/pull/757)
|
||||
- **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read
|
||||
more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html)
|
||||
- **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. [Read
|
||||
more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst)
|
||||
- **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/)
|
||||
- **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read
|
||||
more](https://github.com/intel/intel-extension-for-pytorch).
|
||||
|
||||
"""
|
||||
|
||||
# Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.
|
||||
NO = "NO"
|
||||
EAGER = "EAGER"
|
||||
AOT_EAGER = "AOT_EAGER"
|
||||
INDUCTOR = "INDUCTOR"
|
||||
NVFUSER = "NVFUSER"
|
||||
AOT_NVFUSER = "AOT_NVFUSER"
|
||||
AOT_CUDAGRAPHS = "AOT_CUDAGRAPHS"
|
||||
OFI = "OFI"
|
||||
FX2TRT = "FX2TRT"
|
||||
ONNXRT = "ONNXRT"
|
||||
IPEX = "IPEX"
|
||||
|
||||
|
||||
class EnumWithContains(enum.EnumMeta):
|
||||
"A metaclass that adds the ability to check if `self` contains an item with the `in` operator"
|
||||
|
||||
@ -294,7 +184,7 @@ class BaseEnum(enum.Enum, metaclass=EnumWithContains):
|
||||
@classmethod
|
||||
def list(cls):
|
||||
"Method to list all the possible items in `cls`"
|
||||
return list(map(str, cls))
|
||||
return list(map(lambda item: str(item), cls))
|
||||
|
||||
|
||||
class LoggerType(BaseEnum):
|
||||
@ -327,7 +217,6 @@ class PrecisionType(BaseEnum):
|
||||
"""
|
||||
|
||||
NO = "no"
|
||||
FP8 = "fp8"
|
||||
FP16 = "fp16"
|
||||
BF16 = "bf16"
|
||||
|
||||
@ -348,90 +237,6 @@ class TensorInformation:
|
||||
dtype: torch.dtype
|
||||
|
||||
|
||||
@dataclass
|
||||
class ProjectConfiguration:
|
||||
"""
|
||||
Configuration for the Accelerator object based on inner-project needs.
|
||||
"""
|
||||
|
||||
project_dir: str = field(default=None, metadata={"help": "A path to a directory for storing data."})
|
||||
logging_dir: str = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"help": "A path to a directory for storing logs of locally-compatible loggers. If None, defaults to `project_dir`."
|
||||
},
|
||||
)
|
||||
automatic_checkpoint_naming: bool = field(
|
||||
default=False,
|
||||
metadata={"help": "Whether saved states should be automatically iteratively named."},
|
||||
)
|
||||
|
||||
total_limit: int = field(
|
||||
default=None,
|
||||
metadata={"help": "The maximum number of total saved states to keep."},
|
||||
)
|
||||
|
||||
iteration: int = field(
|
||||
default=0,
|
||||
metadata={"help": "The current save iteration."},
|
||||
)
|
||||
|
||||
def __post_init__(self):
|
||||
if self.logging_dir is None:
|
||||
self.logging_dir = self.project_dir
|
||||
|
||||
|
||||
@dataclass
|
||||
class GradientAccumulationPlugin(KwargsHandler):
|
||||
"""
|
||||
A plugin to configure gradient accumulation behavior.
|
||||
"""
|
||||
|
||||
num_steps: int = field(default=None, metadata={"help": "The number of steps to accumulate gradients for."})
|
||||
adjust_scheduler: bool = field(
|
||||
default=True,
|
||||
metadata={
|
||||
"help": "Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be `True` if the used scheduler was not adjusted for gradient accumulation."
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class TorchDynamoPlugin(KwargsHandler):
|
||||
"""
|
||||
This plugin is used to compile a model with PyTorch 2.0
|
||||
"""
|
||||
|
||||
backend: DynamoBackend = field(
|
||||
default=None,
|
||||
metadata={"help": f"Possible options are {[b.value.lower() for b in DynamoBackend]}"},
|
||||
)
|
||||
mode: str = field(
|
||||
default=None, metadata={"help": "Possible options are 'default', 'reduce-overhead' or 'max-autotune'"}
|
||||
)
|
||||
fullgraph: bool = field(default=None, metadata={"help": "Whether it is ok to break model into several subgraphs"})
|
||||
dynamic: bool = field(default=None, metadata={"help": "Whether to use dynamic shape for tracing"})
|
||||
options: Any = field(default=None, metadata={"help": "A dictionary of options to pass to the backend."})
|
||||
disable: bool = field(default=False, metadata={"help": "Turn torch.compile() into a no-op for testing"})
|
||||
|
||||
def __post_init__(self):
|
||||
prefix = "ACCELERATE_DYNAMO_"
|
||||
if self.backend is None:
|
||||
self.backend = os.environ.get(prefix + "BACKEND", "no")
|
||||
self.backend = DynamoBackend(self.backend.upper())
|
||||
if self.mode is None:
|
||||
self.mode = os.environ.get(prefix + "MODE", "default")
|
||||
if self.fullgraph is None:
|
||||
self.fullgraph = strtobool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
|
||||
if self.dynamic is None:
|
||||
self.dynamic = strtobool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1
|
||||
|
||||
def to_dict(self):
|
||||
dynamo_config = copy.deepcopy(self.__dict__)
|
||||
dynamo_config["backend"] = dynamo_config["backend"].value.lower()
|
||||
return dynamo_config
|
||||
|
||||
|
||||
@dataclass
|
||||
class DeepSpeedPlugin:
|
||||
"""
|
||||
@ -479,30 +284,8 @@ class DeepSpeedPlugin:
|
||||
def __post_init__(self):
|
||||
from .deepspeed import HfDeepSpeedConfig
|
||||
|
||||
if self.gradient_accumulation_steps is None:
|
||||
self.gradient_accumulation_steps = int(os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", 1))
|
||||
|
||||
if self.gradient_clipping is None:
|
||||
gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "none")
|
||||
if gradient_clipping != "none":
|
||||
self.gradient_clipping = float(gradient_clipping)
|
||||
|
||||
if self.zero_stage is None:
|
||||
self.zero_stage = int(os.environ.get("ACCELERATE_DEEPSPEED_ZERO_STAGE", 2))
|
||||
|
||||
if self.offload_optimizer_device is None:
|
||||
self.offload_optimizer_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none")
|
||||
|
||||
if self.offload_param_device is None:
|
||||
self.offload_param_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none")
|
||||
|
||||
if self.zero3_save_16bit_model is None:
|
||||
self.zero3_save_16bit_model = (
|
||||
os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false") == "true"
|
||||
)
|
||||
|
||||
if self.hf_ds_config is None:
|
||||
self.hf_ds_config = os.environ.get("ACCELERATE_DEEPSPEED_CONFIG_FILE", "none")
|
||||
self.hf_ds_config = os.environ.get("DEEPSPEED_CONFIG_FILE", "none")
|
||||
if (
|
||||
isinstance(self.hf_ds_config, dict)
|
||||
or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != "none")
|
||||
@ -512,30 +295,31 @@ class DeepSpeedPlugin:
|
||||
self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config)
|
||||
if "gradient_accumulation_steps" not in self.hf_ds_config.config:
|
||||
self.hf_ds_config.config["gradient_accumulation_steps"] = 1
|
||||
elif self.hf_ds_config.config["gradient_accumulation_steps"] == "auto":
|
||||
raise ValueError("gradient_accumulation_steps cannot be set to 'auto' in the DeepSpeed config.")
|
||||
if "zero_optimization" not in self.hf_ds_config.config:
|
||||
raise ValueError("Please specify the ZeRO optimization config in the DeepSpeed config.")
|
||||
|
||||
self._deepspeed_config_checks()
|
||||
plugin_to_config_mapping = {
|
||||
"gradient_accumulation_steps": "gradient_accumulation_steps",
|
||||
"gradient_clipping": "gradient_clipping",
|
||||
"zero_stage": "zero_optimization.stage",
|
||||
"offload_optimizer_device": "zero_optimization.offload_optimizer.device",
|
||||
"offload_param_device": "zero_optimization.offload_param.device",
|
||||
"zero3_save_16bit_model": "zero_optimization.stage3_gather_16bit_weights_on_model_save",
|
||||
}
|
||||
kwargs = {v: getattr(self, k) for k, v in plugin_to_config_mapping.items() if getattr(self, k) is not None}
|
||||
for key in kwargs.keys():
|
||||
self.fill_match(key, **kwargs, must_match=False)
|
||||
self.hf_ds_config.set_stage_and_offload()
|
||||
|
||||
# filling the missing values in the class attributes from the DeepSpeed config
|
||||
# when using the DeepSpeed config file.
|
||||
for key, value in plugin_to_config_mapping.items():
|
||||
config_value = self.hf_ds_config.get_value(value)
|
||||
if config_value is not None and config_value != "auto":
|
||||
setattr(self, key, config_value)
|
||||
else:
|
||||
if self.gradient_accumulation_steps is None:
|
||||
self.gradient_accumulation_steps = int(os.environ.get("GRADIENT_ACCUMULATION_STEPS", 1))
|
||||
|
||||
if self.gradient_clipping is None:
|
||||
gradient_clipping = os.environ.get("GRADIENT_CLIPPING", "none")
|
||||
if gradient_clipping != "none":
|
||||
self.gradient_clipping = float(gradient_clipping)
|
||||
|
||||
if self.zero_stage is None:
|
||||
self.zero_stage = int(os.environ.get("DEEPSPEED_ZERO_STAGE", 2))
|
||||
|
||||
if self.offload_optimizer_device is None:
|
||||
self.offload_optimizer_device = os.environ.get("DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none")
|
||||
|
||||
if self.offload_param_device is None:
|
||||
self.offload_param_device = os.environ.get("DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none")
|
||||
|
||||
if self.zero3_save_16bit_model is None:
|
||||
self.zero3_save_16bit_model = os.environ.get("DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false") == "true"
|
||||
|
||||
config = {
|
||||
"train_batch_size": "auto",
|
||||
"train_micro_batch_size_per_gpu": "auto",
|
||||
@ -554,19 +338,15 @@ class DeepSpeedPlugin:
|
||||
if self.gradient_clipping:
|
||||
config["gradient_clipping"] = self.gradient_clipping
|
||||
self.hf_ds_config = HfDeepSpeedConfig(config)
|
||||
|
||||
self.deepspeed_config = self.hf_ds_config.config
|
||||
self.deepspeed_config["steps_per_print"] = float("inf") # this will stop deepspeed from logging @ stdout
|
||||
if self.zero3_init_flag is None:
|
||||
self.zero3_init_flag = (
|
||||
strtobool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1
|
||||
)
|
||||
self.zero3_init_flag = os.environ.get("DEEPSPEED_ZERO3_INIT", "false") == "true"
|
||||
if self.zero3_init_flag and not self.hf_ds_config.is_zero3():
|
||||
warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.")
|
||||
self.zero3_init_flag = False
|
||||
|
||||
def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs):
|
||||
mismatches = [] if mismatches is None else mismatches
|
||||
def fill_match(self, ds_key_long, mismatches, must_match=True, **kwargs):
|
||||
config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)
|
||||
if config is None:
|
||||
return
|
||||
@ -611,28 +391,10 @@ class DeepSpeedPlugin:
|
||||
|
||||
def set_mixed_precision(self, mixed_precision):
|
||||
ds_config = self.deepspeed_config
|
||||
kwargs = {
|
||||
"fp16.enabled": mixed_precision == "fp16",
|
||||
"bf16.enabled": mixed_precision == "bf16",
|
||||
}
|
||||
if mixed_precision == "fp16":
|
||||
if "fp16" not in ds_config:
|
||||
ds_config["fp16"] = {"enabled": True, "auto_cast": True}
|
||||
elif mixed_precision == "bf16":
|
||||
if "bf16" not in ds_config:
|
||||
ds_config["bf16"] = {"enabled": True}
|
||||
|
||||
if mixed_precision != "no":
|
||||
diff_dtype = "bf16" if mixed_precision == "fp16" else "fp16"
|
||||
if str(ds_config.get(diff_dtype, {}).get("enabled", "False")).lower() == "true":
|
||||
raise ValueError(
|
||||
f"`--mixed_precision` arg cannot be set to `{mixed_precision}` when `{diff_dtype}` is set in the DeepSpeed config file."
|
||||
)
|
||||
for dtype in ["fp16", "bf16"]:
|
||||
if dtype not in ds_config:
|
||||
ds_config[dtype] = {"enabled": False}
|
||||
self.fill_match("fp16.enabled", must_match=False, **kwargs)
|
||||
self.fill_match("bf16.enabled", must_match=False, **kwargs)
|
||||
if mixed_precision == "fp16" and "fp16" not in ds_config and "bf16" not in ds_config:
|
||||
ds_config.update({"fp16": {"enabled": True, "auto_cast": True}})
|
||||
elif mixed_precision == "bf16" and "fp16" not in ds_config and "bf16" not in ds_config:
|
||||
ds_config.update({"bf16": {"enabled": True}})
|
||||
|
||||
def set_deepspeed_weakref(self):
|
||||
from .imports import is_transformers_available
|
||||
@ -658,48 +420,6 @@ class DeepSpeedPlugin:
|
||||
|
||||
self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa
|
||||
|
||||
def is_zero3_init_enabled(self):
|
||||
return self.zero3_init_flag
|
||||
|
||||
@contextmanager
|
||||
def zero3_init_context_manager(self, enable=False):
|
||||
old = self.zero3_init_flag
|
||||
if old == enable:
|
||||
yield
|
||||
else:
|
||||
self.zero3_init_flag = enable
|
||||
self.dschf = None
|
||||
self.set_deepspeed_weakref()
|
||||
yield
|
||||
self.zero3_init_flag = old
|
||||
self.dschf = None
|
||||
self.set_deepspeed_weakref()
|
||||
|
||||
def _deepspeed_config_checks(self):
|
||||
env_variable_names_to_ignore = [
|
||||
"ACCELERATE_GRADIENT_ACCUMULATION_STEPS",
|
||||
"ACCELERATE_GRADIENT_CLIPPING",
|
||||
"ACCELERATE_DEEPSPEED_ZERO_STAGE",
|
||||
"ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE",
|
||||
"ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE",
|
||||
"ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL",
|
||||
"ACCELERATE_MIXED_PRECISION",
|
||||
]
|
||||
env_variable_names_to_ignore = [
|
||||
name.replace("ACCELERATE_", "").replace("DEEPSPEED_", "").lower() for name in env_variable_names_to_ignore
|
||||
]
|
||||
|
||||
deepspeed_fields_from_accelerate_config = os.environ.get("ACCELERATE_CONFIG_DS_FIELDS", "").split(",")
|
||||
|
||||
if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config):
|
||||
raise ValueError(
|
||||
f"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\n"
|
||||
"Please specify them appropriately in the DeepSpeed config file.\n"
|
||||
"If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n"
|
||||
"The easiest method is to create a new config following the questionnaire via `accelerate config`.\n"
|
||||
"It will only ask for the necessary config variables when using `deepspeed_config_file`."
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class FullyShardedDataParallelPlugin:
|
||||
@ -758,28 +478,13 @@ class FullyShardedDataParallelPlugin:
|
||||
},
|
||||
)
|
||||
|
||||
limit_all_gathers: bool = field(
|
||||
default=False,
|
||||
metadata={
|
||||
"help": "If False, then FSDP allows the CPU thread to schedule all-gathers "
|
||||
"without any extra synchronization. If True, then FSDP explicitly synchronizes the CPU thread to prevent "
|
||||
"too many in-flight all-gathers. This bool only affects the sharded strategies that schedule all-gathers. "
|
||||
"Enabling this can help lower the number of CUDA malloc retries."
|
||||
},
|
||||
)
|
||||
|
||||
use_orig_params: bool = field(
|
||||
default=False,
|
||||
metadata={"help": "If True, enables parameter-efficient fine-tuning"},
|
||||
)
|
||||
|
||||
def __post_init__(self):
|
||||
from torch.distributed.fsdp.fully_sharded_data_parallel import (
|
||||
BackwardPrefetch,
|
||||
CPUOffload,
|
||||
FullStateDictConfig,
|
||||
ShardingStrategy,
|
||||
StateDictType,
|
||||
_state_dict_type_to_config,
|
||||
)
|
||||
|
||||
if self.sharding_strategy is None:
|
||||
@ -800,8 +505,12 @@ class FullyShardedDataParallelPlugin:
|
||||
state_dict_type_policy = os.environ.get("FSDP_STATE_DICT_TYPE", "FULL_STATE_DICT")
|
||||
self.state_dict_type = StateDictType(FSDP_STATE_DICT_TYPE.index(state_dict_type_policy) + 1)
|
||||
|
||||
if self.state_dict_type == StateDictType.FULL_STATE_DICT and self.state_dict_config is None:
|
||||
self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
|
||||
if self.state_dict_type == StateDictType.FULL_STATE_DICT:
|
||||
self.state_dict_config = _state_dict_type_to_config[self.state_dict_type](
|
||||
offload_to_cpu=True, rank0_only=True
|
||||
)
|
||||
else:
|
||||
self.state_dict_config = _state_dict_type_to_config[self.state_dict_type]()
|
||||
|
||||
@staticmethod
|
||||
def get_module_class_from_name(module, name):
|
||||
@ -829,19 +538,16 @@ class FullyShardedDataParallelPlugin:
|
||||
if self.auto_wrap_policy is None:
|
||||
auto_wrap_policy = os.environ.get("FSDP_AUTO_WRAP_POLICY", "NO_WRAP")
|
||||
if auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[0]:
|
||||
transformer_cls_names_to_wrap = os.environ.get("FSDP_TRANSFORMER_CLS_TO_WRAP", "").split(",")
|
||||
transformer_cls_to_wrap = set()
|
||||
for layer_class in transformer_cls_names_to_wrap:
|
||||
transformer_cls = FullyShardedDataParallelPlugin.get_module_class_from_name(model, layer_class)
|
||||
if transformer_cls is None:
|
||||
raise Exception("Could not find the transformer layer class to wrap in the model.")
|
||||
else:
|
||||
transformer_cls_to_wrap.add(transformer_cls)
|
||||
|
||||
transformer_cls_to_wrap = os.environ.get("FSDP_TRANSFORMER_CLS_TO_WRAP", "")
|
||||
transformer_cls_to_wrap = FullyShardedDataParallelPlugin.get_module_class_from_name(
|
||||
model, transformer_cls_to_wrap
|
||||
)
|
||||
if transformer_cls_to_wrap is None:
|
||||
raise Exception("Could not find the transformer layer class to wrap in the model.")
|
||||
self.auto_wrap_policy = functools.partial(
|
||||
transformer_auto_wrap_policy,
|
||||
# Transformer layer class to wrap
|
||||
transformer_layer_cls=transformer_cls_to_wrap,
|
||||
transformer_layer_cls={transformer_cls_to_wrap},
|
||||
)
|
||||
elif auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[1]:
|
||||
min_num_params = int(os.environ.get("FSDP_MIN_NUM_PARAMS", 0))
|
||||
@ -866,14 +572,9 @@ class FullyShardedDataParallelPlugin:
|
||||
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
|
||||
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
|
||||
|
||||
if is_torch_version("<=", "1.13.5"):
|
||||
if self.state_dict_type == StateDictType.FULL_STATE_DICT:
|
||||
with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):
|
||||
state_dict = model.state_dict()
|
||||
else:
|
||||
FSDP.set_state_dict_type(model, self.state_dict_type, self.state_dict_config)
|
||||
state_dict = model.state_dict()
|
||||
|
||||
if self.state_dict_type == StateDictType.FULL_STATE_DICT:
|
||||
weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
|
||||
output_model_file = os.path.join(output_dir, weights_name)
|
||||
if accelerator.process_index == 0:
|
||||
@ -881,6 +582,8 @@ class FullyShardedDataParallelPlugin:
|
||||
torch.save(state_dict, output_model_file)
|
||||
print(f"Model saved to {output_model_file}")
|
||||
else:
|
||||
with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):
|
||||
state_dict = model.state_dict()
|
||||
weights_name = (
|
||||
f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
|
||||
if model_index == 0
|
||||
@ -913,12 +616,7 @@ class FullyShardedDataParallelPlugin:
|
||||
print(f"Loading model from {input_model_file}")
|
||||
state_dict = torch.load(input_model_file)
|
||||
print(f"Model loaded from {input_model_file}")
|
||||
|
||||
if is_torch_version("<=", "1.13.5"):
|
||||
with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):
|
||||
model.load_state_dict(state_dict)
|
||||
else:
|
||||
FSDP.set_state_dict_type(model, self.state_dict_type, self.state_dict_config)
|
||||
with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config):
|
||||
model.load_state_dict(state_dict)
|
||||
|
||||
def save_optimizer(self, accelerator, optimizer, model, output_dir, optimizer_index=0, optim_input=None):
|
||||
@@ -1325,24 +1023,3 @@ class MegatronLMPlugin:
                self.megatron_lm_default_args[key] = True
            elif key.startswith("no_log_"):
                self.megatron_lm_default_args[key.replace("no_", "")] = True


@dataclass
class IntelPyTorchExtensionPlugin:
    """
    This plugin is used to enable Intel PyTorch Extension (IPEX).
    """

    use_ipex: bool = field(default=None, metadata={"help": "Enable Intel PyTorch Extension (IPEX)"})
    dtype: torch.dtype = field(default=torch.float32, metadata={"help": "Enable mixed precision in IPEX"})

    def __post_init__(self):
        prefix = "IPEX_"
        if self.use_ipex is None:
            self.use_ipex = strtobool(os.environ.get(prefix + "ENABLED", "False")) == 1

    def set_mixed_precision(self, mixed_precision):
        if mixed_precision == "fp16":
            raise ValueError("Tried to use `fp16` but it is not supported on cpu")
        elif mixed_precision == "bf16":
            self.dtype = torch.bfloat16
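For reference, the test hunk earlier in this diff constructs this plugin roughly as follows. Note that `IntelPyTorchExtensionPlugin` and the `ipex_plugin` argument appear only on the development-branch side of this comparison, not in the v0.14.0 release, so this is a sketch of that branch's API rather than of released accelerate:

```python
import torch

from accelerate import Accelerator
from accelerate.utils.dataclasses import IntelPyTorchExtensionPlugin  # branch-only class

# Mirrors the CPU bf16 + IPEX path exercised in test_script.py above.
ipex_plugin = IntelPyTorchExtensionPlugin(use_ipex=True, dtype=torch.bfloat16)
accelerator = Accelerator(mixed_precision="bf16", cpu=True, ipex_plugin=ipex_plugin)
```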
Some files were not shown because too many files have changed in this diff.