mirror of https://github.com/huggingface/accelerate.git
synced 2025-11-19 09:04:28 +08:00
Compare commits
89 Commits
check-docs...v0.25.0
| SHA1 | Author | Date | |
|---|---|---|---|
| d08c23c209 | |||
| 0e48b2358d | |||
| 3499cf25aa | |||
| 68d63ee15f | |||
| 151637920d | |||
| 0ba3e9bb50 | |||
| b04d36c75f | |||
| 5fc1b230d3 | |||
| 244122c736 | |||
| d25efa71ce | |||
| 1aeb1e8997 | |||
| 0e51680994 | |||
| 7d430cf8de | |||
| b8ca803f98 | |||
| 1243191ecb | |||
| 2b25b8b3c5 | |||
| ca300c0a04 | |||
| 427ef8bd00 | |||
| 35b0206353 | |||
| fbe00d7897 | |||
| 62af737219 | |||
| cd51581248 | |||
| a5a7c039a0 | |||
| cf745c936d | |||
| 99877f56d6 | |||
| 0f2686c8d3 | |||
| a912b2ee09 | |||
| e9fd72a613 | |||
| 8dedb140ef | |||
| b55855a3d4 | |||
| 2b53a9089c | |||
| 39d255b3d0 | |||
| 99dff1a167 | |||
| a0a16e118a | |||
| 15458c5737 | |||
| fc0a43c3c1 | |||
| 8256a9c2d4 | |||
| 6727ac4394 | |||
| 9674b40580 | |||
| 0b0d9215a9 | |||
| e638b1e21a | |||
| 76de60dbdc | |||
| 217e1a248c | |||
| 5e0eb0d750 | |||
| 183c9dd3ce | |||
| 4f100318f4 | |||
| fa6f43033c | |||
| 820fc4ca7a | |||
| bd72a5f1a8 | |||
| 55088a2cf5 | |||
| c2d8e245e9 | |||
| d8e1285409 | |||
| 5b3f3b99d6 | |||
| 2935057606 | |||
| bb6759d634 | |||
| 55747318a0 | |||
| 217faafe08 | |||
| 5440387529 | |||
| e1fab05ce7 | |||
| c3ec7ff5a9 | |||
| d8535921ad | |||
| eb8c535c17 | |||
| b7686ccb44 | |||
| f3229872bc | |||
| 7843286f2e | |||
| 11e2e99cfc | |||
| 07e745f1c4 | |||
| c7c99a30ea | |||
| 8f45a2eae8 | |||
| 9fd64b7ea9 | |||
| 5be16ad90b | |||
| dab62832de | |||
| caa9f9bcbb | |||
| 943efedb88 | |||
| 50acb0c2ec | |||
| e6d96e5f70 | |||
| 1dfb6e9304 | |||
| 4bef6bc511 | |||
| 73640d0463 | |||
| 7a1159143e | |||
| cbb0b82fa2 | |||
| 5ae6111180 | |||
| 230a5f541b | |||
| 956114ac92 | |||
| 76ee7f211d | |||
| 420743af22 | |||
| 206ab491ed | |||
| 936d2f4f5c | |||
| da98d601b5 |
@@ -15,13 +15,13 @@ jobs:
|
||||
outputs:
|
||||
version: ${{ steps.step1.outputs.version }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v3.1.0
|
||||
- id: step1
|
||||
run: echo "version=$(python setup.py --version)" >> $GITHUB_OUTPUT
|
||||
|
||||
version-cpu:
|
||||
name: "Latest Accelerate CPU [version]"
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
runs-on: [self-hosted, intel-cpu, 8-cpu, ci]
|
||||
needs: get-version
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
@@ -41,7 +41,7 @@ jobs:
|
||||
|
||||
version-cuda:
|
||||
name: "Latest Accelerate GPU [version]"
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
needs: get-version
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
|
||||
2
.github/workflows/build_and_run_tests.yml
vendored
@@ -45,6 +45,6 @@ jobs:
|
||||
uses: ./.github/workflows/run_merge_tests.yml
|
||||
|
||||
run-integration-tests:
|
||||
needs: run-merge-tests
|
||||
needs: build-docker-containers
|
||||
if: always()
|
||||
uses: ./.github/workflows/self_hosted_integration_tests.yml
|
||||
15
.github/workflows/build_docker_images.yml
vendored
@@ -11,19 +11,9 @@ concurrency:
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
clean-storage:
|
||||
name: "Clean docker image storage"
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
steps:
|
||||
- name: Clean storage
|
||||
run: |
|
||||
docker image prune --all -f --filter "until=48h"
|
||||
docker system prune --all -f --filter "until=48h"
|
||||
|
||||
latest-cpu:
|
||||
name: "Latest Accelerate CPU [dev]"
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
needs: clean-storage
|
||||
runs-on: [self-hosted, intel-cpu, 8-cpu, ci]
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
@@ -41,8 +31,7 @@ jobs:
|
||||
|
||||
latest-cuda:
|
||||
name: "Latest Accelerate GPU [dev]"
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
needs: clean-storage
|
||||
runs-on: [self-hosted, nvidia-gpu, t4, ci]
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
1
.github/workflows/build_documentation.yml
vendored
@@ -14,5 +14,4 @@ jobs:
|
||||
commit_sha: ${{ github.sha }}
|
||||
package: accelerate
|
||||
secrets:
|
||||
token: ${{ secrets.HUGGINGFACE_PUSH }}
|
||||
hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
|
||||
|
||||
8
.github/workflows/integration_tests.yml
vendored
@@ -25,11 +25,6 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
transformers-version: [
|
||||
pypi,
|
||||
github
|
||||
]
|
||||
steps:
|
||||
- uses: actions/checkout@v3.1.0
|
||||
- name: Set up python 3.8
|
||||
@@ -47,9 +42,6 @@ jobs:
|
||||
cd ..
|
||||
git clone https://github.com/huggingface/transformers
|
||||
cd transformers
|
||||
if [[ ${{ matrix.transformers-version }} = pypi ]]; then
|
||||
git checkout $(git describe --tags `git rev-list --tags --max-count=1`)
|
||||
fi
|
||||
pip install .[torch,testing]
|
||||
|
||||
- name: Show installed libraries
|
||||
|
||||
24
.github/workflows/nightly.yml
vendored
@@ -13,7 +13,7 @@ env:
|
||||
|
||||
jobs:
|
||||
run_all_tests_single_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
TEST_TYPE: "single_gpu"
|
||||
@@ -22,23 +22,25 @@ jobs:
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
working-directory: accelerate/
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Update clone & pip install
|
||||
run: |
|
||||
source activate accelerate
|
||||
git config --global --add safe.directory '*'
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-reportlog tabulate
|
||||
|
||||
- name: Run test on GPUs
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
source activate accelerate
|
||||
make test
|
||||
|
||||
- name: Run examples on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
@@ -46,13 +48,14 @@ jobs:
|
||||
make test_examples
|
||||
|
||||
- name: Generate Report
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
pip install slack_sdk tabulate
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run_all_tests_multi_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0,1"
|
||||
TEST_TYPE: "multi_gpu"
|
||||
@@ -61,18 +64,19 @@ jobs:
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
working-directory: accelerate/
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Update clone
|
||||
run: |
|
||||
source activate accelerate
|
||||
git config --global --add safe.directory '*'
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-reportlog tabulate
|
||||
|
||||
- name: Run core and big modeling tests on GPUs
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
source activate accelerate
|
||||
make test_core
|
||||
@@ -80,12 +84,14 @@ jobs:
|
||||
make test_cli
|
||||
|
||||
- name: Run Integration tests on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
make test_integrations
|
||||
|
||||
- name: Run examples on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
@@ -93,6 +99,7 @@ jobs:
|
||||
make test_examples
|
||||
|
||||
- name: Generate Report
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
pip install slack_sdk tabulate
|
||||
@@ -100,6 +107,5 @@ jobs:
|
||||
|
||||
|
||||
run-integration-tests:
|
||||
needs: [run_all_tests_single_gpu, run_all_tests_multi_gpu]
|
||||
if: always()
|
||||
uses: ./.github/workflows/self_hosted_integration_tests.yml
|
||||
2
.github/workflows/quality.yml
vendored
@@ -6,7 +6,7 @@ jobs:
|
||||
quality:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3.1.0
|
||||
- name: Set up Python 3.8
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
|
||||
59
.github/workflows/run_merge_tests.yml
vendored
@@ -10,7 +10,7 @@ env:
|
||||
|
||||
jobs:
|
||||
run_all_tests_single_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
container:
|
||||
@@ -18,72 +18,81 @@ jobs:
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
working-directory: accelerate/
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Update clone & pip install
|
||||
- name: Install accelerate
|
||||
run: |
|
||||
source activate accelerate
|
||||
git config --global --add safe.directory '*'
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
pip install -e .[testing,test_trackers] -U
|
||||
pip install pytest-reportlog tabulate
|
||||
source activate accelerate;
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e .[testing,test_trackers] -U;
|
||||
pip install pytest-reportlog tabulate ;
|
||||
|
||||
- name: Run CLI tests
|
||||
- name: Run CLI tests (use make cli)
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
source activate accelerate
|
||||
source activate accelerate;
|
||||
make test_cli
|
||||
|
||||
- name: Run test on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
source activate accelerate;
|
||||
make test
|
||||
- name: Run examples on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
pip uninstall comet_ml -y
|
||||
source activate accelerate;
|
||||
pip uninstall comet_ml -y;
|
||||
make test_examples
|
||||
|
||||
- name: Generate Report
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
pip install tabulate
|
||||
pip install tabulate;
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run_all_tests_multi_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: 0,1
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
working-directory: accelerate/
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Update clone
|
||||
run: |
|
||||
source activate accelerate
|
||||
git config --global --add safe.directory '*'
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
pip install -e .[testing,test_trackers] -U
|
||||
source activate accelerate;
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e .[testing,test_trackers] -U;
|
||||
pip install pytest-reportlog tabulate
|
||||
|
||||
- name: Run test on GPUs
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
source activate accelerate
|
||||
source activate accelerate;
|
||||
make test
|
||||
|
||||
- name: Run examples on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
pip uninstall comet_ml -y
|
||||
source activate accelerate;
|
||||
pip uninstall comet_ml -y;
|
||||
make test_examples
|
||||
|
||||
- name: Generate Report
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
pip install tabulate
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
source activate accelerate;
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
@@ -25,37 +25,32 @@ jobs:
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
options: --gpus all --shm-size "16gb"
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
transformers-version: [
|
||||
pypi,
|
||||
github
|
||||
]
|
||||
cuda_visible_devices: [
|
||||
"0",
|
||||
"0,1"
|
||||
]
|
||||
steps:
|
||||
- name: Update accelerate clone and pip install
|
||||
working-directory: accelerate/
|
||||
run:
|
||||
source activate accelerate;
|
||||
git config --global --add safe.directory '*';
|
||||
git checkout main && git fetch && git checkout ${{ github.sha }};
|
||||
pip install -e .;
|
||||
|
||||
- name: Update transformers clone & pip install
|
||||
working-directory: transformers/
|
||||
- name: Install transformers
|
||||
run: |
|
||||
source activate accelerate
|
||||
git config --global --add safe.directory '*'
|
||||
git checkout main && git pull
|
||||
if [[ ${{ matrix.transformers-version }} = pypi ]]; then
|
||||
git checkout $(git describe --tags `git rev-list --tags --max-count=1`)
|
||||
fi
|
||||
pip install .[torch,deepspeed-testing]
|
||||
source activate accelerate;
|
||||
git clone https://github.com/huggingface/transformers --depth 1;
|
||||
cd transformers;
|
||||
pip install .[torch,deepspeed-testing];
|
||||
cd ..;
|
||||
|
||||
- name: Install accelerate
|
||||
run: |
|
||||
source activate accelerate;
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }} ;
|
||||
pip install -e .[testing];
|
||||
pip uninstall comet_ml wandb -y
|
||||
cd ..;
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
@@ -81,36 +76,40 @@ jobs:
|
||||
source activate accelerate;
|
||||
pytest -sv tests/deepspeed
|
||||
|
||||
- name: Run transformers examples tests
|
||||
working-directory: transformers/
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: ${{ matrix.cuda_visible_devices }}
|
||||
WANDB_DISABLED: true
|
||||
run: |
|
||||
source activate accelerate
|
||||
pip install -r examples/pytorch/_tests_requirements.txt
|
||||
pytest -sv examples/pytorch/test_accelerate_examples.py examples/pytorch/test_pytorch_examples.py
|
||||
|
||||
run-skorch-tests:
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
options: --gpus all --shm-size "16gb"
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
skorch-version: [
|
||||
pypi,
|
||||
github
|
||||
]
|
||||
steps:
|
||||
- name: Update accelerate clone and pip install
|
||||
working-directory: accelerate/
|
||||
- name: Install accelerate
|
||||
run:
|
||||
source activate accelerate;
|
||||
git config --global --add safe.directory '*';
|
||||
git checkout main && git fetch && git checkout ${{ github.sha }};
|
||||
pip install -e .;
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e .[testing];
|
||||
cd ..
|
||||
|
||||
- name: Update skorch clone & pip install
|
||||
working-directory: skorch/
|
||||
- name: Install skorch
|
||||
run: |
|
||||
source activate accelerate
|
||||
git clone https://github.com/skorch-dev/skorch;
|
||||
cd skorch;
|
||||
git config --global --add safe.directory '*'
|
||||
git checkout master && git pull
|
||||
if [[ ${{ matrix.skorch-version }} = pypi ]]; then
|
||||
git checkout $(git describe --tags `git rev-list --tags --max-count=1`)
|
||||
fi
|
||||
pip install .[testing]
|
||||
pip install flaky
|
||||
|
||||
|
||||
4
.github/workflows/stale.yml
vendored
@@ -13,10 +13,10 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3.1.0
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v1
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.8
|
||||
|
||||
|
||||
@@ -269,7 +269,7 @@ If you use 🤗 Accelerate in your publication, please cite it by using the foll
|
||||
```bibtex
|
||||
@Misc{accelerate,
|
||||
title = {Accelerate: Training and inference at scale made simple, efficient and adaptable.},
|
||||
author = {Sylvain Gugger, Lysandre Debut, Thomas Wolf, Philipp Schmid, Zachary Mueller, Sourab Mangrulkar, Marc Sun, Benjamin Bossan},
|
||||
author = {Sylvain Gugger and Lysandre Debut and Thomas Wolf and Philipp Schmid and Zachary Mueller and Sourab Mangrulkar and Marc Sun and Benjamin Bossan},
|
||||
howpublished = {\url{https://github.com/huggingface/accelerate}},
|
||||
year = {2022}
|
||||
}
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
title: Launching distributed code
|
||||
- local: basic_tutorials/notebook
|
||||
title: Launching distributed training from Jupyter Notebooks
|
||||
- local: basic_tutorials/troubleshooting
|
||||
title: Troubleshooting guide
|
||||
title: Tutorials
|
||||
- sections:
|
||||
- local: usage_guides/explore
|
||||
@@ -37,10 +39,6 @@
|
||||
title: Saving and loading training states
|
||||
- local: usage_guides/tracking
|
||||
title: Using experiment trackers
|
||||
- local: usage_guides/debug
|
||||
title: Debugging timeout errors
|
||||
- local: usage_guides/memory
|
||||
title: How to avoid CUDA Out-of-Memory
|
||||
- local: usage_guides/mps
|
||||
title: How to use Apple Silicon M1 GPUs
|
||||
- local: usage_guides/deepspeed
|
||||
@@ -55,6 +53,8 @@
|
||||
title: How to use 🤗 Accelerate with Intel® Extension for PyTorch for cpu
|
||||
title: How-To Guides
|
||||
- sections:
|
||||
- local: concept_guides/internal_mechanism
|
||||
title: 🤗 Accelerate's internal mechanism
|
||||
- local: concept_guides/big_model_inference
|
||||
title: Loading big models into memory
|
||||
- local: concept_guides/performance
|
||||
|
||||
@@ -153,6 +153,15 @@ the below example enabling unbuffered stdout and stderr:
|
||||
python -u -m accelerate.commands.launch --num_processes=2 {script_name.py} {--arg1} {--arg2}
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
You can run your code on CPU as well! This is helpful for debugging and testing purposes on toy models and datasets.
|
||||
|
||||
```bash
|
||||
accelerate launch --cpu {script_name.py} {--arg1} {--arg2}
|
||||
```
|
||||
|
||||
</Tip>
|
||||
|
||||
## Why you should always use `accelerate config`
|
||||
|
||||
@@ -200,3 +209,24 @@ Launching a script from the location of that custom yaml file looks like the fol
|
||||
```bash
|
||||
accelerate launch --config_file {path/to/config/my_config_file.yaml} {script_name.py} {--arg1} {--arg2} ...
|
||||
```
|
||||
|
||||
## Multi-node training
|
||||
Multi-node training with 🤗Accelerate is similar to [multi-node training with torchrun](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). The simplest way to launch a multi-node training run is to do the following:
|
||||
|
||||
- Copy your codebase and data to all nodes. (or place them on a shared filesystem)
|
||||
- Set up your Python packages on all nodes.
|
||||
- Run `accelerate config` on the main node first. After specifying the number of nodes, you will be asked to specify the rank of each node (this will be 0 for the main/master node), along with the IP address and port of the main process. This is required for the worker nodes to communicate with the main process. Afterwards, copy or send this config file to all of your nodes, changing the `machine_rank` to 1, 2, 3, etc. so you don't have to re-run `accelerate config` on every node (or, alternatively, follow the equivalent instructions for launching with `torchrun` directly).
|
||||
|
||||
Once you have done this, you can start your multi-node training run by running `accelerate launch` (or `torchrun`) on all nodes.
|
||||
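As a rough sketch, launching on a two-node setup without a config file might look like the following (the IP address, port, process counts, and script name below are placeholder assumptions, not values from this guide). Note that `--num_processes` is the total number of processes across all machines:

```bash
# On the main node (machine_rank 0)
accelerate launch --multi_gpu --num_machines 2 --machine_rank 0 \
    --main_process_ip 192.168.1.2 --main_process_port 29500 \
    --num_processes 16 train.py

# On the second node (machine_rank 1), with the same codebase and data available
accelerate launch --multi_gpu --num_machines 2 --machine_rank 1 \
    --main_process_ip 192.168.1.2 --main_process_port 29500 \
    --num_processes 16 train.py
```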
|
||||
<Tip>
|
||||
The command must be run on all nodes for everything to start, not just from the main node. You can use something like SLURM or a different process executor to wrap around this requirement and call everything from a single command.
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
It is recommended to use the intranet IP of your main node over the public IP for better latency. This is the `192.168.x.x` or the `172.x.x.x` address you see when you run `hostname -I` on the main node.
|
||||
|
||||
</Tip>
|
||||
|
||||
To get a better idea about multi-node training, check out our example for [multi-node training with FSDP](https://huggingface.co/blog/ram-efficient-pytorch-fsdp).
|
||||
|
||||
222
docs/source/basic_tutorials/troubleshooting.md
Normal file
@@ -0,0 +1,222 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Troubleshooting guide
|
||||
|
||||
This guide aims to provide you the tools and knowledge required to navigate some common issues. However,
|
||||
as 🤗 Accelerate continuously evolves and the use cases and setups are diverse, you might encounter an issue not covered in this
|
||||
guide. If the suggestions listed in this guide do not cover your situation, please refer to the final section of
|
||||
the guide, [Asking for Help](#ask-for-help), to learn where to find help with your specific issue.
|
||||
|
||||
## Logging
|
||||
|
||||
When facing an error, logging can help narrow down where it is coming from. In a distributed setup with multiple processes,
|
||||
logging can be a challenge, but 🤗 Accelerate provides a utility that streamlines the logging process and ensures that
|
||||
logs are synchronized and managed effectively across the distributed setup.
|
||||
|
||||
To troubleshoot an issue, use `accelerate.logging` instead of the standard Python `logging` module:
|
||||
|
||||
```diff
|
||||
- import logging
|
||||
+ from accelerate.logging import get_logger
|
||||
- logger = logging.getLogger(__name__)
|
||||
+ logger = get_logger(__name__)
|
||||
```
|
||||
|
||||
To set the log level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`), export it as the `ACCELERATE_LOG_LEVEL` environment variable,
|
||||
or pass as `log_level` to `get_logger`:
|
||||
|
||||
```python
|
||||
from accelerate.logging import get_logger
|
||||
|
||||
logger = get_logger(__name__, log_level="INFO")
|
||||
```
|
||||
|
||||
By default, the log is called on main processes only. To call it on all processes, pass `main_process_only=False`.
|
||||
If a log should be called on all processes and in order, also pass `in_order=True`.
|
||||
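A minimal sketch of how these options fit together in a script launched with 🤗 Accelerate:

```python
from accelerate import Accelerator
from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="INFO")
accelerator = Accelerator()

# By default, each call is logged on the main process only
logger.info("Starting training")
# Pass `main_process_only=False` to log from every process
logger.info("Hello from every process", main_process_only=False)
```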
|
||||
## Hanging code and timeout errors
|
||||
|
||||
### Mismatched tensor shapes
|
||||
|
||||
If your code seems to be hanging for a significant amount of time on a distributed setup, a common cause is mismatched shapes of tensors on different
|
||||
devices.
|
||||
|
||||
When running scripts in a distributed fashion, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are
|
||||
necessary to grab tensors across devices to perform operations on them collectively. These (and other) functions rely on
|
||||
`torch.distributed` performing a `gather` operation, which requires that tensors have the **exact same shape** across all processes.
|
||||
When the tensor shapes don't match, you will experience hanging code, and eventually hit a timeout exception.
|
||||
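For example, a minimal sketch of such a mismatch, meant to be run with two processes; without debug mode, the `broadcast` call below simply hangs because the shapes differ across processes:

```python
import torch

from accelerate import PartialState
from accelerate.utils import broadcast

state = PartialState()
if state.process_index == 0:
    tensor = torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)  # shape [1, 5]
else:
    tensor = torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)  # shape [1, 2, 5]

# torch.distributed requires identical shapes on every process, so this call hangs
# (or raises a DistributedOperationException once debug mode is enabled)
broadcast_tensor = broadcast(tensor)
print(broadcast_tensor)
```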
|
||||
If you suspect this to be the case, use Accelerate's operational debug mode to immediately catch the issue.
|
||||
|
||||
The recommended way to enable Accelerate's operational debug mode is during `accelerate config` setup.
|
||||
Alternative ways to enable debug mode are:
|
||||
|
||||
* From the CLI:
|
||||
|
||||
```bash
|
||||
accelerate launch --debug {my_script.py} --arg1 --arg2
|
||||
```
|
||||
|
||||
* As an environment variable (which avoids the need for `accelerate launch`):
|
||||
|
||||
```bash
|
||||
ACCELERATE_DEBUG_MODE="1" torchrun {my_script.py} --arg1 --arg2
|
||||
```
|
||||
|
||||
* Manually changing the `config.yaml` file:
|
||||
|
||||
```diff
|
||||
compute_environment: LOCAL_MACHINE
|
||||
+debug: true
|
||||
```
|
||||
|
||||
Once you enable the debug mode, you should get a similar traceback that points to the tensor shape mismatch issue:
|
||||
|
||||
```py
|
||||
Traceback (most recent call last):
|
||||
File "/home/zach_mueller_huggingface_co/test.py", line 18, in <module>
|
||||
main()
|
||||
File "/home/zach_mueller_huggingface_co/test.py", line 15, in main
|
||||
broadcast_tensor = broadcast(tensor)
|
||||
File "/home/zach_mueller_huggingface_co/accelerate/src/accelerate/utils/operations.py", line 303, in wrapper
|
||||
accelerate.utils.operations.DistributedOperationException:
|
||||
|
||||
Cannot apply desired operation due to shape mismatches. All shapes across devices must be valid.
|
||||
|
||||
Operation: `accelerate.utils.operations.broadcast`
|
||||
Input shapes:
|
||||
- Process 0: [1, 5]
|
||||
- Process 1: [1, 2, 5]
|
||||
```
|
||||
|
||||
### Early stopping leads to hanging
|
||||
|
||||
When doing early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss),
|
||||
it may not be synchronized across all of them. As a result, a break can happen on process 0 but not on process 1.
|
||||
This will cause the code to hang indefinitely until a timeout occurs.
|
||||
|
||||
If you have early stopping conditionals, use `set_breakpoint` and `check_breakpoint` methods to make sure all the processes
|
||||
are ended correctly:
|
||||
|
||||
```py
|
||||
# Assume `should_do_breakpoint` is a custom defined function that returns a conditional,
|
||||
# and that conditional might be true only on process 1
|
||||
if should_do_breakpoint(loss):
|
||||
accelerator.set_breakpoint()
|
||||
|
||||
# Later in the training script when we need to check for the breakpoint
|
||||
if accelerator.check_breakpoint():
|
||||
break
|
||||
```
|
||||
|
||||
### Hanging on low kernel versions on Linux
|
||||
|
||||
This is a known issue. On Linux with kernel version < 5.5, hanging processes have been reported. To avoid
|
||||
encountering this problem, we recommend upgrading your system to a later kernel version.
|
||||
|
||||
## CUDA out of memory
|
||||
|
||||
One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory",
|
||||
as the entire script needs to be restarted, progress is lost, and typically a developer would want to simply
|
||||
start their script and let it run.
|
||||
|
||||
To address this problem, `Accelerate` offers a utility `find_executable_batch_size` that is heavily based on [toma](https://github.com/BlackHC/toma).
|
||||
The utility retries code that fails due to OOM (out-of-memory) conditions and lowers batch sizes automatically.
|
||||
|
||||
### find_executable_batch_size
|
||||
|
||||
This algorithm operates with exponential decay, decreasing the batch size in half after each failed run on some
|
||||
training script. To use it, restructure your training function to include an inner function that includes this wrapper,
|
||||
and build your dataloaders inside it. At a minimum, this could look like 4 new lines of code.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
The inner function *must* take in the batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for us.
|
||||
|
||||
</Tip>
|
||||
|
||||
It should also be noted that anything which will consume CUDA memory and be passed to the `accelerator` **must** be declared inside the inner function,
|
||||
such as models and optimizers.
|
||||
|
||||
```diff
|
||||
def training_function(args):
|
||||
accelerator = Accelerator()
|
||||
|
||||
+ @find_executable_batch_size(starting_batch_size=args.batch_size)
|
||||
+ def inner_training_loop(batch_size):
|
||||
+ nonlocal accelerator # Ensure they can be used in our context
|
||||
+ accelerator.free_memory() # Free all lingering references
|
||||
model = get_model()
|
||||
model.to(accelerator.device)
|
||||
optimizer = get_optimizer()
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
lr_scheduler = get_scheduler(
|
||||
optimizer,
|
||||
num_training_steps=len(train_dataloader)*num_epochs
|
||||
)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
train(model, optimizer, train_dataloader, lr_scheduler)
|
||||
validate(model, eval_dataloader)
|
||||
+ inner_training_loop()
|
||||
```
|
||||
|
||||
To find out more, check the documentation [here](../package_reference/utilities#accelerate.find_executable_batch_size).
|
||||
|
||||
## Non-reproducible results between device setups
|
||||
|
||||
If you have changed the device setup and are observing different model performance, this is likely due to the fact that
|
||||
you have not updated your script when moving from one setup to another. The same script with the same batch size across TPU,
|
||||
multi-GPU, and single-GPU with Accelerate will have different results.
|
||||
|
||||
For example, if you were previously training on a single GPU with a batch size of 16, when moving to two GPU setup,
|
||||
you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate,
|
||||
the batch size passed to the dataloader is the **batch size per GPU**.
|
||||
|
||||
To make sure you can reproduce the results between the setups, make sure to use the same seed, adjust the batch size
|
||||
accordingly, and consider scaling the learning rate.
|
||||
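A minimal sketch of keeping the effective batch size constant when moving to more devices (the stand-in dataset and the original batch size of 16 are assumptions for illustration):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator()
train_dataset = TensorDataset(torch.randn(64, 10))  # stand-in dataset for illustration

# assumption: a batch size of 16 was tuned on the original single-GPU run
single_gpu_batch_size = 16
# keep the same effective batch size when moving to more devices
per_device_batch_size = single_gpu_batch_size // accelerator.num_processes

train_dataloader = DataLoader(train_dataset, batch_size=per_device_batch_size, shuffle=True)
```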
|
||||
For more details and a quick reference for batch sizes, check out the [Comparing performance between different device setups](../concept_guides/performance) guide.
|
||||
|
||||
## Performance issues on different GPUs
|
||||
|
||||
If your multi-GPU setup consists of different GPUs, you may hit some limitations:
|
||||
|
||||
- There may be an imbalance in GPU memory between the GPUs. In this case, the GPU with smaller memory will limit the batch size or the size of the model that can be loaded onto the GPUs.
|
||||
- If you are using GPUs with different performance profiles, the performance will be driven by the slowest GPU that you are using as the other GPUs will have to wait for it to complete its workload.
|
||||
|
||||
Vastly different GPUs within the same setup can lead to performance bottlenecks.
|
||||
|
||||
## Ask for help
|
||||
|
||||
If the above troubleshooting tools and advice did not help you resolve your issue, reach out for help to the community
|
||||
and the team.
|
||||
|
||||
### Forums
|
||||
|
||||
Ask for help on the Hugging Face forums - post your question in the [🤗Accelerate category](https://discuss.huggingface.co/c/accelerate/18)
|
||||
Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved!
|
||||
|
||||
### Discord
|
||||
|
||||
Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you.
|
||||
|
||||
### GitHub Issues
|
||||
|
||||
Create an Issue on the 🤗 Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you suspect
|
||||
to have found a bug related to the library. Include context regarding the bug and details about your distributed setup
|
||||
to help us better figure out what's wrong and how we can fix it.
|
||||
@@ -154,7 +154,7 @@ By passing `device_map="auto"`, we tell 🤗 Accelerate to determine automatical
|
||||
#### `no_split_module_classes`
|
||||
|
||||
This parameter will indicate that some of the modules with the name `"Block"` should not be split across different devices. You should set here all blocks that
|
||||
include a residutal connection of some kind.
|
||||
include a residual connection of some kind.
|
||||
|
||||
|
||||
#### The `device_map`
|
||||
|
||||
@@ -55,8 +55,8 @@ their gradients computed, collated, and updated before moving on to the next
|
||||
batch of data.
|
||||
When performing gradient accumulation, you accumulate `n` loss gradients and
|
||||
skip `optimizer.step()` until `n` batches have been reached. As all training
|
||||
processes only need to sychronize by the time `optimizer.step()` is called,
|
||||
without any modification to your training step, this neededless inter-process
|
||||
processes only need to synchronize by the time `optimizer.step()` is called,
|
||||
without any modification to your training step, this needless inter-process
|
||||
communication can cause a significant slowdown.
|
||||
|
||||
How can you avoid this overhead?
|
||||
|
||||
72
docs/source/concept_guides/internal_mechanism.md
Normal file
@@ -0,0 +1,72 @@
|
||||
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# 🤗 Accelerate's internal mechanisms
|
||||
|
||||
Internally, 🤗 Accelerate works by first analyzing the environment in which the script is launched to determine which
|
||||
kind of distributed setup is used, how many different processes there are and which one the current script is in. All
|
||||
that information is stored in the [`~AcceleratorState`].
|
||||
|
||||
This class is initialized the first time you instantiate an [`~Accelerator`] and performs any
|
||||
specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of
|
||||
[`~state.AcceleratorState`]. (The same can also be done with the [`PartialState`], a more barebones version that it inherits from.)
|
||||
|
||||
Then, when calling [`~Accelerator.prepare`], the library:
|
||||
|
||||
- wraps your model(s) in the container adapted for the distributed setup,
|
||||
- wraps your optimizer(s) in an [`~optimizer.AcceleratedOptimizer`],
|
||||
- wraps your scheduler(s) in an [`~scheduler.AcceleratedScheduler`]
|
||||
- creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`] or [`~data_loader.DataLoaderDispatcher`]
|
||||
|
||||
While the model(s), optimizer(s), and scheduler(s) are just put in simple wrappers, the dataloader(s) are re-created. This is mostly
|
||||
because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created and the
|
||||
library handles the sharding of your data between processes by changing that `batch_sampler` to yield every other
|
||||
`num_processes` batches (if enabled).
|
||||
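A minimal sketch illustrating that [`~Accelerator.prepare`] hands back a re-created dataloader object rather than the original one (the toy dataset is an assumption for illustration):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator()
dataloader = DataLoader(TensorDataset(torch.randn(64, 2)), batch_size=8, shuffle=True)

prepared_dataloader = accelerator.prepare(dataloader)
# The returned object is an Accelerate wrapper (e.g. a DataLoaderShard), not the original DataLoader
print(type(dataloader), "->", type(prepared_dataloader))
```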
|
||||
The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality:
|
||||
|
||||
- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any
|
||||
randomization (like shuffling) is done the exact same way across processes.
|
||||
- it puts the batches on the proper device before yielding them (unless you have opted out of
|
||||
`device_placement=True`).
|
||||
|
||||
The [`~data_loader.DataLoaderDispatcher`] subclass differs from the [`~data_loader.DataLoaderShard`] in that when iterating through the `DataLoader`, the data all starts from process 0 and is *then* split and sent off to each process, rather than the split happening at the dataset level.
|
||||
|
||||
The random number generator synchronization will by default synchronize:
|
||||
|
||||
- the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6
|
||||
- the main random number generator in PyTorch <=1.5.1
|
||||
|
||||
You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main
|
||||
[`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid
|
||||
setting the same seed in the main random number generator in all processes.
|
||||
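For instance, a minimal sketch that only synchronizes a sampler's local `generator` (which generators you actually want synchronized is an assumption here):

```python
from accelerate import Accelerator

# assumption: we only want the `generator` attribute of samplers synchronized,
# leaving the main torch random number generator untouched on each process
accelerator = Accelerator(rng_types=["generator"])
```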
|
||||
<Tip warning={true}>
|
||||
|
||||
Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random
|
||||
artifacts you could have in your dataset (like random data augmentation) in the sense that all processes will get
|
||||
the same random numbers from the torch random modules (so will apply the same random data augmentation if it's
|
||||
controlled by torch).
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local
|
||||
`torch.Generator` object (in PyTorch >= 1.6), see the traditional `RandomSampler`, as an example.
|
||||
|
||||
</Tip>
|
||||
|
||||
For more details about the internals, see the [Internals page](package_reference/torch_wrappers).
|
||||
@@ -74,7 +74,7 @@ In this example, there are two GPUs for "Multi-GPU" and a TPU pod with 8 workers
|
||||
|
||||
## Learning Rates
|
||||
|
||||
As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/tlt-mi_archive/clara-train-sdk-v2.0/nvmidl/appendix/training_with_multiple_gpus.html)], the learning rate should be scaled *linearly* based on the number of devices present. The below
|
||||
As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/clara-train-sdk/pt/model.html#classification-models-multi-gpu-training)], the learning rate should be scaled *linearly* based on the number of devices present. The below
|
||||
snippet shows doing so with Accelerate:
|
||||
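(The referenced snippet falls outside this hunk's context lines; a minimal sketch of linear scaling, with an assumed base learning rate of 1e-3, might look like the following.)

```python
from accelerate import Accelerator

accelerator = Accelerator()

base_lr = 1e-3  # assumption: the learning rate tuned for a single device
learning_rate = base_lr * accelerator.num_processes
```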
|
||||
<Tip>
|
||||
|
||||
@@ -36,7 +36,7 @@ Below is an example of a training function passed to the [`notebook_launcher`] i
|
||||
|
||||
<Tip>
|
||||
|
||||
This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate/simple_nlp_example.ipynb) with slight
|
||||
This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb) with slight
|
||||
modifications for the sake of simplicity
|
||||
|
||||
</Tip>
|
||||
|
||||
@@ -199,7 +199,7 @@ The following arguments are only useful when `use_deepspeed` is passed or `deeps
|
||||
|
||||
**Fully Sharded Data Parallelism Arguments**:
|
||||
|
||||
The following arguments are only useful when `use_fdsp` is passed or Fully Sharded Data Parallelism is configured through `accelerate config`:
|
||||
The following arguments are only useful when `use_fsdp` is passed or Fully Sharded Data Parallelism is configured through `accelerate config`:
|
||||
|
||||
* `--fsdp_offload_params` (`str`) -- Decides Whether (true|false) to offload parameters and gradients to CPU.
|
||||
* `--fsdp_min_num_params` (`int`) -- FSDP's minimum number of parameters for Default Auto Wrapping.
|
||||
|
||||
@@ -15,23 +15,7 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# Logging with Accelerate
|
||||
|
||||
Accelerate has its own logging utility to handle logging while in a distributed system.
|
||||
To utilize this replace cases of `logging` with `accelerate.logging`:
|
||||
```diff
|
||||
- import logging
|
||||
+ from accelerate.logging import get_logger
|
||||
- logger = logging.getLogger(__name__)
|
||||
+ logger = get_logger(__name__)
|
||||
```
|
||||
|
||||
## Setting the log level
|
||||
|
||||
The log level can be set with the `ACCELERATE_LOG_LEVEL` environment variable or by passing
|
||||
`log_level` to `get_logger`:
|
||||
```python
|
||||
from accelerate.logging import get_logger
|
||||
|
||||
logger = get_logger(__name__, log_level="INFO")
|
||||
```
|
||||
Refer to the [Troubleshooting guide](../usage_guides/troubleshooting#logging) or to the example below to learn
|
||||
how to use 🤗 Accelerate's logger.
|
||||
|
||||
[[autodoc]] logging.get_logger
|
||||
@@ -31,3 +31,5 @@ rendered properly in your Markdown viewer.
|
||||
- __init__
|
||||
[[autodoc]] tracking.MLflowTracker
|
||||
- __init__
|
||||
[[autodoc]] tracking.ClearMLTracker
|
||||
- __init__
|
||||
|
||||
@@ -50,6 +50,12 @@ These are basic dataclasses used throughout 🤗 Accelerate and they can be pass
|
||||
|
||||
[[autodoc]] utils.ProjectConfiguration
|
||||
|
||||
## Environmental Variables
|
||||
|
||||
These are environmental variables that can be enabled for different use cases
|
||||
|
||||
* `ACCELERATE_DEBUG_MODE` (`str`): Whether to run accelerate in debug mode. More info available [here](../usage_guides/debug.md).
|
||||
|
||||
## Plugins
|
||||
|
||||
These are plugins that can be passed to the [`Accelerator`] object. While they are defined elsewhere in the documentation,
|
||||
|
||||
@@ -48,13 +48,13 @@ If you choose to leave those `.to(device)` calls, make sure to use the device pr
|
||||
<Tip warning={true}>
|
||||
|
||||
You can fully deactivate the automatic device placement by passing along `device_placement=False` when
|
||||
initializing [`Accelerator`].
|
||||
initializing the [`Accelerator`].
|
||||
However, if you place your objects manually on the proper device, be careful to create your optimizer after putting your
|
||||
model on `accelerator.device` or your training will fail on TPU.
|
||||
|
||||
</Tip>
|
||||
|
||||
3. Pass all objects relevant to training (optimizer, model, training dataloader, learning rate scheduler) to the
|
||||
3. Pass all PyTorch objects relevant to training (optimizer, model, dataloader(s), learning rate scheduler) to the
|
||||
[`~Accelerator.prepare`] method as soon as these objects are created, before starting your actual
|
||||
training loop:
|
||||
|
||||
@@ -66,14 +66,14 @@ model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
||||
|
||||
**Important notes**:
|
||||
|
||||
* Only pass the learning rate scheduler to [`~Accelerator.prepare`] when the scheduler needs to be stepped at each optimizer step.
|
||||
* While you can send your dataloader to [`~Accelerator.prepare`] on its own, it's best to send it to [`~Accelerator.prepare`] together with the model and optimizer.
|
||||
* You should always pass the learning rate scheduler to [`~Accelerator.prepare`], however if the scheduler should *not* be stepped at each optimization step, pass `step_with_optimizer=False` to the [`Accelerator`] init.
|
||||
* While you can send your dataloader to [`~Accelerator.prepare`] on its own (and there are cases for doing so, such as distributed inference), it's best to send it to [`~Accelerator.prepare`] together with the model and optimizer.
|
||||
* If you wish to run distributed evaluation, send your validation dataloader to [`~Accelerator.prepare`] as well. There are some nuances to distributed validation, check the [Distributed evaluation](#add-distributed-evaluation) section of the guide.
|
||||
* Any instruction using your training dataloader length (for instance if you want to log the number of total training
|
||||
steps) should go after the call to [`~Accelerator.prepare`].
|
||||
|
||||
Passing these objects to the [`~Accelerator.prepare`] method ensures that your training dataloader will be sharded across
|
||||
all GPUs/TPU cores available so that each one sees a different portion of the training dataset. Also, the random states
|
||||
Passing `DataLoader` objects to the [`~Accelerator.prepare`] method ensures that your dataloader will be sharded across
|
||||
all GPUs/TPU cores available so that each one sees a different portion of the training dataset. In other words, if there are 8 processes and a dataset of 64 items, each process will see 8 of these items per iteration. Also, the random states
|
||||
of all processes will be synchronized at the beginning of each iteration through your dataloader, to make sure the data
|
||||
is shuffled the same way (if you decided to use `shuffle=True` or any kind of random sampler).
|
||||
|
||||
@@ -81,7 +81,7 @@ is shuffled the same way (if you decided to use `shuffle=True` or any kind of ra
|
||||
|
||||
The actual batch size for your training will be the number of devices used multiplied by the batch size you set in
|
||||
your script. For instance, training on 4 GPUs with a batch size of 16 set when creating the training dataloader will
|
||||
train at an actual batch size of 64.
|
||||
train at an actual batch size of 64 (4 * 16).
|
||||
If you want the batch size remain the same regardless of how many GPUs the script is run on, you can use the
|
||||
option `split_batches=True` when creating and initializing [`Accelerator`].
|
||||
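A brief sketch of that option, assuming the batch size set on the dataloader is meant as the global batch size:

```python
from accelerate import Accelerator

# The batch size set on the dataloader is treated as the global batch size and
# divided across devices, instead of being a per-device batch size
accelerator = Accelerator(split_batches=True)
```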
Your training dataloader may change length when going through this method: if you run on X GPUs, it will have its
|
||||
@@ -99,7 +99,7 @@ launcher.
|
||||
|
||||
### Add distributed evaluation
|
||||
|
||||
You can perform regular evaluation in your training script, if you leave your validation dataloader out of the
|
||||
You can perform regular evaluation in your training script if you leave your validation dataloader out of the
|
||||
[`~Accelerator.prepare`] method. In this case, you will need to put the input data on the
|
||||
`accelerator.device` manually.
|
||||
|
||||
@@ -133,7 +133,7 @@ for inputs, targets in validation_dataloader:
|
||||
|
||||
Some data at the end of the dataset may be duplicated so the batch can be divided equally among all workers. As a result,
|
||||
metrics should be calculated through the [`~Accelerator.gather_for_metrics`] method to automatically remove the duplicated
|
||||
data while gathering.
|
||||
data while gathering and provide a more accurate metric.
|
||||
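For example, a minimal sketch of an evaluation loop using it (assuming `model`, `validation_dataloader`, and `accelerator` were already set up and prepared as described above):

```python
# assumption: `model` and `validation_dataloader` were both passed through `accelerator.prepare`
for inputs, targets in validation_dataloader:
    predictions = model(inputs)
    # Gather from all processes, dropping the samples that were duplicated to make
    # the last batch divisible across workers
    all_predictions, all_targets = accelerator.gather_for_metrics((predictions, targets))
```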
|
||||
<Tip>
|
||||
|
||||
@@ -201,7 +201,7 @@ accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for
|
||||
```
|
||||
|
||||
You can override any of the arguments determined by your config file. To see the complete list of parameters that you
|
||||
can pass in, run `accelerate launch -h`.
|
||||
can pass in, run `accelerate launch -h`. (You can also get help for more niche arguments by passing in partial commands, such as `accelerate launch --multi_gpu -h` for all `multi_gpu` args.)
|
||||
|
||||
Check out the [Launch tutorial](basic_tutorials/launch) for more information about launching your scripts.
|
||||
|
||||
@@ -212,8 +212,8 @@ Here we describe common modifications/deviations from the base case scenario and
|
||||
|
||||
### Launch distributed training from a notebook
|
||||
|
||||
In Accelerate 0.3.0, a new [`notebook_launcher`] has been introduced to help you launch your training function from a
|
||||
notebook. This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training on several GPUs
|
||||
Accelerate has a [`notebook_launcher`] to help you launch your training function from a
|
||||
notebook. This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training on several GPUs and machines
|
||||
(if the machine on which you are running your notebook has them).
|
||||
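A short sketch of how this might look (the function body and process count are placeholder assumptions):

```python
from accelerate import notebook_launcher

def training_loop(mixed_precision="fp16"):
    # the whole training and/or evaluation logic goes here
    ...

# assumption: two GPUs are available on the machine running the notebook
notebook_launcher(training_loop, args=("fp16",), num_processes=2)
```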
|
||||
Define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a
|
||||
|
||||
@@ -52,7 +52,7 @@ will attempt to fill all the space in your GPU(s), then loading them to the CPU,
|
||||
|
||||
<Tip>
|
||||
|
||||
For more details on desigining your own device map, see this section of the [concept guide](../concept_guide/big_model_inference#desigining-a-device-map)
|
||||
For more details on designing your own device map, see this section of the [concept guide](../concept_guide/big_model_inference#designing-a-device-map)
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
@@ -1,93 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Debugging Distributed Operations
|
||||
|
||||
When running scripts in a distributed fashion, often functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] (and others) are neccessary to grab tensors across devices and perform certain operations on them. However, if the tensors which are being grabbed are not the proper shapes then this will result in your code hanging forever. The only sign that exists of this truly happening is hitting a timeout exception from `torch.distributed`, but this can get quite costly as usually the timeout is 10 minutes.
|
||||
|
||||
Accelerate now has a `debug` mode which adds a neglible amount of time to each operation, but allows it to verify that the inputs you are bringing in can *actually* perform the operation you want **without** hitting this timeout problem!
|
||||
|
||||
## Visualizing the problem
|
||||
|
||||
To have a tangible example of this issue, let's take the following setup (on 2 GPUs):
|
||||
|
||||
```python
|
||||
from accelerate import PartialState
|
||||
|
||||
state = PartialState()
|
||||
if state.process_index == 0:
|
||||
tensor = torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)
|
||||
else:
|
||||
tensor = torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)
|
||||
|
||||
broadcast_tensor = broadcast(tensor)
|
||||
print(broadcast_tensor)
|
||||
```
|
||||
|
||||
We've created a single tensor on each device, with two radically different shapes. With this setup if we want to perform an operation such as [`utils.broadcast`], we would forever hit a timeout because `torch.distributed` requires that these operations have the **exact same shape** across all processes for it to work.
|
||||
|
||||
If you run this yourself, you will find that `broadcast_tensor` can be printed on the main process, but its results won't quite be right, and then it will just hang never printing it on any of the other processes:
|
||||
|
||||
```
|
||||
>>> tensor([[0, 1, 2, 3, 4]], device='cuda:0')
|
||||
```
|
||||
|
||||
## The solution
|
||||
|
||||
By enabling Accelerate's operational debug mode, Accelerate will properly find and catch errors such as this and provide a very clear traceback immediatly:
|
||||
|
||||
```
|
||||
Traceback (most recent call last):
|
||||
File "/home/zach_mueller_huggingface_co/test.py", line 18, in <module>
|
||||
main()
|
||||
File "/home/zach_mueller_huggingface_co/test.py", line 15, in main
|
||||
main()broadcast_tensor = broadcast(tensor)
|
||||
File "/home/zach_mueller_huggingface_co/accelerate/src/accelerate/utils/operations.py", line 303, in wrapper
|
||||
broadcast_tensor = broadcast(tensor)
|
||||
accelerate.utils.operations.DistributedOperationException: Cannot apply desired operation due to shape mismatches. All shapes across devices must be valid.
|
||||
|
||||
Operation: `accelerate.utils.operations.broadcast`
|
||||
Input shapes:
|
||||
- Process 0: [1, 5]
|
||||
- Process 1: [1, 2, 5]
|
||||
```
|
||||
|
||||
This explains that the shapes across our devices were *not* the same, and that we should ensure that they match properly to be compatible. Typically this means that there is either an extra dimension, or certain dimensions are incompatible with the operation.
|
||||
|
||||
To enable this please do one of the following:
|
||||
|
||||
Enable it through the questionarre during `accelerate config` (recommended)
|
||||
|
||||
From the CLI:
|
||||
|
||||
```
|
||||
accelerate launch --debug {my_script.py} --arg1 --arg2
|
||||
```
|
||||
|
||||
As an environmental variable (which avoids the need for `accelerate launch`):
|
||||
|
||||
```
|
||||
ACCELERATE_DEBUG_MODE="1" accelerate launch {my_script.py} --arg1 --arg2
|
||||
```
|
||||
|
||||
Manually changing the `config.yaml` file:
|
||||
|
||||
```diff
|
||||
compute_environment: LOCAL_MACHINE
|
||||
+debug: true
|
||||
```
|
||||
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# DeepSpeed
|
||||
|
||||
[DeepSpeed](https://github.com/microsoft/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Currently, it provides full support for:
|
||||
[DeepSpeed](https://github.com/microsoft/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Some of the salient optimizations are:
|
||||
|
||||
1. Optimizer state partitioning (ZeRO stage 1)
|
||||
2. Gradient partitioning (ZeRO stage 2)
|
||||
@@ -23,6 +23,7 @@ rendered properly in your Markdown viewer.
|
||||
4. Custom mixed precision training handling
|
||||
5. A range of fast CUDA-extension-based optimizers
|
||||
6. ZeRO-Offload to CPU and Disk/NVMe
|
||||
7. Hierarchical partitioning of model parameters (ZeRO++)
|
||||
|
||||
ZeRO-Offload has its own dedicated paper: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840). And NVMe-support is described in the paper [ZeRO-Infinity: Breaking the GPU
|
||||
Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857).
|
||||
@@ -44,7 +45,7 @@ won't be possible on a single GPU.
|
||||
|
||||
Training:
|
||||
|
||||
1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3 as well as CPU/Disk offload of optimizer states, gradients and parameters.
|
||||
1. 🤗 Accelerate integrates all features of DeepSpeed ZeRO. This includes all the ZeRO stages 1, 2 and 3 as well as ZeRO-Offload, ZeRO-Infinity (which can offload to disk/NVMe) and ZeRO++.
|
||||
Below is a short description of Data Parallelism using ZeRO - Zero Redundancy Optimizer along with diagram from this [blog post](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/)
|
||||

|
||||
|
||||
@@ -60,6 +61,8 @@ Below is a short description of Data Parallelism using ZeRO - Zero Redundancy Op
|
||||
|
||||
e. **Param Offload**: Offloads the model parameters to CPU/Disk building on top of ZERO Stage 3
|
||||
|
||||
f. **Hierarchical Partitioning**: Enables efficient multi-node training with data-parallel training across nodes and ZeRO-3 sharding within a node, built on top of ZeRO Stage 3.
|
||||
|
||||
<u>Note</u>: With respect to Disk Offload, the disk should be an NVME for decent speed but it technically works on any Disk
|
||||
|
||||
Inference:
|
||||
@ -349,6 +352,27 @@ accelerate launch examples/by_feature/deepspeed_with_config_support.py \
|
||||
--report_to "wandb"\
|
||||
```
|
||||
|
||||
**ZeRO++ Config Example**
|
||||
You can use the features of ZeRO++ by setting the appropriate config parameters. Note that ZeRO++ is an extension of ZeRO Stage 3. Here is how the config file can be modified, based on [DeepSpeed's ZeRO++ tutorial](https://www.deepspeed.ai/tutorials/zeropp/):
|
||||
|
||||
```json
|
||||
{
|
||||
"zero_optimization": {
|
||||
"stage": 3,
|
||||
"reduce_bucket_size": "auto",
|
||||
|
||||
"zero_quantized_weights": true,
|
||||
"zero_hpz_partition_size": 8,
|
||||
"zero_quantized_gradients": true,
|
||||
|
||||
"contiguous_gradients": true,
|
||||
"overlap_comm": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
For hierarchical partitioning, the partition size `zero_hpz_partition_size` should ideally be set to the number of GPUs per node (for example, the above config file assumes 8 GPUs per node).
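If you drive DeepSpeed from code rather than through `accelerate config`, a minimal sketch of pointing 🤗 Accelerate at such a config could look like the following; the file name `zeropp_config.json` is a hypothetical path to the JSON above.

```python
from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin

# "zeropp_config.json" is assumed to hold the ZeRO++ settings shown above.
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config="zeropp_config.json")
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)
```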
|
||||
|
||||
**Important code changes when using DeepSpeed Config File**
|
||||
|
||||
1. DeepSpeed Optimizers and Schedulers. For more information on these,
|
||||
@ -683,6 +707,8 @@ Papers:
|
||||
- [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054)
|
||||
- [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840)
|
||||
- [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857)
|
||||
- [ZeRO++: Extremely Efficient Collective Communication for Giant Model Training](https://arxiv.org/abs/2306.10209)
|
||||
|
||||
|
||||
Finally, please, remember that 🤗 `Accelerate` only integrates DeepSpeed, therefore if you
|
||||
have any problems or questions with regards to DeepSpeed usage, please, file an issue with [DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues).
|
||||
|
||||
@ -51,7 +51,7 @@ def run_inference(rank, world_size):
|
||||
One will notice how we have to check the rank to know what prompt to send, which can be a bit tedious.
|
||||
|
||||
A user might then also think that with 🤗 Accelerate, using the `Accelerator` to prepare a dataloader for such a task might also be
|
||||
a simple way to manage this. (To learn more, check out the relvent section in the [Quick Tour](../quicktour#distributed-evaluation))
|
||||
a simple way to manage this. (To learn more, check out the relevant section in the [Quick Tour](../quicktour#distributed-evaluation))
|
||||
|
||||
Can it manage it? Yes. Does it add unneeded extra code however: also yes.
|
||||
|
||||
|
||||
@ -16,7 +16,7 @@ rendered properly in your Markdown viewer.
|
||||
# Learning how to incorporate 🤗 Accelerate features quickly!
|
||||
|
||||
Please use the interactive tool below to help you get started with learning about a particular
|
||||
feature of 🤗 Accelerate and how to utilize it! It will provide you with a code diff, an explaination
|
||||
feature of 🤗 Accelerate and how to utilize it! It will provide you with a code diff, an explanation
|
||||
towards what is going on, as well as provide you with some useful links to explore more within
|
||||
the documentation!
|
||||
|
||||
|
||||
@ -40,23 +40,30 @@ For instance, here is how you would run the NLP example (from the root of the re
|
||||
|
||||
```bash
|
||||
compute_environment: LOCAL_MACHINE
|
||||
deepspeed_config: {}
|
||||
debug: false
|
||||
distributed_type: FSDP
|
||||
downcast_bf16: 'no'
|
||||
fsdp_config:
|
||||
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
||||
fsdp_backward_prefetch_policy: BACKWARD_PRE
|
||||
fsdp_cpu_ram_efficient_loading: true
|
||||
fsdp_forward_prefetch: false
|
||||
fsdp_offload_params: false
|
||||
fsdp_sharding_strategy: 1
|
||||
fsdp_state_dict_type: FULL_STATE_DICT
|
||||
fsdp_state_dict_type: SHARDED_STATE_DICT
|
||||
fsdp_sync_module_states: true
|
||||
fsdp_transformer_layer_cls_to_wrap: BertLayer
|
||||
fsdp_use_orig_params: true
|
||||
machine_rank: 0
|
||||
main_process_ip: null
|
||||
main_process_port: null
|
||||
main_training_function: main
|
||||
mixed_precision: 'no'
|
||||
mixed_precision: bf16
|
||||
num_machines: 1
|
||||
num_processes: 2
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_env: []
|
||||
tpu_use_cluster: false
|
||||
tpu_use_sudo: false
|
||||
use_cpu: false
|
||||
```
|
||||
|
||||
@ -66,7 +73,7 @@ accelerate launch examples/nlp_example.py
|
||||
|
||||
Currently, `Accelerate` supports the following config through the CLI:
|
||||
|
||||
```bash
|
||||
|
||||
`Sharding Strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has full copy)
|
||||
|
||||
`Offload Params`: Decides whether to offload parameters and gradients to CPU
|
||||
@ -94,10 +101,12 @@ all-gather while executing in the forward pass. only use with Static graphs.
|
||||
|
||||
`Use Orig Params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters.
|
||||
Useful in cases such as parameter-efficient fine-tuning.
|
||||
Please refer this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019)
|
||||
Please refer to this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). This also enables you to have different optimizer param groups. This should be `True` when creating the optimizer object before preparing/wrapping the model with FSDP.
|
||||
|
||||
`CPU RAM Efficient Model loading`: If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. Only applicable for 🤗 Transformers models. This should be set to False if you experience errors when loading the pretrained 🤗 Transformers model via the `from_pretrained` method. When using this, `Sync Module States` needs to be True, else all processes except the main process would have random empty weights, leading to unexpected behaviour during training.
|
||||
|
||||
`Sync Module States`: If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0
|
||||
```
|
||||
|
||||
|
||||
For additional and more nuanced control, you can specify other FSDP parameters via `FullyShardedDataParallelPlugin`.
|
||||
When creating the `FullyShardedDataParallelPlugin` object, pass it the parameters that weren't part of the accelerate config or that you want to override.
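As a rough sketch (the particular values are illustrative rather than recommendations), this can look like:

```python
from torch.distributed.fsdp.fully_sharded_data_parallel import FullOptimStateDictConfig, FullStateDictConfig

from accelerate import Accelerator, FullyShardedDataParallelPlugin

fsdp_plugin = FullyShardedDataParallelPlugin(
    state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),
    optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False),
)

accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```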
|
||||
@ -160,66 +169,13 @@ When using transformers `save_pretrained`, pass `state_dict=accelerator.get_stat
|
||||
|
||||
### State Dict
|
||||
|
||||
`accelerator.get_state_dict` will call the underlying `model.state_dict` implementation. With a model wrapped by FSDP, the default behavior of `state_dict` is to gather all of the state in the rank 0 device. This can cause CUDA out of memory errors if the parameters don't fit on a single GPU.
|
||||
|
||||
To avoid this, PyTorch provides a context manager that adjusts the behavior of `state_dict`. To offload some of the state dict onto CPU, you can use the following code:
|
||||
|
||||
```
|
||||
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
|
||||
|
||||
full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
|
||||
with FSDP.state_dict_type(unwrapped_model, StateDictType.FULL_STATE_DICT, full_state_dict_config):
|
||||
state = accelerator.get_state_dict(unwrapped_model)
|
||||
```
|
||||
`accelerator.get_state_dict` will call the underlying `model.state_dict` implementation using the `FullStateDictConfig(offload_to_cpu=True, rank0_only=True)` context manager to get the state dict only for rank 0, and it will be offloaded to the CPU.
|
||||
|
||||
You can then pass `state` into the `save_pretrained` method. There are several modes for `StateDictType` and `FullStateDictConfig` that you can use to control the behavior of `state_dict`. For more information, see the [PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html).
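A short sketch of that pattern, assuming a 🤗 Transformers model prepared with the `Accelerator` and a hypothetical output directory:

```python
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
    "fsdp_checkpoint",  # hypothetical output directory
    is_main_process=accelerator.is_main_process,
    save_function=accelerator.save,
    state_dict=accelerator.get_state_dict(model),
)
```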
|
||||
|
||||
## A few caveats to be aware of
|
||||
|
||||
- PyTorch FSDP auto wraps sub-modules, flattens the parameters and shards the parameters in place.
|
||||
Due to this, any optimizer created before model wrapping gets broken and occupies more memory.
|
||||
Hence, it is highly recommended and efficient to prepare the model before creating the optimizer.
|
||||
`Accelerate` will automatically wrap the model and create an optimizer for you in case of single model with a warning message.
|
||||
> FSDP Warning: When using FSDP, it is efficient and recommended to call prepare for the model before creating the optimizer
|
||||
|
||||
However, below is the recommended way to prepare model and optimizer while using FSDP:
|
||||
|
||||
```diff
|
||||
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
|
||||
+ model = accelerator.prepare(model)
|
||||
|
||||
optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)
|
||||
|
||||
- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
- )
|
||||
|
||||
+ optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
+ optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
+ )
|
||||
```
|
||||
|
||||
- In case of a single model, if you have created the optimizer with multiple parameter groups and called prepare with them together,
|
||||
then the parameter groups will be lost and the following warning is displayed:
|
||||
> FSDP Warning: When using FSDP, several parameter groups will be conflated into
|
||||
> a single one due to nested module wrapping and parameter flattening.
|
||||
|
||||
This is because parameter groups created before wrapping will have no meaning post wrapping due to parameter flattening of nested FSDP modules into 1D arrays (which can consume many layers).
|
||||
For instance, below are the named parameters of an FSDP model on GPU 0 (when using 2 GPUs: around 55M (110M/2) params in 1D arrays, as this holds the 1st shard of the parameters).
|
||||
Here, if one has applied no weight decay for [bias, LayerNorm.weight] the named parameters of an unwrapped BERT model,
|
||||
it can't be applied to the below FSDP wrapped model as there are no named parameters with either of those strings and
|
||||
the parameters of those layers are concatenated with parameters of various other layers.
|
||||
```
|
||||
{
|
||||
'_fsdp_wrapped_module.flat_param': torch.Size([494209]),
|
||||
'_fsdp_wrapped_module._fpw_module.bert.embeddings.word_embeddings._fsdp_wrapped_module.flat_param': torch.Size([11720448]),
|
||||
'_fsdp_wrapped_module._fpw_module.bert.encoder._fsdp_wrapped_module.flat_param': torch.Size([42527232])
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
- In case of multiple models, it is necessary to prepare the models before creating optimizers or else it will throw an error.
|
||||
Then pass the optimizers to the prepare call in the same order as corresponding models else `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour.
|
||||
- In case of multiple models, pass the optimizers to the prepare call in the same order as the corresponding models, else `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour.
|
||||
- This feature is incompatible with `--predict_with_generate` in the `run_translation.py` script of 🤗 `Transformers` library.
|
||||
|
||||
For more control, users can leverage the `FullyShardedDataParallelPlugin`. After creating an instance of this class, users can pass it to the Accelerator class instantiation.
|
||||
|
||||
@ -118,8 +118,24 @@ You can remove all the special checks for the step number and the loss adjustmen
|
||||
As you can see the [`Accelerator`] is able to keep track of the batch number you are on and it will automatically know whether to step through the prepared optimizer and how to adjust the loss.
|
||||
|
||||
<Tip>
|
||||
|
||||
Typically with gradient accumulation, you would need to adjust the number of steps to reflect the change in total batches you are
|
||||
training on. 🤗 Accelerate automagically does this for you by default. Behind the scenes we instantiate a GradientAccumulationPlugin configured to do this.
|
||||
training on. 🤗 Accelerate automagically does this for you by default. Behind the scenes we instantiate a [`GradientAccumulationPlugin`] configured to do this.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
The [`state.GradientState`] is sync'd with the active dataloader being iterated upon. As such it assumes naively that when we have reached the end of the dataloader everything will sync and a step will be performed. To disable this, set `sync_with_dataloader` to be `False` in the [`GradientAccumulationPlugin`]:
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import GradientAccumulationPlugin
|
||||
|
||||
plugin = GradientAccumulationPlugin(sync_with_dataloader=False)
|
||||
accelerator = Accelerator(..., gradient_accumulation_plugin=plugin)
|
||||
```
|
||||
|
||||
</Tip>
|
||||
|
||||
## The finished code
|
||||
|
||||
@ -128,7 +128,7 @@ Do you want to enable Sequence Parallelism? [YES/no]:
|
||||
What is the Pipeline Parallelism degree/size? [1]:2
|
||||
What is the number of micro-batches? [1]:2
|
||||
Do you want to enable selective activation recomputation? [YES/no]:
|
||||
Do you want to use distributed optimizer which shards optimizer state and gradients across data pralellel ranks? [YES/no]:
|
||||
Do you want to use distributed optimizer which shards optimizer state and gradients across data parallel ranks? [YES/no]:
|
||||
What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]:
|
||||
How many GPU(s) should be used for distributed training? [1]:4
|
||||
Do you wish to use FP16 or BF16 (mixed precision)? [NO/fp16/bf16]: bf16
|
||||
@ -355,8 +355,8 @@ def main():
|
||||
|
||||
2. For using the Megatron-LM datasets, a few more changes are required. Dataloaders for these datasets
|
||||
are available only on rank 0 of each tensor parallel group. As such, there are ranks where the dataloader won't be
|
||||
avaiable and this requires tweaks to the training loop. Being able to do all this shows how
|
||||
felixble and extensible 🤗 Accelerate is. The changes required are as follows.
|
||||
available and this requires tweaks to the training loop. Being able to do all this shows how
|
||||
flexible and extensible 🤗 Accelerate is. The changes required are as follows.
|
||||
|
||||
a. For Megatron-LM indexed datasets, we need to use `MegatronLMDummyDataLoader`
|
||||
and pass the required dataset args to it such as `data_path`, `seq_length` etc.
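A rough sketch of what this can look like (the argument names mirror the example script and are assumptions here, not a complete recipe):

```python
from accelerate.utils import MegatronLMDummyDataLoader

megatron_dataloader_config = {
    "data_path": args.data_path,
    "splits_string": args.splits_string,
    "seq_length": args.block_size,
    "micro_batch_size": args.per_device_train_batch_size,
}
megatron_dataloader = MegatronLMDummyDataLoader(**megatron_dataloader_config)
```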
|
||||
@ -547,7 +547,7 @@ The `model(**batch_data)` call return loss(es) averaged across the data parallel
|
||||
This is fine for most cases wherein pre-training jobs are run using Megatron-LM features and
|
||||
you can easily compute the `perplexity` using the loss.
|
||||
For GPT model, returning logits in addition to loss(es) is supported.
|
||||
These logits aren't gathered across data prallel ranks. Use `accelerator.utils.gather_across_data_parallel_groups`
|
||||
These logits aren't gathered across data parallel ranks. Use `accelerator.utils.gather_across_data_parallel_groups`
|
||||
to gather logits across data parallel ranks. These logits along with labels can be used for computing various
|
||||
performance metrics.
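For example, a minimal sketch (the import path is an assumption based on the utility name above; `logits` and `labels` are whatever your evaluation loop produces on each data-parallel rank):

```python
from accelerate.utils import gather_across_data_parallel_groups

# Gather per-rank outputs across the data-parallel group before computing metrics.
gathered_logits = gather_across_data_parallel_groups(logits)
gathered_labels = gather_across_data_parallel_groups(labels)
```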
|
||||
|
||||
|
||||
@ -1,58 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Memory Utilities
|
||||
|
||||
One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory",
|
||||
as the entire script needs to be restarted, progress is lost, and typically a developer would want to simply
|
||||
start their script and let it run.
|
||||
|
||||
`Accelerate` provides a utility heavily based on [toma](https://github.com/BlackHC/toma) to give this capability.
|
||||
|
||||
## find_executable_batch_size
|
||||
|
||||
This algorithm operates with exponential decay, halving the batch size after each failed run of the
|
||||
training script. To use it, restructure your training function to include an inner function that includes this wrapper,
|
||||
and build your dataloaders inside it. At a minimum, this could look like 4 new lines of code.
|
||||
> Note: The inner function *must* take in the batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for us
|
||||
|
||||
It should also be noted that anything which will consume CUDA memory and is passed to the `accelerator` **must** be declared inside the inner function,
|
||||
such as models and optimizers.
|
||||
|
||||
```diff
|
||||
def training_function(args):
|
||||
accelerator = Accelerator()
|
||||
|
||||
+ @find_executable_batch_size(starting_batch_size=args.batch_size)
|
||||
+ def inner_training_loop(batch_size):
|
||||
+ nonlocal accelerator # Ensure they can be used in our context
|
||||
+ accelerator.free_memory() # Free all lingering references
|
||||
model = get_model()
|
||||
model.to(accelerator.device)
|
||||
optimizer = get_optimizer()
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
lr_scheduler = get_scheduler(
|
||||
optimizer,
|
||||
num_training_steps=len(train_dataloader)*num_epochs
|
||||
)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
train(model, optimizer, train_dataloader, lr_scheduler)
|
||||
validate(model, eval_dataloader)
|
||||
+ inner_training_loop()
|
||||
```
|
||||
|
||||
To find out more, check the documentation [here](../package_reference/utilities#accelerate.find_executable_batch_size).
|
||||
@ -32,6 +32,27 @@ Currently we support searching for models that can be used in `timm` and `transf
|
||||
|
||||
</Tip>
|
||||
|
||||
## Gradio Demos
|
||||
|
||||
Below are a few gradio demos related to what was described above. The first is the official Hugging Face memory estimation space, utilizing Accelerate directly:
|
||||
|
||||
<div class="block dark:hidden">
|
||||
<iframe
|
||||
src="https://hf-accelerate-model-memory-usage.hf.space?__theme=light"
|
||||
width="850"
|
||||
height="1600"
|
||||
></iframe>
|
||||
</div>
|
||||
<div class="hidden dark:block">
|
||||
<iframe
|
||||
src="https://hf-accelerate-model-memory-usage.hf.space?__theme=dark"
|
||||
width="850"
|
||||
height="1600"
|
||||
></iframe>
|
||||
</div>
|
||||
|
||||
A community member has taken the idea and expanded it further, allowing you to filter models directly and see if you can run a particular LLM given GPU constraints and LoRA configurations. To play with it, see [here](https://huggingface.co/spaces/Vokturz/can-it-run-llm) for more details.
|
||||
|
||||
## The Command
|
||||
|
||||
When using `accelerate estimate-memory`, you need to pass in the name of the model you want to use, potentially the framework
|
||||
@ -113,9 +134,4 @@ This calculator will tell you how much memory is needed to purely load the model
|
||||
This calculation is accurate within a few % of the actual value, so it is a very good view of just how much memory it will take. For instance, loading `bert-base-cased` actually takes `413.68 MB` when loaded on CUDA in full precision, and the calculator estimates `413.18 MB`.
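For reference, a sketch of the kind of command that produces such an estimate (`--library_name` narrows the lookup to a single framework):

```bash
accelerate estimate-memory bert-base-cased --library_name transformers
```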
|
||||
|
||||
When performing inference you can expect to add up to an additional 20%, as found by [EleutherAI](https://blog.eleuther.ai/transformer-math/). We'll be conducting research into finding a more accurate estimate of these values, and will update
|
||||
this calculator once done.
|
||||
|
||||
## Live Gradio Demo
|
||||
|
||||
Lastly, we invite you to try the [live Gradio demo](https://huggingface.co/spaces/hf-accelerate/model-memory-usage) of this utility,
|
||||
which includes an option to post a discussion thread on a models repository with this data. Doing so will help provide access to these numbers in the community faster and help users know what you've learned!
|
||||
this calculator once done.
|
||||
@ -20,12 +20,15 @@ There are a large number of experiment tracking API's available, however getting
|
||||
|
||||
## Integrated Trackers
|
||||
|
||||
Currently `Accelerate` supports four trackers out-of-the-box:
|
||||
Currently `Accelerate` supports seven trackers out-of-the-box:
|
||||
|
||||
- TensorBoard
|
||||
- WandB
|
||||
- CometML
|
||||
- Aim
|
||||
- MLFlow
|
||||
- ClearML
|
||||
- DVCLive
|
||||
|
||||
To use any of them, pass in the selected type(s) to the `log_with` parameter in [`Accelerate`]:
|
||||
```python
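# A minimal sketch (assumed here, not the guide's full snippet): one or more of the
# integrations listed above can be passed to `log_with`.
from accelerate import Accelerator

accelerator = Accelerator(log_with=["tensorboard", "wandb"])
accelerator.init_trackers("my_project", config={"learning_rate": 1e-4})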
|
||||
|
||||
@ -15,7 +15,7 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# Example Zoo
|
||||
|
||||
Below contains a non-exhuastive list of tutorials and scripts showcasing 🤗 Accelerate
|
||||
Below contains a non-exhaustive list of tutorials and scripts showcasing 🤗 Accelerate
|
||||
|
||||
## Official Accelerate Examples:
|
||||
|
||||
@ -154,12 +154,12 @@ Below contains a non-exhaustive list of papers utilizing 🤗 Accelerate.
|
||||
* Puijin Cheng, Li Lin, Yijin Huang, Huaqing He, Wenhan Luo, Xiaoying Tang: “Learning Enhancement From Degradation: A Diffusion Model For Fundus Image Enhancement”, 2023; [arXiv:2303.04603](http://arxiv.org/abs/2303.04603).
|
||||
* Shun Shao, Yftah Ziser, Shay Cohen: “Erasure of Unaligned Attributes from Neural Representations”, 2023; [arXiv:2302.02997](http://arxiv.org/abs/2302.02997).
|
||||
* Seonghyeon Ye, Hyeonbin Hwang, Sohee Yang, Hyeongu Yun, Yireun Kim, Minjoon Seo: “In-Context Instruction Learning”, 2023; [arXiv:2302.14691](http://arxiv.org/abs/2302.14691).
|
||||
* Shikun Liu, Linxi Fan, Edward Johns, Zhiding Yu, Chaowei Xiao, Anima Anandkumar: “Prismer: A Vision-Language Model with An Ensemble of Experts”, 2023; [arXiv:2303.02506](http://arxiv.org/abs/2303.02506 ).
|
||||
* Shikun Liu, Linxi Fan, Edward Johns, Zhiding Yu, Chaowei Xiao, Anima Anandkumar: “Prismer: A Vision-Language Model with An Ensemble of Experts”, 2023; [arXiv:2303.02506](http://arxiv.org/abs/2303.02506).
|
||||
* Haoyu Chen, Zhihua Wang, Yang Yang, Qilin Sun, Kede Ma: “Learning a Deep Color Difference Metric for Photographic Images”, 2023; [arXiv:2303.14964](http://arxiv.org/abs/2303.14964).
|
||||
* Van-Hoang Le, Hongyu Zhang: “Log Parsing with Prompt-based Few-shot Learning”, 2023; [arXiv:2302.07435](http://arxiv.org/abs/2302.07435).
|
||||
* Keito Kudo, Yoichi Aoki, Tatsuki Kuribayashi, Ana Brassard, Masashi Yoshikawa, Keisuke Sakaguchi, Kentaro Inui: “Do Deep Neural Networks Capture Compositionality in Arithmetic Reasoning?”, 2023; [arXiv:2302.07866](http://arxiv.org/abs/2302.07866).
|
||||
* Ruoyao Wang, Peter Jansen, Marc-Alexandre Côté, Prithviraj Ammanabrolu: “Behavior Cloned Transformers are Neurosymbolic Reasoners”, 2022; [arXiv:2210.07382](http://arxiv.org/abs/2210.07382).
|
||||
* Martin Wessel, Tomáš Horych, Terry Ruas, Akiko Aizawa, Bela Gipp, Timo Spinde: “Introducing MBIB -- the first Media Bias Identification Benchmark Task and Dataset Collection”, 2023; [arXiv:2304.13148](http://arxiv.org/abs/2304.13148 ). DOI: [https://dx.doi.org/10.1145/3539618.3591882 10.1145/3539618.3591882].
|
||||
* Martin Wessel, Tomáš Horych, Terry Ruas, Akiko Aizawa, Bela Gipp, Timo Spinde: “Introducing MBIB -- the first Media Bias Identification Benchmark Task and Dataset Collection”, 2023; [arXiv:2304.13148](http://arxiv.org/abs/2304.13148). DOI: [https://dx.doi.org/10.1145/3539618.3591882 10.1145/3539618.3591882].
|
||||
* Hila Chefer, Yuval Alaluf, Yael Vinker, Lior Wolf, Daniel Cohen-Or: “Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models”, 2023; [arXiv:2301.13826](http://arxiv.org/abs/2301.13826).
|
||||
* Marcio Fonseca, Yftah Ziser, Shay B. Cohen: “Factorizing Content and Budget Decisions in Abstractive Summarization of Long Documents”, 2022; [arXiv:2205.12486](http://arxiv.org/abs/2205.12486).
|
||||
* Elad Richardson, Gal Metzer, Yuval Alaluf, Raja Giryes, Daniel Cohen-Or: “TEXTure: Text-Guided Texturing of 3D Shapes”, 2023; [arXiv:2302.01721](http://arxiv.org/abs/2302.01721).
|
||||
|
||||
@ -64,9 +64,9 @@ To run it in each of these various modes, use the following commands:
|
||||
accelerate config # This will create a config file on your server
|
||||
accelerate launch ./nlp_example.py # This will run the script on your server
|
||||
```
|
||||
* With traditional PyTorch launcher (`torch.distributed.launch` can be used with older versions of PyTorch)
|
||||
* With traditional PyTorch launcher (`python -m torch.distributed.run` can be used instead of `torchrun`)
|
||||
```bash
|
||||
python -m torchrun --nproc_per_node 2 --use_env ./nlp_example.py
|
||||
torchrun --nproc_per_node 2 ./nlp_example.py
|
||||
```
|
||||
- multi GPUs, multi node (several machines, using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher, on each machine:
|
||||
@ -74,18 +74,15 @@ To run it in each of these various modes, use the following commands:
|
||||
accelerate config # This will create a config file on each server
|
||||
accelerate launch ./nlp_example.py # This will run the script on each server
|
||||
```
|
||||
* With PyTorch launcher only (`torch.distributed.launch` can be used in older versions of PyTorch)
|
||||
* With PyTorch launcher only (`python -m torch.distributed.run` can be used instead of `torchrun`). Run this command on each node:
|
||||
```bash
|
||||
python -m torchrun --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 0 \
|
||||
--master_addr master_node_ip_address \
|
||||
./nlp_example.py # On the first server
|
||||
python -m torchrun --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 1 \
|
||||
--master_addr master_node_ip_address \
|
||||
./nlp_example.py # On the second server
|
||||
torchrun \ # python -m torch.distributed.run
|
||||
--nproc_per_node 2 \
|
||||
--nnodes 2 \
|
||||
--rdzv_id 2299 \ # A unique job id
|
||||
--rdzv_backend c10d \
|
||||
--rdzv_endpoint master_node_ip_address:29500 \
|
||||
./nlp_example.py
|
||||
```
|
||||
- (multi) TPUs
|
||||
* With Accelerate config and launcher
|
||||
@ -149,37 +146,34 @@ To run it in each of these various modes, use the following commands:
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher
|
||||
```bash
|
||||
accelerate config # This will create a config file on your server
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on your server
|
||||
accelerate config --config_file config.yaml # This will create a config file on your server to `config.yaml`
|
||||
accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on your server
|
||||
```
|
||||
* With traditional PyTorch launcher (`torch.distributed.launch` can be used with older versions of PyTorch)
|
||||
* With traditional PyTorch launcher (`python -m torch.distributed.run` can be used instead of `torchrun`)
|
||||
```bash
|
||||
python -m torchrun --nproc_per_node 2 --use_env ./cv_example.py --data_dir path_to_data
|
||||
torchrun --nproc_per_node 2 ./cv_example.py --data_dir path_to_data
|
||||
```
|
||||
- multi GPUs, multi node (several machines, using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher, on each machine:
|
||||
```bash
|
||||
accelerate config # This will create a config file on each server
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
accelerate config --config_file config.yaml # This will create a config file on your server to `config.yaml`
|
||||
accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
```
|
||||
* With PyTorch launcher only (`torch.distributed.launch` can be used with older versions of PyTorch)
|
||||
* With PyTorch launcher only (`python -m torch.distributed.run` can be used instead of `torchrun`). Run this command on each node:
|
||||
```bash
|
||||
python -m torchrun --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 0 \
|
||||
--master_addr master_node_ip_address \
|
||||
./cv_example.py --data_dir path_to_data # On the first server
|
||||
python -m torchrun --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 1 \
|
||||
--master_addr master_node_ip_address \
|
||||
./cv_example.py --data_dir path_to_data # On the second server
|
||||
torchrun \ # python -m torch.distributed.run
|
||||
--nproc_per_node 2 \
|
||||
--nnodes 2 \
|
||||
--rdzv_id 2299 \ # A unique job id
|
||||
--rdzv_backend c10d \
|
||||
--rdzv_endpoint master_node_ip_address:29500 \
|
||||
./cv_example.py --data_dir path_to_data
|
||||
```
|
||||
- (multi) TPUs
|
||||
* With Accelerate config and launcher
|
||||
```bash
|
||||
accelerate config # This will create a config file on your TPU server
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
accelerate config --config_file config.yaml # This will create a config file on your server to `config.yaml`
|
||||
accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
```
|
||||
* In PyTorch:
|
||||
Add an `xmp.spawn` line in your script as you usually do.
|
||||
@ -206,6 +200,13 @@ with `pip install runhouse`, and you can refer to
|
||||
for hardware setup instructions, or this
|
||||
[Colab tutorial](https://colab.research.google.com/drive/1qVwYyLTCPYPSdz9ZX7BZl9Qm0A3j7RJe) for a more in-depth walkthrough.
|
||||
|
||||
## SLURM Scripts
|
||||
In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) and [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we present two scripts for running the examples on a machine with the [SLURM](https://slurm.schedmd.com/documentation.html) workload manager.
|
||||
|
||||
In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) the only parameter in the launcher that needs to be modified is `--num_processes`, which determines the number of GPUs we will use. In this case, using the environment variable `$SLURM_GPUS`, we indicate that we want to utilize all the GPUs available on the node we have requested.
|
||||
|
||||
In [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many GPUs we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip`, which will be the address of the master node, and the `--main_process_port`.
|
||||
|
||||
## Finer Examples
|
||||
|
||||
While the first two scripts are extremely barebones when it comes to what you can do with accelerate, more advanced features are documented in two other locations.
|
||||
|
||||
@ -220,7 +220,7 @@ def parse_args():
|
||||
default="all",
|
||||
help=(
|
||||
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
|
||||
' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
|
||||
' `"wandb"`, `"comet_ml"`, and `"dvclive"`. Use `"all"` (default) to report to all integrations.'
|
||||
"Only applicable when `--with_tracking` is passed."
|
||||
),
|
||||
)
|
||||
|
||||
@ -247,16 +247,19 @@ def training_function(config, args):
|
||||
args.model_name_or_path, return_dict=True, low_cpu_mem_usage=True
|
||||
)
|
||||
|
||||
# New Code #
|
||||
# For FSDP feature, it is highly recommended and efficient to prepare the model before creating optimizer
|
||||
model = accelerator.prepare(model)
|
||||
accelerator.print(model)
|
||||
no_decay = ["bias", "LayerNorm.weight"]
|
||||
optimizer_grouped_parameters = [
|
||||
{
|
||||
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
|
||||
"weight_decay": 0.003,
|
||||
},
|
||||
{
|
||||
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
|
||||
"weight_decay": 0.0,
|
||||
},
|
||||
]
|
||||
|
||||
# Instantiate optimizer
|
||||
# New Code #
|
||||
# For FSDP feature, at present it doesn't support multiple parameter groups,
|
||||
# so we need to create a single parameter group for the whole model
|
||||
optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr, weight_decay=2e-4)
|
||||
optimizer = torch.optim.AdamW(params=optimizer_grouped_parameters, lr=lr, weight_decay=2e-4)
|
||||
|
||||
# Instantiate scheduler
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
@ -265,13 +268,8 @@ def training_function(config, args):
|
||||
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
|
||||
)
|
||||
|
||||
# New Code #
|
||||
# For FSDP feature, prepare everything except the model as we have already prepared the model
|
||||
# before creating the optimizer
|
||||
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
|
||||
# prepare method.
|
||||
optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
|
||||
overall_step = 0
|
||||
|
||||
@ -216,7 +216,7 @@ def parse_args():
|
||||
default="all",
|
||||
help=(
|
||||
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
|
||||
' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
|
||||
' `"wandb"`, `"comet_ml"`, and `"dvclive"`. Use `"all"` (default) to report to all integrations.'
|
||||
"Only applicable when `--with_tracking` is passed."
|
||||
),
|
||||
)
|
||||
|
||||
@ -11,7 +11,7 @@ def launch_train(*args):
|
||||
num_processes = torch.cuda.device_count()
|
||||
print(f"Device count: {num_processes}")
|
||||
with patch_environment(
|
||||
world_size=num_processes, master_addr="127.0.01", master_port="29500", mixed_precision=args[1].mixed_precision
|
||||
world_size=num_processes, master_addr="127.0.0.1", master_port="29500", mixed_precision=args[1].mixed_precision
|
||||
):
|
||||
launcher = PrepareForLaunch(training_function, distributed_type="MULTI_GPU")
|
||||
torch.multiprocessing.start_processes(launcher, args=args, nprocs=num_processes, start_method="spawn")
|
||||
|
||||
examples/slurm/submit_multigpu.sh (new file)
@ -0,0 +1,27 @@
|
||||
#!/bin/bash
|
||||
|
||||
#SBATCH --job-name=multigpu
|
||||
#SBATCH -D .
|
||||
#SBATCH --output=O-%x.%j
|
||||
#SBATCH --error=E-%x.%j
|
||||
#SBATCH --nodes=1
|
||||
#SBATCH --ntasks-per-node=1 # number of MP tasks
|
||||
#SBATCH --gres=gpu:4 # number of GPUs per node
|
||||
#SBATCH --cpus-per-task=160 # number of cores per tasks
|
||||
#SBATCH --time=01:59:00 # maximum execution time (HH:MM:SS)
|
||||
|
||||
######################
|
||||
### Set environment ###
|
||||
######################
|
||||
source activateEnviroment.sh
|
||||
export GPUS_PER_NODE=4
|
||||
######################
|
||||
|
||||
export SCRIPT=/accelerate/examples/complete_nlp_example.py
|
||||
export SCRIPT_ARGS=" \
|
||||
--mixed_precision fp16 \
|
||||
--output_dir /accelerate/examples/output \
|
||||
--with_tracking \
|
||||
"
|
||||
|
||||
accelerate launch --num_processes $GPUS_PER_NODE $SCRIPT $SCRIPT_ARGS
|
||||
examples/slurm/submit_multinode.sh (new file)
@ -0,0 +1,41 @@
|
||||
#!/bin/bash
|
||||
|
||||
#SBATCH --job-name=multinode
|
||||
#SBATCH -D .
|
||||
#SBATCH --output=O-%x.%j
|
||||
#SBATCH --error=E-%x.%j
|
||||
#SBATCH --nodes=4 # number of nodes
|
||||
#SBATCH --ntasks-per-node=1 # number of MP tasks
|
||||
#SBATCH --gres=gpu:4 # number of GPUs per node
|
||||
#SBATCH --cpus-per-task=160 # number of cores per tasks
|
||||
#SBATCH --time=01:59:00 # maximum execution time (HH:MM:SS)
|
||||
|
||||
######################
|
||||
### Set environment ###
|
||||
######################
|
||||
source activateEnviroment.sh
|
||||
export GPUS_PER_NODE=4
|
||||
######################
|
||||
|
||||
######################
|
||||
#### Set network #####
|
||||
######################
|
||||
head_node_ip=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
|
||||
######################
|
||||
|
||||
export LAUNCHER="accelerate launch \
|
||||
--num_processes $((SLURM_NNODES * GPUS_PER_NODE)) \
|
||||
--num_machines $SLURM_NNODES \
|
||||
--rdzv_backend c10d \
|
||||
--main_process_ip $head_node_ip \
|
||||
--main_process_port 29500 \
|
||||
"
|
||||
export SCRIPT="/accelerate/examples/complete_nlp_example.py"
|
||||
export SCRIPT_ARGS=" \
|
||||
--mixed_precision fp16 \
|
||||
--output_dir /accelerate/examples/output \
|
||||
"
|
||||
|
||||
# This step is necessary because accelerate launch does not handle multiline arguments properly
|
||||
export CMD="$LAUNCHER $SCRIPT $SCRIPT_ARGS"
|
||||
srun $CMD
|
||||
setup.py
@ -19,11 +19,13 @@ extras = {}
|
||||
extras["quality"] = ["black ~= 23.1", "ruff >= 0.0.241", "hf-doc-builder >= 0.3.0", "urllib3 < 2.0.0"]
|
||||
extras["docs"] = []
|
||||
extras["test_prod"] = ["pytest", "pytest-xdist", "pytest-subtests", "parameterized"]
|
||||
extras["test_dev"] = ["datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed", "tqdm", "bitsandbytes", "timm"]
|
||||
extras["test_dev"] = [
|
||||
"datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed", "tqdm", "bitsandbytes", "timm"
|
||||
]
|
||||
extras["testing"] = extras["test_prod"] + extras["test_dev"]
|
||||
extras["rich"] = ["rich"]
|
||||
|
||||
extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard"]
|
||||
extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard", "dvclive"]
|
||||
extras["dev"] = extras["quality"] + extras["testing"] + extras["rich"]
|
||||
|
||||
extras["sagemaker"] = [
|
||||
@ -32,7 +34,7 @@ extras["sagemaker"] = [
|
||||
|
||||
setup(
|
||||
name="accelerate",
|
||||
version="0.24.0.dev0",
|
||||
version="0.25.0",
|
||||
description="Accelerate",
|
||||
long_description=open("README.md", "r", encoding="utf-8").read(),
|
||||
long_description_content_type="text/markdown",
|
||||
@ -52,7 +54,7 @@ setup(
|
||||
]
|
||||
},
|
||||
python_requires=">=3.8.0",
|
||||
install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.10.0", "huggingface_hub"],
|
||||
install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.10.0", "huggingface_hub", "safetensors>=0.3.1"],
|
||||
extras_require=extras,
|
||||
classifiers=[
|
||||
"Development Status :: 5 - Production/Stable",
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
__version__ = "0.24.0.dev0"
|
||||
__version__ = "0.25.0"
|
||||
|
||||
from .accelerator import Accelerator
|
||||
from .big_modeling import (
|
||||
|
||||
@ -14,7 +14,6 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import collections
|
||||
import contextlib
|
||||
import functools
|
||||
import json
|
||||
@ -35,6 +34,7 @@ import torch.utils.hooks as hooks
|
||||
|
||||
from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state
|
||||
from .data_loader import DataLoaderDispatcher, prepare_data_loader, skip_first_batches
|
||||
from .hooks import AlignDevicesHook
|
||||
from .logging import get_logger
|
||||
from .optimizer import AcceleratedOptimizer
|
||||
from .scheduler import AcceleratedScheduler
|
||||
@ -63,6 +63,8 @@ from .utils import (
|
||||
ProjectConfiguration,
|
||||
RNGType,
|
||||
TorchDynamoPlugin,
|
||||
check_os_kernel,
|
||||
clean_state_dict_for_safetensors,
|
||||
compare_versions,
|
||||
convert_model,
|
||||
convert_outputs_to_fp32,
|
||||
@ -72,14 +74,12 @@ from .utils import (
|
||||
get_mixed_precision_context_manager,
|
||||
get_pretty_name,
|
||||
has_transformer_engine_layers,
|
||||
id_tensor_storage,
|
||||
is_bf16_available,
|
||||
is_deepspeed_available,
|
||||
is_fp8_available,
|
||||
is_ipex_available,
|
||||
is_megatron_lm_available,
|
||||
is_npu_available,
|
||||
is_safetensors_available,
|
||||
is_torch_version,
|
||||
is_tpu_available,
|
||||
is_xpu_available,
|
||||
@ -97,6 +97,7 @@ from .utils import (
|
||||
wait_for_everyone,
|
||||
)
|
||||
from .utils.constants import FSDP_PYTORCH_VERSION
|
||||
from .utils.modeling import get_state_dict_offloaded_model
|
||||
from .utils.other import is_compiled_module
|
||||
|
||||
|
||||
@ -264,6 +265,7 @@ class Accelerator:
|
||||
kwargs_handlers: list[KwargsHandler] | None = None,
|
||||
dynamo_backend: DynamoBackend | str | None = None,
|
||||
):
|
||||
self.trackers = []
|
||||
if project_config is not None:
|
||||
self.project_configuration = project_config
|
||||
else:
|
||||
@ -469,6 +471,8 @@ class Accelerator:
|
||||
# Set a flag tensor for early stopping and other breakpoints
|
||||
self.flag_tensor = None
|
||||
|
||||
check_os_kernel()
|
||||
|
||||
@property
|
||||
def use_distributed(self):
|
||||
"""
|
||||
@ -1098,52 +1102,6 @@ class Accelerator:
|
||||
# Return the unprocessed object if previous criteria was not met
|
||||
return obj
|
||||
|
||||
def _prepare_fsdp(self, *args):
|
||||
result = []
|
||||
for obj in args:
|
||||
if isinstance(obj, torch.nn.Module):
|
||||
model = obj
|
||||
break
|
||||
optimizers = []
|
||||
|
||||
self._schedulers = []
|
||||
self._models = []
|
||||
intermediate_result = []
|
||||
for obj in args:
|
||||
if isinstance(obj, torch.optim.Optimizer):
|
||||
if len(obj.param_groups) > 1:
|
||||
logger.warning(
|
||||
"FSDP Warning: When using FSDP, several parameter groups will be conflated into "
|
||||
"a single one due to nested module wrapping and parameter flattening."
|
||||
)
|
||||
try:
|
||||
optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults)
|
||||
except TypeError:
|
||||
if "differentiable" in obj.optimizer.defaults:
|
||||
# https://github.com/huggingface/accelerate/issues/801
|
||||
defaults = {k: v for k, v in obj.optimizer.defaults.items() if k != "differentiable"}
|
||||
optimizer = obj.optimizer.__class__(model.parameters(), **defaults)
|
||||
else:
|
||||
raise
|
||||
obj = self.prepare_optimizer(optimizer)
|
||||
optimizers.append(obj)
|
||||
elif isinstance(obj, torch.nn.Module):
|
||||
self._models.append(obj)
|
||||
intermediate_result.append(obj)
|
||||
|
||||
for obj in intermediate_result:
|
||||
if isinstance(obj, AcceleratedScheduler):
|
||||
obj.optimizer = optimizers
|
||||
for i, opt in enumerate(self._optimizers):
|
||||
if getattr(obj.scheduler, "optimizer", None) == opt.optimizer:
|
||||
obj.scheduler.optimizer = optimizers[i]
|
||||
obj.optimizers = [optimizers[i]]
|
||||
break
|
||||
self._schedulers.append(obj)
|
||||
result.append(obj)
|
||||
self._optimizers = optimizers
|
||||
return tuple(result)
|
||||
|
||||
def prepare(self, *args, device_placement=None):
|
||||
"""
|
||||
Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same
|
||||
@ -1212,35 +1170,6 @@ class Accelerator:
|
||||
" Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
|
||||
)
|
||||
|
||||
if self.distributed_type == DistributedType.FSDP:
|
||||
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
|
||||
|
||||
model_count = 0
|
||||
optimizer_present = False
|
||||
is_type_fsdp = False
|
||||
for obj in args:
|
||||
if isinstance(obj, torch.nn.Module):
|
||||
model_count += 1
|
||||
# if the model is compiled using PyTorch 2.0,
|
||||
# check that the wrapped model is FSDP or not;
|
||||
# else check if it is FSDP or not;
|
||||
is_type_fsdp = isinstance(obj, FSDP) or (
|
||||
is_compiled_module(obj) and isinstance(obj._orig_mod, FSDP)
|
||||
)
|
||||
if isinstance(obj, torch.optim.Optimizer):
|
||||
optimizer_present = True
|
||||
if model_count > 1 and optimizer_present:
|
||||
raise ValueError(
|
||||
"For FSDP to work with multiple models (>1), "
|
||||
"prepare must be called for all the models before optimizers are created. "
|
||||
"Then pass the optimizers to the prepare call in the same order as corresponding models."
|
||||
)
|
||||
elif model_count == 1 and not is_type_fsdp and optimizer_present:
|
||||
logger.warning(
|
||||
"FSDP Warning: When using FSDP, "
|
||||
"it is efficient and recommended to call prepare for the model before creating the optimizer"
|
||||
)
|
||||
|
||||
if self.distributed_type == DistributedType.DEEPSPEED:
|
||||
model_count = 0
|
||||
for obj in args:
|
||||
@ -1296,14 +1225,6 @@ class Accelerator:
|
||||
if isinstance(obj, torch.optim.Optimizer):
|
||||
obj._switch_parameters(mapping)
|
||||
|
||||
if (
|
||||
self.distributed_type == DistributedType.FSDP
|
||||
and model_count == 1
|
||||
and not is_type_fsdp
|
||||
and optimizer_present
|
||||
):
|
||||
result = self._prepare_fsdp(*result)
|
||||
|
||||
for item in result:
|
||||
if any(
|
||||
item in container
|
||||
@ -1353,35 +1274,6 @@ class Accelerator:
|
||||
" Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
|
||||
)
|
||||
|
||||
if (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)) and getattr(
|
||||
model, "hf_device_map", False
|
||||
):
|
||||
model_devices = set(model.hf_device_map.values())
|
||||
if len(model_devices) > 1 and self.distributed_type != DistributedType.NO:
|
||||
raise ValueError(
|
||||
"You can't train a model that has been loaded in 8-bit precision on multiple devices in any distributed mode."
|
||||
" In order to use 8-bit models that have been loaded across multiple GPUs the solution is to use Naive Pipeline Parallelism."
|
||||
" Therefore you should not specify that you are under any distributed regime in your accelerate config."
|
||||
)
|
||||
current_device = list(model_devices)[0]
|
||||
current_device_index = current_device.index if isinstance(current_device, torch.device) else current_device
|
||||
|
||||
if torch.device(current_device_index) != self.device:
|
||||
# if on the first device (GPU 0) we don't care
|
||||
if (self.device.index is not None) or (current_device_index != 0):
|
||||
raise ValueError(
|
||||
"You can't train a model that has been loaded in 8-bit precision on a different device than the one "
|
||||
"you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device()}"
|
||||
"you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device() or device_map={'':torch.xpu.current_device()}"
|
||||
)
|
||||
|
||||
if "cpu" in model_devices or "disk" in model_devices:
|
||||
raise ValueError(
|
||||
"You can't train a model that has been loaded in 8-bit precision with CPU or disk offload."
|
||||
)
|
||||
elif device_placement and not self.verify_device_map(model):
|
||||
model = model.to(self.device)
|
||||
|
||||
if self.native_amp:
|
||||
model._original_forward = model.forward
|
||||
model_forward_func = model.forward.__func__ if hasattr(model.forward, "__func__") else model.forward
|
||||
@ -1412,6 +1304,34 @@ class Accelerator:
|
||||
"or higher, compute capability of 8.9 or higher). Will use FP16 instead."
|
||||
)
|
||||
model.forward = fp8_autocast(enabled=fp8_enabled, fp8_recipe=fp8_recipe)(model.forward)
|
||||
|
||||
if (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)) and getattr(
|
||||
model, "hf_device_map", False
|
||||
):
|
||||
model_devices = set(model.hf_device_map.values())
|
||||
if len(model_devices) > 1 and self.distributed_type != DistributedType.NO:
|
||||
raise ValueError(
|
||||
"You can't train a model that has been loaded in 8-bit precision on multiple devices in any distributed mode."
|
||||
" In order to use 8-bit models that have been loaded across multiple GPUs the solution is to use Naive Pipeline Parallelism."
|
||||
" Therefore you should not specify that you are under any distributed regime in your accelerate config."
|
||||
)
|
||||
current_device = list(model_devices)[0]
|
||||
current_device_index = current_device.index if isinstance(current_device, torch.device) else current_device
|
||||
|
||||
if torch.device(current_device_index) != self.device:
|
||||
# if on the first device (GPU 0) we don't care
|
||||
if (self.device.index is not None) or (current_device_index != 0):
|
||||
raise ValueError(
|
||||
"You can't train a model that has been loaded in 8-bit precision on a different device than the one "
|
||||
"you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device() or device_map={'':torch.xpu.current_device()}"
|
||||
)
|
||||
|
||||
if "cpu" in model_devices or "disk" in model_devices:
|
||||
raise ValueError(
|
||||
"You can't train a model that has been loaded in 8-bit precision with CPU or disk offload."
|
||||
)
|
||||
elif device_placement and not self.verify_device_map(model):
|
||||
model = model.to(self.device)
|
||||
if not evaluation_mode:
|
||||
if self.distributed_type in (
|
||||
DistributedType.MULTI_GPU,
|
||||
@ -2399,7 +2319,6 @@ class Accelerator:
|
||||
... )
|
||||
```
|
||||
"""
|
||||
self.trackers = []
|
||||
for tracker in self.log_with:
|
||||
if issubclass(type(tracker), GeneralTracker):
|
||||
# Custom trackers are already initialized
|
||||
@ -2441,7 +2360,7 @@ class Accelerator:
|
||||
>>> tensorboard_tracker = accelerator.get_tracker("tensorboard")
|
||||
```
|
||||
"""
|
||||
if len(getattr(self, "trackers", [])) > 0:
|
||||
if len(self.trackers) > 0:
|
||||
for tracker in self.trackers:
|
||||
if tracker.name == name:
|
||||
return tracker.tracker if unwrap else tracker
|
||||
@ -2508,6 +2427,10 @@ class Accelerator:
|
||||
f (`str` or `os.PathLike`): Where to save the content of `obj`.
|
||||
safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors`
|
||||
|
||||
Note:
|
||||
If `save_on_each_node` was passed in as a `ProjectConfiguration`, will save the object once per node,
|
||||
rather than only once on the main node.
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
@ -2518,14 +2441,19 @@ class Accelerator:
|
||||
>>> accelerator.save(arr, "array.pkl")
|
||||
```
|
||||
"""
|
||||
save(obj, f, safe_serialization=safe_serialization)
|
||||
save(
|
||||
obj,
|
||||
f,
|
||||
save_on_each_node=self.project_configuration.save_on_each_node,
|
||||
safe_serialization=safe_serialization,
|
||||
)
|
||||
|
||||
def save_model(
|
||||
self,
|
||||
model: torch.nn.Module,
|
||||
save_directory: Union[str, os.PathLike],
|
||||
max_shard_size: Union[int, str] = "10GB",
|
||||
safe_serialization: bool = False,
|
||||
safe_serialization: bool = True,
|
||||
):
|
||||
"""
|
||||
Save a model so that it can be re-loaded using load_checkpoint_in_model
|
||||
@ -2546,7 +2474,7 @@ class Accelerator:
|
||||
|
||||
</Tip>
|
||||
|
||||
safe_serialization (`bool`, *optional*, defaults to `False`):
|
||||
safe_serialization (`bool`, *optional*, defaults to `True`):
|
||||
Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
|
||||
|
||||
Example:
|
||||
@ -2560,9 +2488,6 @@ class Accelerator:
|
||||
```
|
||||
"""
|
||||
|
||||
if safe_serialization and not is_safetensors_available():
|
||||
raise ImportError("`safe_serialization` requires the `safetensors library: `pip install safetensors`.")
|
||||
|
||||
if os.path.isfile(save_directory):
|
||||
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
|
||||
return
|
||||
@ -2570,38 +2495,21 @@ class Accelerator:
|
||||
os.makedirs(save_directory, exist_ok=True)
|
||||
|
||||
# get the state_dict of the model
|
||||
state_dict = self.get_state_dict(model)
|
||||
if any(
|
||||
[
|
||||
module._hf_hook.offload
|
||||
for module in model.modules()
|
||||
if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook)
|
||||
]
|
||||
):
|
||||
state_dict = get_state_dict_offloaded_model(model)
|
||||
else:
|
||||
if any(param.device == torch.device("meta") for param in model.parameters()):
|
||||
raise RuntimeError("You can't save the model since some parameters are on the meta device.")
|
||||
state_dict = self.get_state_dict(model)
|
||||
|
||||
if safe_serialization:
|
||||
# Safetensors does not allow tensor aliasing.
|
||||
# We're going to remove aliases before saving
|
||||
ptrs = collections.defaultdict(list)
|
||||
# when bnb serialization is used the weights in the state dict can be strings
|
||||
for name, tensor in state_dict.items():
|
||||
if not isinstance(tensor, str):
|
||||
ptrs[id_tensor_storage(tensor)].append(name)
|
||||
|
||||
# These are all the pointers of shared tensors.
|
||||
shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
|
||||
warn_names = set()
|
||||
for names in shared_ptrs.values():
|
||||
# When not all duplicates have been cleaned, still remove those keys, but put a clear warning.
|
||||
# If the link between tensors was done at runtime then `from_pretrained` will not get
|
||||
# the key back leading to random tensor. A proper warning will be shown
|
||||
# during reload (if applicable), but since the file is not necessarily compatible with
|
||||
# the config, better show a proper warning.
|
||||
found = 0
|
||||
for name in names:
|
||||
if name in state_dict:
|
||||
found += 1
|
||||
if found > 1:
|
||||
del state_dict[name]
|
||||
warn_names.add(name)
|
||||
if len(warn_names) > 0:
|
||||
logger.warning(
|
||||
f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading",
|
||||
)
|
||||
|
||||
state_dict = clean_state_dict_for_safetensors(state_dict)
|
||||
weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
|
||||
|
||||
# Shard the model if it is too big.
|
||||
@ -2679,7 +2587,7 @@ class Accelerator:
|
||||
self._save_model_state_pre_hook[handle.id] = hook
|
||||
return handle
|
||||
|
||||
def save_state(self, output_dir: str = None, **save_model_func_kwargs):
|
||||
def save_state(self, output_dir: str = None, safe_serialization: bool = True, **save_model_func_kwargs):
|
||||
"""
|
||||
Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder.
|
||||
|
||||
@ -2700,6 +2608,8 @@ class Accelerator:
|
||||
Args:
|
||||
output_dir (`str` or `os.PathLike`):
|
||||
The name of the folder to save all relevant weights and states.
|
||||
safe_serialization (`bool`, *optional*, defaults to `True`):
|
||||
Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
|
||||
save_model_func_kwargs (`dict`, *optional*):
|
||||
Additional keyword arguments for saving model which can be passed to the underlying save function, such
|
||||
as optional arguments for DeepSpeed's `save_checkpoint` function.
|
||||
@ -2770,7 +2680,7 @@ class Accelerator:
|
||||
# Save the optimizers taking care of FSDP and DeepSpeed nuances
|
||||
optimizers = []
|
||||
if self.distributed_type == DistributedType.FSDP:
|
||||
for opt in self._optimizers:
|
||||
for i, opt in enumerate(self._optimizers):
|
||||
logger.info("Saving FSDP Optimizer")
|
||||
save_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], output_dir, i)
|
||||
logger.info(f"FSDP Optimizer saved to output dir {output_dir}")
|
||||
@ -2787,16 +2697,27 @@ class Accelerator:
|
||||
elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
|
||||
schedulers = self._schedulers
|
||||
|
||||
# Save the samplers of the dataloaders
|
||||
dataloaders = self._dataloaders
|
||||
|
||||
# Call model loading hooks that might have been registered with
|
||||
# accelerator.register_model_state_hook
|
||||
for hook in self._save_model_state_pre_hook.values():
|
||||
hook(self._models, weights, output_dir)
|
||||
|
||||
save_location = save_accelerator_state(
|
||||
output_dir, weights, optimizers, schedulers, self.state.process_index, self.scaler
|
||||
output_dir,
|
||||
weights,
|
||||
optimizers,
|
||||
schedulers,
|
||||
dataloaders,
|
||||
self.state.process_index,
|
||||
self.scaler,
|
||||
save_on_each_node=self.project_configuration.save_on_each_node,
|
||||
safe_serialization=safe_serialization,
|
||||
)
|
||||
for i, obj in enumerate(self._custom_objects):
|
||||
save_custom_state(obj, output_dir, i)
|
||||
save_custom_state(obj, output_dir, i, save_on_each_node=self.project_configuration.save_on_each_node)
|
||||
self.project_configuration.iteration += 1
|
||||
return save_location
|
||||
|
||||
@ -2920,6 +2841,8 @@ class Accelerator:
|
||||
elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
|
||||
schedulers = self._schedulers
|
||||
|
||||
dataloaders = self._dataloaders
|
||||
|
||||
# Call model loading hooks that might have been registered with
|
||||
# accelerator.register_model_state_hook
|
||||
for hook in self._load_model_state_pre_hook.values():
|
||||
@ -2940,6 +2863,7 @@ class Accelerator:
|
||||
models,
|
||||
optimizers,
|
||||
schedulers,
|
||||
dataloaders,
|
||||
self.state.process_index,
|
||||
self.scaler,
|
||||
map_location,
|
||||
@ -3071,6 +2995,13 @@ class Accelerator:
|
||||
from deepspeed.checkpoint.utils import clone_tensors_for_torch_save
|
||||
|
||||
state_dict = clone_tensors_for_torch_save(self.unwrap_model(model).state_dict())
|
||||
elif self.distributed_type == DistributedType.FSDP:
|
||||
from torch.distributed.fsdp import FullStateDictConfig, StateDictType
|
||||
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
|
||||
|
||||
full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
|
||||
with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config):
|
||||
state_dict = model.state_dict()
|
||||
else:
|
||||
if unwrap:
|
||||
model = self.unwrap_model(model)
|
||||
|
||||
@ -402,6 +402,16 @@ def dispatch_model(
|
||||
skip_keys=skip_keys,
|
||||
preload_module_classes=preload_module_classes,
|
||||
)
|
||||
|
||||
# warn if there is any params on the meta device
|
||||
offloaded_devices_str = " and ".join(
|
||||
[device for device in set(device_map.values()) if device in ("cpu", "disk")]
|
||||
)
|
||||
if len(offloaded_devices_str) > 0:
|
||||
logging.warning(
|
||||
f"Some parameters are on the meta device device because they were offloaded to the {offloaded_devices_str}."
|
||||
)
|
||||
|
||||
# Attaching the hook may break tied weights, so we retie them
|
||||
retie_parameters(model, tied_params)
|
||||
|
||||
|
||||
@ -12,21 +12,25 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import random
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from safetensors.torch import load_file
|
||||
from torch.cuda.amp import GradScaler
|
||||
|
||||
from .utils import (
|
||||
MODEL_NAME,
|
||||
OPTIMIZER_NAME,
|
||||
RNG_STATE_NAME,
|
||||
SAFE_MODEL_NAME,
|
||||
SAFE_WEIGHTS_NAME,
|
||||
SAMPLER_NAME,
|
||||
SCALER_NAME,
|
||||
SCHEDULER_NAME,
|
||||
WEIGHTS_NAME,
|
||||
get_pretty_name,
|
||||
is_tpu_available,
|
||||
is_xpu_available,
|
||||
@ -49,12 +53,22 @@ def save_accelerator_state(
|
||||
model_states: List[dict],
|
||||
optimizers: list,
|
||||
schedulers: list,
|
||||
dataloaders: list,
|
||||
process_index: int,
|
||||
scaler: GradScaler = None,
|
||||
save_on_each_node: bool = False,
|
||||
safe_serialization: bool = True,
|
||||
):
|
||||
"""
|
||||
Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.
|
||||
|
||||
<Tip>
|
||||
|
||||
If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native
|
||||
`pickle`.
|
||||
|
||||
</Tip>
|
||||
|
||||
Args:
|
||||
output_dir (`str` or `os.PathLike`):
|
||||
The name of the folder to save all relevant weights and states.
|
||||
@ -64,35 +78,58 @@ def save_accelerator_state(
|
||||
A list of optimizer instances
|
||||
schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
|
||||
A list of learning rate schedulers
|
||||
dataloaders (`List[torch.utils.data.DataLoader]`):
|
||||
A list of dataloader instances to save their sampler states
|
||||
process_index (`int`):
|
||||
The current process index in the Accelerator state
|
||||
scaler (`torch.cuda.amp.GradScaler`, *optional*):
|
||||
An optional gradient scaler instance to save
|
||||
save_on_each_node (`bool`, *optional*):
|
||||
Whether to save on every node, or only the main node.
|
||||
safe_serialization (`bool`, *optional*, defaults to `True`):
|
||||
Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
|
||||
"""
|
||||
output_dir = Path(output_dir)
|
||||
# Model states
|
||||
for i, state in enumerate(model_states):
|
||||
weights_name = f"{MODEL_NAME}.bin" if i == 0 else f"{MODEL_NAME}_{i}.bin"
|
||||
output_model_file = os.path.join(output_dir, weights_name)
|
||||
save(state, output_model_file)
|
||||
weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME
|
||||
if i > 0:
|
||||
weights_name = weights_name.replace(".", f"_{i}.")
|
||||
output_model_file = output_dir.joinpath(weights_name)
|
||||
save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization)
|
||||
logger.info(f"Model weights saved in {output_model_file}")
|
||||
# Optimizer states
|
||||
for i, opt in enumerate(optimizers):
|
||||
state = opt.state_dict()
|
||||
optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
|
||||
output_optimizer_file = os.path.join(output_dir, optimizer_name)
|
||||
save(state, output_optimizer_file)
|
||||
output_optimizer_file = output_dir.joinpath(optimizer_name)
|
||||
save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False)
|
||||
logger.info(f"Optimizer state saved in {output_optimizer_file}")
|
||||
# Scheduler states
|
||||
for i, scheduler in enumerate(schedulers):
|
||||
state = scheduler.state_dict()
|
||||
scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
|
||||
output_scheduler_file = os.path.join(output_dir, scheduler_name)
|
||||
save(state, output_scheduler_file)
|
||||
output_scheduler_file = output_dir.joinpath(scheduler_name)
|
||||
save(state, output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
|
||||
logger.info(f"Scheduler state saved in {output_scheduler_file}")
|
||||
# DataLoader states
|
||||
for i, dataloader in enumerate(dataloaders):
|
||||
sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
|
||||
output_sampler_file = output_dir.joinpath(sampler_name)
|
||||
# Only save if we have our custom sampler
|
||||
from .data_loader import IterableDatasetShard, SeedableRandomSampler
|
||||
|
||||
if isinstance(dataloader.dataset, IterableDatasetShard):
|
||||
sampler = dataloader.sampler.sampler
|
||||
|
||||
if isinstance(sampler, SeedableRandomSampler):
|
||||
save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
|
||||
logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}")
|
||||
|
||||
# GradScaler state
|
||||
if scaler is not None:
|
||||
state = scaler.state_dict()
|
||||
output_scaler_file = os.path.join(output_dir, SCALER_NAME)
|
||||
output_scaler_file = output_dir.joinpath(SCALER_NAME)
|
||||
torch.save(state, output_scaler_file)
|
||||
logger.info(f"Gradient scaler state saved in {output_scaler_file}")
|
||||
# Random number generator states
|
||||
@ -107,7 +144,7 @@ def save_accelerator_state(
|
||||
states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all()
|
||||
if is_tpu_available():
|
||||
states["xm_seed"] = xm.get_rng_state()
|
||||
output_states_file = os.path.join(output_dir, states_name)
|
||||
output_states_file = output_dir.joinpath(states_name)
|
||||
torch.save(states, output_states_file)
|
||||
logger.info(f"Random states saved in {output_states_file}")
|
||||
return output_dir
|
||||
@ -118,6 +155,7 @@ def load_accelerator_state(
|
||||
models,
|
||||
optimizers,
|
||||
schedulers,
|
||||
dataloaders,
|
||||
process_index,
|
||||
scaler=None,
|
||||
map_location=None,
|
||||
@ -152,17 +190,25 @@ def load_accelerator_state(
|
||||
map_location = "cpu"
|
||||
elif map_location == "on_device":
|
||||
map_location = PartialState().device
|
||||
|
||||
input_dir = Path(input_dir)
|
||||
# Model states
|
||||
for i, model in enumerate(models):
|
||||
weights_name = f"{MODEL_NAME}.bin" if i == 0 else f"{MODEL_NAME}_{i}.bin"
|
||||
input_model_file = os.path.join(input_dir, weights_name)
|
||||
models[i].load_state_dict(torch.load(input_model_file, map_location=map_location), **load_model_func_kwargs)
|
||||
ending = f"_{i}" if i > 0 else ""
|
||||
input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors")
|
||||
if input_model_file.exists():
|
||||
state_dict = load_file(input_model_file, device=str(map_location))
|
||||
else:
|
||||
# Load with torch
|
||||
input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin")
|
||||
state_dict = torch.load(input_model_file, map_location=map_location)
|
||||
models[i].load_state_dict(state_dict, **load_model_func_kwargs)
|
||||
logger.info("All model weights loaded successfully")
|
||||
|
||||
# Optimizer states
|
||||
for i, opt in enumerate(optimizers):
|
||||
optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
|
||||
input_optimizer_file = os.path.join(input_dir, optimizer_name)
|
||||
input_optimizer_file = input_dir.joinpath(optimizer_name)
|
||||
optimizer_state = torch.load(input_optimizer_file, map_location=map_location)
|
||||
optimizers[i].load_state_dict(optimizer_state)
|
||||
logger.info("All optimizer states loaded successfully")
|
||||
@ -170,19 +216,32 @@ def load_accelerator_state(
|
||||
# Scheduler states
|
||||
for i, scheduler in enumerate(schedulers):
|
||||
scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
|
||||
input_scheduler_file = os.path.join(input_dir, scheduler_name)
|
||||
input_scheduler_file = input_dir.joinpath(scheduler_name)
|
||||
scheduler.load_state_dict(torch.load(input_scheduler_file))
|
||||
logger.info("All scheduler states loaded successfully")
|
||||
|
||||
for i, dataloader in enumerate(dataloaders):
|
||||
sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
|
||||
input_sampler_file = input_dir.joinpath(sampler_name)
|
||||
# Only load if we have our custom sampler
|
||||
from .data_loader import IterableDatasetShard, SeedableRandomSampler
|
||||
|
||||
if isinstance(dataloader.dataset, IterableDatasetShard):
|
||||
sampler = dataloader.sampler.sampler
|
||||
|
||||
if isinstance(sampler, SeedableRandomSampler):
|
||||
dataloader.sampler.sampler = torch.load(input_sampler_file)
|
||||
logger.info("All dataloader sampler states loaded successfully")
|
||||
|
||||
# GradScaler state
|
||||
if scaler is not None:
|
||||
input_scaler_file = os.path.join(input_dir, SCALER_NAME)
|
||||
input_scaler_file = input_dir.joinpath(SCALER_NAME)
|
||||
scaler.load_state_dict(torch.load(input_scaler_file))
|
||||
logger.info("GradScaler state loaded successfully")
|
||||
|
||||
# Random states
|
||||
try:
|
||||
states = torch.load(os.path.join(input_dir, f"{RNG_STATE_NAME}_{process_index}.pkl"))
|
||||
states = torch.load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl"))
|
||||
random.setstate(states["random_state"])
|
||||
np.random.set_state(states["numpy_random_seed"])
|
||||
torch.set_rng_state(states["torch_manual_seed"])
|
||||
@ -197,14 +256,14 @@ def load_accelerator_state(
|
||||
logger.info("Could not load random states")
|
||||
|
||||
|
||||
def save_custom_state(obj, path, index: int = 0):
|
||||
def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False):
|
||||
"""
|
||||
Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`
|
||||
"""
|
||||
# Should this be the right way to get a qual_name type value from `obj`?
|
||||
save_location = Path(path) / f"custom_checkpoint_{index}.pkl"
|
||||
logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}")
|
||||
torch.save(obj.state_dict(), save_location)
|
||||
save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node)
|
||||
|
||||
|
||||
def load_custom_state(obj, path, index: int = 0):
|
||||
|
||||
@ -179,7 +179,7 @@ def get_cluster_input():
|
||||
|
||||
use_mps = not use_cpu and is_mps_available()
|
||||
deepspeed_config = {}
|
||||
if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO] and not use_mps:
|
||||
if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.NO] and not use_mps:
|
||||
use_deepspeed = _ask_field(
|
||||
"Do you want to use DeepSpeed? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
@ -381,17 +381,26 @@ def get_cluster_input():
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
fsdp_config["fsdp_use_orig_params"] = _ask_field(
|
||||
"Do you want to enable FSDP's `use_orig_params` feature? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
fsdp_config["fsdp_sync_module_states"] = _ask_field(
|
||||
"Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ",
|
||||
"Do you want to enable FSDP's `use_orig_params` feature? [YES/no]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=True,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
fsdp_config["fsdp_cpu_ram_efficient_loading"] = _ask_field(
|
||||
"Do you want to enable CPU RAM efficient model loading? Only applicable for 🤗 Transformers models. [YES/no]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=True,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if fsdp_config["fsdp_cpu_ram_efficient_loading"]:
|
||||
fsdp_config["fsdp_sync_module_states"] = True
|
||||
else:
|
||||
fsdp_config["fsdp_sync_module_states"] = _ask_field(
|
||||
"Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=True,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
megatron_lm_config = {}
|
||||
if distributed_type in [DistributedType.MULTI_GPU]:
|
||||
@ -442,7 +451,7 @@ def get_cluster_input():
|
||||
|
||||
megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field(
|
||||
"Do you want to use distributed optimizer "
|
||||
"which shards optimizer state and gradients across data pralellel ranks? [YES/no]: ",
|
||||
"which shards optimizer state and gradients across data parallel ranks? [YES/no]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=True,
|
||||
error_message="Please enter yes or no.",
|
||||
|
||||
@ -30,13 +30,15 @@ DYNAMO_BACKENDS = [
|
||||
"EAGER",
|
||||
"AOT_EAGER",
|
||||
"INDUCTOR",
|
||||
"NVFUSER",
|
||||
"AOT_NVFUSER",
|
||||
"AOT_CUDAGRAPHS",
|
||||
"AOT_TS_NVFUSER",
|
||||
"NVPRIMS_NVFUSER",
|
||||
"CUDAGRAPHS",
|
||||
"OFI",
|
||||
"FX2TRT",
|
||||
"ONNXRT",
|
||||
"TENSORRT",
|
||||
"IPEX",
|
||||
"TVM",
|
||||
]
|
||||
|
||||
|
||||
|
||||
@ -34,6 +34,7 @@ from accelerate.utils import (
|
||||
DistributedType,
|
||||
PrepareForLaunch,
|
||||
_filter_args,
|
||||
check_cuda_p2p_ib_support,
|
||||
is_bf16_available,
|
||||
is_deepspeed_available,
|
||||
is_npu_available,
|
||||
@ -519,11 +520,19 @@ def launch_command_parser(subparsers=None):
|
||||
)
|
||||
fsdp_args.add_argument(
|
||||
"--fsdp_use_orig_params",
|
||||
default="false",
|
||||
default="true",
|
||||
type=str,
|
||||
help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres."
|
||||
" (useful only when `use_fsdp` flag is passed).",
|
||||
)
|
||||
fsdp_args.add_argument(
|
||||
"--fsdp_cpu_ram_efficient_loading",
|
||||
default="true",
|
||||
type=str,
|
||||
help="If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. "
|
||||
"Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to True. "
|
||||
"(useful only when `use_fsdp` flag is passed).",
|
||||
)
|
||||
fsdp_args.add_argument(
|
||||
"--fsdp_sync_module_states",
|
||||
default="true",
|
||||
@ -634,6 +643,17 @@ def multi_gpu_launcher(args):
|
||||
import torch.distributed.run as distrib_run
|
||||
|
||||
current_env = prepare_multi_gpu_env(args)
|
||||
if not check_cuda_p2p_ib_support():
|
||||
message = "Using RTX 3090 or 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
|
||||
warn = False
|
||||
if "NCCL_P2P_DISABLE" not in current_env:
|
||||
current_env["NCCL_P2P_DISABLE"] = "1"
|
||||
warn = True
|
||||
if "NCCL_IB_DISABLE" not in current_env:
|
||||
current_env["NCCL_IB_DISABLE"] = "1"
|
||||
warn = True
|
||||
if warn:
|
||||
logger.warning(message)
|
||||
|
||||
debug = getattr(args, "debug", False)
|
||||
args = _filter_args(
|
||||
@ -660,6 +680,17 @@ def deepspeed_launcher(args):
|
||||
raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")
|
||||
|
||||
cmd, current_env = prepare_deepspeed_cmd_env(args)
|
||||
if not check_cuda_p2p_ib_support():
|
||||
message = "Using RTX 3090 or 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
|
||||
warn = False
|
||||
if "NCCL_P2P_DISABLE" not in current_env:
|
||||
current_env["NCCL_P2P_DISABLE"] = "1"
|
||||
warn = True
|
||||
if "NCCL_IB_DISABLE" not in current_env:
|
||||
current_env["NCCL_IB_DISABLE"] = "1"
|
||||
warn = True
|
||||
if warn:
|
||||
logger.warning(message)
|
||||
|
||||
if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
|
||||
with open(".deepspeed_env", "a") as f:
|
||||
@ -748,7 +779,7 @@ def tpu_pod_launcher(args):
|
||||
"--tpu",
|
||||
"--no_tpu_cluster",
|
||||
"--num_machines",
|
||||
str(1),
|
||||
"1",
|
||||
"--mixed_precision",
|
||||
"no",
|
||||
"--dynamo_backend",
|
||||
|
||||
@ -17,7 +17,7 @@ from contextlib import suppress
|
||||
from typing import Callable, List, Optional, Union
|
||||
|
||||
import torch
|
||||
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
|
||||
from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler
|
||||
|
||||
from .logging import get_logger
|
||||
from .state import AcceleratorState, DistributedType, GradientState, is_tpu_available
|
||||
@ -64,6 +64,38 @@ for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items():
|
||||
_PYTORCH_DATALOADER_KWARGS.update(additional_kwargs)
|
||||
|
||||
|
||||
class SeedableRandomSampler(RandomSampler):
|
||||
"""
|
||||
Same as a random sampler, except that in `__iter__` a seed can be used.
|
||||
|
||||
Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed
|
||||
and be fully reproducable on multiple iterations.
|
||||
|
||||
If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on
|
||||
(stored in `self.epoch`).
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.epoch = 0
|
||||
self.seed = torch.random.initial_seed()
|
||||
|
||||
def __iter__(self):
|
||||
if self.generator is None:
|
||||
self.generator = torch.Generator()
|
||||
else:
|
||||
self.seed = self.generator.initial_seed()
|
||||
# Allow `self.epoch` to modify the seed of the generator
|
||||
seed = self.epoch + self.seed
|
||||
self.generator.manual_seed(seed)
|
||||
yield from super().__iter__()
|
||||
self.set_epoch(self.epoch + 1)
|
||||
|
||||
def set_epoch(self, epoch: int):
|
||||
"Sets the current iteration of the sampler."
|
||||
self.epoch = epoch
|
||||
|
||||
|
||||
class BatchSamplerShard(BatchSampler):
|
||||
"""
|
||||
Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will
|
||||
@ -271,7 +303,25 @@ class IterableDatasetShard(IterableDataset):
|
||||
self.process_index = process_index
|
||||
self.split_batches = split_batches
|
||||
|
||||
def set_epoch(self, epoch):
|
||||
self.epoch = epoch
|
||||
if hasattr(self.dataset, "set_epoch"):
|
||||
self.dataset.set_epoch(epoch)
|
||||
|
||||
def __len__(self):
|
||||
# We will just raise the downstream error if the underlying dataset is not sized
|
||||
if self.drop_last:
|
||||
return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size
|
||||
else:
|
||||
return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
|
||||
|
||||
def __iter__(self):
|
||||
if (
|
||||
not hasattr(self.dataset, "set_epoch")
|
||||
and hasattr(self.dataset, "generator")
|
||||
and isinstance(self.dataset.generator, torch.Generator)
|
||||
):
|
||||
self.dataset.generator.manual_seed(self.epoch)
|
||||
real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes)
|
||||
process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size
|
||||
process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size)
|
||||
@ -324,8 +374,9 @@ class DataLoaderStateMixin:
|
||||
"Prepares the gradient state for the current dataloader"
|
||||
self.reset()
|
||||
with suppress(Exception):
|
||||
length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
|
||||
self.remainder = length % self.total_batch_size
|
||||
if not self._drop_last:
|
||||
length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
|
||||
self.remainder = length % self.total_batch_size
|
||||
self.gradient_state._add_dataloader(self)
|
||||
|
||||
def end(self):
|
||||
@ -352,7 +403,7 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
|
||||
- `"generator"`: an optional `torch.Generator`
|
||||
synchronized_generator (`torch.Generator`, *optional*):
|
||||
A random number generator to keep synchronized across processes.
|
||||
split_batches (`int`, *optional*, defaults to 0):
|
||||
skip_batches (`int`, *optional*, defaults to 0):
|
||||
The number of batches to skip at the beginning.
|
||||
kwargs:
|
||||
All other keyword arguments to pass to the regular `DataLoader` initialization.
|
||||
@ -366,18 +417,31 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
|
||||
- **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
|
||||
"""
|
||||
|
||||
def __init__(self, dataset, device=None, rng_types=None, synchronized_generator=None, skip_batches=0, **kwargs):
|
||||
def __init__(
|
||||
self,
|
||||
dataset,
|
||||
device=None,
|
||||
rng_types=None,
|
||||
synchronized_generator=None,
|
||||
skip_batches=0,
|
||||
_drop_last: bool = False,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(dataset, **kwargs)
|
||||
self.device = device
|
||||
self.rng_types = rng_types
|
||||
self.synchronized_generator = synchronized_generator
|
||||
self.skip_batches = skip_batches
|
||||
self.gradient_state = GradientState()
|
||||
self._drop_last = _drop_last
|
||||
self.iteration = 0
|
||||
|
||||
def __iter__(self):
|
||||
if self.rng_types is not None:
|
||||
synchronize_rng_states(self.rng_types, self.synchronized_generator)
|
||||
self.begin()
|
||||
|
||||
self.set_epoch(self.iteration)
|
||||
dataloader_iter = super().__iter__()
|
||||
# We iterate one batch ahead to check when we are at the end
|
||||
try:
|
||||
@ -401,8 +465,21 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
|
||||
if batch_index >= self.skip_batches:
|
||||
yield current_batch
|
||||
break
|
||||
|
||||
self.iteration += 1
|
||||
self.end()
|
||||
|
||||
def set_epoch(self, epoch: int):
|
||||
# In case it is manually passed in, the user can set it to what they like
|
||||
if self.iteration != epoch:
|
||||
self.iteration = epoch
|
||||
if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"):
|
||||
self.batch_sampler.sampler.set_epoch(epoch)
|
||||
# We support if a custom `Dataset` implementation has `set_epoch`
|
||||
# or in general HF datasets `Datasets`
|
||||
elif hasattr(self.dataset, "set_epoch"):
|
||||
self.dataset.set_epoch(epoch)
|
||||
|
||||
@property
|
||||
def total_batch_size(self):
|
||||
batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler
|
||||
@ -506,6 +583,7 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
|
||||
self.skip_batches = skip_batches
|
||||
|
||||
self.slice_fn = slice_tensors if slice_fn is None else slice_fn
|
||||
self.iteration = 0
|
||||
|
||||
def _fetch_batches(self, iterator):
|
||||
batches, batch = None, None
|
||||
@ -546,6 +624,7 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
|
||||
|
||||
def __iter__(self):
|
||||
self.begin()
|
||||
self.set_epoch(self.iteration)
|
||||
main_iterator = None
|
||||
if is_torch_version(">=", "2.0.1"):
|
||||
# NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts
|
||||
@ -615,8 +694,18 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
|
||||
if batch_index >= self.skip_batches:
|
||||
yield batch
|
||||
batch_index += 1
|
||||
self.iteration += 1
|
||||
self.end()
|
||||
|
||||
def set_epoch(self, epoch: int):
|
||||
# In case it is manually passed in, the user can set it to what they like
|
||||
if self.iteration != epoch:
|
||||
self.iteration = epoch
|
||||
if hasattr(self.batch_sampler.sampler, "set_epoch"):
|
||||
self.batch_sampler.sampler.set_epoch(epoch)
|
||||
elif hasattr(self.dataset, "set_epoch"):
|
||||
self.dataset.set_epoch(epoch)
|
||||
|
||||
def __len__(self):
|
||||
whole_length = super().__len__()
|
||||
if self.split_batches:
|
||||
@ -739,6 +828,23 @@ def prepare_data_loader(
|
||||
new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None
|
||||
sampler_is_batch_sampler = False
|
||||
synchronized_generator = None
|
||||
sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
|
||||
if sampler_is_batch_sampler:
|
||||
sampler = getattr(dataloader.sampler, "sampler", None)
|
||||
else:
|
||||
sampler = getattr(dataloader.batch_sampler, "sampler", None)
|
||||
if isinstance(sampler, RandomSampler):
|
||||
# When iterating through the dataloader during distributed processes
|
||||
# we want to ensure that on each process we are iterating through the same
|
||||
# samples in the same order if a seed is set. This requires a tweak
|
||||
# to the `torch.utils.data.RandomSampler` class (if used).
|
||||
sampler = SeedableRandomSampler(
|
||||
data_source=sampler.data_source,
|
||||
replacement=sampler.replacement,
|
||||
num_samples=sampler._num_samples,
|
||||
generator=getattr(sampler, "generator", torch.Generator()),
|
||||
)
|
||||
|
||||
# No change if no multiprocess
|
||||
if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:
|
||||
if isinstance(new_dataset, IterableDataset):
|
||||
@ -753,17 +859,6 @@ def prepare_data_loader(
|
||||
split_batches=split_batches,
|
||||
)
|
||||
else:
|
||||
# New batch sampler for the current process.
|
||||
sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
|
||||
if sampler_is_batch_sampler:
|
||||
sampler = dataloader.sampler.sampler
|
||||
else:
|
||||
sampler = dataloader.batch_sampler.sampler
|
||||
if hasattr(sampler, "generator"):
|
||||
if sampler.generator is None:
|
||||
sampler.generator = torch.Generator()
|
||||
synchronized_generator = sampler.generator
|
||||
|
||||
batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
|
||||
new_batch_sampler = BatchSamplerShard(
|
||||
batch_sampler,
|
||||
@ -797,7 +892,11 @@ def prepare_data_loader(
|
||||
kwargs["batch_size"] = (
|
||||
dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size
|
||||
)
|
||||
|
||||
if isinstance(sampler, SeedableRandomSampler):
|
||||
if sampler_is_batch_sampler:
|
||||
dataloader.sampler.sampler = sampler
|
||||
else:
|
||||
dataloader.batch_sampler.sampler = sampler
|
||||
if dispatch_batches:
|
||||
kwargs.pop("generator")
|
||||
dataloader = DataLoaderDispatcher(
|
||||
@ -815,6 +914,7 @@ def prepare_data_loader(
|
||||
sampler=new_batch_sampler,
|
||||
batch_size=dataloader.batch_size,
|
||||
rng_types=rng_types,
|
||||
_drop_last=dataloader.drop_last,
|
||||
synchronized_generator=synchronized_generator,
|
||||
**kwargs,
|
||||
)
|
||||
@ -825,6 +925,7 @@ def prepare_data_loader(
|
||||
batch_sampler=new_batch_sampler,
|
||||
rng_types=rng_types,
|
||||
synchronized_generator=synchronized_generator,
|
||||
_drop_last=dataloader.drop_last,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
@ -26,6 +26,7 @@ from .utils import (
|
||||
send_to_device,
|
||||
set_module_tensor_to_device,
|
||||
)
|
||||
from .utils.modeling import get_non_persistent_buffers
|
||||
|
||||
|
||||
class ModelHook:
|
||||
@ -262,14 +263,17 @@ class AlignDevicesHook(ModelHook):
|
||||
module, include_buffers=self.offload_buffers, recurse=self.place_submodules
|
||||
)
|
||||
}
|
||||
|
||||
for name, _ in named_module_tensors(
|
||||
module, include_buffers=self.offload_buffers, recurse=self.place_submodules
|
||||
module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True
|
||||
):
|
||||
set_module_tensor_to_device(module, name, "meta")
|
||||
if not self.offload_buffers and self.execution_device is not None:
|
||||
for name, _ in module.named_buffers(recurse=self.place_submodules):
|
||||
set_module_tensor_to_device(module, name, self.execution_device)
|
||||
elif self.offload_buffers and self.execution_device is not None:
|
||||
for name in get_non_persistent_buffers(module, recurse=self.place_submodules):
|
||||
set_module_tensor_to_device(module, name, self.execution_device)
|
||||
|
||||
return module
|
||||
|
||||
def pre_forward(self, module, *args, **kwargs):
|
||||
@ -277,7 +281,10 @@ class AlignDevicesHook(ModelHook):
|
||||
self.input_device = find_device([args, kwargs])
|
||||
if self.offload:
|
||||
for name, _ in named_module_tensors(
|
||||
module, include_buffers=self.offload_buffers, recurse=self.place_submodules
|
||||
module,
|
||||
include_buffers=self.offload_buffers,
|
||||
recurse=self.place_submodules,
|
||||
remove_non_persistent=True,
|
||||
):
|
||||
fp16_statistics = None
|
||||
if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys():
|
||||
@ -294,7 +301,10 @@ class AlignDevicesHook(ModelHook):
|
||||
def post_forward(self, module, output):
|
||||
if self.offload:
|
||||
for name, _ in named_module_tensors(
|
||||
module, include_buffers=self.offload_buffers, recurse=self.place_submodules
|
||||
module,
|
||||
include_buffers=self.offload_buffers,
|
||||
recurse=self.place_submodules,
|
||||
remove_non_persistent=True,
|
||||
):
|
||||
set_module_tensor_to_device(module, name, "meta")
|
||||
if type(module).__name__ == "Linear8bitLt":
|
||||
|
||||
@ -19,7 +19,7 @@ import tempfile
|
||||
import torch
|
||||
|
||||
from .state import AcceleratorState, PartialState
|
||||
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
|
||||
from .utils import PrecisionType, PrepareForLaunch, are_libraries_initialized, is_mps_available, patch_environment
|
||||
|
||||
|
||||
def test_launch():
|
||||
@ -142,6 +142,17 @@ def notebook_launcher(
|
||||
"inside your training function. Restart your notebook and make sure no cells initializes an "
|
||||
"`Accelerator`."
|
||||
)
|
||||
# Check for specific libraries known to initialize CUDA that users constantly use
|
||||
problematic_imports = are_libraries_initialized("bitsandbytes")
|
||||
if len(problematic_imports) > 1:
|
||||
err = (
|
||||
"Could not start distributed process. Libraries known to initialize CUDA upon import have been "
|
||||
"imported already. Please keep these imports inside your training function to try and help with this:"
|
||||
)
|
||||
for lib_name in problematic_imports:
|
||||
err += f"\n\t* `{lib_name}`"
|
||||
raise RuntimeError(err)
|
||||
|
||||
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
|
||||
# process here (the other ones will be set be the launcher).
|
||||
with patch_environment(
|
||||
@ -222,7 +233,7 @@ def debug_launcher(function, args=(), num_processes=2):
|
||||
# process here (the other ones will be set be the launcher).
|
||||
with patch_environment(
|
||||
world_size=num_processes,
|
||||
master_addr="127.0.01",
|
||||
master_addr="127.0.0.1",
|
||||
master_port="29500",
|
||||
accelerate_mixed_precision="no",
|
||||
accelerate_debug_rdv_file=tmp_file.name,
|
||||
|
||||
@ -12,6 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import functools
|
||||
import logging
|
||||
import os
|
||||
|
||||
@ -67,6 +68,17 @@ class MultiProcessAdapter(logging.LoggerAdapter):
|
||||
self.logger.log(level, msg, *args, **kwargs)
|
||||
state.wait_for_everyone()
|
||||
|
||||
@functools.lru_cache(None)
|
||||
def warning_once(self, *args, **kwargs):
|
||||
"""
|
||||
This method is identical to `logger.warning()`, but will emit the warning with the same message only once
|
||||
|
||||
Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the
|
||||
cache. The assumption here is that all warning messages are unique across the code. If they aren't then need to
|
||||
switch to another type of cache that includes the caller frame information in the hashing function.
|
||||
"""
|
||||
self.warning(*args, **kwargs)
|
||||
|
||||
|
||||
def get_logger(name: str, log_level: str = None):
|
||||
"""
|
||||
@ -85,9 +97,11 @@ def get_logger(name: str, log_level: str = None):
|
||||
|
||||
```python
|
||||
>>> from accelerate.logging import get_logger
|
||||
>>> from accelerate import Accelerator
|
||||
|
||||
>>> logger = get_logger(__name__)
|
||||
|
||||
>>> accelerator = Accelerator()
|
||||
>>> logger.info("My log", main_process_only=False)
|
||||
>>> logger.debug("My log", main_process_only=True)
|
||||
|
||||
@ -95,9 +109,6 @@ def get_logger(name: str, log_level: str = None):
|
||||
>>> logger.info("My log")
|
||||
>>> logger.debug("My second log")
|
||||
|
||||
>>> from accelerate import Accelerator
|
||||
|
||||
>>> accelerator = Accelerator()
|
||||
>>> array = ["a", "b", "c", "d"]
|
||||
>>> letter_at_rank = array[accelerator.process_index]
|
||||
>>> logger.info(letter_at_rank, in_order=True)
|
||||
|
||||
@ -28,6 +28,7 @@ from .utils import (
|
||||
DistributedType,
|
||||
DynamoBackend,
|
||||
GradientAccumulationPlugin,
|
||||
check_cuda_p2p_ib_support,
|
||||
get_ccl_version,
|
||||
get_int_from_env,
|
||||
is_ccl_available,
|
||||
@ -175,10 +176,20 @@ class PartialState:
|
||||
if is_xpu_available and is_ccl_available():
|
||||
# Set DeepSpeed backend to ccl for xpu
|
||||
self.backend = "ccl"
|
||||
elif is_npu_available():
|
||||
self.backend = "hccl"
|
||||
else:
|
||||
self.backend = "nccl"
|
||||
dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
|
||||
|
||||
if not check_cuda_p2p_ib_support():
|
||||
if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ:
|
||||
raise NotImplementedError(
|
||||
"Using RTX 3090 or 4000 series doesn't support faster communication broadband via P2P or IB. "
|
||||
'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which '
|
||||
"will do this automatically."
|
||||
)
|
||||
|
||||
self.num_processes = torch.distributed.get_world_size()
|
||||
self.process_index = torch.distributed.get_rank()
|
||||
self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
|
||||
@ -187,6 +198,10 @@ class PartialState:
|
||||
self.device = torch.device("xpu", self.local_process_index)
|
||||
if self.device is not None:
|
||||
torch.xpu.set_device(self.device)
|
||||
elif is_npu_available():
|
||||
self.device = torch.device("npu", self.local_process_index)
|
||||
if self.device is not None:
|
||||
torch.npu.set_device(self.device)
|
||||
else:
|
||||
self.device = torch.device("cuda", self.local_process_index)
|
||||
if self.device is not None:
|
||||
@ -200,6 +215,13 @@ class PartialState:
|
||||
if self.backend is None:
|
||||
self.backend = "nccl"
|
||||
torch.distributed.init_process_group(backend=self.backend, **kwargs)
|
||||
if not check_cuda_p2p_ib_support():
|
||||
if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ:
|
||||
raise NotImplementedError(
|
||||
"Using RTX 3090 or 4000 series doesn't support faster communication broadband via P2P or IB. "
|
||||
'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which '
|
||||
"will do this automatically."
|
||||
)
|
||||
self.num_processes = torch.distributed.get_world_size()
|
||||
self.process_index = torch.distributed.get_rank()
|
||||
self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
|
||||
@ -287,7 +309,11 @@ class PartialState:
|
||||
else:
|
||||
self.device = self.default_device
|
||||
else:
|
||||
self.distributed_type = DistributedType.NO
|
||||
self.distributed_type = (
|
||||
DistributedType.NO
|
||||
if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "false"
|
||||
else DistributedType.DEEPSPEED
|
||||
)
|
||||
self.num_processes = 1
|
||||
self.process_index = self.local_process_index = 0
|
||||
|
||||
|
||||
@ -9,7 +9,6 @@ from .testing import (
|
||||
require_mps,
|
||||
require_multi_gpu,
|
||||
require_multi_xpu,
|
||||
require_safetensors,
|
||||
require_single_gpu,
|
||||
require_single_xpu,
|
||||
require_torch_min_version,
|
||||
|
||||
@ -219,6 +219,25 @@ def test_gather_for_metrics_with_iterable_dataset():
|
||||
logger.removeHandler(list_handler)
|
||||
|
||||
|
||||
def test_gather_for_metrics_drop_last():
|
||||
accelerator = Accelerator()
|
||||
per_device_batch_size = 5
|
||||
num_items = (10 * accelerator.num_processes) + 1
|
||||
dataloader = DataLoader(range(num_items), batch_size=per_device_batch_size, drop_last=True)
|
||||
dataloader = accelerator.prepare(dataloader)
|
||||
|
||||
iterator = iter(dataloader)
|
||||
next(iterator) # Skip first batch tensor([0, 1, 2, 3, 4], device='cuda:0')
|
||||
batch = next(iterator)
|
||||
gathered_items = accelerator.gather_for_metrics(batch)
|
||||
|
||||
# Should return a full set of complete batches from each GPU
|
||||
num_expected_items = per_device_batch_size * accelerator.num_processes
|
||||
assert gathered_items.size(0) == (
|
||||
num_expected_items
|
||||
), f"Expected number of items: {num_expected_items}, Actual: {gathered_items.size(0)}"
|
||||
|
||||
|
||||
def main():
|
||||
accelerator = Accelerator(split_batches=False, dispatch_batches=False)
|
||||
if accelerator.is_local_main_process:
|
||||
@ -255,6 +274,10 @@ def main():
|
||||
accelerator = Accelerator()
|
||||
test_torch_metrics(accelerator, 512)
|
||||
accelerator.state._reset_state()
|
||||
if accelerator.is_local_main_process:
|
||||
print("**Test that `drop_last` is taken into account**")
|
||||
test_gather_for_metrics_drop_last()
|
||||
accelerator.state._reset_state()
|
||||
|
||||
|
||||
def _mp_fn(index):
|
||||
|
||||
@ -1,17 +1,34 @@
|
||||
# Test file to ensure that in general certain situational setups for notebooks work.
|
||||
import argparse
|
||||
import os
|
||||
|
||||
from pytest import raises
|
||||
|
||||
from accelerate import PartialState, notebook_launcher
|
||||
from accelerate.test_utils import require_bnb
|
||||
from accelerate.utils import is_bnb_available
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--num_processes", type=int, default=1)
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
def function():
|
||||
def basic_function():
|
||||
# Just prints the PartialState
|
||||
print(f"PartialState:\n{PartialState()}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
notebook_launcher(function, num_processes=int(args.num_processes))
|
||||
NUM_PROCESSES = os.environ.get("ACCELERATE_NUM_PROCESSES", 1)
|
||||
|
||||
|
||||
def test_can_initialize():
|
||||
notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES)
|
||||
|
||||
|
||||
@require_bnb
|
||||
def test_problematic_imports():
|
||||
with raises(AssertionError, match="Please keep these imports"):
|
||||
notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES)
|
||||
|
||||
|
||||
def main():
|
||||
print("Test basic notebook can be ran")
|
||||
test_can_initialize()
|
||||
if is_bnb_available():
|
||||
print("Test problematic imports (bnb)")
|
||||
test_problematic_imports()
|
||||
|
||||
@ -21,11 +21,12 @@ import time
|
||||
from copy import deepcopy
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
from torch.utils.data import DataLoader, Dataset
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate.data_loader import prepare_data_loader
|
||||
from accelerate.data_loader import SeedableRandomSampler, prepare_data_loader
|
||||
from accelerate.state import AcceleratorState
|
||||
from accelerate.test_utils import RegressionDataset, are_the_same_tensors
|
||||
from accelerate.utils import (
|
||||
@ -288,11 +289,65 @@ def central_dl_preparation_check():
|
||||
print("Shuffled central dataloader passing.")
|
||||
|
||||
|
||||
def custom_sampler_check():
|
||||
state = AcceleratorState()
|
||||
|
||||
class CustomDataset(Dataset):
|
||||
def __init__(self, data):
|
||||
self.data = data
|
||||
|
||||
def __len__(self):
|
||||
return len(self.data)
|
||||
|
||||
def __getitem__(self, index):
|
||||
return self.data[index]
|
||||
|
||||
class CustomBatchSampler:
|
||||
def __init__(self, dataset_length: int, batch_size: int, shuffle: bool = True):
|
||||
self.batch_size = batch_size
|
||||
self.data_index = np.arange(dataset_length)
|
||||
self.shuffle = shuffle
|
||||
|
||||
def __iter__(self):
|
||||
num_batches = len(self)
|
||||
if self.shuffle:
|
||||
index = np.random.permutation(self.data_index)
|
||||
else:
|
||||
index = self.data_index
|
||||
output = np.array_split(index, num_batches)
|
||||
yield from output
|
||||
|
||||
def __len__(self):
|
||||
return math.ceil(len(self.data_index) / self.batch_size)
|
||||
|
||||
dataset = CustomDataset(range(32 * state.num_processes))
|
||||
sampler = CustomBatchSampler(len(dataset), batch_size=8)
|
||||
dl = DataLoader(dataset, batch_sampler=sampler)
|
||||
dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index)
|
||||
# We need just ensure that `dl.batch_sampler` (or `dl.batch_sampler.batch_sampler` is indeed the old batch sampler
|
||||
if hasattr(dl.batch_sampler, "batch_sampler"):
|
||||
assert isinstance(
|
||||
dl.batch_sampler.batch_sampler, CustomBatchSampler
|
||||
), "Custom sampler was changed after calling `prepare_data_loader`"
|
||||
else:
|
||||
assert isinstance(
|
||||
dl.batch_sampler, CustomBatchSampler
|
||||
), "Custom sampler was changed after calling `prepare_data_loader`"
|
||||
|
||||
|
||||
def mock_training(length, batch_size, generator):
|
||||
set_seed(42)
|
||||
generator.manual_seed(42)
|
||||
train_set = RegressionDataset(length=length, seed=42)
|
||||
train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
|
||||
|
||||
# The SeedableRandomSampler is needed during distributed setups
|
||||
# for full reproducability across processes with the `DataLoader`
|
||||
sampler = SeedableRandomSampler(
|
||||
generator=generator,
|
||||
data_source=train_set,
|
||||
num_samples=len(train_set),
|
||||
)
|
||||
train_dl = DataLoader(train_set, batch_size=batch_size, sampler=sampler)
|
||||
model = RegressionModel()
|
||||
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
|
||||
for epoch in range(3):
|
||||
@ -598,6 +653,7 @@ def main():
|
||||
dl_preparation_check()
|
||||
if state.distributed_type != DistributedType.TPU:
|
||||
central_dl_preparation_check()
|
||||
custom_sampler_check()
|
||||
|
||||
# Trainings are not exactly the same in DeepSpeed and CPU mode
|
||||
if state.distributed_type == DistributedType.DEEPSPEED:
|
||||
|
||||
@ -31,11 +31,13 @@ from ..state import AcceleratorState, PartialState
|
||||
from ..utils import (
|
||||
gather,
|
||||
is_bnb_available,
|
||||
is_clearml_available,
|
||||
is_comet_ml_available,
|
||||
is_datasets_available,
|
||||
is_deepspeed_available,
|
||||
is_dvclive_available,
|
||||
is_mps_available,
|
||||
is_safetensors_available,
|
||||
is_pandas_available,
|
||||
is_tensorboard_available,
|
||||
is_timm_available,
|
||||
is_torch_version,
|
||||
@ -177,14 +179,6 @@ def require_multi_xpu(test_case):
|
||||
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
|
||||
|
||||
|
||||
def require_safetensors(test_case):
|
||||
"""
|
||||
Decorator marking a test that requires safetensors installed. These tests are skipped when safetensors isn't
|
||||
installed
|
||||
"""
|
||||
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
|
||||
|
||||
|
||||
def require_deepspeed(test_case):
|
||||
"""
|
||||
Decorator marking a test that requires DeepSpeed installed. These tests are skipped when DeepSpeed isn't installed
|
||||
@ -231,6 +225,27 @@ def require_comet_ml(test_case):
|
||||
return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
|
||||
|
||||
|
||||
def require_clearml(test_case):
|
||||
"""
|
||||
Decorator marking a test that requires clearml installed. These tests are skipped when clearml isn't installed
|
||||
"""
|
||||
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
|
||||
|
||||
|
||||
def require_dvclive(test_case):
|
||||
"""
|
||||
Decorator marking a test that requires dvclive installed. These tests are skipped when dvclive isn't installed
|
||||
"""
|
||||
return unittest.skipUnless(is_dvclive_available(), "test requires dvclive")(test_case)
|
||||
|
||||
|
||||
def require_pandas(test_case):
|
||||
"""
|
||||
Decorator marking a test that requires pandas installed. These tests are skipped when pandas isn't installed
|
||||
"""
|
||||
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
|
||||
|
||||
|
||||
_atleast_one_tracker_available = (
|
||||
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
|
||||
)
|
||||
@ -416,13 +431,15 @@ class SubprocessCallException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def run_command(command: List[str], return_stdout=False):
|
||||
def run_command(command: List[str], return_stdout=False, env=None):
|
||||
"""
|
||||
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
|
||||
if an error occured while running `command`
|
||||
"""
|
||||
if env is None:
|
||||
env = os.environ.copy()
|
||||
try:
|
||||
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
|
||||
output = subprocess.check_output(command, stderr=subprocess.STDOUT, env=env)
|
||||
if return_stdout:
|
||||
if hasattr(output, "decode"):
|
||||
output = output.decode("utf-8")
|
||||
|
||||
@ -28,7 +28,9 @@ from .state import PartialState
|
||||
from .utils import (
|
||||
LoggerType,
|
||||
is_aim_available,
|
||||
is_clearml_available,
|
||||
is_comet_ml_available,
|
||||
is_dvclive_available,
|
||||
is_mlflow_available,
|
||||
is_tensorboard_available,
|
||||
is_wandb_available,
|
||||
@ -53,6 +55,12 @@ if is_aim_available():
|
||||
if is_mlflow_available():
|
||||
_available_trackers.append(LoggerType.MLFLOW)
|
||||
|
||||
if is_clearml_available():
|
||||
_available_trackers.append(LoggerType.CLEARML)
|
||||
|
||||
if is_dvclive_available():
|
||||
_available_trackers.append(LoggerType.DVCLIVE)
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
@ -365,11 +373,11 @@ class WandBTracker(GeneralTracker):
|
||||
Args:
|
||||
table_name (`str`):
|
||||
The name to give to the logged table on the wandb workspace
|
||||
columns (List of `str`'s *optional*):
|
||||
columns (list of `str`, *optional*):
|
||||
The name of the columns on the table
|
||||
data (List of List of Any data type *optional*):
|
||||
data (List of List of Any data type, *optional*):
|
||||
The data to be logged in the table
|
||||
dataframe (Any data type *optional*):
|
||||
dataframe (Any data type, *optional*):
|
||||
The data to be logged in the table
|
||||
step (`int`, *optional*):
|
||||
The run step. If included, the log will be affiliated with this step.
|
||||
@ -632,8 +640,8 @@ class MLflowTracker(GeneralTracker):
|
||||
for name, value in list(values.items()):
|
||||
# internally, all values are converted to str in MLflow
|
||||
if len(str(value)) > mlflow.utils.validation.MAX_PARAM_VAL_LENGTH:
|
||||
logger.warning(
|
||||
f'Trainer is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow\'s'
|
||||
logger.warning_once(
|
||||
f'Accelerate is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow\'s'
|
||||
f" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute."
|
||||
)
|
||||
del values[name]
|
||||
@ -662,7 +670,7 @@ class MLflowTracker(GeneralTracker):
|
||||
if isinstance(v, (int, float)):
|
||||
metrics[k] = v
|
||||
else:
|
||||
logger.warning(
|
||||
logger.warning_once(
|
||||
f'MLflowTracker is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. '
|
||||
"MLflow's log_metric() only accepts float and int types so we dropped this attribute."
|
||||
)
|
||||
@ -681,12 +689,249 @@ class MLflowTracker(GeneralTracker):
|
||||
mlflow.end_run()
|
||||
|
||||
|
||||
class ClearMLTracker(GeneralTracker):
|
||||
"""
|
||||
A `Tracker` class that supports `clearml`. Should be initialized at the start of your script.
|
||||
|
||||
Args:
|
||||
run_name (`str`, *optional*):
|
||||
Name of the experiment. Environment variables `CLEARML_PROJECT` and `CLEARML_TASK` have priority over this
|
||||
argument.
|
||||
kwargs:
|
||||
Kwargs passed along to the `Task.__init__` method.
|
||||
"""
|
||||
|
||||
name = "clearml"
|
||||
requires_logging_directory = False
|
||||
|
||||
@on_main_process
|
||||
def __init__(self, run_name: str = None, **kwargs):
|
||||
from clearml import Task
|
||||
|
||||
current_task = Task.current_task()
|
||||
self._initialized_externally = False
|
||||
if current_task:
|
||||
self._initialized_externally = True
|
||||
self.task = current_task
|
||||
return
|
||||
|
||||
kwargs.setdefault("project_name", os.environ.get("CLEARML_PROJECT", run_name))
|
||||
kwargs.setdefault("task_name", os.environ.get("CLEARML_TASK", run_name))
|
||||
self.task = Task.init(**kwargs)
|
||||
|
||||
@property
|
||||
def tracker(self):
|
||||
return self.task
|
||||
|
||||
@on_main_process
|
||||
def store_init_configuration(self, values: dict):
|
||||
"""
|
||||
Connect configuration dictionary to the Task object. Should be run at the beginning of your experiment.
|
||||
|
||||
Args:
|
||||
values (`dict`):
|
||||
Values to be stored as initial hyperparameters as key-value pairs.
|
||||
"""
|
||||
return self.task.connect_configuration(values)
|
||||
|
||||
@on_main_process
|
||||
def log(self, values: Dict[str, Union[int, float]], step: Optional[int] = None, **kwargs):
|
||||
"""
|
||||
        Logs `values` dictionary to the current run. The dictionary keys must be strings. The dictionary values must be
        ints or floats.

        Args:
            values (`Dict[str, Union[int, float]]`):
                Values to be logged as key-value pairs. If the key starts with 'eval_'/'test_'/'train_', the value will
                be reported under the 'eval'/'test'/'train' series and the respective prefix will be removed.
                Otherwise, the value will be reported under the 'train' series, and no prefix will be removed.
            step (`int`, *optional*):
                If specified, the values will be reported as scalars, with the iteration number equal to `step`.
                Otherwise they will be reported as single values.
            kwargs:
                Additional keyword arguments passed along to the `clearml.Logger.report_single_value` or
                `clearml.Logger.report_scalar` methods.
        """
        clearml_logger = self.task.get_logger()
        for k, v in values.items():
            if not isinstance(v, (int, float)):
                logger.warning_once(
                    "Accelerator is attempting to log a value of "
                    f'"{v}" of type {type(v)} for key "{k}" as a scalar. '
                    "This invocation of ClearML logger's report_scalar() "
                    "is incorrect so we dropped this attribute."
                )
                continue
            if step is None:
                clearml_logger.report_single_value(name=k, value=v, **kwargs)
                continue
            title, series = ClearMLTracker._get_title_series(k)
            clearml_logger.report_scalar(title=title, series=series, value=v, iteration=step, **kwargs)

    @on_main_process
    def log_images(self, values: dict, step: Optional[int] = None, **kwargs):
        """
        Logs `images` to the current run.

        Args:
            values (`Dict[str, List[Union[np.ndarray, PIL.Image]]]`):
                Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or
                `PIL.Image`.
            step (`int`, *optional*):
                The run step. If included, the log will be affiliated with this step.
            kwargs:
                Additional keyword arguments passed along to the `clearml.Logger.report_image` method.
        """
        clearml_logger = self.task.get_logger()
        for k, v in values.items():
            title, series = ClearMLTracker._get_title_series(k)
            clearml_logger.report_image(title=title, series=series, iteration=step, image=v, **kwargs)

    @on_main_process
    def log_table(
        self,
        table_name: str,
        columns: List[str] = None,
        data: List[List[Any]] = None,
        dataframe: Any = None,
        step: Optional[int] = None,
        **kwargs,
    ):
        """
        Log a Table to the task. Can be defined either with `columns` and `data` or with `dataframe`.

        Args:
            table_name (`str`):
                The name of the table.
            columns (list of `str`, *optional*):
                The names of the columns of the table.
            data (List of List of Any data type, *optional*):
                The data to be logged in the table. If `columns` is not specified, then the first entry in `data` will
                be the names of the columns of the table.
            dataframe (Any data type, *optional*):
                The data to be logged in the table.
            step (`int`, *optional*):
                The run step. If included, the log will be affiliated with this step.
            kwargs:
                Additional keyword arguments passed along to the `clearml.Logger.report_table` method.
        """
        to_report = dataframe
        if dataframe is None:
            if data is None:
                raise ValueError(
                    "`ClearMLTracker.log_table` requires that `data` be supplied if `dataframe` is `None`"
                )
            to_report = [columns] + data if columns else data
        title, series = ClearMLTracker._get_title_series(table_name)
        self.task.get_logger().report_table(title=title, series=series, table_plot=to_report, iteration=step, **kwargs)
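
    # Illustrative usage (a sketch; assumes a ClearML run is active, and the table values and
    # `some_dataframe` below are made up):
    #
    #     tracker.log_table("test_predictions", columns=["id", "label"], data=[[0, "cat"], [1, "dog"]], step=10)
    #     tracker.log_table("test_metrics", dataframe=some_dataframe, step=10)
    #
    # With `columns` and `data`, the column names become the first row of the reported table;
    # with `dataframe`, the object is handed to `clearml.Logger.report_table` unchanged.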

    @on_main_process
    def finish(self):
        """
        Close the ClearML task. If the task was initialized externally (e.g. by manually calling `Task.init`), this
        function is a no-op.
        """
        if self.task and not self._initialized_externally:
            self.task.close()

    @staticmethod
    def _get_title_series(name):
        for prefix in ["eval", "test", "train"]:
            if name.startswith(prefix + "_"):
                return name[len(prefix) + 1 :], prefix
        return name, "train"


class DVCLiveTracker(GeneralTracker):
    """
    A `Tracker` class that supports `dvclive`. Should be initialized at the start of your script.

    Args:
        run_name (`str`, *optional*):
            Ignored for dvclive. See `kwargs` instead.
        kwargs:
            Additional keyword arguments passed along to [`dvclive.Live()`](https://dvc.org/doc/dvclive/live).

    Example:

    ```py
    from accelerate import Accelerator

    accelerator = Accelerator(log_with="dvclive")
    accelerator.init_trackers(project_name="my_project", init_kwargs={"dvclive": {"dir": "my_directory"}})
    ```
    """

    name = "dvclive"
    requires_logging_directory = False

    @on_main_process
    def __init__(self, run_name: Optional[str] = None, live: Optional[Any] = None, **kwargs):
        from dvclive import Live

        super().__init__()
        self.live = live if live is not None else Live(**kwargs)

    @property
    def tracker(self):
        return self.live

    @on_main_process
    def store_init_configuration(self, values: dict):
        """
        Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the
        hyperparameters in a yaml file for future use.

        Args:
            values (Dictionary `str` to `bool`, `str`, `float`, `int`, or a List or Dict of those types):
                Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
                `str`, `float`, or `int`.
        """
        self.live.log_params(values)

    @on_main_process
    def log(self, values: dict, step: Optional[int] = None, **kwargs):
        """
        Logs `values` to the current run.

        Args:
            values (Dictionary `str` to `str`, `float`, or `int`):
                Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.
            step (`int`, *optional*):
                The run step. If included, the log will be affiliated with this step.
            kwargs:
                Additional keyword arguments passed along to `dvclive.Live.log_metric()`.
        """
        from dvclive.plots import Metric

        if step is not None:
            self.live.step = step
        for k, v in values.items():
            if Metric.could_log(v):
                self.live.log_metric(k, v, **kwargs)
            else:
                logger.warning_once(
                    "Accelerator attempted to log a value of "
                    f'"{v}" of type {type(v)} for key "{k}" as a scalar. '
                    "This invocation of DVCLive's Live.log_metric() "
                    "is incorrect so we dropped this attribute."
                )

    @on_main_process
    def finish(self):
        """
        Closes `dvclive.Live()`.
        """
        self.live.end()


LOGGER_TYPE_TO_CLASS = {
    "aim": AimTracker,
    "comet_ml": CometMLTracker,
    "mlflow": MLflowTracker,
    "tensorboard": TensorBoardTracker,
    "wandb": WandBTracker,
    "clearml": ClearMLTracker,
    "dvclive": DVCLiveTracker,
}
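
# Illustrative lookup (sketch): a string passed to `Accelerator(log_with=...)` is resolved through
# this mapping, e.g. LOGGER_TYPE_TO_CLASS["clearml"] is ClearMLTracker and
# LOGGER_TYPE_TO_CLASS["dvclive"] is DVCLiveTracker; custom `GeneralTracker` implementations are
# accepted as-is and do not go through the mapping.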


@@ -709,6 +954,7 @@ def filter_trackers(
            - `"wandb"`
            - `"comet_ml"`
            - `"mlflow"`
            - `"dvclive"`
            If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can
            also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`.
        logging_dir (`str`, `os.PathLike`, *optional*):
|
||||
|
||||
@ -2,8 +2,10 @@ from .constants import (
|
||||
MODEL_NAME,
|
||||
OPTIMIZER_NAME,
|
||||
RNG_STATE_NAME,
|
||||
SAFE_MODEL_NAME,
|
||||
SAFE_WEIGHTS_INDEX_NAME,
|
||||
SAFE_WEIGHTS_NAME,
|
||||
SAMPLER_NAME,
|
||||
SCALER_NAME,
|
||||
SCHEDULER_NAME,
|
||||
TORCH_DISTRIBUTED_OPERATION_TYPES,
|
||||
@ -35,7 +37,14 @@ from .dataclasses import (
|
||||
TensorInformation,
|
||||
TorchDynamoPlugin,
|
||||
)
|
||||
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env, str_to_bool
|
||||
from .environment import (
|
||||
are_libraries_initialized,
|
||||
check_cuda_p2p_ib_support,
|
||||
get_int_from_env,
|
||||
parse_choice_from_env,
|
||||
parse_flag_from_env,
|
||||
str_to_bool,
|
||||
)
|
||||
from .imports import (
|
||||
get_ccl_version,
|
||||
is_4bit_bnb_available,
|
||||
@ -45,18 +54,20 @@ from .imports import (
|
||||
is_bnb_available,
|
||||
is_boto3_available,
|
||||
is_ccl_available,
|
||||
is_clearml_available,
|
||||
is_comet_ml_available,
|
||||
is_cuda_available,
|
||||
is_datasets_available,
|
||||
is_deepspeed_available,
|
||||
is_dvclive_available,
|
||||
is_fp8_available,
|
||||
is_ipex_available,
|
||||
is_megatron_lm_available,
|
||||
is_mlflow_available,
|
||||
is_mps_available,
|
||||
is_npu_available,
|
||||
is_pandas_available,
|
||||
is_rich_available,
|
||||
is_safetensors_available,
|
||||
is_sagemaker_available,
|
||||
is_tensorboard_available,
|
||||
is_timm_available,
|
||||
@ -98,6 +109,7 @@ from .offload import (
|
||||
save_offload_index,
|
||||
)
|
||||
from .operations import (
|
||||
CannotPadNestedTensorWarning,
|
||||
broadcast,
|
||||
broadcast_object_list,
|
||||
concatenate,
|
||||
@ -164,6 +176,8 @@ from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
|
||||
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
|
||||
from .memory import find_executable_batch_size, release_memory
|
||||
from .other import (
|
||||
check_os_kernel,
|
||||
clean_state_dict_for_safetensors,
|
||||
clear_environment,
|
||||
convert_bytes,
|
||||
extract_model_from_parallel,
|
||||
|
||||
@ -17,13 +17,15 @@ import operator as op
|
||||
|
||||
SCALER_NAME = "scaler.pt"
|
||||
MODEL_NAME = "pytorch_model"
|
||||
SAFE_MODEL_NAME = "model"
|
||||
RNG_STATE_NAME = "random_states"
|
||||
OPTIMIZER_NAME = "optimizer"
|
||||
SCHEDULER_NAME = "scheduler"
|
||||
WEIGHTS_NAME = "pytorch_model.bin"
|
||||
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
|
||||
SAFE_WEIGHTS_NAME = "model.safetensors"
|
||||
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
|
||||
SAMPLER_NAME = "sampler"
|
||||
WEIGHTS_NAME = f"{MODEL_NAME}.bin"
|
||||
WEIGHTS_INDEX_NAME = f"{WEIGHTS_NAME}.index.json"
|
||||
SAFE_WEIGHTS_NAME = f"{SAFE_MODEL_NAME}.safetensors"
|
||||
SAFE_WEIGHTS_INDEX_NAME = f"{SAFE_WEIGHTS_NAME}.index.json"
|
||||
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
|
||||
SAGEMAKER_PYTHON_VERSION = "py38"
|
||||
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
|
||||
@ -32,7 +34,8 @@ FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHA
|
||||
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
|
||||
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
|
||||
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
|
||||
FSDP_PYTORCH_VERSION = "2.0.1"
|
||||
FSDP_PYTORCH_VERSION = "2.1.0"
|
||||
FSDP_MODEL_NAME = "pytorch_model_fsdp"
|
||||
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich", "mpich"]
|
||||
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
|
||||
|
||||
|
||||
@ -32,6 +32,7 @@ import torch
|
||||
|
||||
from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_STATE_DICT_TYPE
|
||||
from .environment import str_to_bool
|
||||
from .imports import is_cuda_available, is_npu_available, is_xpu_available
|
||||
from .versions import compare_versions
|
||||
|
||||
|
||||
@ -200,6 +201,29 @@ class FP8RecipeKwargs(KwargsHandler):
|
||||
raise ValueError("`amax_compute_algo` must be 'max' or 'most_recent'")
|
||||
|
||||
|
||||
class EnumWithContains(enum.EnumMeta):
    "A metaclass that adds the ability to check if `self` contains an item with the `in` operator"

    def __contains__(cls, item):
        try:
            cls(item)
        except ValueError:
            return False
        return True


class BaseEnum(enum.Enum, metaclass=EnumWithContains):
    "An enum class that can get the value of an item with `str(Enum.key)`"

    def __str__(self):
        return self.value

    @classmethod
    def list(cls):
        "Method to list all the possible items in `cls`"
        return list(map(str, cls))
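
# Illustrative behaviour (hypothetical subclass, not part of the library):
#
#     class Color(BaseEnum):
#         RED = "red"
#         BLUE = "blue"
#
#     "red" in Color   # True, via EnumWithContains.__contains__
#     Color.list()     # ["red", "blue"], each member rendered through __str__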
|
||||
|
||||
|
||||
class DistributedType(str, enum.Enum):
|
||||
"""
|
||||
Represents a type of distributed environment.
|
||||
@ -259,7 +283,7 @@ class ComputeEnvironment(str, enum.Enum):
|
||||
AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER"
|
||||
|
||||
|
||||
class DynamoBackend(str, enum.Enum):
|
||||
class DynamoBackend(str, BaseEnum):
|
||||
"""
|
||||
Represents a dynamo backend (see https://github.com/pytorch/torchdynamo).
|
||||
|
||||
@ -273,19 +297,21 @@ class DynamoBackend(str, enum.Enum):
|
||||
- **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton
|
||||
kernels. [Read
|
||||
more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747)
|
||||
- **NVFUSER** -- nvFuser with TorchScript. [Read
|
||||
- **AOT_TS_NVFUSER** -- nvFuser with AotAutograd/TorchScript. [Read
|
||||
more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
|
||||
- **AOT_NVFUSER** -- nvFuser with AotAutograd. [Read
|
||||
- **NVPRIMS_NVFUSER** -- nvFuser with PrimTorch. [Read
|
||||
more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
|
||||
- **AOT_CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read
|
||||
more](https://github.com/pytorch/torchdynamo/pull/757)
|
||||
- **CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read more](https://github.com/pytorch/torchdynamo/pull/757)
|
||||
- **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read
|
||||
more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html)
|
||||
- **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. [Read
|
||||
more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst)
|
||||
- **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/)
|
||||
- **TENSORRT** -- Uses ONNXRT to run TensorRT for inference optimizations. [Read
|
||||
more](https://github.com/onnx/onnx-tensorrt)
|
||||
- **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read
|
||||
more](https://github.com/intel/intel-extension-for-pytorch).
|
||||
- **TVM** -- Uses Apache TVM for inference optimizations. [Read more](https://tvm.apache.org/)
|
||||
|
||||
"""
|
||||
|
||||
@ -294,36 +320,15 @@ class DynamoBackend(str, enum.Enum):
|
||||
EAGER = "EAGER"
|
||||
AOT_EAGER = "AOT_EAGER"
|
||||
INDUCTOR = "INDUCTOR"
|
||||
NVFUSER = "NVFUSER"
|
||||
AOT_NVFUSER = "AOT_NVFUSER"
|
||||
AOT_CUDAGRAPHS = "AOT_CUDAGRAPHS"
|
||||
AOT_TS_NVFUSER = "AOT_TS_NVFUSER"
|
||||
NVPRIMS_NVFUSER = "NVPRIMS_NVFUSER"
|
||||
CUDAGRAPHS = "CUDAGRAPHS"
|
||||
OFI = "OFI"
|
||||
FX2TRT = "FX2TRT"
|
||||
ONNXRT = "ONNXRT"
|
||||
TENSORRT = "TENSORRT"
|
||||
IPEX = "IPEX"
|
||||
|
||||
|
||||
class EnumWithContains(enum.EnumMeta):
|
||||
"A metaclass that adds the ability to check if `self` contains an item with the `in` operator"
|
||||
|
||||
def __contains__(cls, item):
|
||||
try:
|
||||
cls(item)
|
||||
except ValueError:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class BaseEnum(enum.Enum, metaclass=EnumWithContains):
|
||||
"An enum class that can get the value of an item with `str(Enum.key)`"
|
||||
|
||||
def __str__(self):
|
||||
return self.value
|
||||
|
||||
@classmethod
|
||||
def list(cls):
|
||||
"Method to list all the possible items in `cls`"
|
||||
return list(map(str, cls))
|
||||
TVM = "TVM"
|
||||
|
||||
|
||||
class LoggerType(BaseEnum):
|
||||
@ -335,6 +340,7 @@ class LoggerType(BaseEnum):
|
||||
- **TENSORBOARD** -- TensorBoard as an experiment tracker
|
||||
- **WANDB** -- wandb as an experiment tracker
|
||||
- **COMETML** -- comet_ml as an experiment tracker
|
||||
- **DVCLIVE** -- dvclive as an experiment tracker
|
||||
"""
|
||||
|
||||
ALL = "all"
|
||||
@ -343,6 +349,8 @@ class LoggerType(BaseEnum):
|
||||
WANDB = "wandb"
|
||||
COMETML = "comet_ml"
|
||||
MLFLOW = "mlflow"
|
||||
CLEARML = "clearml"
|
||||
DVCLIVE = "dvclive"
|
||||
|
||||
|
||||
class PrecisionType(BaseEnum):
|
||||
@ -415,6 +423,16 @@ class ProjectConfiguration:
|
||||
metadata={"help": "The current save iteration."},
|
||||
)
|
||||
|
||||
save_on_each_node: bool = field(
|
||||
default=False,
|
||||
metadata={
|
||||
"help": (
|
||||
"When doing multi-node distributed training, whether to save models and checkpoints on each node, or"
|
||||
" only on the main one"
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
def set_directories(self, project_dir: str = None):
|
||||
"Sets `self.project_dir` and `self.logging_dir` to the appropriate values."
|
||||
self.project_dir = project_dir
|
||||
@ -727,7 +745,7 @@ class DeepSpeedPlugin:
|
||||
or ds_config["train_micro_batch_size_per_gpu"] == "auto"
|
||||
):
|
||||
ds_config["train_micro_batch_size_per_gpu"] = 1
|
||||
if ds_config["train_batch_size"] == "auto":
|
||||
if ds_config.get("train_batch_size", None) == "auto":
|
||||
del ds_config["train_batch_size"]
|
||||
|
||||
if compare_versions("transformers", "<", "4.33"):
|
||||
@ -852,7 +870,7 @@ class FullyShardedDataParallelPlugin:
|
||||
},
|
||||
)
|
||||
use_orig_params: bool = field(
|
||||
default=False,
|
||||
default=True,
|
||||
metadata={
|
||||
"help": "If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres. "
|
||||
"Useful in cases such as parameter-efficient fine-tuning. "
|
||||
@ -916,7 +934,17 @@ class FullyShardedDataParallelPlugin:
|
||||
self.activation_checkpointing = str_to_bool(os.environ.get(prefix + "ACTIVATION_CHECKPOINTING", "False")) == 1
|
||||
|
||||
if self.sync_module_states:
|
||||
self.param_init_fn = lambda x: x.to_empty(device=torch.cuda.current_device(), recurse=False)
|
||||
if is_npu_available():
|
||||
device = torch.npu.current_device()
|
||||
elif is_cuda_available():
|
||||
device = torch.cuda.current_device()
|
||||
elif is_xpu_available():
|
||||
device = torch.xpu.current_device()
|
||||
else:
|
||||
raise RuntimeError(
|
||||
"There are currently no available devices found, must be one of 'XPU', 'CUDA', or 'NPU'."
|
||||
)
|
||||
self.param_init_fn = lambda x: x.to_empty(device=device, recurse=False)
|
||||
|
||||
@staticmethod
|
||||
def get_module_class_from_name(module, name):
|
||||
@ -1015,7 +1043,7 @@ class MegatronLMPlugin:
|
||||
default=None,
|
||||
metadata={"help": "enable sequence parallelism"},
|
||||
)
|
||||
recompute_activation: bool = field(
|
||||
recompute_activations: bool = field(
|
||||
default=None,
|
||||
metadata={"help": "enable selective activation recomputation"},
|
||||
)
|
||||
@ -1168,8 +1196,8 @@ class MegatronLMPlugin:
|
||||
self.num_micro_batches = int(os.environ.get(prefix + "NUM_MICRO_BATCHES", 1))
|
||||
if self.gradient_clipping is None:
|
||||
self.gradient_clipping = float(os.environ.get(prefix + "GRADIENT_CLIPPING", 1.0))
|
||||
if self.recompute_activation is None:
|
||||
self.recompute_activation = str_to_bool(os.environ.get(prefix + "RECOMPUTE_ACTIVATION", "False")) == 1
|
||||
if self.recompute_activations is None:
|
||||
self.recompute_activations = str_to_bool(os.environ.get(prefix + "RECOMPUTE_ACTIVATIONS", "False")) == 1
|
||||
if self.use_distributed_optimizer is None:
|
||||
self.use_distributed_optimizer = (
|
||||
str_to_bool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1
|
||||
@ -1206,7 +1234,7 @@ class MegatronLMPlugin:
|
||||
"eval_iters": self.eval_iters,
|
||||
"eval_interval": self.eval_interval,
|
||||
}
|
||||
if self.recompute_activation:
|
||||
if self.recompute_activations:
|
||||
self.megatron_lm_default_args["recompute_granularity"] = "selective"
|
||||
if self.tensorboard_dir is not None:
|
||||
self.megatron_lm_default_args["tensorboard_dir"] = self.tensorboard_dir
|
||||
|
||||
@ -13,6 +13,10 @@
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
from typing import List
|
||||
|
||||
import torch
|
||||
|
||||
|
||||
def str_to_bool(value) -> int:
|
||||
@ -48,3 +52,26 @@ def parse_flag_from_env(key, default=False):
|
||||
def parse_choice_from_env(key, default="no"):
|
||||
value = os.environ.get(key, str(default))
|
||||
return value
|
||||
|
||||
|
||||
def are_libraries_initialized(*library_names: str) -> List[str]:
    """
    Checks if any of `library_names` are imported in the environment. Will return any names that are.
    """
    return [lib_name for lib_name in library_names if lib_name in sys.modules]
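
# Illustrative result (sketch): if `torch` has already been imported but `wandb` has not,
# are_libraries_initialized("torch", "wandb") returns ["torch"].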


def check_cuda_p2p_ib_support():
    """
    Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware
    from the RTX 3090 onwards.
    """
    if torch.cuda.is_available():
        # Get the first device/default
        device_name = torch.cuda.get_device_name()
        device_count = torch.cuda.device_count()
        unsupported_devices = ["RTX 3090", "RTX 40"]
        if device_count > 1:
            if any(device in device_name for device in unsupported_devices):
                return False
    return True
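
# Illustrative caller behaviour (a sketch, not the exact call sites): launch utilities can use the
# check above to decide whether NCCL peer-to-peer / InfiniBand transports should be disabled, e.g.
#
#     if not check_cuda_p2p_ib_support():
#         os.environ["NCCL_P2P_DISABLE"] = "1"
#         os.environ["NCCL_IB_DISABLE"] = "1"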
|
||||
|
||||
@ -16,7 +16,7 @@ import os
|
||||
import torch
|
||||
|
||||
from ..logging import get_logger
|
||||
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
|
||||
from .constants import FSDP_MODEL_NAME, FSDP_PYTORCH_VERSION, OPTIMIZER_NAME
|
||||
from .imports import is_torch_distributed_available
|
||||
from .versions import is_torch_version
|
||||
|
||||
@ -47,7 +47,7 @@ def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
|
||||
):
|
||||
state_dict = model.state_dict()
|
||||
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
|
||||
weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
|
||||
weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin"
|
||||
output_model_file = os.path.join(output_dir, weights_name)
|
||||
if accelerator.process_index == 0:
|
||||
logger.info(f"Saving model to {output_model_file}")
|
||||
@ -55,16 +55,16 @@ def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
|
||||
logger.info(f"Model saved to {output_model_file}")
|
||||
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
|
||||
weights_name = (
|
||||
f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
|
||||
f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin"
|
||||
if model_index == 0
|
||||
else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
|
||||
else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
|
||||
)
|
||||
output_model_file = os.path.join(output_dir, weights_name)
|
||||
logger.info(f"Saving model to {output_model_file}")
|
||||
torch.save(state_dict, output_model_file)
|
||||
logger.info(f"Model saved to {output_model_file}")
|
||||
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
|
||||
ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
|
||||
ckpt_dir = os.path.join(output_dir, f"{FSDP_MODEL_NAME}_{model_index}")
|
||||
os.makedirs(ckpt_dir, exist_ok=True)
|
||||
logger.info(f"Saving model to {ckpt_dir}")
|
||||
state_dict = {"model": state_dict}
|
||||
@ -96,16 +96,16 @@ def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
|
||||
"initializing FSDP object"
|
||||
)
|
||||
return
|
||||
weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
|
||||
weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin"
|
||||
input_model_file = os.path.join(input_dir, weights_name)
|
||||
logger.info(f"Loading model from {input_model_file}")
|
||||
state_dict = torch.load(input_model_file)
|
||||
logger.info(f"Model loaded from {input_model_file}")
|
||||
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
|
||||
weights_name = (
|
||||
f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
|
||||
f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin"
|
||||
if model_index == 0
|
||||
else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
|
||||
else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
|
||||
)
|
||||
input_model_file = os.path.join(input_dir, weights_name)
|
||||
logger.info(f"Loading model from {input_model_file}")
|
||||
@ -113,8 +113,8 @@ def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
|
||||
logger.info(f"Model loaded from {input_model_file}")
|
||||
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
|
||||
ckpt_dir = (
|
||||
os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
|
||||
if f"{MODEL_NAME}" not in input_dir
|
||||
os.path.join(input_dir, f"{FSDP_MODEL_NAME}_{model_index}")
|
||||
if f"{FSDP_MODEL_NAME}" not in input_dir
|
||||
else input_dir
|
||||
)
|
||||
logger.info(f"Loading model from {ckpt_dir}")
|
||||
@ -164,16 +164,14 @@ def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, o
|
||||
):
|
||||
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
|
||||
optim_state = None
|
||||
# below check should work but currently it isn't working (mostly a PyTorch issue),
|
||||
# in the meantime disabling it at the cost of excess memory usage
|
||||
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
|
||||
optimizer_name = (
|
||||
f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
|
||||
)
|
||||
input_optimizer_file = os.path.join(input_dir, optimizer_name)
|
||||
logger.info(f"Loading Optimizer state from {input_optimizer_file}")
|
||||
optim_state = torch.load(input_optimizer_file)
|
||||
logger.info(f"Optimizer state loaded from {input_optimizer_file}")
|
||||
if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
|
||||
optimizer_name = (
|
||||
f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
|
||||
)
|
||||
input_optimizer_file = os.path.join(input_dir, optimizer_name)
|
||||
logger.info(f"Loading Optimizer state from {input_optimizer_file}")
|
||||
optim_state = torch.load(input_optimizer_file)
|
||||
logger.info(f"Optimizer state loaded from {input_optimizer_file}")
|
||||
else:
|
||||
ckpt_dir = (
|
||||
os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
|
||||
|
||||
@ -151,10 +151,6 @@ def is_megatron_lm_available():
|
||||
return False
|
||||
|
||||
|
||||
def is_safetensors_available():
|
||||
return _is_package_available("safetensors")
|
||||
|
||||
|
||||
def is_transformers_available():
|
||||
return _is_package_available("transformers")
|
||||
|
||||
@ -210,6 +206,14 @@ def is_tqdm_available():
|
||||
return _is_package_available("tqdm")
|
||||
|
||||
|
||||
def is_clearml_available():
|
||||
return _is_package_available("clearml")
|
||||
|
||||
|
||||
def is_pandas_available():
|
||||
return _is_package_available("pandas")
|
||||
|
||||
|
||||
def is_mlflow_available():
|
||||
if _is_package_available("mlflow"):
|
||||
return True
|
||||
@ -293,3 +297,7 @@ def is_xpu_available(check_device=False):
|
||||
except RuntimeError:
|
||||
return False
|
||||
return hasattr(torch, "xpu") and torch.xpu.is_available()
|
||||
|
||||
|
||||
def is_dvclive_available():
|
||||
return _is_package_available("dvclive")
|
||||
|
||||
@ -21,7 +21,6 @@ from typing import Any, Dict, List, Tuple
|
||||
import torch
|
||||
|
||||
from ..commands.config.config_args import SageMakerConfig
|
||||
from ..commands.config.config_utils import DYNAMO_BACKENDS
|
||||
from ..utils import (
|
||||
DynamoBackend,
|
||||
PrecisionType,
|
||||
@ -89,7 +88,9 @@ def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str]
|
||||
try:
|
||||
dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
|
||||
except ValueError:
|
||||
raise ValueError(f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.")
|
||||
raise ValueError(
|
||||
f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
|
||||
)
|
||||
current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value
|
||||
current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode
|
||||
current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph)
|
||||
@ -127,7 +128,10 @@ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
|
||||
if main_process_port is None:
|
||||
main_process_port = 29500
|
||||
|
||||
if is_port_in_use(main_process_port):
|
||||
# only need to check port availability in main process, in case we have to start multiple launchers on the same machine
|
||||
# for some reasons like splitting log files.
|
||||
need_port_check = num_machines <= 1 or int(args.machine_rank) == 0
|
||||
if need_port_check and is_port_in_use(main_process_port):
|
||||
raise ConnectionError(
|
||||
f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. "
|
||||
"Please specify a different port (such as using the `----main_process_port` flag or specifying a different `main_process_port` in your config file)"
|
||||
@ -163,7 +167,9 @@ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
|
||||
try:
|
||||
dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
|
||||
except ValueError:
|
||||
raise ValueError(f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.")
|
||||
raise ValueError(
|
||||
f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
|
||||
)
|
||||
current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value
|
||||
current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode
|
||||
current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph)
|
||||
@ -171,6 +177,9 @@ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
|
||||
|
||||
if args.use_fsdp:
|
||||
current_env["ACCELERATE_USE_FSDP"] = "true"
|
||||
if args.fsdp_cpu_ram_efficient_loading and not args.fsdp_sync_module_states:
|
||||
raise ValueError("When using `--fsdp_cpu_ram_efficient_loading` set `--fsdp_sync_module_states` to `True`")
|
||||
|
||||
current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy)
|
||||
current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower()
|
||||
current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params)
|
||||
@ -184,6 +193,7 @@ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
|
||||
current_env["FSDP_STATE_DICT_TYPE"] = str(args.fsdp_state_dict_type)
|
||||
current_env["FSDP_FORWARD_PREFETCH"] = str(args.fsdp_forward_prefetch).lower()
|
||||
current_env["FSDP_USE_ORIG_PARAMS"] = str(args.fsdp_use_orig_params).lower()
|
||||
current_env["FSDP_CPU_RAM_EFFICIENT_LOADING"] = str(args.fsdp_cpu_ram_efficient_loading).lower()
|
||||
current_env["FSDP_SYNC_MODULE_STATES"] = str(args.fsdp_sync_module_states).lower()
|
||||
|
||||
if args.use_megatron_lm:
|
||||
@ -265,7 +275,10 @@ def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict
|
||||
if main_process_port is None:
|
||||
main_process_port = 29500
|
||||
|
||||
if is_port_in_use(main_process_port):
|
||||
# only need to check port availability in main process, in case we have to start multiple launchers on the same machine
|
||||
# for some reasons like splitting log files.
|
||||
need_port_check = num_machines <= 1 or int(args.machine_rank) == 0
|
||||
if need_port_check and is_port_in_use(main_process_port):
|
||||
raise ConnectionError(
|
||||
f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. "
|
||||
"Please specify a different port (such as using the `----main_process_port` flag or specifying a different `main_process_port` in your config file)"
|
||||
@ -284,10 +297,12 @@ def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict
|
||||
current_env["ACCELERATE_DEBUG_MODE"] = "true"
|
||||
gpu_ids = getattr(args, "gpu_ids", "all")
|
||||
if gpu_ids != "all" and args.gpu_ids is not None:
|
||||
if not is_xpu_available():
|
||||
current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids
|
||||
else:
|
||||
if is_xpu_available():
|
||||
current_env["ZE_AFFINITY_MASK"] = gpu_ids
|
||||
elif is_npu_available():
|
||||
current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids
|
||||
else:
|
||||
current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids
|
||||
try:
|
||||
mixed_precision = PrecisionType(args.mixed_precision.lower())
|
||||
except ValueError:
|
||||
@ -419,7 +434,9 @@ def prepare_sagemager_args_inputs(
|
||||
try:
|
||||
dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
|
||||
except ValueError:
|
||||
raise ValueError(f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DYNAMO_BACKENDS}.")
|
||||
raise ValueError(
|
||||
f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
|
||||
)
|
||||
|
||||
# Environment variables to be set for use during training job
|
||||
environment = {
|
||||
|
||||
@ -30,7 +30,7 @@ import torch.nn as nn
|
||||
from ..state import AcceleratorState
|
||||
from .constants import SAFE_WEIGHTS_NAME, WEIGHTS_NAME
|
||||
from .dataclasses import AutocastKwargs, CustomDtype, DistributedType
|
||||
from .imports import is_mps_available, is_npu_available, is_safetensors_available, is_xpu_available
|
||||
from .imports import is_mps_available, is_npu_available, is_xpu_available
|
||||
from .offload import load_offloaded_weight, offload_weight, save_offload_index
|
||||
from .tqdm import is_tqdm_available, tqdm
|
||||
|
||||
@ -39,9 +39,9 @@ if is_npu_available(check_device=False):
|
||||
import torch_npu # noqa: F401
|
||||
|
||||
|
||||
if is_safetensors_available():
|
||||
from safetensors import safe_open
|
||||
from safetensors.torch import load_file as safe_load_file
|
||||
from safetensors import safe_open
|
||||
from safetensors.torch import load_file as safe_load_file
|
||||
|
||||
|
||||
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
|
||||
|
||||
@ -250,7 +250,7 @@ def set_module_tensor_to_device(
|
||||
Args:
|
||||
module (`torch.nn.Module`):
|
||||
The module in which the tensor we want to move lives.
|
||||
param_name (`str`):
|
||||
tensor_name (`str`):
|
||||
The full name of the parameter/buffer.
|
||||
device (`int`, `str` or `torch.device`):
|
||||
The device on which to set the tensor.
|
||||
@ -365,7 +365,9 @@ def set_module_tensor_to_device(
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
|
||||
def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):
|
||||
def named_module_tensors(
|
||||
module: nn.Module, include_buffers: bool = True, recurse: bool = False, remove_non_persistent: bool = False
|
||||
):
|
||||
"""
|
||||
A helper function that gathers all the tensors (parameters + buffers) of a given module. If `include_buffers=True`
|
||||
it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.
|
||||
@ -377,13 +379,40 @@ def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurs
|
||||
Whether or not to include the buffers in the result.
|
||||
recurse (`bool`, *optional*, defaults to `False`):
Whether or not to go look in every submodule or just return the direct parameters and buffers.
remove_non_persistent (`bool`, *optional*, defaults to `False`):
Whether or not to remove the non-persistent buffers from the buffers. Useful only when `include_buffers=True`.
|
||||
"""
|
||||
for named_parameter in module.named_parameters(recurse=recurse):
|
||||
yield named_parameter
|
||||
|
||||
if include_buffers:
|
||||
non_persistent_buffers = set()
|
||||
if remove_non_persistent:
|
||||
non_persistent_buffers = get_non_persistent_buffers(module, recurse=recurse)
|
||||
for named_buffer in module.named_buffers(recurse=recurse):
|
||||
yield named_buffer
|
||||
name, _ = named_buffer
|
||||
if name not in non_persistent_buffers:
|
||||
yield named_buffer
|
||||
|
||||
|
||||
def get_non_persistent_buffers(module: nn.Module, recurse: bool = False):
|
||||
"""
|
||||
Gather all non persistent buffers of a given modules into a set
|
||||
|
||||
Args:
|
||||
module (`nn.Module`):
|
||||
The module we want the non persistent buffers on.
|
||||
recurse (`bool`, *optional*, defaults to `False`):
|
||||
Whether or not to go look in every submodule or just return the direct non persistent buffers.
|
||||
"""
|
||||
|
||||
    non_persistent_buffers_set = module._non_persistent_buffers_set
    if recurse:
        for _, m in module.named_modules():
            non_persistent_buffers_set |= m._non_persistent_buffers_set

    return non_persistent_buffers_set
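
# Illustrative interaction with `named_module_tensors` (sketch): for a module that registered a buffer
# with `persistent=False`, iterating with
# `named_module_tensors(module, include_buffers=True, remove_non_persistent=True)` skips that buffer,
# because its name ends up in the set returned above.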
|
||||
|
||||
|
||||
class FindTiedParametersResult(list):
|
||||
@ -535,15 +564,22 @@ def retie_parameters(model, tied_params):
|
||||
"""
|
||||
for tied_group in tied_params:
|
||||
param_to_tie = None
|
||||
# First iteration of the loop will set param_to_tie, next ones will tie it to the others
|
||||
# two loops : the first one to set param_to_tie , the second one to change the values of tied_group
|
||||
for param_name in tied_group:
|
||||
module = model
|
||||
splits = param_name.split(".")
|
||||
for split in splits[:-1]:
|
||||
module = getattr(module, split)
|
||||
if param_to_tie is None:
|
||||
param_to_tie = getattr(module, splits[-1])
|
||||
else:
|
||||
param = getattr(module, splits[-1])
|
||||
if param_to_tie is None and param.device != torch.device("meta"):
|
||||
param_to_tie = param
|
||||
break
|
||||
if param_to_tie is not None:
|
||||
for param_name in tied_group:
|
||||
module = model
|
||||
splits = param_name.split(".")
|
||||
for split in splits[:-1]:
|
||||
module = getattr(module, split)
|
||||
setattr(module, splits[-1], param_to_tie)
|
||||
|
||||
|
||||
@ -1156,10 +1192,6 @@ def load_state_dict(checkpoint_file, device_map=None):
|
||||
name, once a given module name is inside, every submodule of it will be sent to the same device.
|
||||
"""
|
||||
if checkpoint_file.endswith(".safetensors"):
|
||||
if not is_safetensors_available():
|
||||
raise ImportError(
|
||||
f"To load {checkpoint_file}, the `safetensors` library is necessary `pip install safetensors`."
|
||||
)
|
||||
with safe_open(checkpoint_file, framework="pt") as f:
|
||||
metadata = f.metadata()
|
||||
weight_names = f.keys()
|
||||
@ -1228,6 +1260,54 @@ def load_state_dict(checkpoint_file, device_map=None):
|
||||
return torch.load(checkpoint_file, map_location=torch.device("cpu"))
|
||||
|
||||
|
||||
def get_state_dict_offloaded_model(model: nn.Module):
|
||||
"""
|
||||
Returns the state dictionary for an offloaded model via iterative onloading
|
||||
|
||||
Args:
|
||||
model (`torch.nn.Module`):
|
||||
The offloaded model we want to save
|
||||
"""
|
||||
from ..hooks import AlignDevicesHook
|
||||
|
||||
state_dict = {}
|
||||
placeholders = set()
|
||||
for name, module in model.named_modules():
|
||||
if name == "":
|
||||
continue
|
||||
if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload:
|
||||
original_device = module._hf_hook.execution_device
|
||||
# assign hook execution device to cpu
|
||||
module._hf_hook.execution_device = "cpu"
|
||||
# onload meta tensors to execution device
|
||||
try:
|
||||
module._hf_hook.pre_forward(module)
|
||||
except MemoryError:
|
||||
raise MemoryError("Offloaded module must fit in CPU memory to call save_model!") from None
|
||||
module_state_dict = module.state_dict()
|
||||
# offload meta tensors from cpu
|
||||
module._hf_hook.post_forward(module, torch.tensor([]))
|
||||
# re-assign hook to original execution device
|
||||
module._hf_hook.execution_device = original_device
|
||||
else:
|
||||
module_state_dict = module.state_dict()
|
||||
|
||||
for key in module_state_dict:
|
||||
# ignore placeholder parameters that are still on the meta device
|
||||
if module_state_dict[key].device == torch.device("meta"):
|
||||
placeholders.add(name + f".{key}")
|
||||
continue
|
||||
params = module_state_dict[key]
|
||||
state_dict[name + f".{key}"] = params
|
||||
for key in placeholders.copy():
|
||||
if key in state_dict:
|
||||
placeholders.remove(key)
|
||||
if placeholders:
|
||||
logger.warning(f"The following tensors were not saved because they were still on meta device: {placeholders}")
|
||||
|
||||
return state_dict
|
||||
|
||||
|
||||
def load_checkpoint_in_model(
|
||||
model: nn.Module,
|
||||
checkpoint: Union[str, os.PathLike],
|
||||
@ -1458,6 +1538,7 @@ def get_mixed_precision_context_manager(native_amp: bool = False, autocast_kwarg
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.FSDP,
|
||||
]:
|
||||
return torch.autocast(device_type=state.device.type, dtype=torch.bfloat16, **autocast_kwargs)
|
||||
else:
|
||||
|
||||
@ -19,8 +19,7 @@ from typing import Dict, List, Optional, Union
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
from .imports import is_safetensors_available
|
||||
from safetensors import safe_open
|
||||
|
||||
|
||||
def offload_weight(weight, weight_name, offload_folder, index=None):
|
||||
@ -165,11 +164,6 @@ class OffloadedWeightsLoader(Mapping):
|
||||
return self.state_dict[key]
|
||||
weight_info = self.index[key]
|
||||
if weight_info.get("safetensors_file") is not None:
|
||||
if not is_safetensors_available():
|
||||
raise ImportError("These offloaded weights require the use of safetensors: `pip install safetensors`.")
|
||||
|
||||
from safetensors import safe_open
|
||||
|
||||
device = "cpu" if self.device is None else self.device
|
||||
with safe_open(weight_info["safetensors_file"], framework="pt", device=device) as f:
|
||||
tensor = f.get_tensor(weight_info.get("weight_name", key))
|
||||
|
||||
@ -17,6 +17,7 @@ A set of basic tensor ops compatible with tpu, gpu, and multigpu
|
||||
"""
|
||||
|
||||
import pickle
|
||||
import warnings
|
||||
from functools import update_wrapper, wraps
|
||||
from typing import Any, Mapping
|
||||
|
||||
@ -25,7 +26,7 @@ import torch
|
||||
from ..state import PartialState
|
||||
from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES
|
||||
from .dataclasses import DistributedType, TensorInformation
|
||||
from .imports import is_torch_distributed_available, is_tpu_available
|
||||
from .imports import is_torch_distributed_available, is_torch_version, is_tpu_available
|
||||
|
||||
|
||||
if is_tpu_available(check_device=False):
|
||||
@ -231,6 +232,9 @@ def find_batch_size(data):
|
||||
Returns:
|
||||
`int`: The batch size.
|
||||
"""
|
||||
if isinstance(data, (tuple, list, Mapping)) and (len(data) == 0):
|
||||
raise ValueError(f"Cannot find the batch size from empty {type(data)}.")
|
||||
|
||||
if isinstance(data, (tuple, list)):
|
||||
return find_batch_size(data[0])
|
||||
elif isinstance(data, Mapping):
|
||||
@ -280,6 +284,12 @@ def _tpu_gather(tensor):
|
||||
|
||||
|
||||
def _gpu_gather(tensor):
|
||||
state = PartialState()
|
||||
if is_torch_version(">=", "1.13"):
|
||||
gather_op = torch.distributed.all_gather_into_tensor
|
||||
else:
|
||||
gather_op = torch.distributed._all_gather_base
|
||||
|
||||
def _gpu_gather_one(tensor):
|
||||
if tensor.ndim == 0:
|
||||
tensor = tensor.clone()[None]
|
||||
@ -287,9 +297,33 @@ def _gpu_gather(tensor):
|
||||
# Can only gather contiguous tensors
|
||||
if not tensor.is_contiguous():
|
||||
tensor = tensor.contiguous()
|
||||
output_tensors = [torch.empty_like(tensor) for _ in range(torch.distributed.get_world_size())]
|
||||
torch.distributed.all_gather(output_tensors, tensor)
|
||||
return torch.cat(output_tensors, dim=0)
|
||||
|
||||
# Check if `tensor` is not on CUDA
|
||||
if state.device.type == "cuda" and tensor.device.type != "cuda":
|
||||
raise RuntimeError(
|
||||
"One or more of the tensors passed to `gather` were not on the GPU while the `Accelerator` is configured for CUDA. "
|
||||
"Please move it to the GPU before calling `gather`."
|
||||
)
|
||||
|
||||
if state.backend is not None and state.backend != "gloo":
|
||||
# We use `empty` as `all_gather_into_tensor` slightly
|
||||
# differs from `all_gather` for better efficiency,
|
||||
# and we rely on the number of items in the tensor
|
||||
# rather than its direct shape
|
||||
output_tensors = torch.empty(
|
||||
state.num_processes * tensor.numel(),
|
||||
dtype=tensor.dtype,
|
||||
device=state.device,
|
||||
)
|
||||
gather_op(output_tensors, tensor)
|
||||
return output_tensors.view(-1, *tensor.size()[1:])
|
||||
else:
|
||||
            # a backend of `None` is always CPU
            # also gloo does not support `all_gather_into_tensor`,
            # which will result in a larger memory overhead for the op
            output_tensors = [torch.empty_like(tensor) for _ in range(state.num_processes)]
            torch.distributed.all_gather(output_tensors, tensor)
            return torch.cat(output_tensors, dim=0)

    return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
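
# Illustrative shape (sketch): with 4 processes each holding a tensor of shape (8, 16), the gathered
# result has shape (32, 16); 0-dim tensors are first unsqueezed to shape (1,) so that every rank
# contributes one element.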
|
||||
|
||||
@ -499,6 +533,10 @@ def concatenate(data, dim=0):
|
||||
return torch.cat(data, dim=dim)
|
||||
|
||||
|
||||
class CannotPadNestedTensorWarning(UserWarning):
|
||||
pass
|
||||
|
||||
|
||||
@chained_operation
|
||||
def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
|
||||
"""
|
||||
@ -517,6 +555,12 @@ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
|
||||
"""
|
||||
|
||||
def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
|
||||
if getattr(tensor, "is_nested", False):
|
||||
warnings.warn(
|
||||
"Cannot pad nested tensors without more information. Leaving unprocessed.",
|
||||
CannotPadNestedTensorWarning,
|
||||
)
|
||||
return tensor
|
||||
if dim >= len(tensor.shape):
|
||||
return tensor
|
||||
|
||||
|
||||
@ -12,28 +12,37 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import collections
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import socket
|
||||
from contextlib import contextmanager
|
||||
from functools import partial
|
||||
from types import MethodType
|
||||
from typing import OrderedDict
|
||||
|
||||
import torch
|
||||
from packaging.version import Version
|
||||
from safetensors.torch import save_file as safe_save_file
|
||||
|
||||
from ..commands.config.default import write_basic_config # noqa: F401
|
||||
from ..logging import get_logger
|
||||
from ..state import PartialState
|
||||
from .constants import FSDP_PYTORCH_VERSION
|
||||
from .dataclasses import DistributedType
|
||||
from .imports import is_deepspeed_available, is_safetensors_available, is_tpu_available
|
||||
from .imports import is_deepspeed_available, is_torch_distributed_available, is_tpu_available
|
||||
from .modeling import id_tensor_storage
|
||||
from .transformer_engine import convert_model
|
||||
from .versions import is_torch_version
|
||||
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
if is_tpu_available(check_device=False):
|
||||
import torch_xla.core.xla_model as xm
|
||||
|
||||
if is_safetensors_available():
|
||||
from safetensors.torch import save_file as safe_save_file
|
||||
|
||||
|
||||
def is_compiled_module(module):
|
||||
"""
|
||||
@ -69,7 +78,7 @@ def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
|
||||
|
||||
options += (DeepSpeedEngine,)
|
||||
|
||||
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
|
||||
if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available():
|
||||
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
|
||||
|
||||
options += (FSDP,)
|
||||
@ -109,22 +118,69 @@ def wait_for_everyone():
|
||||
PartialState().wait_for_everyone()
|
||||
|
||||
|
||||
def save(obj, f, safe_serialization=False):
|
||||
def clean_state_dict_for_safetensors(state_dict: dict):
|
||||
"""
|
||||
Cleans the state dictionary from a model and removes tensor aliasing if present.
|
||||
|
||||
Args:
|
||||
state_dict (`dict`):
|
||||
The state dictionary from a model
|
||||
"""
|
||||
ptrs = collections.defaultdict(list)
|
||||
# When bnb serialization is used, weights in state dict can be strings
|
||||
for name, tensor in state_dict.items():
|
||||
if not isinstance(tensor, str):
|
||||
ptrs[id_tensor_storage(tensor)].append(name)
|
||||
|
||||
# These are all pointers of tensors with shared memory
|
||||
shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
|
||||
warn_names = set()
|
||||
for names in shared_ptrs.values():
|
||||
# When not all duplicates have been cleaned, we still remove those keys but put a clear warning.
|
||||
# If the link between tensors was done at runtime then `from_pretrained` will not get
|
||||
# the key back leading to random tensor. A proper warning will be shown
|
||||
# during reload (if applicable), but since the file is not necessarily compatible with
|
||||
# the config, better show a proper warning.
|
||||
found_names = [name for name in names if name in state_dict]
|
||||
warn_names.update(found_names[1:])
|
||||
for name in found_names[1:]:
|
||||
del state_dict[name]
|
||||
    if len(warn_names) > 0:
        logger.warning(
            f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading",
        )
    state_dict = {k: v.contiguous() if isinstance(v, torch.Tensor) else v for k, v in state_dict.items()}
    return state_dict
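
# Illustrative effect (hypothetical tied weights): if `lm_head.weight` and `wte.weight` share the same
# storage, only the first key encountered is kept, the duplicate is dropped with a warning, and the
# remaining tensors are made contiguous before safetensors serialization.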
|
||||
|
||||
|
||||
def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = False):
|
||||
"""
|
||||
Save the data to disk. Use in place of `torch.save()`.
|
||||
|
||||
Args:
|
||||
obj: The data to save
|
||||
f: The file (or file-like object) to use to save the data
|
||||
safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors`
|
||||
obj:
|
||||
The data to save
|
||||
f:
|
||||
The file (or file-like object) to use to save the data
|
||||
save_on_each_node (`bool`, *optional*, defaults to `False`):
|
||||
Whether to only save on the global main process
|
||||
safe_serialization (`bool`, *optional*, defaults to `False`):
|
||||
Whether to save `obj` using `safetensors` or the traditional PyTorch way (that uses `pickle`).
|
||||
"""
|
||||
# Check if it's a model and remove duplicates
|
||||
if safe_serialization:
|
||||
save_func = partial(safe_save_file, metadata={"format": "pt"})
|
||||
if isinstance(obj, OrderedDict):
|
||||
obj = clean_state_dict_for_safetensors(obj)
|
||||
else:
|
||||
save_func = torch.save
|
||||
|
||||
if PartialState().distributed_type == DistributedType.TPU:
|
||||
xm.save(obj, f)
|
||||
elif PartialState().local_process_index == 0:
|
||||
if safe_serialization:
|
||||
safe_save_file(obj, f, metadata={"format": "pt"})
|
||||
else:
|
||||
torch.save(obj, f)
|
||||
elif PartialState().is_main_process and not save_on_each_node:
|
||||
save_func(obj, f)
|
||||
elif PartialState().is_local_main_process and save_on_each_node:
|
||||
save_func(obj, f)
|
||||
|
||||
|
||||
@contextmanager
|
||||
@ -246,3 +302,21 @@ def convert_bytes(size):
|
||||
size /= 1024.0
|
||||
|
||||
return f"{round(size, 2)} PB"
|
||||
|
||||
|
||||
def check_os_kernel():
|
||||
"""Warns if the kernel version is below the recommended minimum on Linux."""
|
||||
# see issue #1929
|
||||
info = platform.uname()
|
||||
system = info.system
|
||||
if system != "Linux":
|
||||
return
|
||||
|
||||
_, version, *_ = re.split(r"(\d+\.\d+\.\d+)", info.release)
|
||||
min_version = "5.5.0"
|
||||
if Version(version) < Version(min_version):
|
||||
msg = (
|
||||
f"Detected kernel version {version}, which is below the recommended minimum of {min_version}; this can "
|
||||
"cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher."
|
||||
)
|
||||
logger.warning(msg, main_process_only=True)
|
||||
|
||||
@ -36,15 +36,15 @@ def convert_model(model, to_transformer_engine=True, _convert_linear=True, _conv
|
||||
te_module = te.Linear(
|
||||
module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype
|
||||
)
|
||||
te_module.weight.data = module.weight.data.clone()
|
||||
module.weight.copy_(te_module.weight)
|
||||
if has_bias:
|
||||
te_module.bias.data = module.bias.data.clone()
|
||||
module.bias.copy_(te_module.bias)
|
||||
|
||||
setattr(model, name, te_module)
|
||||
elif isinstance(module, nn.LayerNorm) and to_transformer_engine and _convert_ln:
|
||||
te_module = te.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype)
|
||||
te_module.weight.data = module.weight.data.clone()
|
||||
te_module.bias.data = module.bias.data.clone()
|
||||
module.weight.copy_(te_module.weight)
|
||||
module.bias.copy_(te_module.bias)
|
||||
|
||||
setattr(model, name, te_module)
|
||||
elif isinstance(module, te.Linear) and not to_transformer_engine and _convert_linear:
|
||||
@ -52,15 +52,15 @@ def convert_model(model, to_transformer_engine=True, _convert_linear=True, _conv
|
||||
new_module = nn.Linear(
|
||||
module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype
|
||||
)
|
||||
new_module.weight.data = module.weight.data.clone()
|
||||
module.weight.copy_(new_module.weight)
|
||||
if has_bias:
|
||||
new_module.bias.data = module.bias.data.clone()
|
||||
module.bias.copy_(new_module.bias)
|
||||
|
||||
setattr(model, name, new_module)
|
||||
elif isinstance(module, te.LayerNorm) and not to_transformer_engine and _convert_ln:
|
||||
new_module = nn.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype)
|
||||
new_module.weight.data = module.weight.data.clone()
|
||||
new_module.bias.data = module.bias.data.clone()
|
||||
module.weight.copy_(new_module.weight)
|
||||
module.bias.copy_(new_module.bias)
|
||||
|
||||
setattr(model, name, new_module)
|
||||
else:
|
||||
|
||||
@ -55,8 +55,6 @@ from accelerate.utils.other import patch_environment
|
||||
|
||||
set_seed(42)
|
||||
|
||||
T5_SMALL = "t5-small"
|
||||
T5_TINY = "patrickvonplaten/t5-tiny-random"
|
||||
GPT2_TINY = "sshleifer/tiny-gpt2"
|
||||
|
||||
ZERO2 = "zero2"
|
||||
|
||||
@ -252,6 +252,11 @@ class FSDPIntegrationTest(TempDirTestCase):
|
||||
continue
|
||||
state_dict_config_index = len(cmd_config)
|
||||
for state_dict_type in FSDP_STATE_DICT_TYPE:
|
||||
# Todo: Currently failing for `LOCAL_STATE_DICT` with error
|
||||
# Unexpected key(s) in state_dict: "_fsdp_wrapped_module._flat_param".
|
||||
if state_dict_type == "LOCAL_STATE_DICT":
|
||||
continue
|
||||
|
||||
cmd_config = cmd_config[:state_dict_config_index]
|
||||
cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
|
||||
cmd_config.extend(
|
||||
|
||||
@ -5,12 +5,13 @@ import tempfile
|
||||
from unittest.mock import patch
|
||||
|
||||
import torch
|
||||
from parameterized import parameterized
|
||||
from torch.utils.data import DataLoader, TensorDataset
|
||||
|
||||
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
|
||||
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights, load_checkpoint_and_dispatch
|
||||
from accelerate.accelerator import Accelerator
|
||||
from accelerate.state import GradientState, PartialState
|
||||
from accelerate.test_utils import require_bnb, require_multi_gpu, require_safetensors, slow
|
||||
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
|
||||
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
|
||||
from accelerate.utils import patch_environment
|
||||
from accelerate.utils.modeling import load_checkpoint_in_model
|
||||
@@ -26,6 +27,17 @@ def create_components():
    return model, optimizer, scheduler, train_dl, valid_dl


class ModelForTest(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = torch.nn.Linear(3, 4)
        self.batchnorm = torch.nn.BatchNorm1d(4)
        self.linear2 = torch.nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


@@ -35,6 +47,13 @@ def load_random_weights(model):
    model.load_state_dict(state)


def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = "use_safetensors" if param.args[0] is True else "use_pytorch"
    return f"{func.__name__}_{param_based_name}"


class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
@@ -97,7 +116,8 @@ class AcceleratorTester(AccelerateTestCase):
            accelerator = Accelerator()
        self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
    @parameterized.expand((True, False), name_func=parameterized_custom_name_func)
    def test_save_load_model(self, use_safetensors):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
@@ -105,7 +125,7 @@ class AcceleratorTester(AccelerateTestCase):
        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)
            accelerator.save_state(tmpdirname, safe_serialization=use_safetensors)

            # make sure random weights don't match
            load_random_weights(model)
@@ -115,31 +135,40 @@ class AcceleratorTester(AccelerateTestCase):
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_model_pytorch(self):
    @parameterized.expand([True, False], name_func=parameterized_custom_name_func)
    def test_save_model(self, use_safetensors):
        accelerator = Accelerator()
        model = torch.nn.Linear(10, 10)

        model_signature = get_signature(model)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_model(model, tmpdirname, safe_serialization=False)
            accelerator.save_model(model, tmpdirname, safe_serialization=use_safetensors)
            # make sure loaded weights match
            load_checkpoint_in_model(model, tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    @require_safetensors
    def test_save_model_safetensors(self):
    @parameterized.expand([True, False], name_func=parameterized_custom_name_func)
    def test_save_model_offload(self, use_safetensors):
        accelerator = Accelerator()
        model = torch.nn.Linear(10, 10)

        model_signature = get_signature(model)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_model(model, tmpdirname, safe_serialization=True)
        device_map = {"linear1": "cpu", "batchnorm": "disk", "linear2": "cpu"}

            # make sure loaded weights match
            load_checkpoint_in_model(model, tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
        inputs = torch.randn(3, 3)
        model = ModelForTest()
        expected = model(inputs)
        with tempfile.TemporaryDirectory() as tmp_dir:
            accelerator.save_model(model, tmp_dir, safe_serialization=use_safetensors)
            # load and save offloaded model
            load_checkpoint_and_dispatch(model, tmp_dir, device_map=device_map, offload_folder=tmp_dir)
            accelerator.save_model(model, tmp_dir, safe_serialization=use_safetensors)

    def test_save_load_model_with_hooks(self):
            # load weights that were saved from the offloaded model
            load_checkpoint_and_dispatch(model, tmp_dir)
            output = model(inputs)
        self.assertTrue(torch.allclose(expected, output, atol=1e-5))

    @parameterized.expand([True, False], name_func=parameterized_custom_name_func)
    def test_save_load_model_with_hooks(self, use_safetensors):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
@@ -164,7 +193,7 @@ class AcceleratorTester(AccelerateTestCase):
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)
            accelerator.save_state(tmpdirname, safe_serialization=use_safetensors)

            # make sure random weights don't match with hooks
            load_random_weights(model)
@@ -185,7 +214,7 @@ class AcceleratorTester(AccelerateTestCase):
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)
            accelerator.save_state(tmpdirname, safe_serialization=use_safetensors)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
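Note: the `@parameterized.expand(..., name_func=parameterized_custom_name_func)` pattern above runs each save/load test twice, once per serialization format, and the custom name function only controls how the sub-test is labelled. A minimal, self-contained sketch of the same idea (the `MyTest` class is hypothetical, not part of the diff):

import unittest
from parameterized import parameterized

def name_func(func, param_num, param):
    # mirror the helper above: encode the boolean flag in the sub-test name
    suffix = "use_safetensors" if param.args[0] else "use_pytorch"
    return f"{func.__name__}_{suffix}"

class MyTest(unittest.TestCase):
    @parameterized.expand([True, False], name_func=name_func)
    def test_roundtrip(self, use_safetensors):
        # collected as test_roundtrip_use_safetensors and test_roundtrip_use_pytorch
        self.assertIn(use_safetensors, (True, False))

if __name__ == "__main__":
    unittest.main()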
@@ -45,6 +45,33 @@ class ModelForTest(nn.Module):
        return self.linear2(self.batchnorm(self.linear1(x)))


class LinearWithNonPersistentBuffers(nn.Module):
    def __init__(self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.register_buffer("weight", torch.ones((out_features, in_features), **factory_kwargs))
        if bias:
            self.register_buffer("bias", torch.ones(out_features, **factory_kwargs), persistent=False)
        else:
            self.register_buffer("bias", None)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.linear(input, self.weight, self.bias)


class ModelForTestNonPersistentBuffers(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = LinearWithNonPersistentBuffers(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = LinearWithNonPersistentBuffers(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class ModelForTestCopy(nn.Module):
    def __init__(self, id: int):
        super().__init__()
@@ -302,6 +329,18 @@ class BigModelingTester(unittest.TestCase):
            output = model(x)
        self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))

    @require_cuda
    def test_dispatch_model_with_non_persistent_buffers(self):
        model = ModelForTestNonPersistentBuffers()
        device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": "disk"}
        x = torch.randn(2, 3)
        expected = model(x)

        with TemporaryDirectory() as tmp_dir:
            dispatch_model(model, device_map, offload_dir=tmp_dir, offload_buffers=True)
            output = model(x)
        self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))

    @require_mps
    def test_dispatch_model_mps(self):
        model = ModelForTest()
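The `LinearWithNonPersistentBuffers` fixture exists because buffers registered with `persistent=False` never appear in `state_dict()`, so dispatching and offloading have to handle them explicitly (hence `offload_buffers=True` above). A small PyTorch-only sketch of that behaviour, illustrative and not part of the diff:

import torch
import torch.nn as nn

class Buffered(nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer("kept", torch.ones(3))                       # included in state_dict
        self.register_buffer("dropped", torch.ones(3), persistent=False)  # excluded from state_dict

m = Buffered()
print(sorted(m.state_dict().keys()))             # ['kept']
print([name for name, _ in m.named_buffers()])   # ['kept', 'dropped'] -- still lives on the module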
@@ -269,8 +269,8 @@ class ModelEstimatorTester(unittest.TestCase):
            estimate_command(args)

    def test_gated(self):
        with self.assertRaises(GatedRepoError, msg="Repo for model `meta-llama/Llama-2-7b` is gated"):
            args = self.parser.parse_args(["meta-llama/Llama-2-7b"])
        with self.assertRaises(GatedRepoError, msg="Repo for model `meta-llama/Llama-2-7b-hf` is gated"):
            args = self.parser.parse_args(["meta-llama/Llama-2-7b-hf"])
            with patch_environment(hf_hub_disable_implicit_token="1"):
                estimate_command(args)
@@ -205,7 +205,7 @@ class FeatureExamplesTests(TempDirTestCase):
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
@@ -92,11 +92,11 @@ class KwargsHandlerTester(unittest.TestCase):
        prefix = "ACCELERATE_DYNAMO_"
        # nvfuser's dynamo backend name is "nvprims_nvfuser"
        # use "nvfuser" here to cause exception if this test causes os.environ changed permanently
        os.environ[prefix + "BACKEND"] = "nvfuser"
        os.environ[prefix + "BACKEND"] = "aot_ts_nvfuser"
        os.environ[prefix + "MODE"] = "reduce-overhead"

        dynamo_plugin_kwargs = TorchDynamoPlugin().to_kwargs()
        self.assertEqual(dynamo_plugin_kwargs, {"backend": "nvfuser", "mode": "reduce-overhead"})
        self.assertEqual(dynamo_plugin_kwargs, {"backend": "aot_ts_nvfuser", "mode": "reduce-overhead"})


if __name__ == "__main__":
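For context, `TorchDynamoPlugin` picks its settings up from the `ACCELERATE_DYNAMO_*` environment variables, which is why the test above swaps in a real backend name instead of leaving a bogus value behind. A hedged sketch of the same check done without permanently mutating `os.environ`, reusing the `patch_environment` helper that appears elsewhere in these tests:

from accelerate.utils import TorchDynamoPlugin, patch_environment

with patch_environment(ACCELERATE_DYNAMO_BACKEND="aot_ts_nvfuser", ACCELERATE_DYNAMO_MODE="reduce-overhead"):
    kwargs = TorchDynamoPlugin().to_kwargs()
# the environment is restored when the context manager exits
print(kwargs)  # expected: {'backend': 'aot_ts_nvfuser', 'mode': 'reduce-overhead'}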
@@ -20,9 +20,10 @@ from collections import OrderedDict

import torch
import torch.nn as nn
from safetensors.torch import save_file

from accelerate import init_empty_weights
from accelerate.test_utils import require_cuda, require_huggingface_suite, require_multi_gpu, require_safetensors
from accelerate.test_utils import require_cuda, require_huggingface_suite, require_multi_gpu
from accelerate.utils.modeling import (
    check_device_map,
    clean_device_map,
@@ -50,6 +51,22 @@ class ModelForTest(nn.Module):
        return self.linear2(self.batchnorm(self.linear1(x)))


class LinearWithNonPersistentBuffers(nn.Module):
    def __init__(self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.register_buffer("weight", torch.empty((out_features, in_features), **factory_kwargs))
        if bias:
            self.register_buffer("bias", torch.empty(out_features, **factory_kwargs), persistent=False)
        else:
            self.register_buffer("bias", None)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.linear(input, self.weight, self.bias)


def sequential_model(num_layers):
    layers = OrderedDict([(f"linear{i}", nn.Linear(1000, 1000)) for i in range(1, num_layers + 1)])
    return nn.Sequential(layers)
@@ -186,6 +203,14 @@ class ModelingUtilsTester(unittest.TestCase):
            ["linear1.weight", "linear1.bias", "batchnorm.weight", "batchnorm.bias", "linear2.weight", "linear2.bias"],
        )

        model = LinearWithNonPersistentBuffers(10, 10)

        named_tensors = named_module_tensors(model, include_buffers=True, remove_non_persistent=False)
        self.assertListEqual([name for name, _ in named_tensors], ["weight", "bias"])

        named_tensors = named_module_tensors(model, include_buffers=True, remove_non_persistent=True)
        self.assertListEqual([name for name, _ in named_tensors], ["weight"])

    def test_find_tied_parameters(self):
        model = sequential_model(4)
        self.assertListEqual(find_tied_parameters(model), [])
@@ -552,10 +577,7 @@ class ModelingUtilsTester(unittest.TestCase):
        self.assertDictEqual({0: 0, "cpu": 100}, max_memory)

    @require_cuda
    @require_safetensors
    def test_load_state_dict(self):
        from safetensors.torch import save_file

        state_dict = {k: torch.randn(4, 5) for k in ["a", "b", "c"]}
        device_maps = [{"a": "cpu", "b": 0, "c": "disk"}, {"a": 0, "b": 0, "c": "disk"}, {"a": 0, "b": 0, "c": 0}]
@@ -21,7 +21,8 @@ import torch

import accelerate
from accelerate import Accelerator
from accelerate.big_modeling import dispatch_model
from accelerate.test_utils import assert_exception, execute_subprocess_async, require_multi_gpu, skip
from accelerate.test_utils import assert_exception, execute_subprocess_async, require_multi_gpu
from accelerate.test_utils.testing import run_command
from accelerate.utils import patch_environment


@@ -33,6 +34,9 @@ class MultiGPUTester(unittest.TestCase):
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
        self.notebook_launcher_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_notebook.py"]
        )

    @require_multi_gpu
    def test_multi_gpu(self):
@@ -66,23 +70,16 @@ class MultiGPUTester(unittest.TestCase):
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())

    # Need to see why this test raises forking issues when ran as a suite
    @skip
    @require_multi_gpu
    def test_notebook_launcher(self):
        """
        This test checks that the `notebook_launcher` will be able to intialize
        a `PartialState` without issue
        This test checks a variety of situations and scenarios
        with the `notebook_launcher`
        """
        cmd = [
            "python",
            "-m",
            "accelerate.test_utils.scripts.test_notebook",
            "--num_processes",
            str(torch.cuda.device_count()),
        ]
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.notebook_launcher_path]
        print(f"Running {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
            run_command(cmd, env=os.environ.copy())


if __name__ == "__main__":
@@ -444,7 +444,7 @@ class MixedInt8EmptyModelTest(unittest.TestCase):
        model_8bit_from_saved = load_and_quantize_model(
            model_8bit_from_saved,
            bnb_quantization_config,
            weights_location=tmpdirname + "/pytorch_model.bin",
            weights_location=tmpdirname,
            device_map=device_map,
            no_split_module_classes=["BloomBlock"],
            offload_folder=tmpdirname + "/tmp",
@@ -24,6 +24,7 @@ from contextlib import contextmanager

import pytest
import torch
from parameterized import parameterized_class
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

@@ -80,6 +81,14 @@ class DummyModel(nn.Module):
        return x * self.a + self.b


def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = "use_safetensors" if param["use_safetensors"] is True else "use_pytorch"
    return f"{func.__name__}_{param_based_name}"


@parameterized_class(("use_safetensors",), [[True], [False]], class_name_func=parameterized_custom_name_func)
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
@@ -94,10 +103,10 @@ class CheckpointTest(unittest.TestCase):
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            accelerator.save_state(safe_serialization=self.use_safetensors)

            # Save second state
            accelerator.save_state()
            accelerator.save_state(safe_serialization=self.use_safetensors)
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
@@ -113,7 +122,7 @@ class CheckpointTest(unittest.TestCase):
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            accelerator.save_state(initial, safe_serialization=self.use_safetensors)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
@@ -139,7 +148,7 @@ class CheckpointTest(unittest.TestCase):
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)
            accelerator.save_state(checkpoint, safe_serialization=self.use_safetensors)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
@@ -165,7 +174,7 @@ class CheckpointTest(unittest.TestCase):
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            accelerator.save_state(safe_serialization=self.use_safetensors)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
@@ -191,7 +200,7 @@ class CheckpointTest(unittest.TestCase):

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            accelerator.save_state(safe_serialization=self.use_safetensors)

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
@@ -230,7 +239,7 @@ class CheckpointTest(unittest.TestCase):
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            accelerator.save_state(safe_serialization=self.use_safetensors)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
@@ -256,7 +265,7 @@ class CheckpointTest(unittest.TestCase):

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            accelerator.save_state(safe_serialization=self.use_safetensors)

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
@@ -296,7 +305,7 @@ class CheckpointTest(unittest.TestCase):
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            accelerator.save_state(safe_serialization=self.use_safetensors)
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
@@ -319,11 +328,11 @@ class CheckpointTest(unittest.TestCase):
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            accelerator.save_state(safe_serialization=self.use_safetensors)
            train(2, model, train_dataloader, optimizer, accelerator, scheduler)
            (a2, b2) = model.a.item(), model.b.item()
            # Save a first time
            accelerator.save_state()
            accelerator.save_state(safe_serialization=self.use_safetensors)
            train(1, model, train_dataloader, optimizer, accelerator, scheduler)
            (a3, b3) = model.a.item(), model.b.item()

@@ -344,7 +353,7 @@ class CheckpointTest(unittest.TestCase):
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
                accelerator.save_state(safe_serialization=self.use_safetensors)
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
@@ -352,10 +361,14 @@ class CheckpointTest(unittest.TestCase):
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
        env = os.environ.copy()
        env["USE_SAFETENSORS"] = str(self.use_safetensors)
        env["OMP_NUM_THREADS"] = "1"
        execute_subprocess_async(cmd, env=env)


if __name__ == "__main__":
    use_safetensors = os.environ.get("USE_SAFETENSORS", "False") == "True"
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
@@ -380,7 +393,7 @@ if __name__ == "__main__":
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.save_state(safe_serialization=use_safetensors)
    accelerator.wait_for_everyone()

    # Check CPU state
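The change above threads one flag through every checkpoint call: `Accelerator.save_state(safe_serialization=...)` decides whether model weights are written with safetensors or with `torch.save`. A minimal usage sketch (model and paths are placeholders, not from the diff):

import tempfile
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = accelerator.prepare(torch.nn.Linear(4, 4))

with tempfile.TemporaryDirectory() as ckpt_dir:
    # write the model weights with safetensors; pass False to fall back to torch.save
    accelerator.save_state(ckpt_dir, safe_serialization=True)
    accelerator.load_state(ckpt_dir)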
@@ -25,6 +25,7 @@ from pathlib import Path
from typing import Optional
from unittest import mock

import numpy as np
import torch

# We use TF to parse the logs
@@ -32,13 +33,21 @@ from accelerate import Accelerator
from accelerate.test_utils.testing import (
    MockingTestCase,
    TempDirTestCase,
    require_clearml,
    require_comet_ml,
    require_dvclive,
    require_pandas,
    require_tensorboard,
    require_wandb,
    skip,
)
from accelerate.tracking import CometMLTracker, GeneralTracker
from accelerate.utils import ProjectConfiguration, is_comet_ml_available, is_tensorboard_available
from accelerate.utils import (
    ProjectConfiguration,
    is_comet_ml_available,
    is_dvclive_available,
    is_tensorboard_available,
)


if is_comet_ml_available():
@@ -49,6 +58,11 @@ if is_tensorboard_available():

    import tensorboard.compat.proto.event_pb2 as event_pb2

if is_dvclive_available():
    from dvclive.plots.metric import Metric
    from dvclive.serialize import load_yaml
    from dvclive.utils import parse_metrics

logger = logging.getLogger(__name__)


@@ -250,6 +264,147 @@ class CometMLTest(unittest.TestCase):
        self.assertEqual(self.get_value_from_key(list_of_json, "my_text"), "some_value")


@require_clearml
class ClearMLTest(TempDirTestCase, MockingTestCase):
    def setUp(self):
        super().setUp()
        # ClearML offline session location is stored in CLEARML_CACHE_DIR
        self.add_mocks(mock.patch.dict(os.environ, {"CLEARML_CACHE_DIR": self.tmpdir}))

    @staticmethod
    def _get_offline_dir(accelerator):
        from clearml.config import get_offline_dir

        return get_offline_dir(task_id=accelerator.get_tracker("clearml", unwrap=True).id)

    @staticmethod
    def _get_metrics(offline_dir):
        metrics = []
        with open(os.path.join(offline_dir, "metrics.jsonl")) as f:
            json_lines = f.readlines()
            for json_line in json_lines:
                metrics.extend(json.loads(json_line))
        return metrics

    def test_init_trackers(self):
        from clearml import Task
        from clearml.utilities.config import text_to_config_dict

        Task.set_offline(True)
        accelerator = Accelerator(log_with="clearml")
        config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"}
        accelerator.init_trackers("test_project_with_config", config)

        offline_dir = ClearMLTest._get_offline_dir(accelerator)
        accelerator.end_training()

        with open(os.path.join(offline_dir, "task.json")) as f:
            offline_session = json.load(f)
        clearml_offline_config = text_to_config_dict(offline_session["configuration"]["General"]["value"])
        self.assertDictEqual(config, clearml_offline_config)

    def test_log(self):
        from clearml import Task

        Task.set_offline(True)
        accelerator = Accelerator(log_with="clearml")
        accelerator.init_trackers("test_project_with_log")
        values_with_iteration = {"should_be_under_train": 1, "eval_value": 2, "test_value": 3.1, "train_value": 4.1}
        accelerator.log(values_with_iteration, step=1)
        single_values = {"single_value_1": 1.1, "single_value_2": 2.2}
        accelerator.log(single_values)

        offline_dir = ClearMLTest._get_offline_dir(accelerator)
        accelerator.end_training()

        metrics = ClearMLTest._get_metrics(offline_dir)
        self.assertEqual(len(values_with_iteration) + len(single_values), len(metrics))
        for metric in metrics:
            if metric["metric"] == "Summary":
                self.assertIn(metric["variant"], single_values)
                self.assertEqual(metric["value"], single_values[metric["variant"]])
            elif metric["metric"] == "should_be_under_train":
                self.assertEqual(metric["variant"], "train")
                self.assertEqual(metric["iter"], 1)
                self.assertEqual(metric["value"], values_with_iteration["should_be_under_train"])
            else:
                values_with_iteration_key = metric["variant"] + "_" + metric["metric"]
                self.assertIn(values_with_iteration_key, values_with_iteration)
                self.assertEqual(metric["iter"], 1)
                self.assertEqual(metric["value"], values_with_iteration[values_with_iteration_key])

    def test_log_images(self):
        from clearml import Task

        Task.set_offline(True)
        accelerator = Accelerator(log_with="clearml")
        accelerator.init_trackers("test_project_with_log_images")

        base_image = np.eye(256, 256, dtype=np.uint8) * 255
        base_image_3d = np.concatenate((np.atleast_3d(base_image), np.zeros((256, 256, 2), dtype=np.uint8)), axis=2)
        images = {
            "base_image": base_image,
            "base_image_3d": base_image_3d,
        }
        accelerator.get_tracker("clearml").log_images(images, step=1)

        offline_dir = ClearMLTest._get_offline_dir(accelerator)
        accelerator.end_training()

        images_saved = Path(os.path.join(offline_dir, "data")).rglob("*.jpeg")
        self.assertEqual(len(list(images_saved)), len(images))

    def test_log_table(self):
        from clearml import Task

        Task.set_offline(True)
        accelerator = Accelerator(log_with="clearml")
        accelerator.init_trackers("test_project_with_log_table")

        accelerator.get_tracker("clearml").log_table(
            "from lists with columns", columns=["A", "B", "C"], data=[[1, 3, 5], [2, 4, 6]]
        )
        accelerator.get_tracker("clearml").log_table("from lists", data=[["A2", "B2", "C2"], [7, 9, 11], [8, 10, 12]])
        offline_dir = ClearMLTest._get_offline_dir(accelerator)
        accelerator.end_training()

        metrics = ClearMLTest._get_metrics(offline_dir)
        self.assertEqual(len(metrics), 2)
        for metric in metrics:
            self.assertIn(metric["metric"], ["from lists", "from lists with columns"])
            plot = json.loads(metric["plot_str"])
            if metric["metric"] == "from lists with columns":
                print(plot["data"][0])
                self.assertCountEqual(plot["data"][0]["header"]["values"], ["A", "B", "C"])
                self.assertCountEqual(plot["data"][0]["cells"]["values"], [[1, 2], [3, 4], [5, 6]])
            else:
                self.assertCountEqual(plot["data"][0]["header"]["values"], ["A2", "B2", "C2"])
                self.assertCountEqual(plot["data"][0]["cells"]["values"], [[7, 8], [9, 10], [11, 12]])

    @require_pandas
    def test_log_table_pandas(self):
        import pandas as pd
        from clearml import Task

        Task.set_offline(True)
        accelerator = Accelerator(log_with="clearml")
        accelerator.init_trackers("test_project_with_log_table_pandas")

        accelerator.get_tracker("clearml").log_table(
            "from df", dataframe=pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}), step=1
        )

        offline_dir = ClearMLTest._get_offline_dir(accelerator)
        accelerator.end_training()

        metrics = ClearMLTest._get_metrics(offline_dir)
        self.assertEqual(len(metrics), 1)
        self.assertEqual(metrics[0]["metric"], "from df")
        plot = json.loads(metrics[0]["plot_str"])
        self.assertCountEqual(plot["data"][0]["header"]["values"], [["A"], ["B"], ["C"]])
        self.assertCountEqual(plot["data"][0]["cells"]["values"], [[1, 2], [3, 4], [5, 6]])


class MyCustomTracker(GeneralTracker):
    "Basic tracker that writes to a csv for testing"
    _col_names = [
@@ -329,3 +484,41 @@ class CustomTrackerTestCase(unittest.TestCase):
            "some_string": "",
        }
        self.assertDictEqual(data, truth)


@require_dvclive
@mock.patch("dvclive.live.get_dvc_repo", return_value=None)
class DVCLiveTrackingTest(unittest.TestCase):
    def test_init_trackers(self, mock_repo):
        project_name = "test_project_with_config"
        with tempfile.TemporaryDirectory() as dirpath:
            accelerator = Accelerator(log_with="dvclive")
            config = {
                "num_iterations": 12,
                "learning_rate": 1e-2,
                "some_boolean": False,
                "some_string": "some_value",
            }
            init_kwargs = {"dvclive": {"dir": dirpath, "save_dvc_exp": False, "dvcyaml": None}}
            accelerator.init_trackers(project_name, config, init_kwargs)
            accelerator.end_training()
            live = accelerator.trackers[0].live
            params = load_yaml(live.params_file)
            assert params == config

    def test_log(self, mock_repo):
        project_name = "test_project_with_log"
        with tempfile.TemporaryDirectory() as dirpath:
            accelerator = Accelerator(log_with="dvclive", project_dir=dirpath)
            init_kwargs = {"dvclive": {"dir": dirpath, "save_dvc_exp": False, "dvcyaml": None}}
            accelerator.init_trackers(project_name, init_kwargs=init_kwargs)
            values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"}
            accelerator.log(values, step=0)
            accelerator.end_training()
            live = accelerator.trackers[0].live
            logs, latest = parse_metrics(live)
            assert latest == values
            scalars = os.path.join(live.plots_dir, Metric.subfolder)
            assert os.path.join(scalars, "total_loss.tsv") in logs
            assert os.path.join(scalars, "iteration.tsv") in logs
            assert os.path.join(scalars, "my_text.tsv") in logs
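The new `DVCLiveTrackingTest` exercises the freshly added DVCLive tracker end to end. For reference, the user-facing flow it covers looks roughly like the sketch below (the project name, config values, and log directory are placeholders):

from accelerate import Accelerator

accelerator = Accelerator(log_with="dvclive")
accelerator.init_trackers(
    "my_project",
    config={"learning_rate": 1e-2},
    init_kwargs={"dvclive": {"dir": "dvclive_logs", "save_dvc_exp": False, "dvcyaml": None}},
)
accelerator.log({"total_loss": 0.1}, step=0)
accelerator.end_training()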
@@ -14,20 +14,29 @@

import os
import pickle
import tempfile
import unittest
import warnings
from collections import UserDict, namedtuple
from unittest.mock import Mock, patch

import torch
from torch import nn

from accelerate.state import PartialState
from accelerate.test_utils.testing import require_cuda, require_torch_min_version
from accelerate.test_utils.training import RegressionModel
from accelerate.utils import (
    CannotPadNestedTensorWarning,
    check_os_kernel,
    convert_outputs_to_fp32,
    extract_model_from_parallel,
    find_device,
    listify,
    pad_across_processes,
    patch_environment,
    recursively_apply,
    save,
    send_to_device,
)

@@ -36,6 +45,10 @@ ExampleNamedTuple = namedtuple("ExampleNamedTuple", "a b c")


class UtilsTester(unittest.TestCase):
    def setUp(self):
        # logging requires initialized state
        PartialState()

    def test_send_to_device(self):
        tensor = torch.randn(5, 2)
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
@@ -173,3 +186,54 @@ class UtilsTester(unittest.TestCase):
        self.assertEqual(find_device([1, "a", torch.tensor([1, 2, 3])]), torch.device("cpu"))
        self.assertEqual(find_device({"a": 1, "b": torch.tensor([1, 2, 3])}), torch.device("cpu"))
        self.assertIsNone(find_device([1, "a"]))

    def test_check_os_kernel_no_warning_when_release_gt_min(self):
        # min version is 5.5
        with patch("platform.uname", return_value=Mock(release="5.15.0-35-generic", system="Linux")):
            with warnings.catch_warnings(record=True) as w:
                check_os_kernel()
            self.assertEqual(len(w), 0)

    def test_check_os_kernel_no_warning_when_not_linux(self):
        # system must be Linux
        with patch("platform.uname", return_value=Mock(release="5.4.0-35-generic", system="Darwin")):
            with warnings.catch_warnings(record=True) as w:
                check_os_kernel()
            self.assertEqual(len(w), 0)

    def test_check_os_kernel_warning_when_release_lt_min(self):
        # min version is 5.5
        with patch("platform.uname", return_value=Mock(release="5.4.0-35-generic", system="Linux")):
            with self.assertLogs() as ctx:
                check_os_kernel()
            self.assertEqual(len(ctx.records), 1)
            self.assertEqual(ctx.records[0].levelname, "WARNING")
            self.assertIn("5.4.0", ctx.records[0].msg)
            self.assertIn("5.5.0", ctx.records[0].msg)

    def test_save_safetensor_shared_memory(self):
        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.a = nn.Linear(100, 100)
                self.b = self.a

            def forward(self, x):
                return self.b(self.a(x))

        model = Model()
        with tempfile.TemporaryDirectory() as tmp_dir:
            save_path = os.path.join(tmp_dir, "model.safetensors")
            with self.assertLogs(level="WARNING") as log:
                save(model.state_dict(), save_path, safe_serialization=True)
                self.assertEqual(len(log.records), 1)
                self.assertIn("Removed shared tensor", log.output[0])

    @require_torch_min_version(version="1.12")
    def test_pad_across_processes(self):
        from torch.nested import nested_tensor

        nt = nested_tensor([[1, 2, 3], [1], [1, 2]])
        with self.assertWarns(CannotPadNestedTensorWarning):
            nt2 = pad_across_processes(nt)
        self.assertIs(nt, nt2)
@@ -17,6 +17,7 @@ https://github.com/allenai/allennlp.
"""
import os
from datetime import datetime as dt
from datetime import timezone

from github import Github

@@ -36,7 +37,7 @@ def main():
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        current_time = dt.now(timezone.utc)
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
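The stale-issue script swaps the deprecated naive-UTC constructor for a timezone-aware one, presumably so subtraction against PyGithub's timezone-aware timestamps keeps working. The difference in one line (illustrative only):

from datetime import datetime as dt
from datetime import timezone

naive = dt.utcnow()            # naive datetime, deprecated as of Python 3.12
aware = dt.now(timezone.utc)   # timezone-aware UTC datetime
print(naive.tzinfo, aware.tzinfo)  # None vs. datetime.timezone.utc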