mirror of
https://github.com/huggingface/accelerate.git
synced 2025-11-19 09:04:28 +08:00
Compare commits
193 Commits
| SHA1 | Author | Date | |
|---|---|---|---|
| 28a3b985f0 | |||
| 415eddf1be | |||
| 230857691a | |||
| a5a3e57125 | |||
| 0af1d8b8de | |||
| d16d7371a1 | |||
| 7a5c231b9e | |||
| 4f02bb764a | |||
| 709fd1e42b | |||
| f4f1260a0e | |||
| c6da9f8693 | |||
| 3ebbe573ad | |||
| 24bf5ec546 | |||
| e1247de01e | |||
| 12a007d559 | |||
| 5bdcd7e169 | |||
| 2471eacdd6 | |||
| 167cb5eb20 | |||
| 947f64ee62 | |||
| 8330b375d4 | |||
| 92404fbf5f | |||
| 3a02754915 | |||
| fec1170e35 | |||
| eac206f063 | |||
| 6882ff2bea | |||
| 57a4c7465e | |||
| 404510a5ec | |||
| 3086e26db9 | |||
| 5d5d07abfc | |||
| 5a0b7dc597 | |||
| c799c198e9 | |||
| 1f7a79b428 | |||
| 4cc3530b64 | |||
| 5d4a3beb01 | |||
| 0284f9a9f6 | |||
| 573d22d48f | |||
| 13ca7dccb6 | |||
| 3b5a00e048 | |||
| 3c4eaedd46 | |||
| c0faec766c | |||
| 91a2599f93 | |||
| 5f9235a731 | |||
| 7a36a75c7c | |||
| f62854a281 | |||
| a9869ea0dc | |||
| 6d59614603 | |||
| 2d74c0c077 | |||
| 40007b4e97 | |||
| 7141881b1f | |||
| f0049b2cfb | |||
| 83bad87559 | |||
| 24d8b63fc3 | |||
| 4a83ee5382 | |||
| 05d240af95 | |||
| bad2ce42ed | |||
| 30cb7ece76 | |||
| b7fa2fa956 | |||
| d5d378d64e | |||
| 065e74d11a | |||
| 86b6deaea1 | |||
| b24a0ef5db | |||
| e061edc6e7 | |||
| c3f422699a | |||
| 0553483638 | |||
| 415789d0e4 | |||
| ae472bac48 | |||
| 4f2c2ba45c | |||
| e26065a265 | |||
| 1cb6fdcf7b | |||
| 4ba436eccc | |||
| 91e8a3ced4 | |||
| 4ad4d28c49 | |||
| befd87f043 | |||
| abce3604f0 | |||
| 27a607ea90 | |||
| aa21174de9 | |||
| 6cf1cc0a39 | |||
| bb465a9cf0 | |||
| 67308ca6ef | |||
| 63772f6ac2 | |||
| 8798cf06ab | |||
| 47bb2dd53e | |||
| 724824abbe | |||
| afc2c99e6a | |||
| 0fb95a2d3b | |||
| 7ac153f404 | |||
| 0f1b91bb74 | |||
| d1eb44c856 | |||
| 11a363287a | |||
| 5cfe409443 | |||
| 5b3a7f3892 | |||
| 060361fca3 | |||
| 6ac27e2383 | |||
| ba5f49219f | |||
| 2c767338f2 | |||
| 234a85506d | |||
| 232ebd159a | |||
| 4d3d4bc88f | |||
| 2b1e7bd462 | |||
| c7e5e41b8c | |||
| 9557598c45 | |||
| 156331aecd | |||
| cd7df4117d | |||
| 6af157ea93 | |||
| 83317b3081 | |||
| e831bcb3b1 | |||
| 092c3af0c4 | |||
| 3e944c5583 | |||
| f67737363c | |||
| f7daaaa305 | |||
| 3dc131cd8d | |||
| ef0f62c12a | |||
| baafaf4a6e | |||
| abc86c0e35 | |||
| 4450cb3132 | |||
| fd0dcd1c45 | |||
| f478201c28 | |||
| c7046845e7 | |||
| 701e24c539 | |||
| 37da848e6c | |||
| c470a1336a | |||
| 581a390e2f | |||
| 2fc48c7eee | |||
| 1024231133 | |||
| 5ca095a34f | |||
| b77c65398c | |||
| a91691463b | |||
| 5056d327f8 | |||
| c0a37015e3 | |||
| e9b9c7d022 | |||
| 6c09584f73 | |||
| b8c8583953 | |||
| df485ae1e3 | |||
| 6386f70103 | |||
| 6d92198ef4 | |||
| 16488be9a4 | |||
| 685bd3a439 | |||
| 2e69948c1a | |||
| 7531e8c13e | |||
| 8e439de744 | |||
| d96a5aa730 | |||
| d7bcd85d4d | |||
| d927b8f3a2 | |||
| f579d9550d | |||
| bbecad4e8e | |||
| b82999a84b | |||
| 11568e562c | |||
| d9a1b8f975 | |||
| b634388ef1 | |||
| 4d415f2129 | |||
| 829171a9a4 | |||
| 5a232de2fa | |||
| 5f8048cd04 | |||
| 4378b560e8 | |||
| 8644e23b71 | |||
| b2fc3a3b0e | |||
| 290446d446 | |||
| 85a75d4c3d | |||
| f94f0ff912 | |||
| 1b2e634970 | |||
| dd62fc90ce | |||
| 10b418495e | |||
| c2f193a25c | |||
| 1812152392 | |||
| b8b353b7a7 | |||
| f2778d6502 | |||
| 2ad42e77c3 | |||
| e8aaee5d9b | |||
| 910c1b6a8f | |||
| 92d3240bb5 | |||
| 02a8a9a3a7 | |||
| ee163b66fb | |||
| 354db5b5f7 | |||
| 92b1ad01f3 | |||
| 60bfdaa934 | |||
| 16eb6d76bf | |||
| c8acfa700b | |||
| e70e3c87de | |||
| bc8dfe3caf | |||
| e3d324240f | |||
| 10882eeddd | |||
| 145a98fc12 | |||
| 64ae9ea3fe | |||
| 8aa72b9748 | |||
| 97d115a266 | |||
| 63cfd9efdc | |||
| 6cf8221a09 | |||
| 7a2feecad4 | |||
| ee004674b9 | |||
| 65544d8fe9 | |||
| 5fce525f90 | |||
| ca37b0e471 | |||
| 82a1258ffc |
.github/PULL_REQUEST_TEMPLATE.md — 8 changes (vendored)

@@ -37,11 +37,11 @@ members/contributors who may be interested in your PR.
If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.

- Big modeling: @SunMarc
- Fully-Sharded Data Parallism: @pacman100
- DeepSpeed: @pacman100
- Fully-Sharded Data Parallism: @muellerzr
- DeepSpeed: @muellerzr
- Command Line Interface: @muellerzr
- Documentation: @muellerzr
- Core parts of the library: @muellerzr @BenjaminBossan
- Maintained examples: @muellerzr or @pacman100
- Core parts of the library: @muellerzr @BenjaminBossan @SunMarc
- Maintained examples: @muellerzr or @SunMarc

-->
@ -37,7 +37,7 @@ jobs:
|
||||
with:
|
||||
file: docker/accelerate-cpu/Dockerfile
|
||||
push: true
|
||||
tags: huggingface/accelerate-cpu:${{needs.get-version.outputs.version}}
|
||||
tags: huggingface/accelerate:cpu-release-${{ needs.get-version.outputs.version }}
|
||||
|
||||
version-cuda:
|
||||
name: "Latest Accelerate GPU [version]"
|
||||
@ -57,4 +57,25 @@ jobs:
|
||||
with:
|
||||
file: docker/accelerate-gpu/Dockerfile
|
||||
push: true
|
||||
tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}
|
||||
tags: huggingface/accelerate:gpu-release-${{needs.get-version.outputs.version}}
|
||||
|
||||
version-cuda-deepspeed:
|
||||
name: "Latest Accelerate GPU DeepSpeed [version]"
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
needs: get-version
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Build and Push GPU
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
file: docker/accelerate-gpu-deepspeed/Dockerfile
|
||||
push: true
|
||||
tags: huggingface/accelerate:gpu-deepspeed-release-${{needs.get-version.outputs.version}}
|
||||
|
||||
|
||||
42
.github/workflows/build_docker_images.yml
vendored
42
.github/workflows/build_docker_images.yml
vendored
@ -22,12 +22,18 @@ jobs:
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
- name: Get current date
|
||||
id: date
|
||||
run: |
|
||||
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
|
||||
- name: Build and Push CPU
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
file: docker/accelerate-cpu/Dockerfile
|
||||
push: true
|
||||
tags: huggingface/accelerate-cpu
|
||||
tags: |
|
||||
huggingface/accelerate:cpu-nightly
|
||||
huggingface/accelerate:cpu-nightly-${{ env.date }}
|
||||
|
||||
latest-cuda:
|
||||
name: "Latest Accelerate GPU [dev]"
|
||||
@ -40,10 +46,40 @@ jobs:
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Get current date
|
||||
id: date
|
||||
run: |
|
||||
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
|
||||
- name: Build and Push GPU
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
file: docker/accelerate-gpu/Dockerfile
|
||||
push: true
|
||||
tags: huggingface/accelerate-gpu
|
||||
tags: |
|
||||
huggingface/accelerate:gpu-nightly
|
||||
huggingface/accelerate:gpu-nightly-${{ env.date }}
|
||||
|
||||
latest-cuda-deepspeed:
|
||||
name: "Latest Accelerate GPU DeepSpeed [dev]"
|
||||
runs-on: [self-hosted, nvidia-gpu, t4, ci]
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
- name: Get current date
|
||||
id: date
|
||||
run: |
|
||||
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
|
||||
- name: Build and Push GPU
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
file: docker/accelerate-gpu-deepspeed/Dockerfile
|
||||
push: true
|
||||
tags: |
|
||||
huggingface/accelerate:gpu-deepspeed-nightly
|
||||
huggingface/accelerate:gpu-deepspeed-nightly-${{ env.date }}
|
||||
|
||||
|
||||
1
.github/workflows/build_documentation.yml
vendored
1
.github/workflows/build_documentation.yml
vendored
@ -13,5 +13,6 @@ jobs:
|
||||
with:
|
||||
commit_sha: ${{ github.sha }}
|
||||
package: accelerate
|
||||
custom_container: huggingface/transformers-doc-builder
|
||||
secrets:
|
||||
hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
|
||||
|
||||
1
.github/workflows/build_pr_documentation.yml
vendored
1
.github/workflows/build_pr_documentation.yml
vendored
@ -14,3 +14,4 @@ jobs:
|
||||
commit_sha: ${{ github.event.pull_request.head.sha }}
|
||||
pr_number: ${{ github.event.number }}
|
||||
package: accelerate
|
||||
custom_container: huggingface/transformers-doc-builder
|
||||
|
||||
2
.github/workflows/integration_tests.yml
vendored
2
.github/workflows/integration_tests.yml
vendored
@ -31,6 +31,8 @@ jobs:
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.8
|
||||
cache: 'pip'
|
||||
cache-dependency-path: 'setup.py'
|
||||
|
||||
- name: Install Accelerate from source
|
||||
run: |
|
||||
|
||||
126
.github/workflows/nightly.yml
vendored
126
.github/workflows/nightly.yml
vendored
@ -12,13 +12,13 @@ env:
|
||||
|
||||
|
||||
jobs:
|
||||
run_all_tests_single_gpu:
|
||||
run_core_tests_single_gpu:
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
TEST_TYPE: "single_gpu"
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
image: huggingface/accelerate:gpu-nightly
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
@ -33,6 +33,11 @@ jobs:
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-reportlog tabulate
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
source activate accelerate;
|
||||
pip freeze
|
||||
|
||||
- name: Run test on GPUs
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
@ -54,13 +59,67 @@ jobs:
|
||||
pip install slack_sdk tabulate
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run_all_tests_multi_gpu:
|
||||
run_deepspeed_tests_single_gpu:
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
TEST_TYPE: "single_gpu_deepspeed"
|
||||
container:
|
||||
image: huggingface/accelerate:gpu-deepspeed-nightly
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Update clone & pip install
|
||||
run: |
|
||||
source activate accelerate
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-reportlog tabulate
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
source activate accelerate;
|
||||
pip freeze
|
||||
|
||||
- name: Run test on GPUs
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
source activate accelerate
|
||||
make test_deepspeed
|
||||
|
||||
- name: Run Integration tests on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
make test_integrations
|
||||
|
||||
- name: Run examples on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
pip uninstall comet_ml -y
|
||||
make test_examples
|
||||
|
||||
- name: Generate Report
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
pip install slack_sdk tabulate
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run_core_tests_multi_gpu:
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0,1"
|
||||
TEST_TYPE: "multi_gpu"
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
image: huggingface/accelerate:gpu-nightly
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
@ -75,6 +134,11 @@ jobs:
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-reportlog tabulate
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
source activate accelerate;
|
||||
pip freeze
|
||||
|
||||
- name: Run core and big modeling tests on GPUs
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
@ -105,6 +169,60 @@ jobs:
|
||||
pip install slack_sdk tabulate
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run_deepspeed_tests_multi_gpu:
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0,1"
|
||||
TEST_TYPE: "multi_gpu_deepspeed"
|
||||
container:
|
||||
image: huggingface/accelerate:gpu-deepspeed-nightly
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Update clone
|
||||
run: |
|
||||
source activate accelerate
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-reportlog tabulate
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
source activate accelerate;
|
||||
pip freeze
|
||||
|
||||
- name: Run DeepSpeed tests
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
source activate accelerate
|
||||
make test_deepspeed
|
||||
|
||||
- name: Run Integration tests on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
make test_integrations
|
||||
|
||||
- name: Run examples on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
pip uninstall comet_ml -y
|
||||
make test_examples
|
||||
|
||||
- name: Generate Report
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
pip install slack_sdk tabulate
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
|
||||
run-integration-tests:
|
||||
if: always()
|
||||
|
||||
2
.github/workflows/quality.yml
vendored
2
.github/workflows/quality.yml
vendored
@ -11,6 +11,8 @@ jobs:
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.8
|
||||
cache: 'pip'
|
||||
cache-dependency-path: 'setup.py'
|
||||
- name: Install Python dependencies
|
||||
run: pip install -e .[quality]
|
||||
- name: Run Quality check
|
||||
|
||||
94
.github/workflows/run_merge_tests.yml
vendored
94
.github/workflows/run_merge_tests.yml
vendored
@ -9,12 +9,12 @@ env:
|
||||
IS_GITHUB_CI: "1"
|
||||
|
||||
jobs:
|
||||
run_all_tests_single_gpu:
|
||||
run_core_tests_single_gpu:
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
image: huggingface/accelerate:gpu-nightly
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
@ -29,6 +29,11 @@ jobs:
|
||||
pip install -e .[testing,test_trackers] -U;
|
||||
pip install pytest-reportlog tabulate ;
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
source activate accelerate;
|
||||
pip freeze
|
||||
|
||||
- name: Run CLI tests (use make cli)
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
@ -56,12 +61,51 @@ jobs:
|
||||
pip install tabulate;
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run_all_tests_multi_gpu:
|
||||
run_deepspeed_tests_single_gpu:
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
container:
|
||||
image: huggingface/accelerate:gpu-deepspeed-nightly
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Install accelerate
|
||||
run: |
|
||||
source activate accelerate;
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e .[testing,test_trackers] -U;
|
||||
pip install pytest-reportlog tabulate ;
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
source activate accelerate;
|
||||
pip freeze
|
||||
|
||||
- name: Run test on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate;
|
||||
make test_deepspeed
|
||||
|
||||
- name: Generate Report
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
pip install tabulate;
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run_core_tests_multi_gpu:
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: 0,1
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
image: huggingface/accelerate:gpu-nightly
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
@ -76,6 +120,11 @@ jobs:
|
||||
pip install -e .[testing,test_trackers] -U;
|
||||
pip install pytest-reportlog tabulate
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
source activate accelerate;
|
||||
pip freeze
|
||||
|
||||
- name: Run test on GPUs
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
@ -96,3 +145,40 @@ jobs:
|
||||
run: |
|
||||
source activate accelerate;
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run_deepspeed_tests_multi_gpu:
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
container:
|
||||
image: huggingface/accelerate:gpu-deepspeed-nightly
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Install accelerate
|
||||
run: |
|
||||
source activate accelerate;
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e .[testing,test_trackers] -U;
|
||||
pip install pytest-reportlog tabulate ;
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
source activate accelerate;
|
||||
pip freeze
|
||||
|
||||
- name: Run test on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate;
|
||||
make test_deepspeed
|
||||
|
||||
- name: Generate Report
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
pip install tabulate;
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
@ -23,7 +23,7 @@ defaults:
|
||||
jobs:
|
||||
run-trainer-tests:
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
image: huggingface/accelerate:gpu-deepspeed-nightly
|
||||
options: --gpus all --shm-size "16gb"
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
strategy:
|
||||
@ -88,7 +88,7 @@ jobs:
|
||||
|
||||
run-skorch-tests:
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
image: huggingface/accelerate:gpu-nightly
|
||||
options: --gpus all --shm-size "16gb"
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
strategy:
|
||||
|
||||
4
.github/workflows/stale.yml
vendored
4
.github/workflows/stale.yml
vendored
@ -19,10 +19,12 @@ jobs:
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.8
|
||||
cache: 'pip'
|
||||
cache-dependency-path: 'setup.py'
|
||||
|
||||
- name: Install requirements
|
||||
run: |
|
||||
pip install PyGithub
|
||||
- name: Close stale issues
|
||||
run: |
|
||||
python utils/stale.py
|
||||
python utils/stale.py
|
||||
|
||||
45
.github/workflows/test.yml
vendored
45
.github/workflows/test.yml
vendored
@ -16,30 +16,6 @@ env:
|
||||
IS_GITHUB_CI: "1"
|
||||
|
||||
jobs:
|
||||
check-hf-status:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3.1.0
|
||||
- name: Set up python 3.8
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.8
|
||||
|
||||
- name: Install the library
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
pip install -e .
|
||||
pip install pytest-reportlog tabulate pytest
|
||||
|
||||
- name: Check status
|
||||
run: |
|
||||
make test_hub_status
|
||||
|
||||
- name: Generate Report
|
||||
if: always()
|
||||
run: |
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run-tests:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
@ -67,23 +43,20 @@ jobs:
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.8
|
||||
|
||||
- name: Activate python cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
${{ env.pythonLocation }}
|
||||
${{ env.HF_HOME }}
|
||||
key: ${{ env.pythonLocation }}-${{ matrix.pytorch-version }}-${{ matrix.test-kind }}-${{ hashFiles('setup.py') }}
|
||||
cache: 'pip'
|
||||
cache-dependency-path: 'setup.py'
|
||||
|
||||
- name: Install the library
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
if [[ ${{ matrix.test-kind }} = test_prod ]]; then pip install -e .[test_prod]; fi
|
||||
if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi
|
||||
if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi
|
||||
if [[ ${{ matrix.test-kind }} = minimum ]]; then pip install torch==1.10.0; fi
|
||||
pip install pytest-reportlog tabulate
|
||||
if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torch==2.3.1; fi
|
||||
pip install pytest-reportlog tabulate setuptools
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
pip freeze
|
||||
|
||||
- name: Run Tests
|
||||
env:
|
||||
@ -94,4 +67,4 @@ jobs:
|
||||
- name: Generate Report
|
||||
if: always()
|
||||
run: |
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
55
.github/workflows/test_imports.yml
vendored
Normal file
55
.github/workflows/test_imports.yml
vendored
Normal file
@ -0,0 +1,55 @@
|
||||
name: Run Import Tests
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- "src/**"
|
||||
- "tests/**"
|
||||
- ".github/**"
|
||||
- "examples/**"
|
||||
- "setup.py"
|
||||
types: [opened, synchronize, reopened]
|
||||
|
||||
env:
|
||||
HF_HOME: ~/hf_cache
|
||||
TESTING_MOCKED_DATALOADERS: "1"
|
||||
IS_GITHUB_CI: "1"
|
||||
|
||||
jobs:
|
||||
run-tests:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
pytorch-version: [
|
||||
latest,
|
||||
minimum,
|
||||
]
|
||||
steps:
|
||||
- uses: actions/checkout@v3.1.0
|
||||
- name: Set up python 3.8
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.8
|
||||
cache: 'pip'
|
||||
cache-dependency-path: 'setup.py'
|
||||
|
||||
- name: Install the library
|
||||
run: |
|
||||
pip install -e .
|
||||
pip install pytest-reportlog tabulate setuptools git+https://github.com/muellerzr/import-timer
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
pip freeze
|
||||
|
||||
- name: Run Import Tests
|
||||
env:
|
||||
PYTORCH_VERSION: ${{ matrix.pytorch-version }}
|
||||
run: |
|
||||
pytest -sv tests/test_imports.py
|
||||
|
||||
- name: Generate Report
|
||||
if: always()
|
||||
run: |
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
15
.github/workflows/trufflehog.yml
vendored
Normal file
15
.github/workflows/trufflehog.yml
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
on:
|
||||
push:
|
||||
|
||||
name: Secret Leaks
|
||||
|
||||
jobs:
|
||||
trufflehog:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Secret Scanning
|
||||
uses: trufflesecurity/trufflehog@main
|
||||
Makefile — 9 changes

@@ -12,26 +12,23 @@ extra_quality_checks:

# this target runs checks on all files
quality:
ruff $(check_dirs)
ruff check $(check_dirs)
ruff format --check $(check_dirs)
doc-builder style src/accelerate docs/source --max_len 119 --check_only

# Format source code automatically and check is there are any problems left that need manual fixing
style:
ruff $(check_dirs) --fix
ruff check $(check_dirs) --fix
ruff format $(check_dirs)
doc-builder style src/accelerate docs/source --max_len 119

# Run tests for the library
test_hub_status:
python -m pytest -s -v ./tests/test_hub_status.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_hub_status.log",)

test_big_modeling:
python -m pytest -s -v ./tests/test_big_modeling.py ./tests/test_modeling_utils.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_big_modeling.log",)

test_core:
python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \
--ignore=./tests/fsdp --ignore=./tests/test_cli.py --ignore=./tests/test_hub_status.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_core.log",)
--ignore=./tests/fsdp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_core.log",)

test_cli:
python -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_cli.log",)
README.md — 30 changes

@@ -22,22 +22,12 @@ limitations under the License.

<p align="center">
<!-- Uncomment when CircleCI is set up
<a href="https://circleci.com/gh/huggingface/accelerate">
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master">
</a>
<a href="https://circleci.com/gh/huggingface/accelerate"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master"></a>
-->
<a href="https://github.com/huggingface/accelerate/blob/main/LICENSE">
<img alt="License" src="https://img.shields.io/github/license/huggingface/accelerate.svg?color=blue">
</a>
<a href="https://huggingface.co/docs/accelerate/index.html">
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/accelerate/index.html.svg?down_color=red&down_message=offline&up_message=online">
</a>
<a href="https://github.com/huggingface/accelerate/releases">
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/accelerate.svg">
</a>
<a href="https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md">
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
</a>
<a href="https://github.com/huggingface/accelerate/blob/main/LICENSE"><img alt="License" src="https://img.shields.io/github/license/huggingface/accelerate.svg?color=blue"></a>
<a href="https://huggingface.co/docs/accelerate/index.html"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/accelerate/index.html.svg?down_color=red&down_message=offline&up_message=online"></a>
<a href="https://github.com/huggingface/accelerate/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/accelerate.svg"></a>
<a href="https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
</p>

<h3 align="center">
|
||||
|
||||
🤗 Here is another way to launch multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well.
|
||||
Once you have MPI setup on your cluster, just run:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
Answer the questions that are asked, selecting to run using multi-CPU, and answer "yes" when asked if you want accelerate to launch mpirun.
|
||||
Then, use `accelerate launch` with your script like:
|
||||
```bash
|
||||
accelerate launch examples/nlp_example.py
|
||||
```
|
||||
Alternatively, you can use mpirun directly, without using the CLI like:
|
||||
```bash
|
||||
mpirun -np 2 python examples/nlp_example.py
|
||||
```
|
||||
|
||||
@@ -1,3 +1,16 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import threading
import time
docker/README.md — 73 changes (new file)

@@ -0,0 +1,73 @@
<!---
Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Official Hugging Face Accelerate Docker Images

Accelerate publishes a variety of docker versions as part of our CI that users can also use. These are stable images that Accelerate can run off of, each with a different setup configuration, all of which are officially hosted on [Docker Hub](https://hub.docker.com/r/huggingface/accelerate).

A breakdown of each is given below.

## Naming Conventions

Accelerate docker images follow a tagging convention of:

```bash
huggingface/accelerate:{accelerator}-{nightly,release}
```

`accelerator` in this instance is one of many applicable pre-configured backend supports:
* `gpu`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes`. Runs off Python 3.9.
* `cpu`: Comes compiled off of `python:3.9-slim` and is designed for non-CUDA based workloads.
* More to come soon
* `gpu-deepspeed`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes` as well as the latest `deepspeed` version. Runs off Python 3.10.

## Nightlies vs Releases

With each release, a new build is pushed with a version number included in the name. For a GPU-supported image of version 0.28.0, for instance, it would look like the following:

```bash
huggingface/accelerate:gpu-release-0.28.0
```

Nightlies contain two different image tags. There is a general `nightly` tag which is built each night, and a `nightly-YYYY-MM-DD` tag which corresponds to a build from a particular date.

For instance, here is an example nightly CPU image from 3/14/2024:

```bash
huggingface/accelerate:cpu-nightly-2024-03-14
```

## Running the images

Each image comes compiled with `conda` and an `accelerate` environment that contains all of the installed dependencies.

To pull down the latest nightly run:

```bash
docker pull huggingface/accelerate:gpu-nightly
```

To then run it in interactive mode with GPU-memory available, run:

```bash
docker container run --gpus all -it huggingface/accelerate:gpu-nightly
```

## DEPRECATED IMAGES

CPU and GPU docker images were hosted at `huggingface/accelerate-gpu` and `huggingface/accelerate-cpu`. These builds are now outdated and will not receive updates.

The builds at the corresponding `huggingface/accelerate:{gpu,cpu}` contain the same `Dockerfile`, so it's as simple as changing the docker image to the desired one from above. We will not be deleting these images for posterity, but they will not be receiving updates going forward.
docker/accelerate-gpu-deepspeed/Dockerfile — 46 changes (new file)

@@ -0,0 +1,46 @@
# Builds GPU docker image of PyTorch specifically
# Uses multi-staged approach to reduce size
# Stage 1
# Use base conda image to reduce time
FROM continuumio/miniconda3:latest AS compile-image
# Specify py version
# Note: DeepSpeed beyond v0.12.6 requires py 3.10
ENV PYTHON_VERSION=3.10
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists*

# Create our conda env
RUN conda create --name accelerate python=${PYTHON_VERSION} ipython jupyter pip
# We don't install pytorch here yet since CUDA isn't available
# instead we use the direct torch wheel
ENV PATH /opt/conda/envs/accelerate/bin:$PATH
# Activate our bash shell
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
# Activate the conda env, install mpi4py, and install torch + accelerate
RUN source activate accelerate && conda install -c conda-forge mpi4py
RUN source activate accelerate && \
python3 -m pip install --no-cache-dir \
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers,deepspeed] \
--extra-index-url https://download.pytorch.org/whl/cu117

RUN python3 -m pip install --no-cache-dir bitsandbytes

# Stage 2
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 AS build-image
COPY --from=compile-image /opt/conda /opt/conda
ENV PATH /opt/conda/bin:$PATH

# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists*

RUN echo "source activate accelerate" >> ~/.profile

# Activate the virtualenv
CMD ["/bin/bash"]
@@ -19,8 +19,6 @@
title: Launching distributed code
- local: basic_tutorials/notebook
title: Launching distributed training from Jupyter Notebooks
- local: basic_tutorials/troubleshooting
title: Troubleshooting guide
title: Tutorials
- sections:
- isExpanded: true
@@ -33,8 +31,12 @@
title: Model quantization
- local: usage_guides/tracking
title: Experiment trackers
- local: usage_guides/profiler
title: Profiler
- local: usage_guides/checkpoint
title: Save and load training states
- local: basic_tutorials/troubleshooting
title: Troubleshoot
- local: usage_guides/training_zoo
title: Example Zoo
title: Accelerate
@@ -48,6 +50,8 @@
title: Low precision (FP8) training
- local: usage_guides/deepspeed
title: DeepSpeed
- local: usage_guides/ddp_comm_hook
title: DDP Communication Hooks
- local: usage_guides/fsdp
title: Fully Sharded Data Parallelism
- local: usage_guides/megatron_lm
@@ -78,6 +82,8 @@
title: Executing and deferring jobs
- local: concept_guides/gradient_synchronization
title: Gradient synchronization
- local: concept_guides/fsdp_and_deepspeed
title: FSDP vs DeepSpeed
- local: concept_guides/low_precision_training
title: How training in low-precision environments is possible (FP8)
- local: concept_guides/training_tpu
@@ -430,6 +430,17 @@ args = (model, "fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=8)
```

To launch the training process with elasticity, enabling fault tolerance, you can use the `elastic_launch` feature provided by PyTorch. This requires setting additional parameters such as `rdzv_backend` and `max_restarts`. Here is an example of how to use `notebook_launcher` with elastic capabilities:

```python
notebook_launcher(
training_loop,
args,
num_processes=2,
max_restarts=3
)
```

As it's running, it will print the progress as well as state how many devices you ran on. This tutorial was run with two GPUs:

```python out
@@ -443,6 +454,12 @@ epoch 4: 94.71
```

And that's it!

Please note that [`notebook_launcher`] ignores the 🤗 Accelerate config file; to launch based on the config, use:

```bash
accelerate launch
```

## Debugging

A common issue when running the `notebook_launcher` is receiving a CUDA has already been initialized issue. This usually stems
@@ -13,77 +13,82 @@ specific language governing permissions and limitations under the License.
rendered properly in your Markdown viewer.
-->

# Troubleshooting guide
# Troubleshoot

This guide aims to provide you the tools and knowledge required to navigate some common issues. However,
as 🤗 Accelerate continuously evolves and the use cases and setups are diverse, you might encounter an issue not covered in this
guide. If the suggestions listed in this guide do not cover your such situation, please refer to the final section of
the guide, [Asking for Help](#ask-for-help), to learn where to find help with your specific issue.
This guide provides solutions to some issues you might encounter when using Accelerate. Not all errors are covered because Accelerate is an active library that is continuously evolving and there are many different use cases and distributed training setups. If the solutions described here don't help with your specific error, please take a look at the [Ask for help](#ask-for-help) section to learn where and how to get help.

## Logging

When facing an error, logging can help narrow down where it is coming from. In a distributed setup with multiple processes,
logging can be a challenge, but 🤗 Accelerate provides a utility that streamlines the logging process and ensures that
logs are synchronized and managed effectively across the distributed setup.
Logging can help you identify where an error is coming from. In a distributed setup with multiple processes, logging can be a challenge, but Accelerate provides the [`~accelerate.logging`] utility to ensure logs are synchronized.

To troubleshoot an issue, use `accelerate.logging` instead of the standard Python `logging` module:
To troubleshoot an issue, use [`~accelerate.logging`] instead of the standard Python [`logging`](https://docs.python.org/3/library/logging.html#module-logging) module. Set the verbosity level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`) with the `log_level` parameter, and then you can either:

```diff
- import logging
+ from accelerate.logging import get_logger
- logger = logging.getLogger(__name__)
+ logger = get_logger(__name__)
```
1. Export the `log_level` as the `ACCELERATE_LOG_LEVEL` environment variable.
2. Pass the `log_level` directly to `get_logger`.

To set the log level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`), export it as the `ACCELERATE_LOG_LEVEL` environment,
or pass as `log_level` to `get_logger`:
For example, to set `log_level="INFO"`:

```py
from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="INFO")
logger = get_logger(__name__, log_level="DEBUG")
```

By default, the log is called on main processes only. To call it on all processes, pass `main_process_only=False`.
If a log should be called on all processes and in order, also pass `in_order=True`.

```py
from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="DEBUG")
# log all processes
logger.debug("thing_to_log", main_process_only=False)
# log all processes in order
logger.debug("thing_to_log", main_process_only=False, in_order=True)
```
## Hanging code and timeout errors

### Mismatched tensor shapes
There can be many reasons why your code is hanging. Let's take a look at how to solve some of the most common issues that can cause your code to hang.

If your code seems to be hanging for a significant amount time on a distributed setup, a common cause is mismatched shapes of tensors on different
devices.
### Mismatched tensor shapes

When running scripts in a distributed fashion, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are
necessary to grab tensors across devices to perform operations on them collectively. These (and other) functions rely on
`torch.distributed` performing a `gather` operation, which requires that tensors have the **exact same shape** across all processes.
When the tensor shapes don't match, you will experience handing code, and eventually hit a timeout exception.
Mismatched tensor shapes is a common issue that can cause your code to hang for a significant amount of time on a distributed setup.

If you suspect this to be the case, use Accelerate's operational debug mode to immediately catch the issue.
When running scripts in a distributed setup, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are necessary to grab tensors across devices to collectively perform operations on them. These (and other) functions rely on `torch.distributed` to perform a `gather` operation, which requires tensors to have the **exact same shape** across all processes. When the tensor shapes don't match, your code hangs and you'll eventually hit a timeout exception.

The recommended way to enable Accelerate's operational debug mode is during `accelerate config` setup.
Alternative ways to enable debug mode are:
You can use Accelerate's operational debug mode to immediately catch this issue. We recommend enabling this mode during the `accelerate config` setup, but you can also enable it from the CLI, as an environment variable, or by manually editing the `config.yaml` file.

* From the CLI:
<hfoptions id="mismatch">
<hfoption id="CLI">

```bash
accelerate launch --debug {my_script.py} --arg1 --arg2
```

* As an environmental variable (which avoids the need for `accelerate launch`):
</hfoption>
<hfoption id="environment variable">

If enabling debug mode as an environment variable, you don't need to call `accelerate launch`.

```bash
ACCELERATE_DEBUG_MODE="1" torchrun {my_script.py} --arg1 --arg2
```

* Manually changing the `config.yaml` file:
</hfoption>
<hfoption id="config.yaml">

```diff
compute_environment: LOCAL_MACHINE
+debug: true
```
Add `debug: true` to your `config.yaml` file.

```yaml
compute_environment: LOCAL_MACHINE
debug: true
```

Once you enable the debug mode, you should get a similar traceback that points to the tensor shape mismatch issue:
</hfoption>
</hfoptions>

Once you enable debug mode, you should get a traceback that points to the tensor shape mismatch issue.

```py
Traceback (most recent call last):
@@ -100,16 +105,14 @@ Operation: `accelerate.utils.operations.broadcast`
Input shapes:
- Process 0: [1, 5]
- Process 1: [1, 2, 5]
```
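One way to avoid the mismatch in the first place is to pad tensors to a common shape before gathering. A minimal sketch (assuming the shapes only differ along one known dimension, and that the script is run via `accelerate launch`):

```py
import torch
from accelerate import Accelerator

accelerator = Accelerator()

# Simulate per-process tensors whose second dimension differs across processes,
# which is exactly the situation that makes a bare `gather` hang.
seq_len = 5 + accelerator.process_index
logits = torch.ones(1, seq_len, device=accelerator.device)

# Pad to a common length across processes so every rank contributes the same shape.
padded = accelerator.pad_across_processes(logits, dim=1, pad_index=0)
gathered = accelerator.gather(padded)
accelerator.print(gathered.shape)
```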
### Early stopping leads to hanging
### Early stopping

When doing early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss),
it may not be synchronized across all of them. As a result, a break can happen on process 0 but not on process 1.
This will cause the code to hang indefinitely until a timeout occurs.
For early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss), it may not be synchronized across all processes. As a result, a break can happen on process 0 but not on process 1 which will cause your code to hang indefinitely until a timeout occurs.

If you have early stopping conditionals, use `set_breakpoint` and `check_breakpoint` methods to make sure all the processes
are ended correctly:
If you have early stopping conditionals, use the `set_breakpoint` and `check_breakpoint` methods to make sure all the processes
are ended correctly.

```py
# Assume `should_do_breakpoint` is a custom defined function that returns a conditional,
@@ -122,35 +125,38 @@ if accelerator.check_breakpoint():
break
```

### Hanging on low kernel versions on Linux
### Low kernel versions on Linux

This is a known issue. On Linux with kernel version < 5.5, hanging processes have been reported. To avoid
encountering this problem, we recommend upgrading your system to a later kernel version.
On Linux with kernel version < 5.5, hanging processes have been reported. To avoid this problem, upgrade your system to a later kernel version.
## CUDA out of memory
### MPI

One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory",
as the entire script needs to be restarted, progress is lost, and typically a developer would want to simply
start their script and let it run.
If your distributed CPU training job using MPI is hanging, ensure that you have
[passwordless SSH](https://www.open-mpi.org/faq/?category=rsh#ssh-keys) setup (using keys) between the nodes. This means
that for all nodes in your hostfile, you should be able to SSH from one node to another without being prompted for a password.

To address this problem, `Accelerate` offers a utility `find_executable_batch_size` that is heavily based on [toma](https://github.com/BlackHC/toma).
The utility retries code that fails due to OOM (out-of-memory) conditions and lowers batch sizes automatically.
Next, try to run the `mpirun` command as a sanity check. For example, the command below should print out the
hostnames for each of the nodes.

### find_executable_batch_size
```bash
mpirun -f hostfile -n {number of nodes} -ppn 1 hostname
```

This algorithm operates with exponential decay, decreasing the batch size in half after each failed run on some
training script. To use it, restructure your training function to include an inner function that includes this wrapper,
and build your dataloaders inside it. At a minimum, this could look like 4 new lines of code.
## CUDA Out-of-Memory

One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory". The entire script needs to be restarted and any progress is lost.

To address this problem, Accelerate provides the [`find_executable_batch_size`] utility that is heavily based on [toma](https://github.com/BlackHC/toma).
This utility retries code that fails due to OOM (out-of-memory) conditions and automatically lowers batch sizes. For each OOM condition, the algorithm decreases the batch size by half and retries the code until it succeeds.

To use [`find_executable_batch_size`], restructure your training function to include an inner function with `find_executable_batch_size` and build your dataloaders inside it. At a minimum, this only takes 4 new lines of code.

<Tip warning={true}>

The inner function *must* take in the batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for us.
The inner function **must** take batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for you. Any object (models, optimizers) that consumes CUDA memory and is passed to the [`Accelerator`] also **must** be declared inside the inner function.

</Tip>

It should also be noted that anything which will consume CUDA memory and passed to the `accelerator` **must** be declared inside the inner function,
such as models and optimizers.

```diff
def training_function(args):
accelerator = Accelerator()
@@ -175,48 +181,31 @@ def training_function(args):
+ inner_training_loop()
```

To find out more, check the documentation [here](../package_reference/utilities#accelerate.find_executable_batch_size).
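For reference, a minimal, self-contained sketch of the pattern described above (the toy model, data, and hyperparameters are placeholders purely for illustration):

```py
import torch
from accelerate import Accelerator
from accelerate.utils import find_executable_batch_size


def training_function(starting_batch_size=128):
    accelerator = Accelerator()

    @find_executable_batch_size(starting_batch_size=starting_batch_size)
    def inner_training_loop(batch_size):
        nonlocal accelerator  # everything that holds CUDA memory is (re)built in here
        accelerator.free_memory()  # drop references from a previous, failed attempt

        model = torch.nn.Linear(512, 2)
        optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
        dataset = torch.utils.data.TensorDataset(
            torch.randn(4096, 512), torch.randint(0, 2, (4096,))
        )
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)

        model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
        model.train()
        for inputs, targets in dataloader:
            optimizer.zero_grad()
            loss = torch.nn.functional.cross_entropy(model(inputs), targets)
            accelerator.backward(loss)
            optimizer.step()

    inner_training_loop()  # called with no arguments; the wrapper supplies the batch size


if __name__ == "__main__":
    training_function()
```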
## Non-reproducible results between device setups

If you have changed the device setup and are observing different model performance, this is likely due to the fact that
you have not updated your script when moving from one setup to another. The same script with the same batch size across TPU,
multi-GPU, and single-GPU with Accelerate will have different results.
If you changed the device setup and observe different model performance, it is likely you didn't update your script when moving from one setup to another. Even if you're using the same script with the same batch size, the results will still be different on a TPU, multi-GPU, and single GPU.

For example, if you were previously training on a single GPU with a batch size of 16, when moving to two GPU setup,
you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate,
the batch size passed to the dataloader is the **batch size per GPU**.
For example, if you were training on a single GPU with a batch size of 16 and you move to a dual GPU setup, you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**.

To make sure you can reproduce the results between the setups, make sure to use the same seed, adjust the batch size
accordingly, consider scaling the learning rate.
To make sure you can reproduce the results between the setups, make sure to use the same seed, adjust the batch size accordingly, and consider scaling the learning rate.

For more details and a quick reference for batch sizes, check out the [Comparing performance between different device setups](../concept_guides/performance) guide.

## Performance issues on different GPUs

If your multi-GPU setup consists of different GPUs, you may hit some limitations:
If your multi-GPU setup consists of different GPUs, you may encounter some performance issues:

- There may be an imbalance in GPU memory between the GPUs. In this case, the GPU with smaller memory will limit the batch size or the size of the model that can be loaded onto the GPUs.
- If you are using GPUs with different performance profiles, the performance will be driven by the slowest GPU that you are using as the other GPUs will have to wait for it to complete its workload.
- There may be an imbalance in GPU memory between the GPUs. In this case, the GPU with the smaller memory will limit the batch size or the size of the model that can be loaded onto the GPUs.
- If you are using GPUs with different performance profiles, the performance will be driven by the slowest GPU you are using because the other GPUs will have to wait for it to complete its workload.

Vastly different GPUs within the same setup can lead to performance bottlenecks.

## Ask for help

If the above troubleshooting tools and advice did not help you resolve your issue, reach out for help to the community
and the team.
If none of the solutions and advice here helped resolve your issue, you can always reach out to the community and Accelerate team for help.

### Forums
- Ask for help on the Hugging Face forums by posting your question in the [🤗 Accelerate category](https://discuss.huggingface.co/c/accelerate/18). Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved!

Ask for help on the Hugging Face forums - post your question in the [🤗Accelerate category](https://discuss.huggingface.co/c/accelerate/18)
Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved!
- Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you.

### Discord

Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you.

### GitHub Issues

Create an Issue on the 🤗 Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you suspect
to have found a bug related to the library. Include context regarding the bug and details about your distributed setup
to help us better figure out what's wrong and how we can fix it.
- Create an Issue on the 🤗 Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you think you've found a bug related to the library. Include context regarding the bug and details about your distributed setup to help us better figure out what's wrong and how we can fix it.
192
docs/source/concept_guides/fsdp_and_deepspeed.md
Normal file
192
docs/source/concept_guides/fsdp_and_deepspeed.md
Normal file
@ -0,0 +1,192 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Moving between FSDP And DeepSpeed
|
||||
|
||||
🤗 Accelerate offers flexibilty of training frameworks, by integrating two extremely powerful tools for distributed training, namely [Pytorch FSDP](../usage_guides/fsdp) and [Microsoft DeepSpeed](../usage_guides/deepspeed). The aim of this tutorial is to draw parallels, as well as to outline potential differences, to empower the user to switch seamlessly between these two frameworks.
|
||||
|
||||
<Tip>
|
||||
|
||||
To switch between the frameworks, we recommend launching code 🤗 `accelerate launch` passing in the correct config file with `--config_file`, or passing in the respective arguments directly for [FSDP and DeepSpeed](../package_reference/cli#accelerate-launch) .
|
||||
|
||||
Example 🤗 Accelerate configurations can be found here for [DeepSpeed](../usage_guides/deepspeed#accelerate-deepspeed-plugin) and [FSDP](../usage_guides/fsdp#how-it-works-out-of-the-box), or in the [example zoo under "Launch Configurations"](../usage_guides/explore)
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
This tutorial is for single-node, multi-GPU scenarios only.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Configuring Functionalities
|
||||
|
||||
Model tensors are split across different GPUs in an attempt to scale up model sizes; this is termed *sharding* in FSDP, and *partitioning* in DeepSpeed. FSDP sharding and DeepSpeed ZeRO (partitioning) stages are configured by `--fsdp_sharding_strategy` and `--zero_stage`, respectively. In particular, FSDP `FULL_SHARD` maps to DeepSpeed ZeRO stage `3`; see this [comprehensive mapping between FSDP sharding and DeepSpeed ZeRO settings](../usage_guides/fsdp#mapping-between-fsdp-sharding-strategies-and-deepspeed-zero-stages). The table below summarizes and groups similar settings:
|
||||
|
||||
Group | Framework | Configuration | Example | Restrictions (if any)
|
||||
--|--|--|--|--
|
||||
sharding / partitioning | FSDP<br>DeepSpeed | `--fsdp_sharding_strategy`<br>`--zero_stage` | `1` (`FULL_SHARD`) <br>`3` |
|
||||
offload | FSDP<br>DeepSpeed | `--fsdp_offload_params`<br>`--offload_param_device`<br>`--offload_optimizer_device` | `true`<br>`cpu`<br>`cpu` | all or nothing <br><br>
|
||||
model loading | FSDP<br>DeepSpeed | <span style="white-space:nowrap;">`--fsdp_cpu_ram_efficient_loading`</span><br>`--zero3_init_flag` | `true`<br>`true` | <br>only ZeRO 3
|
||||
efficient checkpointing | FSDP<br>DeepSpeed | `--fsdp_state_dict_type`<br>`--zero3_save_16bit_model` | `SHARDED_STATE_DICT`<br>`true` | <br>only ZeRO 3
|
||||
weights prefetching | FSDP<br><br>DeepSpeed | `--fsdp_forward_prefetch`<br>`--fsdp_backward_prefetch`<br>None | `true`<br>`BACKWARD_PRE` | <br><br>
|
||||
model | FSDP<br><br>DeepSpeed | `--fsdp_auto_wrap_policy`<br><span style="white-space:nowrap;">`--fsdp_transformer_layer_cls_to_wrap`</span><br>None | `TRANSFORMER_BASED_WRAP`<br><Layer Class> |<br>Usually not needed <br>Transparent to user.
|
||||
parameters summoning | FSDP<br>DeepSpeed | `--fsdp_use_orig_params`<br>None | `true` | required for `torch.compile`<br>Transparent to user
|
||||
parameters syncing | FSDP<br>DeepSpeed | `--fsdp_sync_module_states`<br>None | `true` |
|
||||
training | FSDP<br>DeepSpeed | None<br>`--gradient_accumulation_steps`<br>`--gradient_clipping` | <br>`auto`<br>`auto` | Transparent to user
|
||||
|
||||
For detailed descriptions of the above, refer to [🤗 `Accelerate` launch documentation](../package_reference/cli#accelerate-launch).
|
||||
|
||||
<Tip>
|
||||
|
||||
To access other DeepSpeed configurations, such as mixed precision settings,
|
||||
you need to pass in a `--deepspeed_config_file`, see the [documentation](../usage_guides/deepspeed#deepspeed-config-file).
|
||||
|
||||
DeepSpeed can also be configured via [`DeepSpeedPlugin`], e.g., `DeepSpeedPlugin.zero_stage` is equivalent to `--zero_stage`, and `DeepSpeedPlugin.hf_ds_config` can be used to pass `--deepspeed_config_file`.
|
||||
|
||||
</Tip>
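For instance, a minimal sketch of the programmatic route (the exact values shown are illustrative):

```python
from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin

# Roughly mirrors `--zero_stage 3` and `--gradient_accumulation_steps 2` on the CLI
deepspeed_plugin = DeepSpeedPlugin(zero_stage=3, gradient_accumulation_steps=2)
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)
```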
|
||||
|
||||
<Tip>
|
||||
|
||||
FSDP can also be configured via [`FullyShardedDataParallelPlugin`], e.g., `FullyShardedDataParallelPlugin.sharding_strategy` is equivalent to `--fsdp_sharding_strategy`.
|
||||
|
||||
</Tip>
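A minimal sketch of the equivalent programmatic route for FSDP (again, the chosen strategy is just an example):

```python
from torch.distributed.fsdp import ShardingStrategy

from accelerate import Accelerator
from accelerate.utils import FullyShardedDataParallelPlugin

# Roughly mirrors `--fsdp_sharding_strategy 1` (FULL_SHARD) on the CLI
fsdp_plugin = FullyShardedDataParallelPlugin(sharding_strategy=ShardingStrategy.FULL_SHARD)
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```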
|
||||
|
||||
### Checkpointing
|
||||
|
||||
Do note that FSDP can be configured via `--fsdp_state_dict_type` to save either full or sharded checkpoints.
|
||||
|
||||
<Tip>
|
||||
|
||||
For DeepSpeed ZeRO Stage 3, one can pass `--zero3_save_16bit_model true`, which conveniently consolidates the model to a single rank and saves it; this is the FSDP equivalent of `fsdp_state_dict_type: FULL_STATE_DICT`.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
For large models, consolidating the model to a single rank can be very slow.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
For quicker checkpointing, use `fsdp_state_dict_type: SHARDED_STATE_DICT` for FSDP, and for DeepSpeed ZeRO Stage 3 [use the `zero_to_fp32.py` script to post-convert sharded checkpoints](https://www.deepspeed.ai/tutorials/zero/#extracting-weights).
|
||||
|
||||
|
||||
</Tip>
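As a rough sketch (assuming a standard DeepSpeed checkpoint folder layout and a placeholder path), the same conversion can also be done from Python with DeepSpeed's helper:

```python
# Consolidate a DeepSpeed ZeRO-3 sharded checkpoint into a single fp32 state dict
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint("path/to/checkpoint_dir")
```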
|
||||
|
||||
### Offloading
|
||||
|
||||
FSDP only allows *all-or-nothing* offload (i.e., either offload parameters, gradients, and optimizer states, or keep them all on GPU), but DeepSpeed can offload parameters and optimizer states separately. Furthermore, DeepSpeed also supports [offloading to NVMe](https://www.deepspeed.ai/docs/config-json/#parameter-offloading).
|
||||
|
||||
### Prefetching
|
||||
|
||||
FSDP allows two prefetching configurations, `--fsdp_forward_prefetch` and `--fsdp_backward_prefetch`, to improve the overlap of communication and computation at the cost of extra memory; see the [FSDP documentation](https://pytorch.org/docs/stable/fsdp.html).
|
||||
For DeepSpeed, prefetching is turned on when needed, depending on hyperparameters such as `stage3_param_persistence_threshold` and `stage3_max_reuse_distance` [that can be configured for ZeRO Stage 3](https://www.deepspeed.ai/docs/config-json/#parameter-offloading); 🤗 `accelerate` may set these hyperparameters automatically if you don't set them explicitly in the DeepSpeed config file.
|
||||
|
||||
<Tip>
|
||||
|
||||
For FSDP, set `fsdp_backward_prefetch: BACKWARD_PRE` for improved throughput if memory allows.
|
||||
|
||||
</Tip>
|
||||
|
||||
### Model Loading
|
||||
|
||||
While FSDP requires an explicit `--fsdp_cpu_ram_efficient_loading true` to activate efficient model loading, 🤗 `transformers` activates a similar feature whenever DeepSpeed ZeRO Stage 3 is used.
|
||||
|
||||
<Tip>
|
||||
|
||||
For FSDP, when setting `--fsdp_cpu_ram_efficient_loading true`, 🤗 `accelerate` will automatically set `sync_module_states` to true.
|
||||
For RAM-efficient loading, the weights are loaded only on a single rank, so `sync_module_states` is required to broadcast the weights to the other ranks.
|
||||
|
||||
</Tip>
|
||||
|
||||
### Model
|
||||
|
||||
FSDP requires an explicit `--fsdp_auto_wrap_policy` for the algorithm to decide how to schedule the all-gather and reduce-scatter operations. But for DeepSpeed this is transparent to the user.
|
||||
|
||||
<Tip>
|
||||
|
||||
For FSDP, simply set `fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP`. With the latest [`transformers`] versions, we try our best to figure out the suitable `fsdp_transformer_layer_cls_to_wrap` for HF transformers models. However, if you get an error regarding it, please specify it explicitly.
|
||||
|
||||
</Tip>
|
||||
|
||||
### Parameters Summoning
|
||||
|
||||
FSDP requires an explicit `--fsdp_use_orig_params` flag when using `torch.compile`; see [the PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html#module-torch.distributed.fsdp). For DeepSpeed this is transparent to the user.
|
||||
|
||||
<Tip>
|
||||
|
||||
For FSDP, when using `torch.compile` please set `fsdp_use_orig_params: True`.
|
||||
|
||||
</Tip>
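As a minimal sketch (assuming the FSDP config already has `fsdp_use_orig_params: true`, and that `model` is defined earlier in your script), compilation is then applied to the prepared model as usual:

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()  # FSDP config with `fsdp_use_orig_params: true` assumed

model = accelerator.prepare(model)  # `model` assumed to be defined above
model = torch.compile(model)
```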
|
||||
|
||||
|
||||
## Training
|
||||
|
||||
DeepSpeed requires explicit `--gradient_accumulation_steps` and `--gradient_clipping` flags. For FSDP this is transparent to the user.
|
||||
|
||||
<Tip>
|
||||
|
||||
When using DeepSpeed, set `gradient_accumulation_steps: "auto"` and `gradient_clipping: "auto"` to automatically pick up values set in the [`Accelerator`] or [`TrainingArguments`] (if using `transformers`).
|
||||
|
||||
</Tip>
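For example, a minimal sketch of how the `auto` values get resolved when the steps are set on the [`Accelerator`] (the value shown is illustrative):

```python
from accelerate import Accelerator

# With `gradient_accumulation_steps: "auto"` in the DeepSpeed config file, the value
# below is picked up; `gradient_clipping: "auto"` is resolved similarly from the
# DeepSpeed plugin or TrainingArguments.
accelerator = Accelerator(gradient_accumulation_steps=2)
```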
|
||||
|
||||
|
||||
## On Differences in Data Precision Handling
|
||||
|
||||
To discuss how data precision is handled in both FSDP and DeepSpeed, it is instructive to first give an overview of how model parameters are handled in these frameworks. Before the model / optimizer parameters are distributed across GPUs, parameter preparation is involved to first "flatten" them into one-dimensional [`torch.Tensor`](https://pytorch.org/docs/stable/tensors.html#torch-tensor)s. FSDP and DeepSpeed differ in the `dtype` in which these "flattened" parameters are stored, and there are ramifications with regard to how [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) allocates its `dtype`s. The table below outlines the processes for both frameworks; the "Local" column indicates the process occurring at a per-GPU level, therefore any memory overhead from upcasting should be understood to be amortized by the number of GPUs used.
|
||||
|
||||
<Tip>
|
||||
|
||||
As a rule of thumb, for stable training with automatic mixed precision, all the trainable parameters have to be in `torch.float32`.
|
||||
|
||||
</Tip>
|
||||
|
||||
Process | Local | Framework | Details
|
||||
--|--|--|--
|
||||
Loading, i.e., [`AutoModel.from_pretrained(..., torch_dtype=torch_dtype)`] |
|
||||
Preparation, i.e., creation of "flat params" | ✅ | FSDP<br>DeepSpeed | created in `torch_dtype`.<br> disregards `torch_dtype`, created in `float32`.
|
||||
Optimizer initialization | ✅ | FSDP<br>DeepSpeed | creates parameters in `torch_dtype`<br> creates parameters in `float32`
|
||||
Training Step, i.e, forward, backward, reduction | | FSDP<br>DeepSpeed | follows [`MixedPrecision`](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.MixedPrecision)<br> follows `deepspeed_config_file` mixed precision settings.
|
||||
Optimizer (Pre-Step) | ✅ | FSDP<br>DeepSpeed | upcasting (if any) to `torch_dtype`<br>upcasted to `float32`
|
||||
Optimizer (Actual Step) | ✅ | FSDP<br>DeepSpeed | occurs in `torch_dtype` <br> occurs in `float32`.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Therefore, when using DeepSpeed with a small number of GPUs, be aware of potentially significant memory overheads due to the upcasting during preparation.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
With FSDP, in the absence of mixed precision, it is possible to operate the [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) in the low-precision `torch_dtype`, which may be helpful when using a small number of GPUs.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
With mixed precision, FSDP and DeepSpeed will upcast in the model preparation step (cf. the table above). But do note that FSDP will then save checkpoints in the upcasted precision; DeepSpeed may still save low-precision checkpoints if `--zero3_save_16bit_model` is specified.
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
To clarify the above table, consider the concrete examples below; the optimizer pre-step and actual step are combined for brevity. With FSDP it is possible to operate in the two modes shown below, but DeepSpeed can only operate in one.
|
||||
|
||||
Framework | Model Loading (`torch_dtype`) | Mixed Precision | Preparation (Local) | Training | Optimizer (Local)
|
||||
--|--|--|--|--|--
|
||||
FSDP | bf16 | default (none) | bf16 | bf16 | bf16
|
||||
FSDP | bf16 | bf16 | fp32 | bf16 | fp32
|
||||
DeepSpeed | bf16 | bf16 | fp32 | bf16 | fp32
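As an illustrative sketch of the second and third rows above (model loaded in bf16 with bf16 mixed precision, so local flat parameters and optimizer states are kept in fp32); the checkpoint name is a placeholder:

```python
import torch
from transformers import AutoModelForCausalLM

# Loading: weights materialized in bf16; with mixed precision enabled, both FSDP
# (via `MixedPrecision`) and DeepSpeed then create the local flat params in fp32.
model = AutoModelForCausalLM.from_pretrained(
    "your-model-checkpoint",  # placeholder
    torch_dtype=torch.bfloat16,
)
```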
|
||||
@ -167,3 +167,18 @@ As you can see, if you are not careful about how you set up your gradient synchr
|
||||
|
||||
If you are worried about making sure everything is done properly, we highly recommend utilizing the [`~Accelerator.accumulate`] function and passing in
|
||||
`gradient_accumulation_steps` or `gradient_accumulation_plugin` to the [`Accelerator`] object so Accelerate can handle this for you.
|
||||
|
||||
### `no_sync` requires additional GPU memory when using FSDP
|
||||
|
||||
Be aware that not syncing gradients can have adverse effects while performing FSDP training. As warned in `torch`, the [`no_sync` context manager for FSDP](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.no_sync) requires additional memory.
|
||||
|
||||
Therefore, in memory-intensive situations while using FSDP, we recommend setting `sync_each_batch` to `True` in the [`~utils.GradientAccumulationPlugin`] to disable `no_sync`.
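A minimal sketch of that recommendation (the `num_steps` value here is just an example):

```python
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# sync_each_batch=True disables FSDP's `no_sync`, trading a slowdown for memory savings
plugin = GradientAccumulationPlugin(num_steps=16, sync_each_batch=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
```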
|
||||
|
||||
See the example below where we fine-tune Mixtral (47B parameters) on 8 A100-80GB GPUs. We see that even for a modest `gradient_accumulation_steps=2` we quickly go out-of-memory (OOM) if `no_sync` is enabled. Again, this is due to the additional memory overhead of FSDP's `no_sync`. However, if `no_sync` is disabled via `sync_each_batch=True`, then the memory consumption for `gradient_accumulation_steps=16` reverts to that of `gradient_accumulation_steps=1`.
|
||||
|
||||
| Model | `no_sync` (accum=1) | `no_sync` (accum=2) | `no_sync` disabled (accum=16)
|
||||
| :-------------: | :-----------------: | :-----------------: | :-----------------:
|
||||
mixtral 8x7B | 69G | OOM | 69G
|
||||
|
||||
> [!WARNING]
|
||||
> Disabling `no_sync` means there _will be a slowdown_ due to the extra data syncs, as explained in the earlier sections of this guide.
|
||||
@ -34,7 +34,7 @@ MS-AMP O3 | FP8 | FP8 | FP8 | FP16 | FP8 | FP8+FP16
|
||||
|
||||
## `TransformersEngine`
|
||||
|
||||
`TransformersEngine` is the first solution to trying to train in 8-bit floating point. It works by using drop-in replacement layers for certain ones in a model that utilize their FP8-engine to reduce the number of bits (such as 32 to 8) without degrading the final accuracy of the model.
|
||||
`TransformersEngine` is the first solution for training in 8-bit floating point. It works by using drop-in replacement layers for certain ones in a model that utilize its FP8 engine to reduce the number of bits (such as 32 to 8) without degrading the final accuracy of the model.
|
||||
|
||||
Specifically, 🤗 Accelerate will find and replace the following layers with `TransformersEngine` versions:
|
||||
|
||||
@ -71,4 +71,4 @@ MS-AMP takes a different approach to `TransformersEngine` by providing three dif
|
||||
|
||||
## Combining the two
|
||||
|
||||
More experiments need to be performed but it's been noted that combining both MS-AMP and TransformersEngine can lead to the highest throughput by relying on NVIDIA's optimized FP8 operators and utilizing how MS-AMP reduces the memory overhead.
|
||||
More experiments need to be performed but it's been noted that combining both MS-AMP and TransformersEngine can lead to the highest throughput by relying on NVIDIA's optimized FP8 operators and utilizing how MS-AMP reduces the memory overhead.
|
||||
|
||||
BIN
docs/source/imgs/profile_export.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 105 KiB |
@ -20,3 +20,7 @@ The [`Accelerator`] is the main class for enabling distributed training on any t
|
||||
## Accelerator[[api]]
|
||||
|
||||
[[autodoc]] Accelerator
|
||||
|
||||
## Utilities
|
||||
|
||||
[[autodoc]] accelerate.utils.gather_object
|
||||
|
||||
@ -208,6 +208,10 @@ The following arguments are only useful when `use_fsdp` is passed or Fully Shard
|
||||
* `--fsdp_transformer_layer_cls_to_wrap` (`str`) -- Transformer layer class name (case-sensitive) to wrap, e.g, `BertLayer`, `GPTJBlock`, `T5Block` ...
|
||||
* `--fsdp_backward_prefetch_policy` (`str`) -- FSDP's backward prefetch policy.
|
||||
* `--fsdp_state_dict_type` (`str`) -- FSDP's state dict type.
|
||||
* `--fsdp_forward_prefetch` (`str`) -- FSDP forward prefetch.
|
||||
* `--fsdp_use_orig_params` (`str`) -- If True, allows non-uniform `requires_grad` mixed in a FSDP unit.
|
||||
* `--fsdp_cpu_ram_efficient_loading` (`str`) -- If true, only the first process loads the pretrained model checkpoint while all other processes have empty weights. When using this, `--fsdp_sync_module_states` needs to be set to `True`.
|
||||
* `--fsdp_sync_module_states` (`str`) -- If true, each individually wrapped FSDP unit will broadcast module parameters from rank 0.
|
||||
|
||||
**Megatron-LM Arguments**:
|
||||
|
||||
|
||||
@ -17,12 +17,12 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
[[autodoc]] utils.DeepSpeedPlugin
|
||||
|
||||
[[autodoc]] utils.DummyOptim
|
||||
[[autodoc]] utils.deepspeed.DummyOptim
|
||||
|
||||
[[autodoc]] utils.DummyScheduler
|
||||
[[autodoc]] utils.deepspeed.DummyScheduler
|
||||
|
||||
[[autodoc]] utils.DeepSpeedEngineWrapper
|
||||
[[autodoc]] utils.deepspeed.DeepSpeedEngineWrapper
|
||||
|
||||
[[autodoc]] utils.DeepSpeedOptimizerWrapper
|
||||
[[autodoc]] utils.deepspeed.DeepSpeedOptimizerWrapper
|
||||
|
||||
[[autodoc]] utils.DeepSpeedSchedulerWrapper
|
||||
[[autodoc]] utils.deepspeed.DeepSpeedSchedulerWrapper
|
||||
|
||||
@ -15,4 +15,6 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# Utilities for Fully Sharded Data Parallelism
|
||||
|
||||
[[autodoc]] utils.merge_fsdp_weights
|
||||
|
||||
[[autodoc]] utils.FullyShardedDataParallelPlugin
|
||||
@ -30,6 +30,10 @@ related to distributed training or mixed precision are created.
|
||||
|
||||
[[autodoc]] utils.FP8RecipeKwargs
|
||||
|
||||
## ProfileKwargs
|
||||
|
||||
[[autodoc]] utils.ProfileKwargs
|
||||
|
||||
## GradScalerKwargs
|
||||
|
||||
[[autodoc]] GradScalerKwargs
|
||||
|
||||
@ -166,6 +166,10 @@ These functionalities check the state of the current working environment includi
|
||||
|
||||
When setting up 🤗 Accelerate for the first time, rather than running `accelerate config`, [`~utils.write_basic_config`] can be used as an alternative for quick configuration.
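For instance, a minimal sketch (the mixed-precision choice is illustrative):

```python
from accelerate.utils import write_basic_config

# Writes a basic, single-machine config file to the Accelerate cache folder
write_basic_config(mixed_precision="fp16")
```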
|
||||
|
||||
[[autodoc]] utils.set_numa_affinity
|
||||
|
||||
[[autodoc]] utils.environment.override_numa_affinity
|
||||
|
||||
## Memory
|
||||
|
||||
[[autodoc]] utils.find_executable_batch_size
|
||||
|
||||
@ -9,7 +9,7 @@ Unless required by applicable law or agreed to in writing, software distributed
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
@ -25,17 +25,18 @@ This quicktour introduces the three main features of Accelerate:
|
||||
|
||||
## Unified launch interface
|
||||
|
||||
Accelerate automatically selects the appropriate configuration values for any given distributed training framework (DeepSpeed, FSDP, etc.) through a unified configuration file generated from the [`accelerate config`](../../docs/source/package_reference/cli#accelerate-config) command. You could also pass the configuration values explicitly to the command line which is helpful in certain situations like if you're using SLURM.
|
||||
Accelerate automatically selects the appropriate configuration values for any given distributed training framework (DeepSpeed, FSDP, etc.) through a unified configuration file generated from the [`accelerate config`](package_reference/cli#accelerate-config) command. You could also pass the configuration values explicitly to the command line which is helpful in certain situations like if you're using SLURM.
|
||||
|
||||
But in most cases, you should always run [`accelerate config`](../../docs/source/package_reference/cli#accelerate-config) first to help Accelerate learn about your training setup.
|
||||
|
||||
But in most cases, you should always run [`accelerate config`](package_reference/cli#accelerate-config) first to help Accelerate learn about your training setup.
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
The [`accelerate config`](../../docs/source/package_reference/cli#accelerate-config) command creates and saves a default_config.yaml file in Accelerates cache folder. This file stores the configuration for your training environment, which helps Accelerate correctly launch your training script based on your machine.
|
||||
The [`accelerate config`](package_reference/cli#accelerate-config) command creates and saves a `default_config.yaml` file in Accelerate's cache folder. This file stores the configuration for your training environment, which helps Accelerate correctly launch your training script based on your machine.
|
||||
|
||||
After you've configured your environment, you can test your setup with [`accelerate test`](../../docs/source/package_reference/cli#accelerate-test), which launches a short script to test the distributed environment.
|
||||
After you've configured your environment, you can test your setup with [`accelerate test`](package_reference/cli#accelerate-test), which launches a short script to test the distributed environment.
|
||||
|
||||
```bash
|
||||
accelerate test
|
||||
@ -44,7 +45,7 @@ accelerate test
|
||||
> [!TIP]
|
||||
> Add `--config_file` to the `accelerate test` or `accelerate launch` command to specify the location of the configuration file if it is saved in a non-default location like the cache.
|
||||
|
||||
Once your environment is setup, launch your training script with [`accelerate launch`](../../docs/source/package_reference/cli#accelerate-launch)!
|
||||
Once your environment is set up, launch your training script with [`accelerate launch`](package_reference/cli#accelerate-launch)!
|
||||
|
||||
```bash
|
||||
accelerate launch path_to_script.py --args_for_the_script
|
||||
@ -92,6 +93,9 @@ accelerator = Accelerator()
|
||||
> [!WARNING]
|
||||
> This step is *optional* but it is considered best practice to allow Accelerate to handle device placement. You could also deactivate automatic device placement by passing `device_placement=False` when initializing the [`Accelerator`]. If you want to explicitly place objects on a device with `.to(device)`, make sure you use `accelerator.device` instead. For example, if you create an optimizer before placing a model on `accelerator.device`, training fails on a TPU.
|
||||
|
||||
> [!WARNING]
|
||||
> Accelerate does not use non-blocking transfers by default for its automatic device placement, which can result in potentially unwanted CUDA synchronizations. You can enable non-blocking transfers by passing a [`~utils.dataclasses.DataLoaderConfiguration`] with `non_blocking=True` set as the `dataloader_config` when initializing the [`Accelerator`]. As usual, non-blocking transfers will only work if the dataloader also has `pin_memory=True` set. Be wary that using non-blocking transfers from GPU to CPU may cause incorrect results if it results in CPU operations being performed on non-ready tensors.
|
||||
|
||||
```py
|
||||
device = accelerator.device
|
||||
```
|
||||
@ -110,7 +114,7 @@ model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
||||
accelerator.backward(loss)
|
||||
```
|
||||
|
||||
Read [Accelerate’s internal mechanisms](../../docs/source/concept_guides/internal_mechanism) guide to learn more details about how Accelerate adapts your code.
|
||||
Read [Accelerate’s internal mechanisms](concept_guides/internal_mechanism) guide to learn more details about how Accelerate adapts your code.
|
||||
|
||||
### Distributed evaluation
|
||||
|
||||
@ -120,7 +124,7 @@ To perform distributed evaluation, pass your validation dataloader to the [`~Acc
|
||||
validation_dataloader = accelerator.prepare(validation_dataloader)
|
||||
```
|
||||
|
||||
Each device in your distributed setup only receives a part of the evaluation data, which means you should group your predictions together with the [`~Accelerator.gather_for_metrics`] method. This method requires all tensors to be the same size on each process, so if your tensors have different sizes on each process (for instance when dynamically padding to the maximum length in a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad you tensor to the largest size across processes.
|
||||
Each device in your distributed setup only receives a part of the evaluation data, which means you should group your predictions together with the [`~Accelerator.gather_for_metrics`] method. This method requires all tensors to be the same size on each process, so if your tensors have different sizes on each process (for instance when dynamically padding to the maximum length in a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad your tensor to the largest size across processes. Note that the tensors need to be 1D and that we concatenate the tensors along the first dimension.
|
||||
|
||||
```python
|
||||
for inputs, targets in validation_dataloader:
|
||||
@ -131,6 +135,8 @@ for inputs, targets in validation_dataloader:
|
||||
metric.add_batch(all_predictions, all_targets)
|
||||
```
|
||||
|
||||
For more complex cases (e.g., 2D tensors, when you don't want to concatenate tensors, or a dict of 3D tensors), you can pass `use_gather_object=True` to `gather_for_metrics`. This will return the list of objects after gathering. Note that using it with GPU tensors is not well supported and is inefficient.
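A short sketch of that call, assuming `predictions` and `targets` come from the evaluation loop above:

```python
# Gather arbitrary (possibly nested or non-1D) objects instead of concatenating tensors
all_predictions, all_targets = accelerator.gather_for_metrics(
    (predictions, targets), use_gather_object=True
)
```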
|
||||
|
||||
> [!TIP]
|
||||
> Data at the end of a dataset may be duplicated so the batch can be equally divided among all workers. The [`~Accelerator.gather_for_metrics`] method automatically removes the duplicated data to calculate a more accurate metric.
|
||||
|
||||
@ -139,7 +145,7 @@ for inputs, targets in validation_dataloader:
|
||||
Accelerate's Big Model Inference has two main features, [`~accelerate.init_empty_weights`] and [`~accelerate.load_checkpoint_and_dispatch`], to load large models for inference that typically don't fit into memory.
|
||||
|
||||
> [!TIP]
|
||||
> Take a look at the [Handling big models for inference](../../docs/source/concept_guides/big_model_inference) guide for a better understanding of how Big Model Inference works under the hood.
|
||||
> Take a look at the [Handling big models for inference](concept_guides/big_model_inference) guide for a better understanding of how Big Model Inference works under the hood.
|
||||
|
||||
### Empty weights initialization
|
||||
|
||||
@ -174,7 +180,7 @@ model = load_checkpoint_and_dispatch(
|
||||
|
||||
Now that you've been introduced to the main Accelerate features, your next steps could include:
|
||||
|
||||
* Check out the [tutorials](docs/source/basic_tutorials/overview) for a gentle walkthrough of Accelerate. This is especially useful if you're new to distributed training and the library.
|
||||
* Dive into the [guides](docs/source/usage_guides/explore) to see how to use Accelerate for specific use-cases.
|
||||
* Deepen your conceptual understanding of how Accelerate works internally by reading the [concept guides](docs/source/concept_guides/internal_mechanism).
|
||||
* Look up classes and commands in the [API reference](docs/source/package_reference/accelerator) to see what parameters and options are available.
|
||||
* Check out the [tutorials](basic_tutorials/overview) for a gentle walkthrough of Accelerate. This is especially useful if you're new to distributed training and the library.
|
||||
* Dive into the [guides](usage_guides/explore) to see how to use Accelerate for specific use-cases.
|
||||
* Deepen your conceptual understanding of how Accelerate works internally by reading the [concept guides](concept_guides/internal_mechanism).
|
||||
* Look up classes and commands in the [API reference](package_reference/accelerator) to see what parameters and options are available.
|
||||
|
||||
325
docs/source/usage_guides/ddp_comm_hook.md
Normal file
@ -0,0 +1,325 @@
|
||||
<!--
|
||||
Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# DDP Communication Hooks
|
||||
|
||||
Distributed Data Parallel (DDP) communication hooks provide a generic interface to control how gradients are communicated across workers by overriding the vanilla allreduce in `DistributedDataParallel`. A few built-in communication hooks are provided, and users can easily apply any of these hooks to optimize communication.
|
||||
|
||||
|
||||
- **FP16 Compression Hook**: Compresses gradients by casting them to half-precision floating-point format (`torch.float16`), reducing communication overhead.
|
||||
- **BF16 Compression Hook**: Similar to FP16, but uses the Brain Floating Point format (`torch.bfloat16`), which can be more efficient on certain hardware.
|
||||
- **PowerSGD Hook**: An advanced gradient compression algorithm that provides high compression rates and can accelerate bandwidth-bound distributed training.
|
||||
|
||||
In this tutorial, you will see how to quickly set up DDP communication hooks and perform training with the utilities provided in 🤗 Accelerate, which can be as simple as adding just one new line of code! This demonstrates how to use DDP communication hooks to optimize gradient communication in distributed training with the 🤗 Accelerate library.
|
||||
|
||||
## FP16 Compression Hook
|
||||
|
||||
<hfoptions id="fp16">
|
||||
<hfoption id="PyTorch">
|
||||
|
||||
```python
|
||||
import torch
|
||||
from torch.nn.parallel import DistributedDataParallel as DDP
|
||||
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.layer = torch.nn.Linear(10, 10)
|
||||
|
||||
def forward(self, x):
|
||||
return self.layer(x)
|
||||
|
||||
model = MyModel()
|
||||
model = DDP(model, device_ids=[torch.cuda.current_device()])
|
||||
model.register_comm_hook(state=None, hook=default_hooks.fp16_compress_hook)
|
||||
|
||||
# Training loop
|
||||
for data, targets in data_loader:
|
||||
outputs = model(data)
|
||||
loss = criterion(outputs, targets)
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Accelerate">
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
|
||||
import torch
from torch.utils.data import DataLoader
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.layer = torch.nn.Linear(10, 10)
|
||||
|
||||
def forward(self, x):
|
||||
return self.layer(x)
|
||||
|
||||
# DDP Communication Hook setup
|
||||
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.FP16)
|
||||
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
|
||||
|
||||
model = MyModel()
|
||||
optimizer = torch.optim.Adam(model.parameters())
|
||||
data_loader = DataLoader(dataset, batch_size=16)
|
||||
|
||||
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
|
||||
|
||||
# Training loop
|
||||
for data, targets in data_loader:
|
||||
outputs = model(data)
|
||||
loss = criterion(outputs, targets)
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
### BF16 Compression Hook
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
BF16 Compression Hook API is experimental, and it requires NCCL version later than 2.9.6.
|
||||
|
||||
</Tip>
|
||||
|
||||
<hfoptions id="bf16">
|
||||
<hfoption id="PyTorch">
|
||||
|
||||
```python
|
||||
import torch
|
||||
from torch.nn.parallel import DistributedDataParallel as DDP
|
||||
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.layer = torch.nn.Linear(10, 10)
|
||||
|
||||
def forward(self, x):
|
||||
return self.layer(x)
|
||||
|
||||
model = MyModel()
|
||||
model = DDP(model, device_ids=[torch.cuda.current_device()])
|
||||
model.register_comm_hook(state=None, hook=default_hooks.bf16_compress_hook)
|
||||
|
||||
# Training loop
|
||||
for data, targets in data_loader:
|
||||
outputs = model(data)
|
||||
loss = criterion(outputs, targets)
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Accelerate">
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
|
||||
import torch
from torch.utils.data import DataLoader
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.layer = torch.nn.Linear(10, 10)
|
||||
|
||||
def forward(self, x):
|
||||
return self.layer(x)
|
||||
|
||||
# DDP Communication Hook setup
|
||||
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.BF16)
|
||||
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
|
||||
|
||||
model = MyModel()
|
||||
optimizer = torch.optim.Adam(model.parameters())
|
||||
data_loader = DataLoader(dataset, batch_size=16)
|
||||
|
||||
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
|
||||
|
||||
# Training loop
|
||||
for data, targets in data_loader:
|
||||
outputs = model(data)
|
||||
loss = criterion(outputs, targets)
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
### PowerSGD Hook
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
PowerSGD typically requires extra memory of the same size as the model’s gradients to enable error feedback, which can compensate for biased compressed communication and improve accuracy.
|
||||
|
||||
</Tip>
|
||||
|
||||
<hfoptions id="powerSGD">
|
||||
<hfoption id="PyTorch">
|
||||
|
||||
```python
|
||||
import torch
|
||||
from torch.nn.parallel import DistributedDataParallel as DDP
|
||||
from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.layer = torch.nn.Linear(10, 10)
|
||||
|
||||
def forward(self, x):
|
||||
return self.layer(x)
|
||||
|
||||
model = MyModel()
|
||||
model = DDP(model, device_ids=[torch.cuda.current_device()])
|
||||
state = powerSGD_hook.PowerSGDState(process_group=None)
|
||||
model.register_comm_hook(state=state, hook=powerSGD_hook.powerSGD_hook)
|
||||
|
||||
# Training loop
|
||||
for data, targets in data_loader:
|
||||
outputs = model(data)
|
||||
loss = criterion(outputs, targets)
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Accelerate">
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
|
||||
import torch
from torch.utils.data import DataLoader
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.layer = torch.nn.Linear(10, 10)
|
||||
|
||||
def forward(self, x):
|
||||
return self.layer(x)
|
||||
|
||||
# DDP Communication Hook setup
|
||||
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.POWER_SGD)
|
||||
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
|
||||
|
||||
model = MyModel()
|
||||
optimizer = torch.optim.Adam(model.parameters())
|
||||
data_loader = DataLoader(dataset, batch_size=16)
|
||||
|
||||
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
|
||||
|
||||
# Training loop
|
||||
for data, targets in data_loader:
|
||||
outputs = model(data)
|
||||
loss = criterion(outputs, targets)
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## DDP Communication Hooks utilities
|
||||
|
||||
There are two additional utilities for supporting optional functionalities with the communication hooks.
|
||||
|
||||
### comm_wrapper
|
||||
|
||||
`comm_wrapper` is an option to wrap a communication hook with additional functionality. For example, it can be used to combine FP16 compression with other communication strategies. Currently supported wrappers are `no`, `fp16`, and `bf16`.
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
|
||||
import torch
from torch.utils.data import DataLoader
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.layer = torch.nn.Linear(10, 10)
|
||||
|
||||
def forward(self, x):
|
||||
return self.layer(x)
|
||||
|
||||
# DDP Communication Hook setup
|
||||
ddp_kwargs = DistributedDataParallelKwargs(
|
||||
comm_hook=DDPCommunicationHookType.POWER_SGD,
|
||||
comm_wrapper=DDPCommunicationHookType.FP16
|
||||
)
|
||||
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
|
||||
|
||||
model = MyModel()
|
||||
optimizer = torch.optim.Adam(model.parameters())
|
||||
data_loader = DataLoader(dataset, batch_size=16)
|
||||
|
||||
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
|
||||
|
||||
# Training loop
|
||||
for data, targets in data_loader:
|
||||
outputs = model(data)
|
||||
loss = criterion(outputs, targets)
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
### comm_state_option
|
||||
|
||||
`comm_state_option` allows you to pass additional state information required by certain communication hooks. This is particularly useful for stateful hooks like `PowerSGD`, which require maintaining hyperparameters and internal states across training steps. Below is an example showcasing the use of `comm_state_option` with the `PowerSGD` hook.
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
|
||||
import torch
from torch.utils.data import DataLoader
|
||||
|
||||
class MyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.layer = torch.nn.Linear(10, 10)
|
||||
|
||||
def forward(self, x):
|
||||
return self.layer(x)
|
||||
|
||||
# DDP Communication Hook setup
|
||||
ddp_kwargs = DistributedDataParallelKwargs(
|
||||
comm_hook=DDPCommunicationHookType.POWER_SGD,
|
||||
comm_state_option={"matrix_approximation_rank": 2}
|
||||
)
|
||||
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
|
||||
|
||||
model = MyModel()
|
||||
optimizer = torch.optim.Adam(model.parameters())
|
||||
data_loader = DataLoader(dataset, batch_size=16)
|
||||
|
||||
model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)
|
||||
|
||||
# Training loop
|
||||
for data, targets in data_loader:
|
||||
outputs = model(data)
|
||||
loss = criterion(outputs, targets)
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
For more advanced usage and additional hooks, refer to the [PyTorch DDP Communication Hooks documentation](https://pytorch.org/docs/stable/ddp_comm_hooks.html).
|
||||
@ -9,7 +9,7 @@ Unless required by applicable law or agreed to in writing, software distributed
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
@ -157,10 +157,18 @@ Currently, `Accelerate` supports following config through the CLI:
|
||||
`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them.
|
||||
`gradient_clipping`: Enable gradient clipping with value.
|
||||
`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2.
|
||||
`offload_optimizer_nvme_path`: Decides Nvme Path to offload optimizer states. If unspecified, will default to 'none'.
|
||||
`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.
|
||||
`offload_param_nvme_path`: Decides Nvme Path to offload parameters. If unspecified, will default to 'none'.
|
||||
`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3.
|
||||
`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.
|
||||
`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training.
|
||||
`deepspeed_moe_layer_cls_names`: Comma-separated list of transformer Mixture-of-Experts (MoE) layer class names (case-sensitive) to wrap, e.g., `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ...
|
||||
`deepspeed_hostfile`: DeepSpeed hostfile for configuring multi-node compute resources.
|
||||
`deepspeed_exclusion_filter`: DeepSpeed exclusion filter string when using a multi-node setup.
|
||||
`deepspeed_inclusion_filter`: DeepSpeed inclusion filter string when using a multi-node setup.
|
||||
`deepspeed_multinode_launcher`: DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.
|
||||
`deepspeed_config_file`: path to the DeepSpeed config file in `json` format. See the next section for more details on this.
|
||||
```
|
||||
To be able to tweak more options, you will need to use a DeepSpeed config file.
|
||||
|
||||
@ -353,7 +361,7 @@ accelerate launch examples/by_feature/deepspeed_with_config_support.py \
|
||||
```
|
||||
|
||||
**ZeRO++ Config Example**
|
||||
You can use the the features of ZeRO++ by using the appropriate config parameters. Note that ZeRO++ is an extension for ZeRO Stage 3. Here is how the config file can be modified, from [DeepSpeed's ZeRO++ tutorial](https://www.deepspeed.ai/tutorials/zeropp/):
|
||||
You can use the features of ZeRO++ by using the appropriate config parameters. Note that ZeRO++ is an extension for ZeRO Stage 3. Here is how the config file can be modified, from [DeepSpeed's ZeRO++ tutorial](https://www.deepspeed.ai/tutorials/zeropp/):
|
||||
|
||||
```json
|
||||
{
|
||||
@ -425,7 +433,7 @@ Only the `auto` fields specified in above examples are handled by `prepare` meth
|
||||
The `auto` values are calculated as:
|
||||
|
||||
- `reduce_bucket_size`: `hidden_size * hidden_size`
|
||||
- `stage3_prefetch_bucket_size`: `0.9 * hidden_size * hidden_size`
|
||||
- `stage3_prefetch_bucket_size`: `int(0.9 * hidden_size * hidden_size)`
|
||||
- `stage3_param_persistence_threshold`: `10 * hidden_size`
|
||||
|
||||
For the `auto` feature to work for these 3 config entries - Accelerate will use `model.config.hidden_size` or `max(model.config.hidden_sizes)` as `hidden_size`. If neither of these is available, the launching will fail and you will have to set these 3 config entries manually. Remember the first 2 config entries are the communication buffers - the larger they are the more efficient the comms will be, and the larger they are the more GPU memory they will consume, so it's a tunable performance trade-off.
|
||||
@ -519,7 +527,7 @@ ValueError: When using `deepspeed_config_file`, the following accelerate config
|
||||
['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device',
|
||||
'zero3_save_16bit_model', 'mixed_precision'].
|
||||
Please specify them appropriately in the DeepSpeed config file.
|
||||
If you are using an accelerate config file, remove others config variables mentioned in the above specified list.
|
||||
If you are using an accelerate config file, remove other config variables mentioned in the above specified list.
|
||||
The easiest method is to create a new config following the questionnaire via `accelerate config`.
|
||||
It will only ask for the necessary config variables when using `deepspeed_config_file`.
|
||||
```
|
||||
@ -721,3 +729,10 @@ Papers:
|
||||
|
||||
Finally, please, remember that 🤗 `Accelerate` only integrates DeepSpeed, therefore if you
|
||||
have any problems or questions with regards to DeepSpeed usage, please, file an issue with [DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues).
|
||||
|
||||
|
||||
<Tip>
|
||||
|
||||
For those interested in the similarities and differences between FSDP and DeepSpeed, please check out the [concept guide here](../concept_guides/fsdp_and_deepspeed.md)!
|
||||
|
||||
</Tip>
|
||||
@ -140,6 +140,8 @@ with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"],
|
||||
On the first GPU, the prompts will be `["a dog", "a cat"]`, and on the second GPU it will be `["a chicken", "a chicken"]`.
|
||||
Make sure to drop the final sample, as it will be a duplicate of the previous one.
|
||||
|
||||
You can find more complex examples [here](https://github.com/huggingface/accelerate/tree/main/examples/inference/distributed) such as how to use it with LLMs.
|
||||
|
||||
## Memory-efficient pipeline parallelism (experimental)
|
||||
|
||||
This next part will discuss using *pipeline parallelism*. This is an **experimental** API utilizing the [PiPPy library by PyTorch](https://github.com/pytorch/PiPPy/) as a native solution.
|
||||
@ -232,4 +234,4 @@ if PartialState().is_last_process:
|
||||
|
||||
</Tip>
|
||||
|
||||
And that's it! To explore more, please check out the inference examples in the [Accelerate repo](https://github.com/huggingface/accelerate/tree/main/examples/inference) and our [documentation](../package_reference/inference) as we work to improving this integration.
|
||||
And that's it! To explore more, please check out the inference examples in the [Accelerate repo](https://github.com/huggingface/accelerate/tree/main/examples/inference/pippy) and our [documentation](../package_reference/inference) as we work on improving this integration.
|
||||
|
||||
@ -161,6 +161,22 @@ When using transformers `save_pretrained`, pass `state_dict=accelerator.get_stat
|
||||
|
||||
You can then pass `state` into the `save_pretrained` method. There are several modes for `StateDictType` and `FullStateDictConfig` that you can use to control the behavior of `state_dict`. For more information, see the [PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html).
|
||||
|
||||
If you choose to use `StateDictType.SHARDED_STATE_DICT`, the weights of the model during `Accelerator.save_state` will be split into `n` files, one for each sub-split of the model. To merge them back into
|
||||
a single dictionary to load back into the model later after training, you can use the `merge_weights` utility:
|
||||
|
||||
```py
|
||||
from accelerate.utils import merge_fsdp_weights
|
||||
|
||||
# Our weights are usually saved in a `pytorch_model_fsdp_{model_number}` folder
|
||||
merge_fsdp_weights("pytorch_model_fsdp_0", "output_path", safe_serialization=True)
|
||||
```
|
||||
The final output will then either be saved to `model.safetensors` or `pytorch_model.bin` (if `safe_serialization=False` is passed).
|
||||
|
||||
This can also be called using the CLI:
|
||||
```bash
|
||||
accelerate merge-weights pytorch_model_fsdp_0/ output_path
|
||||
```
|
||||
|
||||
|
||||
## Mapping between FSDP sharding strategies and DeepSpeed ZeRO Stages
|
||||
* `FULL_SHARD` maps to the DeepSpeed `ZeRO Stage-3`. Shards optimizer states, gradients and parameters.
|
||||
@ -175,3 +191,10 @@ You can then pass `state` into the `save_pretrained` method. There are several
|
||||
|
||||
For more control, users can leverage the `FullyShardedDataParallelPlugin`. After creating an instance of this class, users can pass it to the Accelerator class instantiation.
|
||||
For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.
|
||||
|
||||
|
||||
<Tip>
|
||||
|
||||
For those interested in the similarities and differences between FSDP and DeepSpeed, please check out the [concept guide here](../concept_guides/fsdp_and_deepspeed.md)!
|
||||
|
||||
</Tip>
|
||||
@ -115,8 +115,11 @@ What is the IP address of the machine that will host the main process? 36.112.23
|
||||
What is the port you will use to communicate with the main process? 29500
|
||||
Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: yes
|
||||
Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:yes
|
||||
Do you want accelerate to launch mpirun? [yes/NO]: yes
|
||||
Please enter the path to the hostfile to use with mpirun [~/hostfile]: ~/hostfile
|
||||
Enter the number of oneCCL worker threads [1]: 1
|
||||
Do you wish to optimize your script with torch dynamo?[yes/NO]:NO
|
||||
How many CPU(s) should be used for distributed training? [1]:16
|
||||
How many processes should be used for distributed training? [1]:16
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
Do you wish to use FP16 or BF16 (mixed precision)?
|
||||
bf16
|
||||
@ -135,6 +138,9 @@ main_process_ip: 36.112.23.24
|
||||
main_process_port: 29500
|
||||
main_training_function: main
|
||||
mixed_precision: bf16
|
||||
mpirun_config:
|
||||
mpirun_ccl: '1'
|
||||
mpirun_hostfile: /home/user/hostfile
|
||||
num_machines: 4
|
||||
num_processes: 16
|
||||
rdzv_backend: static
|
||||
@ -148,6 +154,7 @@ use_cpu: true
|
||||
Set the following environment variables and use Intel MPI to launch the training.
|
||||
|
||||
In node0, you need to create a configuration file which contains the IP addresses of each node (for example hostfile) and pass that configuration file path as an argument.
|
||||
If you selected to have Accelerate launch `mpirun`, ensure that the location of your hostfile matches the path in the config.
|
||||
```bash
|
||||
$ cat hostfile
|
||||
xxx.xxx.xxx.xxx #node0 ip
|
||||
@ -155,7 +162,18 @@ xxx.xxx.xxx.xxx #node1 ip
|
||||
xxx.xxx.xxx.xxx #node2 ip
|
||||
xxx.xxx.xxx.xxx #node3 ip
|
||||
```
|
||||
Now, run the following command in node0 and **16DDP** will be enabled in node0,node1,node2,node3 with BF16 mixed precision:
|
||||
When Accelerate is launching `mpirun`, source the oneCCL bindings setvars.sh to get your Intel MPI environment, and then
|
||||
run your script using `accelerate launch`. Note that the Python script and environment need to exist on all of the
|
||||
machines being used for multi-CPU training.
|
||||
```bash
|
||||
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
|
||||
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
|
||||
|
||||
accelerate launch examples/nlp_example.py
|
||||
```
|
||||
Otherwise, if you selected not to have Accelerate launch `mpirun`, run the following command in node0 and **16DDP** will
|
||||
be enabled in node0,node1,node2,node3 with BF16 mixed precision. When using this method, the python script, python
|
||||
environment, and accelerate config file need to be present on all of the machines used for multi-CPU training.
|
||||
```bash
|
||||
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
|
||||
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
|
||||
|
||||
@ -88,7 +88,7 @@ achieved by adding one `with LocalSGD` statement and one call `local_sgd.step()`
|
||||
+ local_sgd.step()
|
||||
```
|
||||
|
||||
Under the hood, the Local SGD code **disables** automatic gradient synchornization (but accumulation still works as expected!). Instead it averages model parameters every `local_sgd_steps` steps (as well as in the end of the training loop).
|
||||
Under the hood, the Local SGD code **disables** automatic gradient synchronization (but accumulation still works as expected!). Instead it averages model parameters every `local_sgd_steps` steps (as well as at the end of the training loop).
|
||||
|
||||
## Limitations
|
||||
|
||||
|
||||
@ -57,7 +57,7 @@ Of the two, `MS-AMP` is traditionally the easier one to configure as there is on
|
||||
Currently two levels of optimization are supported in the 🤗 Accelerate integration, `"O1"` and `"O2"` (using the letter 'o', not zero).
|
||||
|
||||
* `"O1"` will cast the weight gradients and `all_reduce` communications to happen in 8-bit, while the rest are done in 16 bit. This reduces the general GPU memory usage and speeds up communication bandwidths.
|
||||
* `"O2"` will also cast first-order optimizer states into 8 bit, while the second order states are in FP16. (Currently just the `Adam` optimizer is supported). This tries it's best to minimize final accuracy degradation and will save the highest potential memory.
|
||||
* `"O2"` will also cast first-order optimizer states into 8 bit, while the second order states are in FP16. (Currently just the `Adam` optimizer is supported). This tries its best to minimize final accuracy degradation and will save the highest potential memory.
|
||||
|
||||
To specify an optimization level, pass it to the `FP8KwargsHandler` by setting the `optimization_level` argument:
|
||||
|
||||
@ -70,7 +70,7 @@ accelerator = Accelerator(mixed_precision="fp8", kwarg_handlers=kwargs)
|
||||
|
||||
## Configuring TransformersEngine
|
||||
|
||||
TransformersEngine has much more available for customizing how and what FP8 calculations are performed. A full list of supported arguments and what they mean are available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html), however they are restated as part of [`FP8KwargsHandler`]'s docstring for your convience.
|
||||
TransformersEngine has much more available for customizing how and what FP8 calculations are performed. A full list of supported arguments and what they mean are available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html), however they are restated as part of [`FP8KwargsHandler`]'s docstring for your convenience.
|
||||
|
||||
🤗 Accelerate tries to set sensible defaults, but exploring and tweaking the various parameters yourself can lead to better performance potentially.
|
||||
|
||||
@ -83,10 +83,10 @@ kwargs = [FP8RecipeKwargs(backend="te", ...)]
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwarg_handlers=kwargs)
|
||||
```
|
||||
|
||||
## Futher Reading
|
||||
## Further Reading
|
||||
|
||||
To learn more about training in FP8 please check out the following resources:
|
||||
|
||||
* [Our concept guide](../concept_guides/low_precision_training.md) detailing more about both TransformersEngine and MS-AMP
|
||||
* [The `transformers-engine` documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html)
|
||||
* [The `MS-AMP` documentation](https://azure.github.io/MS-AMP/docs/)
|
||||
* [The `MS-AMP` documentation](https://azure.github.io/MS-AMP/docs/)
|
||||
|
||||
@ -9,7 +9,7 @@ Unless required by applicable law or agreed to in writing, software distributed
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
@ -107,7 +107,10 @@ cd ..
|
||||
4. Installing Megatron-LM
|
||||
|
||||
```
|
||||
pip install git+https://github.com/huggingface/Megatron-LM.git
|
||||
git clone https://github.com/NVIDIA/Megatron-LM.git
|
||||
cd Megatron-LM
|
||||
git checkout core_r0.5.0
|
||||
pip install --no-use-pep517 -e .
|
||||
```
|
||||
|
||||
## Accelerate Megatron-LM Plugin
|
||||
@ -542,7 +545,7 @@ megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args)
|
||||
This covers Decoder only, Encode only and Encoder-Decoder model classes.
|
||||
|
||||
2. Only the loss is returned from the model forward pass, as
|
||||
there is quite complex interplay of pipeline, tensor and data parallelsim behind the scenes.
|
||||
there is quite complex interplay of pipeline, tensor and data parallelism behind the scenes.
|
||||
The `model(**batch_data)` call returns loss(es) averaged across the data parallel ranks.
|
||||
This is fine for most cases wherein pre-training jobs are run using Megatron-LM features and
|
||||
you can easily compute the `perplexity` using the loss.
|
||||
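As a quick illustration of that last point, perplexity is just the exponential of the (data-parallel-averaged) loss. A minimal sketch with a made-up loss value:

```python
import math

import torch

# Hypothetical value standing in for the loss returned by `model(**batch_data)`
# under the Megatron-LM plugin (already averaged across data-parallel ranks).
loss = torch.tensor(2.3)
perplexity = math.exp(loss.item())
print(f"perplexity: {perplexity:.2f}")  # ~9.97
```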
@ -580,4 +583,4 @@ b. Megatron-LM [GPTModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatr
|
||||
c. Megatron-LM [T5Model](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/t5_model.py) :
|
||||
🤗 transformers models with `t5` in config's model type, e.g.,
|
||||
[T5](https://huggingface.co/docs/transformers/model_doc/t5) and
|
||||
[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)
|
||||
[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)
|
||||
|
||||
@ -51,7 +51,7 @@ Below are a few gradio demos related to what was described above. The first is t
|
||||
></iframe>
|
||||
</div>
|
||||
|
||||
A community member has taken the idea and expended it further, allowing you to filter models directly and see if you can run a particular LLM given GPU constraints and LoRA configurations. To play with it, see [here](https://huggingface.co/spaces/Vokturz/can-it-run-llm) for more details.
|
||||
A community member has taken the idea and expanded it further, allowing you to filter models directly and see if you can run a particular LLM given GPU constraints and LoRA configurations. To play with it, see [here](https://huggingface.co/spaces/Vokturz/can-it-run-llm) for more details.
|
||||
|
||||
## The Command
|
||||
|
||||
@ -134,4 +134,4 @@ This calculator will tell you how much memory is needed to purely load the model
|
||||
This calculation is accurate within a few % of the actual value, so it is a very good view of just how much memory it will take. For instance loading `bert-base-cased` actually takes `413.68 MB` when loaded on CUDA in full precision, and the calculator estimates `413.18 MB`.
|
||||
|
||||
When performing inference you can expect to add up to an additional 20% as found by [EleutherAI](https://blog.eleuther.ai/transformer-math/). We'll be conducting research into finding a more accurate estimate of these values, and will update
|
||||
this calculator once done.
|
||||
this calculator once done.
|
||||
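As a rough worked example using the `bert-base-cased` numbers above (the 20% figure is only an upper-bound heuristic):

```python
# Rough back-of-the-envelope estimate for bert-base-cased in full precision
weights_mb = 413.68                # memory needed just to load the model on CUDA
inference_overhead = 0.20          # up to ~20% extra during inference (EleutherAI)
peak_inference_mb = weights_mb * (1 + inference_overhead)
print(f"~{peak_inference_mb:.2f} MB expected peak during inference")  # ~496.42 MB
```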
|
||||
334
docs/source/usage_guides/profiler.md
Normal file
@ -0,0 +1,334 @@
|
||||
<!--
|
||||
Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Profiler
|
||||
|
||||
Profiler is a tool that allows the collection of performance metrics during training and inference. Profiler’s context manager API can be used to better understand what model operators are the most expensive, examine their input shapes and stack traces, study device kernel activity, and visualize the execution trace. It provides insights into the performance of your model, allowing you to optimize and improve it.
|
||||
|
||||
This guide explains how to use PyTorch Profiler to measure the time and memory consumption of the model’s operators and how to integrate this with 🤗 Accelerate. We will cover various use cases and provide examples for each.
|
||||
|
||||
## Using profiler to analyze execution time
|
||||
|
||||
Profiler allows one to check which operators were called during the execution of a code range wrapped with a profiler context manager.
|
||||
|
||||
Let’s see how we can use profiler to analyze the execution time:
|
||||
|
||||
<hfoptions id="cpu execution time">
|
||||
<hfoption id="PyTorch">
|
||||
|
||||
```python
|
||||
import torch
|
||||
import torchvision.models as models
|
||||
from torch.profiler import profile, record_function, ProfilerActivity
|
||||
|
||||
model = models.resnet18()
|
||||
inputs = torch.randn(5, 3, 224, 224)
|
||||
|
||||
with profile(activities=[ProfilerActivity.CPU], record_shapes=True) as prof:
|
||||
model(inputs)
|
||||
|
||||
print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Accelerate">
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator, ProfileKwargs
|
||||
import torch
|
||||
import torchvision.models as models
|
||||
|
||||
model = models.resnet18()
|
||||
inputs = torch.randn(5, 3, 224, 224)
|
||||
|
||||
profile_kwargs = ProfileKwargs(
|
||||
activities=["cpu"],
|
||||
record_shapes=True
|
||||
)
|
||||
|
||||
accelerator = Accelerator(cpu=True, kwargs_handlers=[profile_kwargs])
|
||||
model = accelerator.prepare(model)
|
||||
|
||||
with accelerator.profile() as prof:
|
||||
with torch.no_grad():
|
||||
model(inputs)
|
||||
|
||||
print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
The resulting table output (omitting some columns):
|
||||
|
||||
```
|
||||
--------------------------------- ------------ ------------ ------------ ------------
|
||||
Name Self CPU CPU total CPU time avg # of Calls
|
||||
--------------------------------- ------------ ------------ ------------ ------------
|
||||
aten::conv2d 171.000us 52.260ms 2.613ms 20
|
||||
aten::convolution 227.000us 52.089ms 2.604ms 20
|
||||
aten::_convolution 270.000us 51.862ms 2.593ms 20
|
||||
aten::mkldnn_convolution 51.273ms 51.592ms 2.580ms 20
|
||||
aten::batch_norm 118.000us 7.059ms 352.950us 20
|
||||
aten::_batch_norm_impl_index 315.000us 6.941ms 347.050us 20
|
||||
aten::native_batch_norm 6.305ms 6.599ms 329.950us 20
|
||||
aten::max_pool2d 40.000us 4.008ms 4.008ms 1
|
||||
aten::max_pool2d_with_indices 3.968ms 3.968ms 3.968ms 1
|
||||
aten::add_ 780.000us 780.000us 27.857us 28
|
||||
--------------------------------- ------------ ------------ ------------ ------------
|
||||
Self CPU time total: 67.016ms
|
||||
```
|
||||
|
||||
To get a finer granularity of results and include operator input shapes, pass `group_by_input_shape=True` (note: this requires running the profiler with `record_shapes=True`):
|
||||
|
||||
```python
|
||||
print(prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total", row_limit=10))
|
||||
```
|
||||
|
||||
## Using profiler to analyze memory consumption
|
||||
|
||||
Profiler can also show the amount of memory (used by the model’s tensors) that was allocated (or released) during the execution of the model’s operators. To enable memory profiling functionality pass `profile_memory=True`.
|
||||
|
||||
<hfoptions id="memory consumption">
|
||||
<hfoption id="PyTorch">
|
||||
|
||||
```python
|
||||
model = models.resnet18()
|
||||
inputs = torch.randn(5, 3, 224, 224)
|
||||
|
||||
with profile(activities=[ProfilerActivity.CPU],
|
||||
profile_memory=True, record_shapes=True) as prof:
|
||||
model(inputs)
|
||||
|
||||
print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Accelerate">
|
||||
|
||||
```python
|
||||
model = models.resnet18()
|
||||
inputs = torch.randn(5, 3, 224, 224)
|
||||
|
||||
profile_kwargs = ProfileKwargs(
|
||||
activities=["cpu"],
|
||||
profile_memory=True,
|
||||
record_shapes=True
|
||||
)
|
||||
|
||||
accelerator = Accelerator(cpu=True, kwargs_handlers=[profile_kwargs])
|
||||
model = accelerator.prepare(model)
|
||||
|
||||
with accelerator.profile() as prof:
|
||||
model(inputs)
|
||||
|
||||
print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
The resulting table output (omitting some columns):
|
||||
|
||||
```
|
||||
--------------------------------- ------------ ------------ ------------
|
||||
Name CPU Mem Self CPU Mem # of Calls
|
||||
--------------------------------- ------------ ------------ ------------
|
||||
aten::empty 94.85 Mb 94.85 Mb 205
|
||||
aten::max_pool2d_with_indices 11.48 Mb 11.48 Mb 1
|
||||
aten::addmm 19.53 Kb 19.53 Kb 1
|
||||
aten::mean 10.00 Kb 10.00 Kb 1
|
||||
aten::empty_strided 492 b 492 b 5
|
||||
aten::cat 240 b 240 b 6
|
||||
aten::abs 480 b 240 b 4
|
||||
aten::masked_select 120 b 112 b 1
|
||||
aten::ne 61 b 53 b 3
|
||||
aten::eq 30 b 30 b 1
|
||||
--------------------------------- ------------ ------------ ------------
|
||||
Self CPU time total: 69.332ms
|
||||
```
|
||||
|
||||
|
||||
## Exporting chrome trace
|
||||
|
||||
You can examine the sequence of profiled operators and CUDA kernels in Chrome trace viewer (`chrome://tracing`):
|
||||
|
||||

|
||||
|
||||
<hfoptions id="exporting chrome trace">
|
||||
<hfoption id="PyTorch">
|
||||
|
||||
```python
|
||||
model = models.resnet18().cuda()
|
||||
inputs = torch.randn(5, 3, 224, 224).cuda()
|
||||
|
||||
with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
|
||||
model(inputs)
|
||||
|
||||
prof.export_chrome_trace("trace.json")
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Accelerate">
|
||||
|
||||
```python
|
||||
profile_kwargs = ProfileKwargs(
|
||||
activities=["cpu", "cuda"],
|
||||
output_trace_dir="trace"
|
||||
)
|
||||
|
||||
accelerator = Accelerator(kwargs_handlers=[profile_kwargs])
|
||||
model = accelerator.prepare(model)
|
||||
|
||||
with accelerator.profile() as prof:
|
||||
model(inputs)
|
||||
|
||||
# The trace will be saved to the specified directory
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## Using Profiler to Analyze Long-Running Jobs
|
||||
|
||||
Profiler offers an additional API to handle long-running jobs (such as training loops). Tracing all of the execution can be slow and result in very large trace files. To avoid this, use optional arguments:
|
||||
|
||||
- `schedule_option`: Scheduling options allow you to control when profiling is active. This is useful for long-running jobs to avoid collecting too much data. Available keys are `wait`, `warmup`, `active`, `repeat` and `skip_first`. The profiler will skip the first `skip_first` steps, then wait for `wait` steps, then do the warmup for the next `warmup` steps, then do the active recording for the next `active` steps and then repeat the cycle starting with `wait` steps. The optional number of cycles is specified with the `repeat` parameter; a value of zero means the cycles will continue until the profiling is finished.
|
||||
- `on_trace_ready`: Specifies a function that takes a reference to the profiler as an input and is called by the profiler each time a new trace is ready.
|
||||
|
||||
To illustrate how the API works, consider the following example:
|
||||
|
||||
<hfoptions id="custom handler">
|
||||
<hfoption id="PyTorch">
|
||||
|
||||
```python
|
||||
from torch.profiler import schedule
|
||||
|
||||
my_schedule = schedule(
|
||||
skip_first=10,
|
||||
wait=5,
|
||||
warmup=1,
|
||||
active=3,
|
||||
repeat=2
|
||||
)
|
||||
|
||||
def trace_handler(p):
|
||||
output = p.key_averages().table(sort_by="self_cuda_time_total", row_limit=10)
|
||||
print(output)
|
||||
p.export_chrome_trace("/tmp/trace_" + str(p.step_num) + ".json")
|
||||
|
||||
with profile(
|
||||
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
|
||||
schedule=my_schedule,
|
||||
on_trace_ready=trace_handler
|
||||
) as p:
|
||||
for idx in range(8):
|
||||
model(inputs)
|
||||
p.step()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Accelerate">
|
||||
|
||||
```python
|
||||
def trace_handler(p):
|
||||
output = p.key_averages().table(sort_by="self_cuda_time_total", row_limit=10)
|
||||
print(output)
|
||||
p.export_chrome_trace("/tmp/trace_" + str(p.step_num) + ".json")
|
||||
|
||||
profile_kwargs = ProfileKwargs(
|
||||
activities=["cpu", "cuda"],
|
||||
schedule_option={"wait": 5, "warmup": 1, "active": 3, "repeat": 2, "skip_first": 10},
|
||||
on_trace_ready=trace_handler
|
||||
)
|
||||
|
||||
accelerator = Accelerator(kwargs_handlers=[profile_kwargs])
|
||||
model = accelerator.prepare(model)
|
||||
|
||||
with accelerator.profile() as prof:
|
||||
for idx in range(8):
|
||||
model(inputs)
|
||||
prof.step()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## FLOPS
|
||||
|
||||
The profiler can use a formula to estimate the FLOPs (floating point operations) of specific operators (matrix multiplication and 2D convolution).
|
||||
|
||||
To measure floating-point operations (FLOPS):
|
||||
|
||||
<hfoptions id="FLOPS">
|
||||
<hfoption id="PyTorch">
|
||||
|
||||
```python
|
||||
with profile(
|
||||
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
|
||||
with_flops=True
|
||||
) as prof:
|
||||
model(inputs)
|
||||
|
||||
print(prof.key_averages().table(sort_by="flops", row_limit=10))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Accelerate">
|
||||
|
||||
```python
|
||||
profile_kwargs = ProfileKwargs(
|
||||
with_flops=True
|
||||
)
|
||||
accelerator = Accelerator(kwargs_handlers=[profile_kwargs])
|
||||
|
||||
with accelerator.profile() as prof:
|
||||
model(inputs)
|
||||
|
||||
print(prof.key_averages().table(sort_by="flops", row_limit=10))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
The resulting table output (omitting some columns):
|
||||
|
||||
```
|
||||
------------------------------------------------------- ------------ ------------ ------------
|
||||
Name Self CPU Self CUDA Total FLOPs
|
||||
------------------------------------------------------- ------------ ------------ ------------
|
||||
aten::conv2d 197.000us 0.000us 18135613440.000
|
||||
aten::addmm 103.000us 17.000us 5120000.000
|
||||
aten::mul 29.000us 2.000us 30.000
|
||||
aten::convolution 409.000us 0.000us --
|
||||
aten::_convolution 253.000us 0.000us --
|
||||
aten::cudnn_convolution 5.465ms 2.970ms --
|
||||
cudaEventRecord 138.000us 0.000us --
|
||||
cudaStreamIsCapturing 43.000us 0.000us --
|
||||
cudaStreamGetPriority 40.000us 0.000us --
|
||||
cudaDeviceGetStreamPriorityRange 10.000us 0.000us --
|
||||
------------------------------------------------------- ------------ ------------ ------------
|
||||
Self CPU time total: 21.938ms
|
||||
Self CUDA time total: 4.165ms
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Conclusion and Further Information
|
||||
|
||||
PyTorch Profiler is a powerful tool for analyzing the performance of your models. By integrating it with 🤗 Accelerate, you can easily profile your models and gain insights into their performance, helping you to optimize and improve them.
|
||||
|
||||
For more detailed information, refer to the [PyTorch Profiler documentation](https://pytorch.org/docs/stable/profiler.html).
|
||||
@ -198,7 +198,7 @@ achieve the same outcome with:
|
||||
|
||||
```python
|
||||
wandb_tracker = accelerator.get_tracker("wandb", unwrap=True)
|
||||
with accelerator.on_main_process:
|
||||
if accelerator.is_main_process:
|
||||
wandb_tracker.log_artifact(some_artifact_to_log)
|
||||
```
|
||||
|
||||
|
||||
@ -28,6 +28,7 @@ pip install datasets evaluate transformers
|
||||
|
||||
The same script can be run in any of the following configurations:
|
||||
- single CPU or single GPU
|
||||
- multi CPUs
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
- (multi) TPUs
|
||||
- fp16 (mixed-precision) or fp32 (normal precision)
|
||||
@ -58,6 +59,18 @@ To run it in each of these various modes, use the following commands:
|
||||
* from any server with Accelerate launcher
|
||||
```bash
|
||||
accelerate launch --mixed_precision fp16 ./nlp_example.py
```
|
||||
- multi CPUs (requires Open MPI, Intel MPI, or MVAPICH)
|
||||
* With Accelerate config and launcher, execute the following from node 0:
|
||||
```bash
|
||||
accelerate config # Select to have accelerate launch mpirun
|
||||
accelerate launch ./nlp_example.py # This will run the script on each server
|
||||
```
|
||||
* With Intel MPI:
|
||||
```bash
|
||||
export CCL_WORKER_COUNT=1
|
||||
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
|
||||
mpirun -f hostfile -n 16 -ppn 4 python ./nlp_example.py
|
||||
```
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher
|
||||
```bash
|
||||
@ -100,6 +113,7 @@ The [cv_example.py](./cv_example.py) script is a simple example to fine-tune a R
|
||||
|
||||
The same script can be run in any of the following configurations:
|
||||
- single CPU or single GPU
|
||||
- multi CPUs
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
- (multi) TPUs
|
||||
- fp16 (mixed-precision) or fp32 (normal precision)
|
||||
@ -143,6 +157,18 @@ To run it in each of these various modes, use the following commands:
|
||||
* from any server with Accelerate launcher
|
||||
```bash
|
||||
accelerate launch --mixed_precision fp16 ./cv_example.py --data_dir path_to_data
```
|
||||
- multi CPUs (requires Open MPI, Intel MPI, or MVAPICH)
|
||||
* With Accelerate config and launcher, run the following from node 0:
|
||||
```bash
|
||||
accelerate config --config_file config.yaml # Select to have accelerate launch mpirun
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
```
|
||||
* With Intel MPI, execute mpirun from node 0:
|
||||
```bash
|
||||
export CCL_WORKER_COUNT=1
|
||||
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
|
||||
mpirun -f hostfile -n 16 -ppn 4 python ./cv_example.py --data_dir path_to_data
|
||||
```
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher
|
||||
```bash
|
||||
@ -207,6 +233,8 @@ In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) the only parameter in
|
||||
|
||||
In [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many GPUs we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip` which will be the address of the master node, and the `--main_process_port`.
|
||||
|
||||
In [/slurm/submit_multicpu.sh](./slurm/submit_multicpu.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many CPU processes we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip` which will be the address of the master node, and the `--main_process_port`. Setting `mpirun_hostfile` specifies that the job should be run using MPIRun.
|
||||
|
||||
In all of these scripts, we run `activateEnviroment.sh` at the beginning. This script should contain the necessary instructions to initialize the environment for execution. Below, we show an example that loads the necessary libraries ([Environment modules](https://github.com/cea-hpc/modules)), activates the Python environment, and sets up various environment variables, most of them to run the scripts in offline mode in case we don't have an internet connection from the cluster.
|
||||
|
||||
```bash
|
||||
|
||||
@ -88,4 +88,34 @@ These arguments should be added at the end of any method for starting the python
|
||||
accelerate launch ./local_sgd.py --local_sgd_steps 4
|
||||
```
|
||||
|
||||
### DDP Communication Hook (`ddp_comm_hook.py`)
|
||||
|
||||
- Shows how to use DDP Communication Hooks to control and optimize gradient communication across workers in a DistributedDataParallel setup.
|
||||
- Arguments available:
|
||||
- `ddp_comm_hook`, the type of DDP communication hook to use. Choose between `no`, `fp16`, `bf16`, `power_sgd`, and `batched_power_sgd`.
|
||||
|
||||
These arguments should be added at the end of any method for starting the python script (such as `accelerate launch`, `python -m torch.distributed.run`), such as:
|
||||
|
||||
```bash
|
||||
accelerate launch ./ddp_comm_hook.py --mixed_precision fp16 --ddp_comm_hook power_sgd
|
||||
```
|
||||
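For reference, the `--ddp_comm_hook` flag in the script maps onto Accelerate's kwargs handler roughly as in the sketch below (a minimal illustration; hook names follow the `no`/`fp16`/`bf16`/`power_sgd`/`batched_power_sgd` choices above):

```python
from accelerate import Accelerator
from accelerate.utils import DDPCommunicationHookType, DistributedDataParallelKwargs

# Equivalent of passing `--ddp_comm_hook power_sgd` to the example script
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType("power_sgd"))
accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[ddp_kwargs])
```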
|
||||
### Profiler (`profiler.py`)
|
||||
|
||||
- Shows how to use the profiling capabilities of `Accelerate` to profile PyTorch models during training.
|
||||
- Uses the `ProfileKwargs` handler to customize profiling options, including activities, scheduling, and additional profiling options.
|
||||
- Can generate and save profiling traces in JSON format for visualization in Chrome's tracing tool.
|
||||
|
||||
Arguments available:
|
||||
- `--record_shapes`: If passed, records shapes for profiling.
|
||||
- `--profile_memory`: If passed, profiles memory usage.
|
||||
- `--with_stack`: If passed, profiles stack traces.
|
||||
- `--with_flops`: If passed, profiles floating point operations (FLOPS).
|
||||
- `--output_trace_dir`: If specified, saves the profiling trace to the given dir in JSON format.
|
||||
- `--cpu`: If passed, trains on the CPU instead of GPU.
|
||||
|
||||
These arguments should be added at the end of any method for starting the Python script (such as `python`, `accelerate launch`, `python -m torch.distributed.run`), such as:
|
||||
|
||||
```bash
|
||||
accelerate launch ./profiler.py --record_shapes --profile_memory --with_flops --output_trace_dir "profiler"
|
||||
```
|
||||
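For reference, the command above corresponds roughly to the following handler setup inside the script (a minimal sketch mirroring `profiler.py`):

```python
from accelerate import Accelerator
from accelerate.utils import ProfileKwargs

# Equivalent of `--record_shapes --profile_memory --with_flops --output_trace_dir "profiler"`
profile_kwargs = ProfileKwargs(
    record_shapes=True,
    profile_memory=True,
    with_flops=True,
    output_trace_dir="profiler",
)
accelerator = Accelerator(kwargs_handlers=[profile_kwargs])
```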
|
||||
@ -248,7 +248,7 @@ def training_function(config, args):
|
||||
# Use accelerator.print to print only on the main process.
|
||||
test_predictions.append(torch.cat(fold_predictions, dim=0))
|
||||
# We now need to release all our memory and get rid of the current model, optimizer, etc
|
||||
accelerator.free_memory()
|
||||
model, optimizer = accelerator.free_memory(model, optimizer)
|
||||
# New Code #
|
||||
# Finally we check the accuracy of our folded results:
|
||||
test_references = torch.cat(test_references, dim=0)
|
||||
|
||||
231
examples/by_feature/ddp_comm_hook.py
Normal file
@ -0,0 +1,231 @@
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.utils import DDPCommunicationHookType, DistributedDataParallelKwargs
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate
|
||||
# and perform ddp communication hook
|
||||
#
|
||||
# This example trains a Bert base model on GLUE MRPC
|
||||
# in any of the following settings (with the same script):
|
||||
# - single CPU or single GPU
|
||||
# - multi GPUS (using PyTorch distributed mode)
|
||||
# - (multi) TPUs
|
||||
# - fp16 (mixed-precision) or fp32 (normal precision)
|
||||
#
|
||||
# To run it in each of these various modes, follow the instructions
|
||||
# in the readme for examples:
|
||||
# https://github.com/huggingface/accelerate/tree/main/examples
|
||||
#
|
||||
########################################################################
|
||||
|
||||
|
||||
MAX_GPU_BATCH_SIZE = 16
|
||||
EVAL_BATCH_SIZE = 32
|
||||
|
||||
|
||||
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
"""
|
||||
Creates a set of `DataLoader`s for the `glue` dataset,
|
||||
using "bert-base-cased" as the tokenizer.
|
||||
|
||||
Args:
|
||||
accelerator (`Accelerator`):
|
||||
An `Accelerator` object
|
||||
batch_size (`int`, *optional*):
|
||||
The batch size for the train and validation DataLoaders.
|
||||
"""
|
||||
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
|
||||
datasets = load_dataset("glue", "mrpc")
|
||||
|
||||
def tokenize_function(examples):
|
||||
# max_length=None => use the model max length (it's actually the default)
|
||||
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
|
||||
return outputs
|
||||
|
||||
# Apply the method we just defined to all the examples in all the splits of the dataset
|
||||
# starting with the main process first:
|
||||
with accelerator.main_process_first():
|
||||
tokenized_datasets = datasets.map(
|
||||
tokenize_function,
|
||||
batched=True,
|
||||
remove_columns=["idx", "sentence1", "sentence2"],
|
||||
)
|
||||
|
||||
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
|
||||
# transformers library
|
||||
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
|
||||
)
|
||||
eval_dataloader = DataLoader(
|
||||
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
|
||||
)
|
||||
|
||||
return train_dataloader, eval_dataloader
|
||||
|
||||
|
||||
# For testing only
|
||||
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
|
||||
from accelerate.test_utils.training import mocked_dataloaders
|
||||
|
||||
get_dataloaders = mocked_dataloaders # noqa: F811
|
||||
|
||||
|
||||
def training_function(config, args):
|
||||
# For testing only
|
||||
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
|
||||
config["num_epochs"] = 2
|
||||
# New Code #
|
||||
ddp_comm_hook_type = DDPCommunicationHookType(args.ddp_comm_hook)
|
||||
ddp_comm_wrapper = DDPCommunicationHookType(args.ddp_comm_wrapper)
|
||||
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=ddp_comm_hook_type, comm_wrapper=ddp_comm_wrapper)
|
||||
# Initialize accelerator
|
||||
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, kwargs_handlers=[ddp_kwargs])
|
||||
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
|
||||
lr = config["lr"]
|
||||
num_epochs = int(config["num_epochs"])
|
||||
seed = int(config["seed"])
|
||||
batch_size = int(config["batch_size"])
|
||||
|
||||
metric = evaluate.load("glue", "mrpc")
|
||||
|
||||
set_seed(seed)
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
|
||||
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
|
||||
|
||||
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
|
||||
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
|
||||
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
|
||||
model = model.to(accelerator.device)
|
||||
|
||||
# Instantiate optimizer
|
||||
optimizer = AdamW(params=model.parameters(), lr=lr)
|
||||
|
||||
# Instantiate scheduler
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=100,
|
||||
num_training_steps=(len(train_dataloader) * num_epochs),
|
||||
)
|
||||
|
||||
# Prepare everything
|
||||
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
|
||||
# prepare method.
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
|
||||
# Now we train the model
|
||||
for epoch in range(num_epochs):
|
||||
model.train()
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
# We use the new `accumulate` context manager to perform gradient accumulation
|
||||
with accelerator.accumulate(model):
|
||||
output = model(**batch)
|
||||
loss = output.loss
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
model.eval()
|
||||
for step, batch in enumerate(eval_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch)
|
||||
predictions = outputs.logits.argmax(dim=-1)
|
||||
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
|
||||
metric.add_batch(
|
||||
predictions=predictions,
|
||||
references=references,
|
||||
)
|
||||
|
||||
eval_metric = metric.compute()
|
||||
# Use accelerator.print to print only on the main process.
|
||||
accelerator.print(f"epoch {epoch}:", eval_metric)
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Simple example of training script.")
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
)
|
||||
# New Code #
|
||||
parser.add_argument(
|
||||
"--ddp_comm_hook",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16", "power_sgd", "batched_power_sgd"],
|
||||
help="DDP Communication hook to use. Choose between `no`, `fp16`, `bf16`, `power_sgd`, and `batched_power_sgd`.",
|
||||
)
|
||||
# New Code #
|
||||
parser.add_argument(
|
||||
"--ddp_comm_wrapper",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="DDP Communication wrapper to use. Choose between `no`, `fp16`, and `bf16`.",
|
||||
)
|
||||
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
|
||||
args = parser.parse_args()
|
||||
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
|
||||
training_function(config, args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@ -34,7 +34,7 @@ import datasets
|
||||
import torch
|
||||
import transformers
|
||||
from datasets import load_dataset
|
||||
from huggingface_hub import Repository
|
||||
from huggingface_hub import HfApi
|
||||
from torch.utils.data import DataLoader
|
||||
from tqdm.auto import tqdm
|
||||
from transformers import (
|
||||
@ -47,7 +47,6 @@ from transformers import (
|
||||
default_data_collator,
|
||||
get_scheduler,
|
||||
)
|
||||
from transformers.utils import get_full_repo_name
|
||||
from transformers.utils.versions import require_version
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
@ -303,11 +302,13 @@ def main():
|
||||
# Handle the repository creation
|
||||
if accelerator.is_main_process:
|
||||
if args.push_to_hub:
|
||||
if args.hub_model_id is None:
|
||||
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
|
||||
else:
|
||||
repo_name = args.hub_model_id
|
||||
repo = Repository(args.output_dir, clone_from=repo_name)
|
||||
api = HfApi(token=args.hub_token)
|
||||
|
||||
# Create repo (repo_name from args or inferred)
|
||||
repo_name = args.hub_model_id
|
||||
if repo_name is None:
|
||||
repo_name = Path(args.output_dir).absolute().name
|
||||
repo_id = api.create_repo(repo_name, exist_ok=True).repo_id
|
||||
|
||||
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
|
||||
if "step_*" not in gitignore:
|
||||
@ -707,7 +708,11 @@ def main():
|
||||
if accelerator.is_main_process:
|
||||
tokenizer.save_pretrained(args.output_dir)
|
||||
if args.push_to_hub:
|
||||
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
|
||||
api.upload_folder(
|
||||
repo_id=repo_id,
|
||||
folder_path=args.output_dir,
|
||||
commit_message="End of training",
|
||||
)
|
||||
|
||||
with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
|
||||
json.dump({"perplexity": perplexity, "eval_loss": eval_loss.item()}, f)
|
||||
|
||||
@ -34,7 +34,7 @@ import datasets
|
||||
import torch
|
||||
import transformers
|
||||
from datasets import load_dataset
|
||||
from huggingface_hub import Repository
|
||||
from huggingface_hub import HfApi
|
||||
from torch.utils.data import DataLoader
|
||||
from tqdm.auto import tqdm
|
||||
from transformers import (
|
||||
@ -47,7 +47,7 @@ from transformers import (
|
||||
default_data_collator,
|
||||
get_scheduler,
|
||||
)
|
||||
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
|
||||
from transformers.utils import check_min_version, send_example_telemetry
|
||||
from transformers.utils.versions import require_version
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
@ -277,11 +277,13 @@ def main():
|
||||
# Handle the repository creation
|
||||
if accelerator.is_main_process:
|
||||
if args.push_to_hub:
|
||||
if args.hub_model_id is None:
|
||||
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
|
||||
else:
|
||||
repo_name = args.hub_model_id
|
||||
repo = Repository(args.output_dir, clone_from=repo_name)
|
||||
api = HfApi(token=args.hub_token)
|
||||
|
||||
# Create repo (repo_name from args or inferred)
|
||||
repo_name = args.hub_model_id
|
||||
if repo_name is None:
|
||||
repo_name = Path(args.output_dir).absolute().name
|
||||
repo_id = api.create_repo(repo_name, exist_ok=True).repo_id
|
||||
|
||||
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
|
||||
if "step_*" not in gitignore:
|
||||
@ -661,8 +663,11 @@ def main():
|
||||
)
|
||||
if accelerator.is_main_process:
|
||||
tokenizer.save_pretrained(args.output_dir)
|
||||
repo.push_to_hub(
|
||||
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
|
||||
api.upload_folder(
|
||||
repo_id=repo_id,
|
||||
folder_path=args.output_dir,
|
||||
commit_message=f"Training in progress epoch {epoch}",
|
||||
run_as_future=True,
|
||||
)
|
||||
|
||||
if args.checkpointing_steps == "epoch":
|
||||
@ -690,7 +695,11 @@ def main():
|
||||
if accelerator.is_main_process:
|
||||
tokenizer.save_pretrained(args.output_dir)
|
||||
if args.push_to_hub:
|
||||
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
|
||||
api.upload_folder(
|
||||
repo_id=repo_id,
|
||||
folder_path=args.output_dir,
|
||||
commit_message="End of training",
|
||||
)
|
||||
|
||||
with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
|
||||
json.dump({"perplexity": perplexity}, f)
|
||||
|
||||
254
examples/by_feature/profiler.py
Normal file
@ -0,0 +1,254 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.utils import ProfileKwargs
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate
|
||||
# and perform profiling
|
||||
#
|
||||
# This example trains a Bert base model on GLUE MRPC
|
||||
# in any of the following settings (with the same script):
|
||||
# - single CPU or single GPU
|
||||
# - multi GPUS (using PyTorch distributed mode)
|
||||
# - (multi) TPUs
|
||||
# - fp16 (mixed-precision) or fp32 (normal precision)
|
||||
#
|
||||
# To run it in each of these various modes, follow the instructions
|
||||
# in the readme for examples:
|
||||
# https://github.com/huggingface/accelerate/tree/main/examples
|
||||
#
|
||||
########################################################################
|
||||
|
||||
|
||||
MAX_GPU_BATCH_SIZE = 16
|
||||
EVAL_BATCH_SIZE = 32
|
||||
|
||||
|
||||
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
"""
|
||||
Creates a set of `DataLoader`s for the `glue` dataset,
|
||||
using "bert-base-cased" as the tokenizer.
|
||||
|
||||
Args:
|
||||
accelerator (`Accelerator`):
|
||||
An `Accelerator` object
|
||||
batch_size (`int`, *optional*):
|
||||
The batch size for the train and validation DataLoaders.
|
||||
"""
|
||||
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
|
||||
datasets = load_dataset("glue", "mrpc")
|
||||
|
||||
def tokenize_function(examples):
|
||||
# max_length=None => use the model max length (it's actually the default)
|
||||
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
|
||||
return outputs
|
||||
|
||||
# Apply the method we just defined to all the examples in all the splits of the dataset
|
||||
# starting with the main process first:
|
||||
with accelerator.main_process_first():
|
||||
tokenized_datasets = datasets.map(
|
||||
tokenize_function,
|
||||
batched=True,
|
||||
remove_columns=["idx", "sentence1", "sentence2"],
|
||||
)
|
||||
|
||||
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
|
||||
# transformers library
|
||||
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
|
||||
)
|
||||
eval_dataloader = DataLoader(
|
||||
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
|
||||
)
|
||||
|
||||
return train_dataloader, eval_dataloader
|
||||
|
||||
|
||||
# For testing only
|
||||
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
|
||||
from accelerate.test_utils.training import mocked_dataloaders
|
||||
|
||||
get_dataloaders = mocked_dataloaders # noqa: F811
|
||||
|
||||
|
||||
def training_function(config, args):
|
||||
# For testing only
|
||||
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
|
||||
config["num_epochs"] = 2
|
||||
# New Code #
|
||||
profile_kwargs = ProfileKwargs(
|
||||
record_shapes=args.record_shapes,
|
||||
profile_memory=args.profile_memory,
|
||||
with_flops=args.with_flops,
|
||||
output_trace_dir=args.output_trace_dir,
|
||||
)
|
||||
# Initialize accelerator
|
||||
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, kwargs_handlers=[profile_kwargs])
|
||||
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
|
||||
lr = config["lr"]
|
||||
num_epochs = int(config["num_epochs"])
|
||||
seed = int(config["seed"])
|
||||
batch_size = int(config["batch_size"])
|
||||
|
||||
metric = evaluate.load("glue", "mrpc")
|
||||
|
||||
set_seed(seed)
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
|
||||
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
|
||||
|
||||
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
|
||||
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
|
||||
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
|
||||
model = model.to(accelerator.device)
|
||||
|
||||
# Instantiate optimizer
|
||||
optimizer = AdamW(params=model.parameters(), lr=lr)
|
||||
|
||||
# Instantiate scheduler
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=100,
|
||||
num_training_steps=(len(train_dataloader) * num_epochs),
|
||||
)
|
||||
|
||||
# Prepare everything
|
||||
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
|
||||
# prepare method.
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
|
||||
# Now we train the model
|
||||
for epoch in range(num_epochs):
|
||||
model.train()
|
||||
# New Code #
|
||||
with accelerator.profile() as prof:
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
# We use the new `accumulate` context manager to perform gradient accumulation
|
||||
with accelerator.accumulate(model):
|
||||
output = model(**batch)
|
||||
loss = output.loss
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
# New Code #
|
||||
accelerator.print(
|
||||
prof.key_averages().table(
|
||||
sort_by="self_cpu_time_total" if args.cpu else "self_cuda_time_total", row_limit=-1
|
||||
)
|
||||
)
|
||||
|
||||
model.eval()
|
||||
for step, batch in enumerate(eval_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch)
|
||||
predictions = outputs.logits.argmax(dim=-1)
|
||||
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
|
||||
metric.add_batch(
|
||||
predictions=predictions,
|
||||
references=references,
|
||||
)
|
||||
|
||||
eval_metric = metric.compute()
|
||||
# Use accelerator.print to print only on the main process.
|
||||
accelerator.print(f"epoch {epoch}:", eval_metric)
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Simple example of training script.")
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
)
|
||||
# New Code #
|
||||
parser.add_argument(
|
||||
"--record_shapes",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="If passed, will record shapes for profiling.",
|
||||
)
|
||||
# New Code #
|
||||
parser.add_argument(
|
||||
"--profile_memory",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="If passed, will profile memory.",
|
||||
)
|
||||
# New Code #
|
||||
parser.add_argument(
|
||||
"--with_flops",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="If passed, will profile flops.",
|
||||
)
|
||||
# New Code #
|
||||
parser.add_argument(
|
||||
"--output_trace_dir",
|
||||
type=str,
|
||||
default=None,
|
||||
help="If passed, will save a json trace to the specified path.",
|
||||
)
|
||||
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
|
||||
args = parser.parse_args()
|
||||
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
|
||||
training_function(config, args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
225
examples/by_feature/schedule_free.py
Normal file
@ -0,0 +1,225 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.utils import is_schedulefree_available
|
||||
|
||||
|
||||
if is_schedulefree_available():
|
||||
import schedulefree
|
||||
else:
|
||||
raise ImportError(
|
||||
"This example requires the `schedulefree` library. Please install it with `pip install schedulefree`"
|
||||
)
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate and Facebook's
|
||||
# scheduler-free optimizer: https://github.com/facebookresearch/schedule_free/
|
||||
#
|
||||
# This example trains a Bert base model on GLUE MRPC
|
||||
# in any of the following settings (with the same script):
|
||||
# - single CPU or single GPU
|
||||
# - multi GPUS (using PyTorch distributed mode)
|
||||
# - (multi) TPUs
|
||||
# - fp16 (mixed-precision) or fp32 (normal precision)
|
||||
#
|
||||
# To run it in each of these various modes, follow the instructions
|
||||
# in the readme for examples:
|
||||
# https://github.com/huggingface/accelerate/tree/main/examples
|
||||
#
|
||||
########################################################################
|
||||
|
||||
|
||||
MAX_GPU_BATCH_SIZE = 16
|
||||
EVAL_BATCH_SIZE = 32
|
||||
|
||||
|
||||
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
"""
|
||||
Creates a set of `DataLoader`s for the `glue` dataset,
|
||||
using "bert-base-cased" as the tokenizer.
|
||||
|
||||
Args:
|
||||
accelerator (`Accelerator`):
|
||||
An `Accelerator` object
|
||||
batch_size (`int`, *optional*):
|
||||
The batch size for the train and validation DataLoaders.
|
||||
"""
|
||||
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
|
||||
datasets = load_dataset("glue", "mrpc")
|
||||
|
||||
def tokenize_function(examples):
|
||||
# max_length=None => use the model max length (it's actually the default)
|
||||
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
|
||||
return outputs
|
||||
|
||||
# Apply the method we just defined to all the examples in all the splits of the dataset
|
||||
# starting with the main process first:
|
||||
with accelerator.main_process_first():
|
||||
tokenized_datasets = datasets.map(
|
||||
tokenize_function,
|
||||
batched=True,
|
||||
remove_columns=["idx", "sentence1", "sentence2"],
|
||||
)
|
||||
|
||||
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
|
||||
# transformers library
|
||||
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
|
||||
|
||||
def collate_fn(examples):
|
||||
# For Torchxla, it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
|
||||
)
|
||||
eval_dataloader = DataLoader(
|
||||
tokenized_datasets["validation"],
|
||||
shuffle=False,
|
||||
collate_fn=collate_fn,
|
||||
batch_size=EVAL_BATCH_SIZE,
|
||||
drop_last=(accelerator.mixed_precision == "fp8"),
|
||||
)
|
||||
|
||||
return train_dataloader, eval_dataloader
|
||||
|
||||
|
||||
# For testing only
|
||||
|
||||
|
||||
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
|
||||
from accelerate.test_utils.training import mocked_dataloaders
|
||||
|
||||
get_dataloaders = mocked_dataloaders # noqa: F811
|
||||
|
||||
|
||||
def training_function(config, args):
|
||||
# Initialize accelerator
|
||||
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
|
||||
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
|
||||
lr = config["lr"]
|
||||
num_epochs = int(config["num_epochs"])
|
||||
seed = int(config["seed"])
|
||||
batch_size = int(config["batch_size"])
|
||||
|
||||
metric = evaluate.load("glue", "mrpc")
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
set_seed(seed)
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
|
||||
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
|
||||
|
||||
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
|
||||
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
|
||||
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
|
||||
model = model.to(accelerator.device)
|
||||
# Instantiate optimizer with warmup steps
|
||||
optimizer = schedulefree.AdamWScheduleFree(
|
||||
model.parameters(),
|
||||
lr=lr,
|
||||
warmup_steps=100,
|
||||
)
|
||||
|
||||
# Prepare everything
|
||||
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
|
||||
# prepare method.
|
||||
|
||||
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader
|
||||
)
|
||||
|
||||
# Now we train the model
|
||||
for epoch in range(num_epochs):
|
||||
model.train()
|
||||
optimizer.train()
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
loss = loss / gradient_accumulation_steps
|
||||
accelerator.backward(loss)
|
||||
if step % gradient_accumulation_steps == 0:
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
model.eval()
|
||||
optimizer.eval()
|
||||
for step, batch in enumerate(eval_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch)
|
||||
predictions = outputs.logits.argmax(dim=-1)
|
||||
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
|
||||
metric.add_batch(
|
||||
predictions=predictions,
|
||||
references=references,
|
||||
)
|
||||
|
||||
eval_metric = metric.compute()
|
||||
# Use accelerator.print to print only on the main process.
|
||||
accelerator.print(f"epoch {epoch}:", eval_metric)
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Simple example of training script.")
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
)
|
||||
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
|
||||
args = parser.parse_args()
|
||||
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
|
||||
training_function(config, args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
25
examples/inference/distributed/README.md
Normal file
@ -0,0 +1,25 @@
|
||||
# Distributed inference examples
|
||||
|
||||
This folder contains a variety of tutorials for running distributed inference with the following strategy:
|
||||
|
||||
Load an entire model onto each GPU and send chunks of a batch through each GPU’s model copy at a time
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
pip install accelerate torch
|
||||
```
|
||||
|
||||
## Running code
|
||||
|
||||
You can use either `torchrun` or the recommended `accelerate launch` (no `accelerate config` needed) to run each script:
|
||||
|
||||
```bash
|
||||
accelerate launch --num_processes {NUM_GPUS} phi2.py
|
||||
```
|
||||
|
||||
Or:
|
||||
|
||||
```bash
|
||||
torchrun --nproc-per-node {NUM_GPUS} phi2.py
|
||||
```
|
||||
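All of the scripts below follow the same core pattern; here is a minimal, model-free sketch of it (the placeholder "work" is just upper-casing strings):

```python
from accelerate import PartialState
from accelerate.utils import gather_object

state = PartialState()  # sets up the distributed environment
inputs = ["prompt 1", "prompt 2", "prompt 3"]

results = []
with state.split_between_processes(inputs) as shard:
    # Each process only sees its own slice of `inputs`.
    results.extend(item.upper() for item in shard)

results = gather_object(results)  # collect every process's results
state.print(results)              # print once, from the main process
```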
86
examples/inference/distributed/phi2.py
Normal file
@ -0,0 +1,86 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
from accelerate import PartialState
|
||||
from accelerate.utils import gather_object
|
||||
|
||||
|
||||
# Start up the distributed environment without needing the Accelerator.
|
||||
distributed_state = PartialState()
|
||||
|
||||
# You can change the model to any LLM such as mistralai/Mistral-7B-v0.1 or meta-llama/Llama-2-7b-chat-hf
|
||||
model_name = "microsoft/phi-2"
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
model_name, device_map=distributed_state.device, torch_dtype=torch.float16
|
||||
)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||
# Need to set the padding token to the eos token for generation
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
|
||||
prompts = [
|
||||
"I would like to",
|
||||
"hello how are you",
|
||||
"what is going on",
|
||||
"roses are red and",
|
||||
"welcome to the hotel",
|
||||
]
|
||||
|
||||
# You can change the batch size depending on your GPU RAM
|
||||
batch_size = 2
|
||||
# We pad to a multiple of 8 since it is more efficient on some hardware. More information here: https://github.com/huggingface/tokenizers/issues/991
|
||||
pad_to_multiple_of = 8
|
||||
|
||||
# Split into batches
|
||||
# We will get the following results:
|
||||
# [ ["I would like to", "hello how are you"], [ "what is going on", "roses are red and"], [ "welcome to the hotel"] ]
|
||||
formatted_prompts = [prompts[i : i + batch_size] for i in range(0, len(prompts), batch_size)]
|
||||
|
||||
# Apply padding on the left since we are doing generation
|
||||
padding_side_default = tokenizer.padding_side
|
||||
tokenizer.padding_side = "left"
|
||||
# Tokenize each batch
|
||||
tokenized_prompts = [
|
||||
tokenizer(formatted_prompt, padding=True, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt")
|
||||
for formatted_prompt in formatted_prompts
|
||||
]
|
||||
# Put back the original padding behavior
|
||||
tokenizer.padding_side = padding_side_default
|
||||
|
||||
completions_per_process = []
|
||||
# split_between_processes automatically splits the batched data across all the processes. We also set apply_padding=True
|
||||
# so that the GPUs will have the same number of prompts, and you can then gather the results.
|
||||
# For example, if we have 2 gpus, the distribution will be:
|
||||
# GPU 0: ["I would like to", "hello how are you"], "what is going on", "roses are red and"]
|
||||
# GPU 1: ["welcome to the hotel"], ["welcome to the hotel"] -> this prompt is duplicated to ensure that all gpus have the same number of prompts
|
||||
with distributed_state.split_between_processes(tokenized_prompts, apply_padding=True) as batched_prompts:
|
||||
for batch in batched_prompts:
|
||||
# Move the batch to the device
|
||||
batch = batch.to(distributed_state.device)
|
||||
# We generate the text, decode it and add it to the list completions_per_process
|
||||
outputs = model.generate(**batch, max_new_tokens=20)
|
||||
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
completions_per_process.extend(generated_text)
|
||||
|
||||
# We are gathering strings, so we need to use gather_object.
|
||||
# If you need to gather tensors, you can use gather from accelerate.utils
|
||||
completions_gather = gather_object(completions_per_process)
|
||||
|
||||
# Drop duplicates produced by apply_padding in split_between_processes
|
||||
completions = completions_gather[: len(prompts)]
|
||||
|
||||
distributed_state.print(completions)
|
||||
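To make the final trim above concrete: with five prompts, a batch size of 2 and two processes, `apply_padding=True` gives the second rank a duplicated batch, and `gather_object` returns the per-process completions concatenated in rank order, so slicing to `len(prompts)` drops exactly the padded duplicate. A small, hypothetical illustration with plain lists (no model involved):

```python
# Hypothetical illustration of the gather-and-trim step above (values are made up).
prompts = ["I would like to", "hello how are you", "what is going on", "roses are red and", "welcome to the hotel"]

rank0 = ["I would like to ...", "hello how are you ...", "what is going on ...", "roses are red and ..."]
rank1 = ["welcome to the hotel ...", "welcome to the hotel ..."]  # last batch was duplicated by apply_padding

gathered = rank0 + rank1                 # what gather_object returns, in rank order
completions = gathered[: len(prompts)]   # drops the duplicated final completion
assert len(completions) == len(prompts)
```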
30
examples/inference/distributed/stable_diffusion.py
Normal file
@ -0,0 +1,30 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import torch
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
from accelerate import PartialState # Can also be Accelerator or AcceleratorState
|
||||
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
|
||||
distributed_state = PartialState()
|
||||
pipe.to(distributed_state.device)
|
||||
|
||||
# Assume two processes
|
||||
# On the first GPU, the prompts will be ["a dog", "a cat"],
|
||||
# and on the second GPU it will be ["a chicken", "a chicken"].
|
||||
# Make sure to drop the final sample, as it will be a duplicate of the previous one.
|
||||
with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"], apply_padding=True) as prompt:
|
||||
result = pipe(prompt).images
|
||||
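One natural extension is to persist each process's images from inside the `with` block; the sketch below assumes the `pipe` and `distributed_state` objects defined above, and the file names are arbitrary:

```python
# Sketch: save each generated image under a rank-specific name.
with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"]) as prompt:
    images = pipe(prompt).images
    for idx, image in enumerate(images):
        image.save(f"result_rank{distributed_state.process_index}_{idx}.png")
```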
@ -75,4 +75,4 @@ end_time = time.time()
|
||||
if PartialState().is_last_process:
|
||||
output = torch.stack(tuple(output[0]))
|
||||
print(f"Time of first pass: {first_batch}")
|
||||
print(f"Average time per batch: {(end_time - start_time)/5}")
|
||||
print(f"Average time per batch: {(end_time - start_time) / 5}")
|
||||
@ -74,4 +74,4 @@ end_time = time.time()
|
||||
if PartialState().is_last_process:
|
||||
output = torch.stack(tuple(output[0]))
|
||||
print(f"Time of first pass: {first_batch}")
|
||||
print(f"Average time per batch: {(end_time - start_time)/5}")
|
||||
print(f"Average time per batch: {(end_time - start_time) / 5}")
|
||||
@ -27,7 +27,7 @@ model.eval()
|
||||
# Input configs
|
||||
# Create example inputs for the model
|
||||
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
|
||||
prompts = ("I would like to", "I really like to", "The weather is") # bs = 3
|
||||
prompts = ("I would like to", "I really like to", "The weather is pretty") # bs = 3
|
||||
tokenizer.pad_token = tokenizer.eos_token
|
||||
inputs = tokenizer(prompts, return_tensors="pt", padding=True)
|
||||
|
||||
@ -35,7 +35,7 @@ inputs = tokenizer(prompts, return_tensors="pt", padding=True)
|
||||
# Using `auto` is equivalent to letting `device_map="auto"` figure
|
||||
# out device mapping and will also split the model according to the
|
||||
# number of total GPUs available if it fits on one GPU
|
||||
model = prepare_pippy(model, split_points="auto", example_args=inputs)
|
||||
model = prepare_pippy(model, split_points="auto", example_kwargs=inputs)
|
||||
|
||||
# You can pass `gather_output=True` to have the output from the model
|
||||
# available on all GPUs
|
||||
@ -43,7 +43,7 @@ model = prepare_pippy(model, split_points="auto", example_args=inputs)
|
||||
|
||||
# currently we don't support `model.generate`
|
||||
# output = model.generate(**inputs, max_new_tokens=1)
|
||||
|
||||
inputs = inputs.to(0)
|
||||
with torch.no_grad():
|
||||
output = model(**inputs)
|
||||
|
||||
@ -86,4 +86,4 @@ end_time = time.time()
|
||||
if PartialState().is_last_process:
|
||||
output = torch.stack(tuple(output[0]))
|
||||
print(f"Time of first pass: {first_batch}")
|
||||
print(f"Average time per batch: {(end_time - start_time)/5}")
|
||||
print(f"Average time per batch: {(end_time - start_time) / 5}")
|
||||
@ -1,3 +1,16 @@
|
||||
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
import runhouse as rh
|
||||
|
||||
@ -1,3 +1,5 @@
|
||||
accelerate # used to be installed in Amazon SageMaker environment
|
||||
evaluate
|
||||
datasets==2.3.2
|
||||
datasets==2.3.2
|
||||
schedulefree
|
||||
huggingface_hub>=0.20.0
|
||||
|
||||
65
examples/slurm/submit_multicpu.sh
Normal file
@ -0,0 +1,65 @@
|
||||
#!/bin/bash -l
|
||||
|
||||
#SBATCH --job-name=multicpu
|
||||
#SBATCH --nodes=2 # number of Nodes
|
||||
#SBATCH --ntasks-per-node=1 # number of MP tasks
|
||||
#SBATCH --exclusive
|
||||
#SBATCH --output=O-%x.%j
|
||||
#SBATCH --error=E-%x.%j
|
||||
|
||||
######################
|
||||
### Set environment ###
|
||||
######################
|
||||
source activateEnvironment.sh
|
||||
|
||||
######################
|
||||
#### Set network #####
|
||||
######################
|
||||
head_node_ip=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
|
||||
######################
|
||||
|
||||
# Setup env variables for distributed jobs
|
||||
export MASTER_PORT="${MASTER_PORT:-29555}"
|
||||
echo "head_node_ip=${head_node_ip}"
|
||||
echo "MASTER_PORT=${MASTER_PORT}"
|
||||
|
||||
INSTANCES_PER_NODE="${INSTANCES_PER_NODE:-1}"
|
||||
|
||||
if [[ $SLURM_NNODES == 1 ]] && [[ $INSTANCES_PER_NODE == 1 ]]; then
|
||||
export CCL_WORKER_COUNT=0
|
||||
LAUNCHER=""
|
||||
else
|
||||
# Setup env variables for distributed jobs
|
||||
export CCL_WORKER_COUNT="${CCL_WORKER_COUNT:-2}"
|
||||
echo "CCL_WORKER_COUNT=${CCL_WORKER_COUNT}"
|
||||
|
||||
# Write hostfile
|
||||
HOSTFILE_PATH=hostfile
|
||||
scontrol show hostname $SLURM_JOB_NODELIST | perl -ne 'chomp; print "$_\n" x 1' > ${HOSTFILE_PATH}
|
||||
|
||||
export LAUNCHER="accelerate launch \
|
||||
--num_processes $((SLURM_NNODES * ${INSTANCES_PER_NODE})) \
|
||||
--num_machines $SLURM_NNODES \
|
||||
--rdzv_backend c10d \
|
||||
--main_process_ip $head_node_ip \
|
||||
--main_process_port $MASTER_PORT \
|
||||
--mpirun_hostfile $HOSTFILE_PATH \
|
||||
--mpirun_ccl $CCL_WORKER_COUNT"
|
||||
fi
|
||||
|
||||
# This step is necessary because accelerate launch does not handle multiline arguments properly
|
||||
export ACCELERATE_DIR="${ACCELERATE_DIR:-/accelerate}"
|
||||
export SCRIPT="${ACCELERATE_DIR}/examples/complete_nlp_example.py"
|
||||
export SCRIPT_ARGS=" \
|
||||
--cpu \
|
||||
--output_dir ${ACCELERATE_DIR}/examples/output \
|
||||
"
|
||||
|
||||
# This step is necessary because accelerate launch does not handle multiline arguments properly
|
||||
export CMD="$LAUNCHER $SCRIPT $SCRIPT_ARGS"
|
||||
# Print the command
|
||||
echo $CMD
|
||||
echo ""
|
||||
|
||||
# Run the command
|
||||
eval $CMD
|
||||
@ -13,14 +13,15 @@
|
||||
######################
|
||||
### Set enviroment ###
|
||||
######################
|
||||
source activateEnviroment.sh
|
||||
source activateEnvironment.sh
|
||||
export GPUS_PER_NODE=4
|
||||
######################
|
||||
|
||||
export SCRIPT=/accelerate/examples/complete_nlp_example.py
|
||||
export ACCELERATE_DIR="${ACCELERATE_DIR:-/accelerate}"
|
||||
export SCRIPT="${ACCELERATE_DIR}/examples/complete_nlp_example.py"
|
||||
export SCRIPT_ARGS=" \
|
||||
--mixed_precision fp16 \
|
||||
--output_dir /accelerate/examples/output \
|
||||
--output_dir ${ACCELERATE_DIR}/examples/output \
|
||||
--with_tracking \
|
||||
"
|
||||
|
||||
|
||||
@ -13,7 +13,7 @@
|
||||
######################
|
||||
### Set enviroment ###
|
||||
######################
|
||||
source activateEnviroment.sh
|
||||
source activateEnvironment.sh
|
||||
export GPUS_PER_NODE=4
|
||||
######################
|
||||
|
||||
@ -30,10 +30,11 @@ export LAUNCHER="accelerate launch \
|
||||
--main_process_ip $head_node_ip \
|
||||
--main_process_port 29500 \
|
||||
"
|
||||
export SCRIPT="/accelerate/examples/complete_nlp_example.py"
|
||||
export ACCELERATE_DIR="${ACCELERATE_DIR:-/accelerate}"
|
||||
export SCRIPT="${ACCELERATE_DIR}/examples/complete_nlp_example.py"
|
||||
export SCRIPT_ARGS=" \
|
||||
--mixed_precision fp16 \
|
||||
--output_dir /accelerate/examples/output \
|
||||
--output_dir ${ACCELERATE_DIR}/examples/output \
|
||||
"
|
||||
|
||||
# This step is necessary because accelerate launch does not handle multiline arguments properly
|
||||
|
||||
@ -12,16 +12,21 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
|
||||
from accelerate.test_utils.testing import is_hub_online
|
||||
from manim import *
|
||||
|
||||
|
||||
class HooksModelTester(unittest.TestCase):
|
||||
"Simple tester that checks if the Hub is online or not"
|
||||
|
||||
def test_hub_online(self):
|
||||
self.assertTrue(
|
||||
is_hub_online(),
|
||||
"Hub is offline! This test will fail until the hub is back online. Relevent tests will be skipped.",
|
||||
class Stage0(Scene):
|
||||
def construct(self):
|
||||
mascot = ImageMobject("mascot_bookie.png")
|
||||
mascot.scale(.35)
|
||||
mascot.move_to([-3.75,-1,0])
|
||||
text = Paragraph(
|
||||
"Distributed Training,\nHugging Face Accelerate,\nand PyTorch DataLoaders\n\nHow do they all interact?",
|
||||
font_size=36,
|
||||
line_spacing=1,
|
||||
alignment="center",
|
||||
weight=BOLD,
|
||||
)
|
||||
text.move_to([1.75,.5,0])
|
||||
self.add(mascot)
|
||||
self.add(text)
|
||||
31
manim_animations/dataloaders/stage_1.py
Normal file
@ -0,0 +1,31 @@
|
||||
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from manim import *
|
||||
|
||||
class Stage01(Scene):
|
||||
def construct(self):
|
||||
mascot = ImageMobject("mascot_bookie.png")
|
||||
mascot.scale(.35)
|
||||
mascot.move_to([-3.75,-1,0])
|
||||
text = Paragraph(
|
||||
"Distributed Training,\nHugging Face Accelerate,\nand PyTorch DataLoaders\n\nHow do they all interact?",
|
||||
font_size=36,
|
||||
line_spacing=1,
|
||||
alignment="center",
|
||||
weight=BOLD,
|
||||
)
|
||||
text.move_to([1.75,.5,0])
|
||||
self.add(mascot)
|
||||
self.add(text)
|
||||
176
manim_animations/dataloaders/stage_2.py
Normal file
@ -0,0 +1,176 @@
|
||||
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from manim import *
|
||||
|
||||
|
||||
class Stage2(Scene):
|
||||
def construct(self):
|
||||
# The dataset items
|
||||
fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)
|
||||
columns = [
|
||||
VGroup(*[Rectangle(height=0.25,width=0.25,color="green") for i in range(8)]).arrange(RIGHT,buff=0)
|
||||
for j in range(4)
|
||||
]
|
||||
dataset_recs = VGroup(*columns).arrange(UP, buff=0)
|
||||
dataset_text = Text("Dataset", font_size=24)
|
||||
dataset = Group(dataset_recs,dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
|
||||
dataset.move_to([-2,0,0])
|
||||
self.add(dataset)
|
||||
|
||||
code = Code(
|
||||
code="dataloader = DataLoader(...)\nfor batch in dataloader():\n\t...",
|
||||
tab_width=4,
|
||||
background="window",
|
||||
language="Python",
|
||||
font="Monospace",
|
||||
font_size=14,
|
||||
corner_radius=.2,
|
||||
insert_line_no=False,
|
||||
line_spacing=.75,
|
||||
style=Code.styles_list[1],
|
||||
)
|
||||
code.move_to([-3.5, 2.5, 0])
|
||||
self.add(code)
|
||||
|
||||
# The dataloader itself
|
||||
dataloader = Group(
|
||||
Rectangle(color="red", height=2, width=2),
|
||||
Text("DataLoader", font_size=24)
|
||||
).arrange(DOWN, buff=.5, aligned_edge=DOWN)
|
||||
|
||||
sampler = Group(
|
||||
Rectangle(color="blue", height=1, width=1),
|
||||
Text("Sampler", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
|
||||
dataloader.move_to([1, 0, 0])
|
||||
sampler.move_to([.75,.25,0])
|
||||
self.add(dataloader)
|
||||
self.add(sampler)
|
||||
|
||||
gpu_1 = Group(
|
||||
Rectangle(color="white", height=1, width=1),
|
||||
Text("GPU 1", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4, 2, 0])
|
||||
gpu_2 = Group(
|
||||
Rectangle(color="white", height=1, width=1),
|
||||
Text("GPU 2", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4, .5, 0])
|
||||
gpu_3 = Group(
|
||||
Rectangle(color="white", height=1, width=1),
|
||||
Text("GPU 3", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4, -1, 0])
|
||||
gpu_4 = Group(
|
||||
Rectangle(color="white", height=1, width=1),
|
||||
Text("GPU 4", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4, -2.5, 0])
|
||||
gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]]
|
||||
self.add(gpu_1, gpu_2, gpu_3, gpu_4)
|
||||
|
||||
# Animate their existence
|
||||
self.play(
|
||||
Create(gpu_1[0], run_time=0.5),
|
||||
Create(gpu_2[0], run_time=0.5),
|
||||
Create(gpu_3[0], run_time=0.5),
|
||||
Create(gpu_4[0], run_time=0.5),
|
||||
Create(dataset_recs, run_time=1),
|
||||
Create(sampler[0], run_time=1),
|
||||
Create(dataloader[0], run_time=1)
|
||||
)
|
||||
|
||||
step_1 = MarkupText(
|
||||
f"Without any special care, \nthe same data is sent though each sampler, \nand the same samples are spit out on each GPU",
|
||||
font_size=18
|
||||
)
|
||||
step_1.move_to([0, -2.5, 0])
|
||||
self.play(
|
||||
Write(step_1, run_time=4),
|
||||
)
|
||||
|
||||
first_animations = []
|
||||
second_animations = []
|
||||
|
||||
|
||||
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
|
||||
current_color = colors[0]
|
||||
buff = 0
|
||||
lr_buff = .25
|
||||
old_target = None
|
||||
new_datasets = []
|
||||
for i,data in enumerate(dataset_recs[-1]):
|
||||
if i % 2 == 0:
|
||||
# current_color = colors[i//2]
|
||||
current_color = "BLUE_E"
|
||||
dataset_target = Rectangle(height=0.46/2,width=0.46/2).set_stroke(width=0.).set_fill(current_color, opacity=0.7)
|
||||
dataset_target.move_to(data)
|
||||
dataset_target.generate_target()
|
||||
aligned_edge = ORIGIN
|
||||
if i % 2 == 0:
|
||||
old_target = dataset_target.target
|
||||
buff -= .25
|
||||
aligned_edge = LEFT
|
||||
dataset_target.target.next_to(
|
||||
sampler, buff=buff, direction=UP,
|
||||
aligned_edge=LEFT
|
||||
)
|
||||
else:
|
||||
dataset_target.target.next_to(
|
||||
old_target, direction=RIGHT, buff=0.01,
|
||||
)
|
||||
new_datasets.append(dataset_target)
|
||||
first_animations.append(data.animate(run_time=0.5).set_stroke(current_color))
|
||||
second_animations.append(MoveToTarget(dataset_target, run_time=1.5))
|
||||
self.play(*first_animations)
|
||||
self.play(*second_animations)
|
||||
self.wait()
|
||||
|
||||
move_animation = []
|
||||
|
||||
for j,gpu in enumerate(gpus):
|
||||
buff = 0
|
||||
for i,data in enumerate(new_datasets):
|
||||
if i % 2 == 0:
|
||||
current_color = colors[i//2]
|
||||
if j != 3:
|
||||
data = data.copy()
|
||||
data.generate_target()
|
||||
aligned_edge = ORIGIN
|
||||
if i % 2 == 0:
|
||||
old_target = data.target
|
||||
buff -= .25
|
||||
aligned_edge = LEFT
|
||||
data.target.next_to(
|
||||
gpu, buff=buff, direction=UP,
|
||||
aligned_edge=LEFT
|
||||
)
|
||||
else:
|
||||
data.target.next_to(
|
||||
old_target, direction=RIGHT, buff=0.01,
|
||||
)
|
||||
move_animation.append(MoveToTarget(data, run_time=1.5))
|
||||
|
||||
|
||||
self.play(*move_animation)
|
||||
|
||||
self.remove(step_1)
|
||||
step_2 = MarkupText(
|
||||
f"This behavior is undesireable, because we want\neach GPU to see different data for efficient training.",
|
||||
font_size=18
|
||||
)
|
||||
step_2.move_to([0, -2.5, 0])
|
||||
|
||||
self.play(
|
||||
Write(step_2, run_time=2.5),
|
||||
)
|
||||
self.wait()
|
||||
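The behaviour this stage animates is easy to reproduce: a `DataLoader` built identically in every process yields identical batches on every GPU unless something shards the data. A standalone PyTorch sketch of the problem (no Accelerate involved):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(32))
loader = DataLoader(dataset, batch_size=8)

# Every process running this sees the exact same batches:
# [0..7], [8..15], [16..23], [24..31] -- nothing is process-specific yet.
for (batch,) in loader:
    print(batch.tolist())
```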
34
manim_animations/dataloaders/stage_3.py
Normal file
@ -0,0 +1,34 @@
|
||||
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from manim import *
|
||||
|
||||
class Stage3(Scene):
|
||||
def construct(self):
|
||||
step_1 = MarkupText(
|
||||
f"To combat this, Accelerate employs one of two different\nSampler wrapper methods depending on the scenario:",
|
||||
font_size=24
|
||||
)
|
||||
step_1.move_to([0, 1.5, 0])
|
||||
self.add(step_1)
|
||||
step_2 = MarkupText(
|
||||
f"1. Sharding the dataset before drawing:\n\t● <span fgcolor='{RED}'>IterableDatasetShard</span>\n\t● <span fgcolor='{RED}'>BatchSamplerShard</span>",
|
||||
font_size=24,
|
||||
).next_to(step_1, direction=DOWN, aligned_edge=LEFT)
|
||||
self.add(step_2)
|
||||
step_3 = MarkupText(
|
||||
f"\n\n2. Splitting the batch after drawing:\n\t● <span fgcolor='{BLUE}'>DataLoaderDispatcher</span>",
|
||||
font_size=24,
|
||||
).next_to(step_2, direction=DOWN, aligned_edge=LEFT)
|
||||
self.add(step_3)
|
||||
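Which of the two wrapper strategies is used can be steered when creating the `Accelerator`; a minimal sketch, assuming the `DataLoaderConfiguration` object exported in this release:

```python
from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

# Shard the sampler/dataset up front (BatchSamplerShard / IterableDatasetShard):
accelerator = Accelerator(dataloader_config=DataLoaderConfiguration(dispatch_batches=False))

# Or draw full batches on the main process and dispatch slices to each GPU
# (DataLoaderDispatcher), which stage 7 below notes is slower:
accelerator = Accelerator(dataloader_config=DataLoaderConfiguration(dispatch_batches=True))
```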
52
manim_animations/dataloaders/stage_4.py
Normal file
@ -0,0 +1,52 @@
|
||||
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from manim import *
|
||||
|
||||
class Stage4(Scene):
|
||||
def construct(self):
|
||||
|
||||
step_1 = MarkupText(
|
||||
f"To understand the next part fully, let's define two terms,\n<span fgcolor='{RED}'>`batch_size`</span> and <span fgcolor='{BLUE}'>`global_batch_size`</span>:",
|
||||
font_size=18
|
||||
)
|
||||
step_1.move_to([0, 1.5, 0])
|
||||
# <span fgcolor='{YELLOW}'>●</span>
|
||||
step_2 = MarkupText(
|
||||
f"\n\n● <span fgcolor='{RED}'>`batch_size`</span>: \n\tThis will be defined as the batch size seen on a given\n\t*individual* GPU",
|
||||
font_size=18,
|
||||
).next_to(step_1, direction=DOWN, aligned_edge=LEFT)
|
||||
|
||||
step_3 = MarkupText(
|
||||
f"\n\n● <span fgcolor='{BLUE}'>`global_batch_size`</span>:\n\tThis will be defined as the *total* number of\n\tdifferent items seen in the dataset, across all GPUs",
|
||||
font_size=18,
|
||||
).next_to(step_2, direction=DOWN, aligned_edge=LEFT)
|
||||
|
||||
step_4 = MarkupText(
|
||||
f"\n\nSo if we have a dataset of 64 items, 8 GPUs, \nand a `batch_size` of 8, each *step* will go through\nthe entire dataset one time as 8*8=64",
|
||||
font_size=18,
|
||||
).next_to(step_3, direction=DOWN, aligned_edge=LEFT)
|
||||
self.play(
|
||||
Write(step_1, run_time=4),
|
||||
)
|
||||
self.play(
|
||||
Write(step_2, run_time=4)
|
||||
)
|
||||
self.play(
|
||||
Write(step_3, run_time=4)
|
||||
)
|
||||
self.play(
|
||||
Write(step_4, run_time=6)
|
||||
)
|
||||
self.wait()
|
||||
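The arithmetic in that last panel generalizes directly; a small worked example in plain Python:

```python
# Worked example of the batch_size / global_batch_size relationship described above.
dataset_size = 64
num_gpus = 8
batch_size = 8  # per-GPU batch size

global_batch_size = batch_size * num_gpus             # 64 items seen per optimizer step
steps_per_epoch = dataset_size // global_batch_size   # 1 step covers the whole dataset

print(global_batch_size, steps_per_epoch)  # 64 1
```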
203
manim_animations/dataloaders/stage_5.py
Normal file
@ -0,0 +1,203 @@
|
||||
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from manim import *
|
||||
|
||||
class Stage5(Scene):
|
||||
def construct(self):
|
||||
# The dataset items
|
||||
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
|
||||
fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)
|
||||
columns = [
|
||||
VGroup(*[Rectangle(height=0.25,width=0.25,color=colors[j]) for i in range(8)]).arrange(RIGHT,buff=0)
|
||||
for j in range(4)
|
||||
]
|
||||
dataset_recs = VGroup(*columns).arrange(UP, buff=0)
|
||||
dataset_text = Text("Dataset", font_size=24)
|
||||
dataset = Group(dataset_recs,dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
|
||||
dataset.move_to([-2,0,0])
|
||||
self.add(dataset)
|
||||
code = Code(
|
||||
code="# We enable this by default\naccelerator = Accelerator()\ndataloader = DataLoader(...)\ndataloader = accelerator.prepare(dataloader)\nfor batch in dataloader:\n\t...",
|
||||
tab_width=4,
|
||||
background="window",
|
||||
language="Python",
|
||||
font="Monospace",
|
||||
font_size=14,
|
||||
corner_radius=.2,
|
||||
insert_line_no=False,
|
||||
line_spacing=.75,
|
||||
style=Code.styles_list[1],
|
||||
)
|
||||
code.move_to([-3.5, 2.5, 0])
|
||||
self.add(code)
|
||||
|
||||
# The dataloader itself
|
||||
|
||||
sampler_1 = Group(
|
||||
Rectangle(color="blue", height=1, width=1),
|
||||
Text("Sampler GPU 1", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
|
||||
sampler_2 = Group(
|
||||
Rectangle(color="blue", height=1, width=1),
|
||||
Text("Sampler GPU 2", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
|
||||
sampler_3 = Group(
|
||||
Rectangle(color="blue", height=1, width=1),
|
||||
Text("Sampler GPU 3", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
|
||||
sampler_4 = Group(
|
||||
Rectangle(color="blue", height=1, width=1),
|
||||
Text("Sampler GPU 4", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
|
||||
sampler_1.move_to([2,2,0])
|
||||
sampler_2.move_to([2,.5,0])
|
||||
sampler_3.move_to([2,-1.,0])
|
||||
sampler_4.move_to([2,-2.5,0])
|
||||
self.add(sampler_1, sampler_2, sampler_3, sampler_4)
|
||||
samplers = [sampler_1[0], sampler_2[0], sampler_3[0], sampler_4[0]]
|
||||
|
||||
gpu_1 = Group(
|
||||
Rectangle(color="white", height=1, width=1),
|
||||
Text("Output GPU 1", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, 2, 0])
|
||||
gpu_2 = Group(
|
||||
Rectangle(color="white", height=1, width=1),
|
||||
Text("Output GPU 2", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, .5, 0])
|
||||
gpu_3 = Group(
|
||||
Rectangle(color="white", height=1, width=1),
|
||||
Text("Output GPU 3", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -1, 0])
|
||||
gpu_4 = Group(
|
||||
Rectangle(color="white", height=1, width=1),
|
||||
Text("Output GPU 4", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -2.5, 0])
|
||||
gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]]
|
||||
self.add(gpu_1, gpu_2, gpu_3, gpu_4)
|
||||
|
||||
# Animate their existence
|
||||
self.play(
|
||||
Create(gpu_1[0], run_time=1),
|
||||
Create(gpu_2[0], run_time=1),
|
||||
Create(gpu_3[0], run_time=1),
|
||||
Create(gpu_4[0], run_time=1),
|
||||
Create(dataset_recs, run_time=1),
|
||||
Create(sampler_1[0], run_time=1),
|
||||
Create(sampler_2[0], run_time=1),
|
||||
Create(sampler_3[0], run_time=1),
|
||||
Create(sampler_4[0], run_time=1),
|
||||
)
|
||||
|
||||
first_animations = []
|
||||
second_animations = []
|
||||
|
||||
|
||||
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
|
||||
current_color = colors[0]
|
||||
buff = 0
|
||||
lr_buff = .25
|
||||
old_target = None
|
||||
new_datasets = []
|
||||
for i,row_data in enumerate(dataset_recs):
|
||||
new_row = []
|
||||
current_color = colors[i]
|
||||
if i == 0:
|
||||
idx = -3
|
||||
elif i == 1:
|
||||
idx = -2
|
||||
elif i == 2:
|
||||
idx = -1
|
||||
elif i == 3:
|
||||
idx = 0
|
||||
for j,indiv_data in enumerate(row_data):
|
||||
dataset_target = Rectangle(height=0.46/2,width=0.46/2).set_stroke(width=0.).set_fill(current_color, opacity=0.7)
|
||||
dataset_target.move_to(indiv_data)
|
||||
dataset_target.generate_target()
|
||||
aligned_edge = ORIGIN
|
||||
if j % 8 == 0:
|
||||
aligned_edge = LEFT
|
||||
dataset_target.target.next_to(
|
||||
samplers[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
|
||||
)
|
||||
dataset_target.target.set_x(dataset_target.target.get_x())
|
||||
elif j % 4 == 0:
|
||||
old_target = dataset_target.target
|
||||
dataset_target.target.next_to(
|
||||
samplers[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
|
||||
)
|
||||
dataset_target.target.set_x(dataset_target.target.get_x())
|
||||
dataset_target.target.set_y(dataset_target.target.get_y()-.25)
|
||||
else:
|
||||
dataset_target.target.next_to(
|
||||
old_target, direction=RIGHT, buff=0.02,
|
||||
)
|
||||
old_target = dataset_target.target
|
||||
new_row.append(dataset_target)
|
||||
first_animations.append(indiv_data.animate(run_time=0.5).set_stroke(current_color))
|
||||
second_animations.append(MoveToTarget(dataset_target, run_time=1.5))
|
||||
|
||||
new_datasets.append(new_row)
|
||||
step_1 = MarkupText(
|
||||
f"Since we splice the dataset between each GPU,\nthe models weights can be averaged during `backward()`\nActing as though we did one giant epoch\nvery quickly.",
|
||||
font_size=18
|
||||
)
|
||||
step_1.move_to([-2.5, -2, 0])
|
||||
|
||||
self.play(
|
||||
Write(step_1, run_time=3),
|
||||
)
|
||||
self.play(
|
||||
*first_animations,
|
||||
)
|
||||
self.play(*second_animations)
|
||||
self.wait(duration=.5)
|
||||
|
||||
move_animation = []
|
||||
import random
|
||||
for i,row in enumerate(new_datasets):
|
||||
# row = [row[k] for k in random.sample(range(8), 8)]
|
||||
current_color = colors[i]
|
||||
if i == 0:
|
||||
idx = -3
|
||||
elif i == 1:
|
||||
idx = -2
|
||||
elif i == 2:
|
||||
idx = -1
|
||||
elif i == 3:
|
||||
idx = 0
|
||||
for j,indiv_data in enumerate(row):
|
||||
indiv_data.generate_target()
|
||||
aligned_edge = ORIGIN
|
||||
if j % 8 == 0:
|
||||
aligned_edge = LEFT
|
||||
indiv_data.target.next_to(
|
||||
gpus[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
|
||||
)
|
||||
indiv_data.target.set_x(indiv_data.target.get_x())
|
||||
elif j % 4 == 0:
|
||||
indiv_data.target.next_to(
|
||||
gpus[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
|
||||
)
|
||||
indiv_data.target.set_x(indiv_data.target.get_x())
|
||||
indiv_data.target.set_y(indiv_data.target.get_y()-.25)
|
||||
else:
|
||||
indiv_data.target.next_to(
|
||||
old_target, direction=RIGHT, buff=0.02,
|
||||
)
|
||||
old_target = indiv_data.target
|
||||
move_animation.append(MoveToTarget(indiv_data, run_time=1.5))
|
||||
|
||||
self.play(*move_animation)
|
||||
self.wait()
|
||||
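The sharding shown here can be verified directly: after `accelerator.prepare`, each process iterates over a different slice of the dataset and the per-process number of batches shrinks accordingly. A minimal sketch (the tensor dataset is a placeholder; run it with `accelerate launch --num_processes 4 script.py`):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()
dataset = TensorDataset(torch.arange(32))
dataloader = accelerator.prepare(DataLoader(dataset, batch_size=8))

first_batch = next(iter(dataloader))[0]
# With 4 processes each rank prints a different slice and only sees
# len(dataset) / (batch_size * num_processes) = 1 batch per epoch.
print(f"rank {accelerator.process_index}: {first_batch.tolist()}, {len(dataloader)} batch(es)")
```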
193
manim_animations/dataloaders/stage_6.py
Normal file
@ -0,0 +1,193 @@
|
||||
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from manim import *
|
||||
|
||||
|
||||
class Stage6(Scene):
|
||||
def construct(self):
|
||||
# The dataset items
|
||||
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
|
||||
fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)
|
||||
columns = [
|
||||
VGroup(*[Rectangle(height=0.25,width=0.25,color=colors[j]) for i in range(8)]).arrange(RIGHT,buff=0)
|
||||
for j in range(4)
|
||||
]
|
||||
dataset_recs = VGroup(*columns).arrange(UP, buff=0)
|
||||
dataset_text = Text("Dataset", font_size=24)
|
||||
dataset = Group(dataset_recs,dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
|
||||
dataset.move_to([-2,0,0])
|
||||
self.add(dataset)
|
||||
code = Code(
|
||||
code="# We enable this by default\naccelerator = Accelerator()\ndataloader = DataLoader(..., shuffle=True)\ndataloader = accelerator.prepare(dataloader)\nfor batch in dataloader:\n\t...",
|
||||
tab_width=4,
|
||||
background="window",
|
||||
language="Python",
|
||||
font="Monospace",
|
||||
font_size=14,
|
||||
corner_radius=.2,
|
||||
insert_line_no=False,
|
||||
line_spacing=.75,
|
||||
style=Code.styles_list[1],
|
||||
)
|
||||
code.move_to([-3.5, 2.5, 0])
|
||||
self.add(code)
|
||||
|
||||
# The dataloader itself
|
||||
|
||||
sampler_1 = Group(
|
||||
Rectangle(color="blue", height=1, width=1),
|
||||
Text("Sampler GPU 1", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
|
||||
sampler_2 = Group(
|
||||
Rectangle(color="blue", height=1, width=1),
|
||||
Text("Sampler GPU 2", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
|
||||
sampler_3 = Group(
|
||||
Rectangle(color="blue", height=1, width=1),
|
||||
Text("Sampler GPU 3", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
|
||||
sampler_4 = Group(
|
||||
Rectangle(color="blue", height=1, width=1),
|
||||
Text("Sampler GPU 4", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
|
||||
sampler_1.move_to([2,2,0])
|
||||
sampler_2.move_to([2,.5,0])
|
||||
sampler_3.move_to([2,-1.,0])
|
||||
sampler_4.move_to([2,-2.5,0])
|
||||
self.add(sampler_1, sampler_2, sampler_3, sampler_4)
|
||||
samplers = [sampler_1[0], sampler_2[0], sampler_3[0], sampler_4[0]]
|
||||
|
||||
gpu_1 = Group(
|
||||
Rectangle(color="white", height=1, width=1),
|
||||
Text("Output GPU 1", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, 2, 0])
|
||||
gpu_2 = Group(
|
||||
Rectangle(color="white", height=1, width=1),
|
||||
Text("Output GPU 2", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, .5, 0])
|
||||
gpu_3 = Group(
|
||||
Rectangle(color="white", height=1, width=1),
|
||||
Text("Output GPU 3", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -1, 0])
|
||||
gpu_4 = Group(
|
||||
Rectangle(color="white", height=1, width=1),
|
||||
Text("Output GPU 4", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -2.5, 0])
|
||||
gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]]
|
||||
self.add(gpu_1, gpu_2, gpu_3, gpu_4)
|
||||
|
||||
|
||||
first_animations = []
|
||||
second_animations = []
|
||||
|
||||
|
||||
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
|
||||
current_color = colors[0]
|
||||
buff = 0
|
||||
lr_buff = .25
|
||||
old_target = None
|
||||
new_datasets = []
|
||||
for i,row_data in enumerate(dataset_recs):
|
||||
new_row = []
|
||||
current_color = colors[i]
|
||||
if i == 0:
|
||||
idx = -3
|
||||
elif i == 1:
|
||||
idx = -2
|
||||
elif i == 2:
|
||||
idx = -1
|
||||
elif i == 3:
|
||||
idx = 0
|
||||
for j,indiv_data in enumerate(row_data):
|
||||
dataset_target = Rectangle(height=0.46/2,width=0.46/2).set_stroke(width=0.).set_fill(current_color, opacity=0.7)
|
||||
dataset_target.move_to(indiv_data)
|
||||
dataset_target.generate_target()
|
||||
aligned_edge = ORIGIN
|
||||
if j % 8 == 0:
|
||||
aligned_edge = LEFT
|
||||
old_target = dataset_target.target
|
||||
dataset_target.target.next_to(
|
||||
samplers[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
|
||||
)
|
||||
dataset_target.target.set_x(dataset_target.target.get_x())
|
||||
elif j % 4 == 0:
|
||||
old_target = dataset_target.target
|
||||
dataset_target.target.next_to(
|
||||
samplers[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
|
||||
)
|
||||
dataset_target.target.set_x(dataset_target.target.get_x())
|
||||
dataset_target.target.set_y(dataset_target.target.get_y()-.25)
|
||||
else:
|
||||
dataset_target.target.next_to(
|
||||
old_target, direction=RIGHT, buff=0.02,
|
||||
)
|
||||
old_target = dataset_target.target
|
||||
new_row.append(dataset_target)
|
||||
first_animations.append(indiv_data.animate(run_time=0.5).set_stroke(current_color))
|
||||
second_animations.append(MoveToTarget(dataset_target, run_time=1.5))
|
||||
|
||||
new_datasets.append(new_row)
|
||||
step_1 = MarkupText(
|
||||
f"During shuffling, each mini-batch's\noutput order will be modified",
|
||||
font_size=18
|
||||
)
|
||||
step_1.move_to([-1.5, -2, 0])
|
||||
|
||||
self.play(
|
||||
Write(step_1, run_time=3),
|
||||
)
|
||||
self.play(
|
||||
*first_animations,
|
||||
)
|
||||
self.play(*second_animations)
|
||||
self.wait(duration=.5)
|
||||
|
||||
move_animation = []
|
||||
import random
|
||||
for i,row in enumerate(new_datasets):
|
||||
row = [row[k] for k in random.sample(range(8), 8)]
|
||||
current_color = colors[i]
|
||||
if i == 0:
|
||||
idx = -3
|
||||
elif i == 1:
|
||||
idx = -2
|
||||
elif i == 2:
|
||||
idx = -1
|
||||
elif i == 3:
|
||||
idx = 0
|
||||
for j,indiv_data in enumerate(row):
|
||||
indiv_data.generate_target()
|
||||
aligned_edge = ORIGIN
|
||||
if j % 8 == 0:
|
||||
aligned_edge = LEFT
|
||||
indiv_data.target.next_to(
|
||||
gpus[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
|
||||
)
|
||||
indiv_data.target.set_x(indiv_data.target.get_x())
|
||||
elif j % 4 == 0:
|
||||
indiv_data.target.next_to(
|
||||
gpus[abs(idx)].get_corner(UP+LEFT), buff=.02, direction=RIGHT+DOWN,
|
||||
)
|
||||
indiv_data.target.set_x(indiv_data.target.get_x())
|
||||
indiv_data.target.set_y(indiv_data.target.get_y()-.25)
|
||||
else:
|
||||
indiv_data.target.next_to(
|
||||
old_target, direction=RIGHT, buff=0.02,
|
||||
)
|
||||
old_target = indiv_data.target
|
||||
move_animation.append(MoveToTarget(indiv_data, run_time=1.5))
|
||||
|
||||
self.play(*move_animation)
|
||||
self.wait()
|
||||
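When the shuffled order also needs to be reproducible across runs, Accelerate exposes a seedable sampler option; a minimal sketch, assuming the `DataLoaderConfiguration` and `set_seed` utilities available in this release:

```python
from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration, set_seed

set_seed(42)  # seed the Python/NumPy/torch RNGs identically on every process
accelerator = Accelerator(dataloader_config=DataLoaderConfiguration(use_seedable_sampler=True))
# DataLoaders built with shuffle=True and prepared by this accelerator now use a
# seeded, process-aware sampler, so the shuffled order is reproducible run to run.
```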
182
manim_animations/dataloaders/stage_7.py
Normal file
@ -0,0 +1,182 @@
|
||||
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from manim import *
|
||||
|
||||
class Stage7(Scene):
|
||||
def construct(self):
|
||||
# The dataset items
|
||||
code = Code(
|
||||
code="accelerator = Accelerator(dispatch_batches=True)\ndataloader = DataLoader(...)\ndataloader = accelerator.prepare(dataloader)\nfor batch in dataloader:\n\t...",
|
||||
tab_width=4,
|
||||
background="window",
|
||||
language="Python",
|
||||
font="Monospace",
|
||||
font_size=14,
|
||||
corner_radius=.2,
|
||||
insert_line_no=False,
|
||||
line_spacing=.75,
|
||||
style=Code.styles_list[1],
|
||||
)
|
||||
code.move_to([-3.5, 2.5, 0])
|
||||
self.add(code)
|
||||
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
|
||||
fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)
|
||||
columns = [
|
||||
VGroup(*[Rectangle(height=0.25,width=0.25,color=colors[j]) for i in range(8)]).arrange(RIGHT,buff=0)
|
||||
for j in range(4)
|
||||
]
|
||||
dataset_recs = VGroup(*columns).arrange(UP, buff=0)
|
||||
dataset_text = Text("Dataset", font_size=24)
|
||||
dataset = Group(dataset_recs,dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
|
||||
dataset.move_to([-2,0,0])
|
||||
self.add(dataset)
|
||||
|
||||
# The dataloader itself
|
||||
|
||||
sampler_1 = Group(
|
||||
Rectangle(color="blue", height=1.02, width=1.02),
|
||||
Text("Sampler GPU 1", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
|
||||
sampler_2 = Group(
|
||||
Rectangle(color="blue", height=1.02, width=1.02),
|
||||
Text("Sampler GPU 2", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
|
||||
sampler_3 = Group(
|
||||
Rectangle(color="blue", height=1.02, width=1.02),
|
||||
Text("Sampler GPU 3", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
|
||||
sampler_4 = Group(
|
||||
Rectangle(color="blue", height=1.02, width=1.02),
|
||||
Text("Sampler GPU 4", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
|
||||
sampler_1.move_to([2,2,0])
|
||||
sampler_2.move_to([2,.5,0])
|
||||
sampler_3.move_to([2,-1.,0])
|
||||
sampler_4.move_to([2,-2.5,0])
|
||||
self.add(sampler_1, sampler_2, sampler_3, sampler_4)
|
||||
samplers = [sampler_1[0], sampler_2[0], sampler_3[0], sampler_4[0]]
|
||||
|
||||
gpu_1 = Group(
|
||||
Rectangle(color="white", height=1.02, width=.98),
|
||||
Text("Output GPU 1", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, 2, 0])
|
||||
gpu_2 = Group(
|
||||
Rectangle(color="white", height=1.02, width=.98),
|
||||
Text("Output GPU 2", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, .5, 0])
|
||||
gpu_3 = Group(
|
||||
Rectangle(color="white", height=1.02, width=.98),
|
||||
Text("Output GPU 3", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -1, 0])
|
||||
gpu_4 = Group(
|
||||
Rectangle(color="white", height=1.02, width=.98),
|
||||
Text("Output GPU 4", font_size=12)
|
||||
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4.5, -2.5, 0])
|
||||
gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]]
|
||||
self.add(gpu_1, gpu_2, gpu_3, gpu_4)
|
||||
|
||||
step_1 = MarkupText(
|
||||
f"When using a `DataLoaderDispatcher`, all\nof the samples are collected from GPU 0's dataset,\nthen divided and sent to each GPU.\nAs a result, this will be slower.",
|
||||
font_size=18
|
||||
)
|
||||
step_1.move_to([-2.5, -2, 0])
|
||||
|
||||
self.play(
|
||||
Write(step_1, run_time=3.5),
|
||||
)
|
||||
|
||||
first_animations = []
|
||||
second_animations = []
|
||||
|
||||
|
||||
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
|
||||
current_color = colors[0]
|
||||
ud_buff = 0.01
|
||||
lr_buff = 0.01
|
||||
old_target = None
|
||||
new_datasets = []
|
||||
for i,row_data in enumerate(dataset_recs):
|
||||
new_row = []
|
||||
current_color = colors[i]
|
||||
|
||||
for j,indiv_data in enumerate(row_data):
|
||||
dataset_target = Rectangle(height=0.46/4,width=0.46/2).set_stroke(width=0.).set_fill(current_color, opacity=0.7)
|
||||
dataset_target.move_to(indiv_data)
|
||||
dataset_target.generate_target()
|
||||
aligned_edge = ORIGIN
|
||||
if j % 8 == 0:
|
||||
aligned_edge = LEFT
|
||||
dataset_target.target.next_to(
|
||||
samplers[0].get_corner(DOWN+LEFT), buff=0.0125, direction=RIGHT+UP,
|
||||
)
|
||||
dataset_target.target.set_x(dataset_target.target.get_x())
|
||||
dataset_target.target.set_y(dataset_target.target.get_y() + (.25 * i))
|
||||
elif j % 4 == 0:
|
||||
old_target = dataset_target.target
|
||||
dataset_target.target.next_to(
|
||||
samplers[0].get_corner(DOWN+LEFT), buff=0.0125, direction=RIGHT+UP,
|
||||
)
|
||||
dataset_target.target.set_x(dataset_target.target.get_x())
|
||||
dataset_target.target.set_y(dataset_target.target.get_y()+.125 + (.25 * i))
|
||||
else:
|
||||
dataset_target.target.next_to(
|
||||
old_target, direction=RIGHT, buff=0.0125,
|
||||
)
|
||||
old_target = dataset_target.target
|
||||
new_row.append(dataset_target)
|
||||
first_animations.append(indiv_data.animate(run_time=0.5).set_stroke(current_color))
|
||||
second_animations.append(MoveToTarget(dataset_target, run_time=1.5))
|
||||
|
||||
new_datasets.append(new_row)
|
||||
self.play(
|
||||
*first_animations,
|
||||
)
|
||||
self.play(*second_animations)
|
||||
move_animation = []
|
||||
for i,row in enumerate(new_datasets):
|
||||
current_color = colors[i]
|
||||
if i == 0:
|
||||
idx = -3
|
||||
elif i == 1:
|
||||
idx = -2
|
||||
elif i == 2:
|
||||
idx = -1
|
||||
elif i == 3:
|
||||
idx = 0
|
||||
for j,indiv_data in enumerate(row):
|
||||
indiv_data.generate_target()
|
||||
indiv_data.animate.stretch_to_fit_height(0.46/2)
|
||||
aligned_edge = ORIGIN
|
||||
if j % 8 == 0:
|
||||
aligned_edge = LEFT
|
||||
indiv_data.target.next_to(
|
||||
gpus[abs(idx)].get_corner(UP+LEFT), buff=.01, direction=RIGHT+DOWN,
|
||||
)
|
||||
indiv_data.target.set_x(indiv_data.target.get_x())
|
||||
indiv_data.target.set_y(indiv_data.target.get_y()-.25)
|
||||
elif j % 4 == 0:
|
||||
indiv_data.target.next_to(
|
||||
gpus[abs(idx)].get_corner(UP+LEFT), buff=.01, direction=RIGHT+DOWN,
|
||||
)
|
||||
indiv_data.target.set_x(indiv_data.target.get_x())
|
||||
else:
|
||||
indiv_data.target.next_to(
|
||||
old_target, direction=RIGHT, buff=0.01,
|
||||
)
|
||||
old_target = indiv_data.target
|
||||
move_animation.append(MoveToTarget(indiv_data, run_time=1.5))
|
||||
|
||||
self.play(*move_animation)
|
||||
self.wait()
|
||||
@ -3,10 +3,12 @@ line-length = 119
|
||||
target-version = "py38"
|
||||
|
||||
[tool.ruff.lint]
|
||||
preview = true
|
||||
ignore-init-module-imports = true
|
||||
extend-select = [
|
||||
"B009", # static getattr
|
||||
"B010", # static setattr
|
||||
"CPY", # Copyright
|
||||
"E", # PEP8 errors
|
||||
"F", # PEP8 formatting
|
||||
"I", # Import sorting
|
||||
|
||||
12
setup.py
@ -25,17 +25,18 @@ extras["docs"] = []
|
||||
extras["test_prod"] = ["pytest>=7.2.0,<=8.0.0", "pytest-xdist", "pytest-subtests", "parameterized"]
|
||||
extras["test_dev"] = [
|
||||
"datasets",
|
||||
"diffusers",
|
||||
"evaluate",
|
||||
"torchpippy>=0.2.0",
|
||||
"transformers",
|
||||
"scipy",
|
||||
"scikit-learn",
|
||||
"deepspeed<0.13.0",
|
||||
"tqdm",
|
||||
"bitsandbytes",
|
||||
"timm",
|
||||
]
|
||||
extras["testing"] = extras["test_prod"] + extras["test_dev"]
|
||||
extras["deepspeed"] = ["deepspeed<=0.14.0"]
|
||||
extras["rich"] = ["rich"]
|
||||
|
||||
extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard", "dvclive"]
|
||||
@ -47,14 +48,14 @@ extras["sagemaker"] = [
|
||||
|
||||
setup(
|
||||
name="accelerate",
|
||||
version="0.28.0.dev",
|
||||
version="0.33.0",
|
||||
description="Accelerate",
|
||||
long_description=open("README.md", encoding="utf-8").read(),
|
||||
long_description_content_type="text/markdown",
|
||||
keywords="deep learning",
|
||||
license="Apache",
|
||||
author="The HuggingFace team",
|
||||
author_email="sylvain@huggingface.co",
|
||||
author_email="zach.mueller@huggingface.co",
|
||||
url="https://github.com/huggingface/accelerate",
|
||||
package_dir={"": "src"},
|
||||
packages=find_packages("src"),
|
||||
@ -64,16 +65,17 @@ setup(
|
||||
"accelerate-config=accelerate.commands.config:main",
|
||||
"accelerate-estimate-memory=accelerate.commands.estimate:main",
|
||||
"accelerate-launch=accelerate.commands.launch:main",
|
||||
"accelerate-merge-weights=accelerate.commands.merge:main",
|
||||
]
|
||||
},
|
||||
python_requires=">=3.8.0",
|
||||
install_requires=[
|
||||
"numpy>=1.17",
|
||||
"numpy>=1.17,<2.0.0",
|
||||
"packaging>=20.0",
|
||||
"psutil",
|
||||
"pyyaml",
|
||||
"torch>=1.10.0",
|
||||
"huggingface_hub",
|
||||
"huggingface_hub>=0.21.0",
|
||||
"safetensors>=0.3.1",
|
||||
],
|
||||
extras_require=extras,
|
||||
|
||||
@ -1,4 +1,17 @@
|
||||
__version__ = "0.28.0.dev0"
|
||||
# Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
__version__ = "0.33.0"
|
||||
|
||||
from .accelerator import Accelerator
|
||||
from .big_modeling import (
|
||||
@ -17,12 +30,14 @@ from .state import PartialState
|
||||
from .utils import (
|
||||
AutocastKwargs,
|
||||
DataLoaderConfiguration,
|
||||
DDPCommunicationHookType,
|
||||
DeepSpeedPlugin,
|
||||
DistributedDataParallelKwargs,
|
||||
DistributedType,
|
||||
FullyShardedDataParallelPlugin,
|
||||
GradScalerKwargs,
|
||||
InitProcessGroupKwargs,
|
||||
ProfileKwargs,
|
||||
find_executable_batch_size,
|
||||
infer_auto_device_map,
|
||||
is_rich_available,
|
||||
|
||||
@ -31,6 +31,7 @@ from typing import Any, Callable, Union
|
||||
|
||||
import torch
|
||||
import torch.utils.hooks as hooks
|
||||
from huggingface_hub import split_torch_state_dict_into_shards
|
||||
|
||||
from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state
|
||||
from .data_loader import DataLoaderDispatcher, prepare_data_loader, skip_first_batches
|
||||
@ -44,8 +45,10 @@ from .utils import (
|
||||
MODEL_NAME,
|
||||
SAFE_WEIGHTS_INDEX_NAME,
|
||||
SAFE_WEIGHTS_NAME,
|
||||
SAFE_WEIGHTS_PATTERN_NAME,
|
||||
WEIGHTS_INDEX_NAME,
|
||||
WEIGHTS_NAME,
|
||||
WEIGHTS_PATTERN_NAME,
|
||||
AutocastKwargs,
|
||||
DataLoaderConfiguration,
|
||||
DeepSpeedPlugin,
|
||||
@ -61,6 +64,7 @@ from .utils import (
|
||||
LoggerType,
|
||||
MegatronLMPlugin,
|
||||
PrecisionType,
|
||||
ProfileKwargs,
|
||||
ProjectConfiguration,
|
||||
RNGType,
|
||||
TorchDynamoPlugin,
|
||||
@ -77,10 +81,12 @@ from .utils import (
|
||||
has_transformer_engine_layers,
|
||||
is_bf16_available,
|
||||
is_deepspeed_available,
|
||||
is_fp8_available,
|
||||
is_ipex_available,
|
||||
is_lomo_available,
|
||||
is_megatron_lm_available,
|
||||
is_mlu_available,
|
||||
is_msamp_available,
|
||||
is_musa_available,
|
||||
is_npu_available,
|
||||
is_torch_version,
|
||||
is_torch_xla_available,
|
||||
@ -95,10 +101,9 @@ from .utils import (
|
||||
save,
|
||||
save_fsdp_model,
|
||||
save_fsdp_optimizer,
|
||||
shard_checkpoint,
|
||||
wait_for_everyone,
|
||||
)
|
||||
from .utils.constants import FSDP_PYTORCH_VERSION
|
||||
from .utils.constants import FSDP_PYTORCH_VERSION, PROFILE_PATTERN_NAME
|
||||
from .utils.modeling import get_state_dict_offloaded_model
|
||||
from .utils.other import is_compiled_module
|
||||
|
||||
@ -112,11 +117,6 @@ if is_deepspeed_available():
|
||||
DummyScheduler,
|
||||
)
|
||||
|
||||
if is_fp8_available():
|
||||
import transformer_engine.common.recipe as te_recipe
|
||||
from transformer_engine.pytorch import fp8_autocast
|
||||
|
||||
|
||||
if is_megatron_lm_available():
|
||||
from .utils import (
|
||||
MegatronEngine,
|
||||
@ -126,9 +126,7 @@ if is_megatron_lm_available():
|
||||
MegatronLMSchedulerWrapper,
|
||||
megatron_lm_initialize,
|
||||
megatron_lm_prepare_data_loader,
|
||||
megatron_lm_prepare_model,
|
||||
megatron_lm_prepare_optimizer,
|
||||
megatron_lm_prepare_scheduler,
|
||||
megatron_lm_prepare_model_optimizer_scheduler,
|
||||
)
|
||||
|
||||
from torch.distributed.algorithms.join import Join
|
||||
@ -214,12 +212,12 @@ class Accelerator:
|
||||
project_dir (`str`, `os.PathLike`, *optional*):
|
||||
A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved
|
||||
checkpoints.
|
||||
step_scheduler_with_optimizer (`bool`, *optional`, defaults to `True`):
|
||||
step_scheduler_with_optimizer (`bool`, *optional*, defaults to `True`):
|
||||
Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only
|
||||
done under certain circumstances (at the end of each epoch, for instance).
|
||||
kwargs_handlers (list of [`~utils.KwargsHandler`], *optional*)
|
||||
A list of [`~utils.KwargsHandler`] to customize how the objects related to distributed training or mixed
|
||||
precision are created. See [kwargs](kwargs) for more information.
|
||||
A list of [`~utils.KwargsHandler`] to customize how the objects related to distributed training, profiling
|
||||
or mixed precision are created. See [kwargs](kwargs) for more information.
|
||||
dynamo_backend (`str` or [`~utils.DynamoBackend`], *optional*, defaults to `"no"`):
|
||||
Set to one of the possible dynamo backends to optimize your training with torch dynamo.
|
||||
gradient_accumulation_plugin ([`~utils.GradientAccumulationPlugin`], *optional*):
|
||||
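As one concrete use of `kwargs_handlers` (a short sketch using handlers exported from `accelerate.utils` in this release):

```python
from datetime import timedelta

from accelerate import Accelerator
from accelerate.utils import DistributedDataParallelKwargs, InitProcessGroupKwargs

accelerator = Accelerator(
    kwargs_handlers=[
        DistributedDataParallelKwargs(find_unused_parameters=True),   # forwarded to torch DDP
        InitProcessGroupKwargs(timeout=timedelta(seconds=1800)),      # forwarded to init_process_group
    ]
)
```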
@ -293,7 +291,13 @@ class Accelerator:
|
||||
if deepspeed_plugin:
|
||||
if not is_deepspeed_available():
|
||||
raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.")
|
||||
if compare_versions("deepspeed", "<", "0.9.3"):
|
||||
if is_mlu_available():
|
||||
if compare_versions("deepspeed-mlu", "<", "0.10.1"):
|
||||
raise ImportError("DeepSpeed MLU version must be >= 0.10.1. Please update DeepSpeed MLU.")
|
||||
elif is_musa_available():
|
||||
if compare_versions("deepspeed", ">", "0.14.3"):
|
||||
raise ImportError("DeepSpeed MUSA version must be <= 0.14.3. Please downgrade DeepSpeed.")
|
||||
elif compare_versions("deepspeed", "<", "0.9.3"):
|
||||
raise ImportError("DeepSpeed version must be >= 0.9.3. Please update DeepSpeed.")
|
||||
|
||||
mixed_precision = (
|
||||
@ -336,6 +340,9 @@ class Accelerator:
|
||||
self.init_handler = None
|
||||
self.fp8_recipe_handler = None
|
||||
self.autocast_handler = None
|
||||
self.profile_handler = None
|
||||
self.has_lomo_optimizer = False
|
||||
|
||||
if kwargs_handlers is not None:
|
||||
for handler in kwargs_handlers:
|
||||
assert isinstance(
|
||||
@ -366,6 +373,11 @@ class Accelerator:
|
||||
raise ValueError("You can only pass one `AutocastKwargs` in `kwargs_handler`.")
|
||||
else:
|
||||
self.autocast_handler = handler
|
||||
elif isinstance(handler, ProfileKwargs):
|
||||
if self.profile_handler is not None:
|
||||
raise ValueError("You can only pass one `ProfileKwargs` in `kwargs_handler`.")
|
||||
else:
|
||||
self.profile_handler = handler
|
||||
|
||||
kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}
|
||||
self.state = AcceleratorState(
|
||||
@ -379,8 +391,15 @@ class Accelerator:
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
if self.fp8_recipe_handler is None and self.state.mixed_precision == "fp8":
|
||||
self.fp8_recipe_handler = FP8RecipeKwargs(backend="MSAMP" if is_msamp_available() else "TE")
|
||||
self.delayed_fp8_autocast = False
|
||||
if self.fp8_recipe_handler is not None:
|
||||
# We already check if FP8 is available during `self.state`
|
||||
if self.state.mixed_precision != "fp8":
|
||||
raise ValueError("Passing in a `FP8RecipeKwargs` object requires setting `mixed_precision='fp8'`.")
|
||||
self.delayed_fp8_autocast = self.fp8_recipe_handler.backend == "TE" and self.distributed_type in (
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.FSDP,
|
||||
)
|
||||
|
||||
trackers = filter_trackers(log_with, self.logging_dir)
|
||||
if len(trackers) < 1 and log_with is not None:
|
||||
@ -446,7 +465,7 @@ class Accelerator:
|
||||
and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM)
|
||||
):
|
||||
self.native_amp = True
|
||||
if self.device.type not in ("xpu", "cuda", "mps", "npu", "xla") or is_torch_xla_available(
|
||||
if self.device.type not in ("xpu", "cuda", "npu", "xla", "mlu", "musa") or is_torch_xla_available(
|
||||
check_is_tpu=True
|
||||
):
|
||||
raise ValueError(f"fp16 mixed precision requires a GPU (not {self.device.type!r}).")
|
||||
@ -457,8 +476,14 @@ class Accelerator:
|
||||
self.scaler = ShardedGradScaler(**kwargs)
|
||||
elif is_torch_xla_available(check_is_gpu=True):
|
||||
self.scaler = xamp.GradScaler(**kwargs)
|
||||
elif is_mlu_available():
|
||||
self.scaler = torch.mlu.amp.GradScaler(**kwargs)
|
||||
elif is_musa_available():
|
||||
self.scalar = torch.musa.amp.GradScaler(**kwargs)
|
||||
elif is_npu_available():
|
||||
self.scaler = torch.npu.amp.GradScaler(**kwargs)
|
||||
elif is_xpu_available():
|
||||
self.scaler = torch.amp.GradScaler("xpu", **kwargs)
|
||||
else:
|
||||
self.scaler = torch.cuda.amp.GradScaler(**kwargs)
|
||||
|
||||
@ -473,6 +498,10 @@ class Accelerator:
|
||||
if mixed_precision == "bf16" and not self.native_amp and not is_torch_xla_available():
|
||||
raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")
|
||||
|
||||
elif self.state.mixed_precision == "fp8":
|
||||
# We always enable `native_amp` for FP8
|
||||
self.native_amp = True
|
||||
|
||||
# Start of internal step tracking
|
||||
self.step = 0
|
||||
|
||||
@ -544,6 +573,10 @@ class Accelerator:
|
||||
def use_seedable_sampler(self):
|
||||
return self.dataloader_config.use_seedable_sampler
|
||||
|
||||
@property
|
||||
def non_blocking(self):
|
||||
return self.dataloader_config.non_blocking
|
||||
|
||||
@property
|
||||
def project_dir(self):
|
||||
return self.project_configuration.project_dir
|
||||
@ -1026,9 +1059,20 @@ class Accelerator:
|
||||
```
|
||||
"""
|
||||
self._do_sync()
|
||||
|
||||
allow_gradient_sync = (
|
||||
self.sync_gradients # must sync if sync gradients need to complete an optimizer step
|
||||
or (
|
||||
# the no_sync context stops the gradients from reducing during distributed training
|
||||
# bringing speedup (potentially at some costs). Here, no_sync can be prevented
|
||||
# by setting sync_each_batch = True.
|
||||
self.use_distributed # only relevant in distributed settings
|
||||
and self.gradient_state.plugin_kwargs.get("sync_each_batch", False)
|
||||
)
|
||||
)
|
||||
with contextlib.ExitStack() as cm_stack:
|
||||
for m in models:
|
||||
cm_stack.enter_context(contextlib.nullcontext() if self.sync_gradients else self.no_sync(m))
|
||||
cm_stack.enter_context(contextlib.nullcontext() if allow_gradient_sync else self.no_sync(m))
|
||||
yield
|
||||
|
||||
@contextmanager
|
||||
@ -1076,7 +1120,13 @@ class Accelerator:
|
||||
... optimizer.zero_grad()
|
||||
```
|
||||
"""
|
||||
if self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU):
|
||||
if self.distributed_type in (
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_MUSA,
|
||||
DistributedType.MULTI_XPU,
|
||||
):
|
||||
dl_even_batches_values = []
|
||||
|
||||
if even_batches is not None:
|
||||
@ -1246,9 +1296,9 @@ class Accelerator:
|
||||
|
||||
if self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
|
||||
if self.device.type == "cpu" and self.state.use_ipex:
|
||||
args = self._prepare_ipex(*args)
|
||||
args = self._prepare_ipex_or_xpu(*args)
|
||||
elif self.device.type == "xpu" and is_xpu_available():
|
||||
args = self._prepare_ipex(*args)
|
||||
args = self._prepare_ipex_or_xpu(*args)
|
||||
if self.distributed_type == DistributedType.DEEPSPEED:
|
||||
result = self._prepare_deepspeed(*args)
|
||||
elif self.distributed_type == DistributedType.MEGATRON_LM:
|
||||
@ -1332,18 +1382,26 @@ class Accelerator:
|
||||
model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model)
|
||||
else:
|
||||
model.forward = convert_outputs_to_fp32(new_forward)
|
||||
elif self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE":
|
||||
|
||||
# We prepare fp8 after, allowing for bf16 autocast to happen first
|
||||
if getattr(self.fp8_recipe_handler, "backend", None) == "TE":
|
||||
# Import here to keep base imports fast
|
||||
import transformer_engine.common.recipe as te_recipe
|
||||
from transformer_engine.pytorch import fp8_autocast
|
||||
|
||||
if not has_transformer_engine_layers(model):
|
||||
with torch.no_grad():
|
||||
convert_model(model)
|
||||
model._converted_to_transformer_engine = True
|
||||
model._original_forward = model.forward
|
||||
|
||||
kwargs = self.fp8_recipe_handler.to_kwargs() if self.fp8_recipe_handler is not None else {}
|
||||
if "fp8_format" in kwargs:
|
||||
kwargs["fp8_format"] = getattr(te_recipe.Format, kwargs["fp8_format"])
|
||||
fp8_recipe = te_recipe.DelayedScaling(**kwargs)
|
||||
model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe)(model.forward)
|
||||
# If we are in DDP or FSDP, we delay `autocast` until after FSDP/DDP has been initialized
|
||||
# to make use of the process group
|
||||
if not self.delayed_fp8_autocast:
|
||||
model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe)(model.forward)
|
||||
|
||||
if (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)) and getattr(
|
||||
model, "hf_device_map", False
|
||||
@ -1355,16 +1413,19 @@ class Accelerator:
|
||||
" In order to use 8-bit models that have been loaded across multiple GPUs the solution is to use Naive Pipeline Parallelism."
|
||||
" Therefore you should not specify that you are under any distributed regime in your accelerate config."
|
||||
)
|
||||
current_device = list(model_devices)[0]
|
||||
current_device_index = current_device.index if isinstance(current_device, torch.device) else current_device
|
||||
elif len(model_devices) == 1:
|
||||
current_device = list(model_devices)[0]
|
||||
current_device_index = (
|
||||
current_device.index if isinstance(current_device, torch.device) else current_device
|
||||
)
|
||||
|
||||
if torch.device(current_device_index) != self.device:
|
||||
# if on the first device (GPU 0) we don't care
|
||||
if (self.device.index is not None) or (current_device_index != 0):
|
||||
raise ValueError(
|
||||
"You can't train a model that has been loaded in 8-bit precision on a different device than the one "
|
||||
"you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device() or device_map={'':torch.xpu.current_device()}"
|
||||
)
|
||||
if torch.device(current_device_index) != self.device:
|
||||
# if on the first device (GPU 0) we don't care
|
||||
if (self.device.index is not None) or (current_device_index != 0):
|
||||
raise ValueError(
|
||||
"You can't train a model that has been loaded in 8-bit precision on a different device than the one "
|
||||
"you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device()}` or `device_map={'':torch.xpu.current_device()}`"
|
||||
)
|
||||
|
||||
if "cpu" in model_devices or "disk" in model_devices:
|
||||
raise ValueError(
|
||||
@ -1375,6 +1436,8 @@ class Accelerator:
|
||||
if not evaluation_mode:
|
||||
if self.distributed_type in (
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_MUSA,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
):
|
||||
@ -1389,6 +1452,8 @@ class Accelerator:
|
||||
model = torch.nn.parallel.DistributedDataParallel(
|
||||
model, device_ids=device_ids, output_device=output_device, **kwargs
|
||||
)
|
||||
if self.ddp_handler is not None:
|
||||
self.ddp_handler.register_comm_hook(model)
|
||||
elif self.distributed_type == DistributedType.FSDP:
|
||||
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
|
||||
|
||||
@ -1433,6 +1498,73 @@ class Accelerator:
|
||||
),
|
||||
auto_wrap_policy=fsdp_plugin.auto_wrap_policy,
|
||||
)
|
||||
|
||||
# In the event the model had been loaded in low precision, but
|
||||
# mixed precision had also been activated, then we follow DeepSpeed's
|
||||
# strategy to hold the parameters in full precision.
|
||||
# - assume that trainer.args.bf16 and trainer.args.fp16 are already checked against
|
||||
# fsdp_plugin.mixed_precision_policy.
|
||||
# - NOTE: we do not check the mixed_precision attribute on the FSDP root wrapper.
|
||||
# * this attribute will always set by init_utils.init_core_state so its always not None.
|
||||
# * mixed_precision.param_dtype only regards _fwd_bwd_param_dtype
|
||||
# * if model is loaded in 16bit, and even if mixed_precision.param_dtype is None,
|
||||
# we sill want to upcast the flat_param.
|
||||
if self.mixed_precision != "no": # if mixed precision is set
|
||||
upcasted_log = []
|
||||
for module in FSDP.fsdp_modules(model):
|
||||
# Referencing DeepSpeed Zero3
|
||||
# - in Init, params are converted to 16bit while partitioning.
|
||||
# - in accelerator.prepare, deepspeed.initalize is called to:
|
||||
# * creates the DeepSpeeedEngine.
|
||||
# * since zero_optimization() is True , calls engine._configure_zero_optimizer.
|
||||
#
|
||||
# Inside the DeepSpeed Zero3 optimizer configuration, which initalizes
|
||||
# DeepSpeedZeroOptimizer_Stage3, during which:
|
||||
# * trainable_param_groups are obtained from the attached optimizer
|
||||
# (already partitioned in 16bit).
|
||||
# * then _setup_for_real_optimizer -> _create_fp32_partitions
|
||||
# which performs the fp32 upcasting.
|
||||
|
||||
# To mimick DeepSeepds's casting in FSDP, we look at the (single) FlatParameter held
|
||||
# within an FSDP wrapper. This FlatParameter will be seen by the optimizer.
|
||||
# - even though there is a torch.device('meta') guard below, we
|
||||
# expect _init_utils._init_param_handle_from_module to already
|
||||
# sync the parameter.
|
||||
|
||||
if not module._has_params:
|
||||
continue # skip if FSDP module not managing parameters
|
||||
param = module._flat_param
|
||||
if (
|
||||
param.dtype != torch.float32
|
||||
and param.device != torch.device("meta")
|
||||
and param.requires_grad
|
||||
):
|
||||
# keep log of names_params that was upcasted
|
||||
# NOTE: resorted to this because warnings.simplefilter("once") is somehow not working
|
||||
name_param_log = (module.module.__class__.__name__, ", ".join(module._flat_param._fqns))
|
||||
if name_param_log not in upcasted_log:
|
||||
upcasted_log.append(name_param_log)
|
||||
|
||||
# this works because of FSDP's _runtime_utils.lazy_init.
|
||||
# Have to be careful not to call anything before this that
|
||||
# triggers lazy_init (e.g., _is_fsdp_root).
|
||||
param.data = param.data.to(torch.float32) # upcasting
|
||||
module._handle._orig_param_dtype = torch.float32 # update
|
||||
|
||||
# report the warnings
|
||||
# some messages can be quite repetitive, especially when reporting about layers that have identical architecture.
|
||||
if self.is_main_process:
|
||||
for name_log, param_log in upcasted_log:
|
||||
warnings.warn(
|
||||
f"Upcasted low precision parameters in {name_log} because mixed precision turned on in FSDP. "
|
||||
f"Affects: {param_log}."
|
||||
)
|
||||
|
||||
if len(upcasted_log) > 0:
|
||||
warnings.warn(
|
||||
"FSDP upcast of low precision parameters may affect the precision of model checkpoints."
|
||||
)
|
||||
|
||||
# if the previous and current models are same, delete the previous one
|
||||
if len(self._models) > 1 and (self._models[-2] is self._models[-1]):
|
||||
del self._models[-2]
|
||||
@ -1440,8 +1572,15 @@ class Accelerator:
|
||||
elif self.distributed_type == DistributedType.MULTI_CPU:
|
||||
kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
|
||||
model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)
|
||||
if self.ddp_handler is not None:
|
||||
self.ddp_handler.register_comm_hook(model)
|
||||
elif self.distributed_type == DistributedType.XLA and self.state.fork_launched:
|
||||
model = xmp.MpModelWrapper(model).to(self.device)
|
||||
# Now we can apply the FP8 autocast
|
||||
if self.delayed_fp8_autocast:
|
||||
model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe, fp8_group=model.process_group)(
|
||||
model.forward
|
||||
)
|
||||
# torch.compile should be called last and only if the model isn't already compiled.
|
||||
if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model):
|
||||
if not is_torch_version(">=", "2.0"):
|
||||
@ -1557,6 +1696,8 @@ class Accelerator:
|
||||
)
|
||||
|
||||
if model is not None:
|
||||
# if the model is an MOE, set the appropriate MOE layers as leaf Z3 modules
|
||||
deepspeed_plugin.set_moe_leaf_modules(model)
|
||||
# deal with config keys that use `auto` value and rely on model's hidden_size
|
||||
hidden_size_based_keys = [
|
||||
"zero_optimization.reduce_bucket_size",
|
||||
@ -1586,7 +1727,7 @@ class Accelerator:
|
||||
config_kwargs.update(
|
||||
{
|
||||
"zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
|
||||
"zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
|
||||
"zero_optimization.stage3_prefetch_bucket_size": int(0.9 * hidden_size * hidden_size),
|
||||
"zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
|
||||
}
|
||||
)
|
||||
@ -1673,6 +1814,7 @@ class Accelerator:
|
||||
|
||||
def _prepare_megatron_lm(self, *args):
|
||||
megatron_lm_plugin = self.state.megatron_lm_plugin
|
||||
micro_batch_size = None
|
||||
if not megatron_lm_plugin.megatron_dataset_flag:
|
||||
batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")]
|
||||
if len(batch_sizes) == 0:
|
||||
@ -1691,19 +1833,22 @@ class Accelerator:
|
||||
if isinstance(obj, MegatronLMDummyDataLoader):
|
||||
micro_batch_size = obj.dataset_args["micro_batch_size"]
|
||||
break
|
||||
|
||||
dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree)
|
||||
megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree)
|
||||
|
||||
if micro_batch_size is not None:
|
||||
dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree)
|
||||
megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree)
|
||||
else:
|
||||
raise ValueError(
|
||||
"When you do not pass the dataloader parameter, the `data_parallel_size`, "
|
||||
"`micro_batch_size`, and `global_batch_size` megatron parameters will not be updated."
|
||||
)
|
||||
model = None
|
||||
optimizer = None
|
||||
scheduler = None
|
||||
is_dummy_scheduler = False
|
||||
batch_data = None
|
||||
for obj in args:
|
||||
if isinstance(obj, torch.utils.data.DataLoader) and batch_data is None:
|
||||
batch_data = next(iter(obj))
|
||||
if isinstance(obj, torch.nn.Module):
|
||||
elif isinstance(obj, torch.nn.Module):
|
||||
model = obj
|
||||
elif isinstance(obj, (torch.optim.Optimizer)):
|
||||
optimizer = obj
|
||||
@ -1715,8 +1860,7 @@ class Accelerator:
|
||||
if optimizer is not None:
|
||||
megatron_lm_plugin.set_optimizer_type(optimizer)
|
||||
if scheduler is not None:
|
||||
is_dummy_scheduler = isinstance(scheduler, MegatronLMDummyScheduler)
|
||||
if not is_dummy_scheduler:
|
||||
if not isinstance(scheduler, MegatronLMDummyScheduler):
|
||||
raise ValueError(
|
||||
"You can't use a custom scheduler with Megatron-LM. Please use the `accelerate.utils.MegatronLMDummyScheduler` instead."
|
||||
)
|
||||
@ -1724,6 +1868,10 @@ class Accelerator:
|
||||
|
||||
# initialize megatron-lm
|
||||
megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args)
|
||||
|
||||
(model, optimizer, scheduler) = megatron_lm_prepare_model_optimizer_scheduler(self)
|
||||
self.wait_for_everyone()
|
||||
|
||||
counter = 0
|
||||
result = []
|
||||
for obj in args:
|
||||
@ -1739,13 +1887,6 @@ class Accelerator:
|
||||
else:
|
||||
result.append(obj)
|
||||
|
||||
if model is not None:
|
||||
model = megatron_lm_prepare_model(self)
|
||||
if optimizer is not None:
|
||||
optimizer = megatron_lm_prepare_optimizer(self, model)
|
||||
if scheduler is not None:
|
||||
scheduler = megatron_lm_prepare_scheduler(self, optimizer, scheduler)
|
||||
|
||||
if model is not None:
|
||||
model = MegatronEngine(self, model, optimizer, scheduler)
|
||||
if optimizer is not None:
|
||||
@ -1760,26 +1901,32 @@ class Accelerator:
|
||||
result[i] = optimizer
|
||||
elif isinstance(result[i], MegatronLMDummyScheduler):
|
||||
result[i] = scheduler
|
||||
|
||||
if model is not None:
|
||||
self._models.append(model)
|
||||
if len(self._models) > 1:
|
||||
raise AssertionError(
|
||||
"You can't use same `Accelerator()` instance with multiple models when using Megatron-LM"
|
||||
)
|
||||
if optimizer is not None:
|
||||
self._optimizers.append(optimizer)
|
||||
if scheduler is not None:
|
||||
self._schedulers.append(scheduler)
|
||||
if len(self._models) > 1:
|
||||
raise AssertionError(
|
||||
"You can't use same `Accelerator()` instance with multiple models when using Megatron-LM"
|
||||
)
|
||||
|
||||
return tuple(result)
|
||||
|
||||
def _prepare_ipex(self, *args):
|
||||
if not is_ipex_available():
|
||||
raise ImportError(
|
||||
"IPEX is not installed or IPEX's version does not match current PyTorch version. Please refer"
|
||||
" to https://github.com/intel/intel-extension-for-pytorch."
|
||||
)
|
||||
else:
|
||||
import intel_extension_for_pytorch as ipex
|
||||
def _prepare_ipex_or_xpu(self, *args):
|
||||
"""
|
||||
Prepares model and optimizer for training with IPEX or XPU acceleration. This covers 3 cases, IPEX compiled
|
||||
with CPU only support, IPEX compiled with XPU support and training with XPU pytorch backend available in stock
|
||||
pytorch starting from version 2.4.
|
||||
"""
|
||||
if self.state.use_ipex:
|
||||
if not is_ipex_available():
|
||||
raise ImportError(
|
||||
"IPEX is not installed or IPEX's version does not match current PyTorch version. Please refer"
|
||||
" to https://github.com/intel/intel-extension-for-pytorch."
|
||||
)
|
||||
|
||||
model = None
|
||||
optimizer = None
|
||||
@ -1792,12 +1939,12 @@ class Accelerator:
|
||||
optimizer = obj
|
||||
if optimizer is not None and model is not None:
|
||||
dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else None
|
||||
if self.device.type == "xpu" and is_xpu_available():
|
||||
if self.device.type == "xpu":
|
||||
model = model.to(self.device)
|
||||
model, optimizer = torch.xpu.optimize(
|
||||
model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1"
|
||||
)
|
||||
else:
|
||||
# ipex.optimize() is available only for IPEX, both IPEX-CPU and IPEX-XPU
|
||||
if is_ipex_available():
|
||||
import intel_extension_for_pytorch as ipex
|
||||
|
||||
model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1")
|
||||
for i in range(len(result)):
|
||||
if isinstance(result[i], torch.nn.Module):
|
||||
@ -1890,6 +2037,7 @@ class Accelerator:
|
||||
even_batches=self.even_batches,
|
||||
slice_fn_for_dispatch=slice_fn_for_dispatch,
|
||||
use_seedable_sampler=self.use_seedable_sampler,
|
||||
non_blocking=self.non_blocking,
|
||||
)
|
||||
self._dataloaders.append(prepared_data_loader)
|
||||
return prepared_data_loader
|
||||
@ -1916,6 +2064,14 @@ class Accelerator:
|
||||
>>> optimizer = accelerator.prepare_optimizer(optimizer, device_placement=True)
|
||||
```
|
||||
"""
|
||||
if is_lomo_available():
|
||||
# We need to import locally to avoid circular imports since lomo imports stuff from
|
||||
# transformers & accelerate
|
||||
from lomo_optim import AdaLomo, Lomo
|
||||
|
||||
# Support multiple optimizers: https://github.com/huggingface/accelerate/pull/2695#discussion_r1589164607
|
||||
self.has_lomo_optimizer |= isinstance(optimizer, (Lomo, AdaLomo))
|
||||
|
||||
# Ensure we can't double wrap an optimizer due to `find_batch_size`
|
||||
if getattr(optimizer, "_is_accelerate_prepared", False):
|
||||
if optimizer not in self._optimizers:
|
||||
@ -1986,6 +2142,8 @@ class Accelerator:
|
||||
>>> accelerator.backward(loss)
|
||||
```
|
||||
"""
|
||||
learning_rate = kwargs.get("learning_rate")
|
||||
|
||||
if self.distributed_type != DistributedType.DEEPSPEED:
|
||||
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
|
||||
loss = loss / self.gradient_accumulation_steps
|
||||
@ -1995,6 +2153,8 @@ class Accelerator:
|
||||
return
|
||||
elif self.scaler is not None:
|
||||
self.scaler.scale(loss).backward(**kwargs)
|
||||
elif learning_rate is not None and self.has_lomo_optimizer:
|
||||
self.lomo_backward(loss, learning_rate)
|
||||
else:
|
||||
loss.backward(**kwargs)
|
||||
|
||||
@ -2202,7 +2362,7 @@ class Accelerator:
|
||||
"""
|
||||
return gather(tensor)
|
||||
|
||||
def gather_for_metrics(self, input_data):
|
||||
def gather_for_metrics(self, input_data, use_gather_object=False):
|
||||
"""
|
||||
Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be
|
||||
used for gathering the inputs and targets for metric calculation.
|
||||
@ -2210,6 +2370,11 @@ class Accelerator:
|
||||
Args:
|
||||
input (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`):
|
||||
The tensors or objects for calculating metrics across all processes
|
||||
use_gather_object(`bool`):
|
||||
Whether to forcibly use gather_object instead of gather (which is already done if all objects passed do
|
||||
not contain tensors). This flag can be useful for gathering tensors with different sizes that we don't
|
||||
want to pad and concatenate along the first dimension. Using it with GPU tensors is not well supported
|
||||
and inefficient as it incurs GPU -> CPU transfer since tensors would be pickled.
|
||||
|
||||
Example:
|
||||
|
||||
@ -2234,7 +2399,9 @@ class Accelerator:
|
||||
except TypeError:
|
||||
all_tensors = False
|
||||
|
||||
if not all_tensors:
|
||||
use_gather_object = use_gather_object or not all_tensors
|
||||
|
||||
if use_gather_object:
|
||||
data = gather_object(input_data)
|
||||
else:
|
||||
data = self.gather(input_data)
|
||||
@ -2253,7 +2420,11 @@ class Accelerator:
|
||||
def _adjust_samples(tensor):
|
||||
return tensor[: self.gradient_state.remainder]
|
||||
|
||||
return recursively_apply(_adjust_samples, data)
|
||||
if use_gather_object:
|
||||
# gather_object put the objects in a list
|
||||
return _adjust_samples(data)
|
||||
else:
|
||||
return recursively_apply(_adjust_samples, data)
|
||||
else: # remainder is 0
|
||||
# no remainder even though at end of dataloader, so nothing to do.
|
||||
return data
|
||||
@ -2614,9 +2785,11 @@ class Accelerator:
|
||||
if safe_serialization:
|
||||
state_dict = clean_state_dict_for_safetensors(state_dict)
|
||||
weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
|
||||
filename_pattern = SAFE_WEIGHTS_PATTERN_NAME if safe_serialization else WEIGHTS_PATTERN_NAME
|
||||
|
||||
# Shard the model if it is too big.
|
||||
shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name)
|
||||
state_dict_split = split_torch_state_dict_into_shards(
|
||||
state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size
|
||||
)
|
||||
|
||||
# Clean the folder from a previous save
|
||||
for filename in os.listdir(save_directory):
|
||||
@ -2632,31 +2805,36 @@ class Accelerator:
|
||||
if (
|
||||
filename.startswith(weights_no_suffix)
|
||||
and os.path.isfile(full_filename)
|
||||
and filename not in shards.keys()
|
||||
and filename not in state_dict_split.filename_to_tensors.keys()
|
||||
and reg.fullmatch(filename_no_suffix) is not None
|
||||
and PartialState().is_main_process
|
||||
):
|
||||
os.remove(full_filename)
|
||||
|
||||
# Save the model
|
||||
for shard_file, shard in shards.items():
|
||||
self.save(shard, os.path.join(save_directory, shard_file), safe_serialization=safe_serialization)
|
||||
for filename, tensors in state_dict_split.filename_to_tensors.items():
|
||||
shard = {tensor: state_dict[tensor] for tensor in tensors}
|
||||
self.save(shard, os.path.join(save_directory, filename), safe_serialization=safe_serialization)
|
||||
|
||||
if index is None:
|
||||
path_to_weights = os.path.join(save_directory, WEIGHTS_NAME)
|
||||
logger.info(f"Model weights saved in {path_to_weights}")
|
||||
else:
|
||||
# Save index if sharded
|
||||
if state_dict_split.is_sharded:
|
||||
index = {
|
||||
"metadata": state_dict_split.metadata,
|
||||
"weight_map": state_dict_split.tensor_to_filename,
|
||||
}
|
||||
save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME
|
||||
save_index_file = os.path.join(save_directory, save_index_file)
|
||||
# Save the index as well
|
||||
with open(save_index_file, "w", encoding="utf-8") as f:
|
||||
content = json.dumps(index, indent=2, sort_keys=True) + "\n"
|
||||
f.write(content)
|
||||
logger.info(
|
||||
f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
|
||||
f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
|
||||
f"split in {len(state_dict_split.filename_to_tensors)} checkpoint shards. You can find where each parameters has been saved in the "
|
||||
f"index located at {save_index_file}."
|
||||
)
|
||||
else:
|
||||
path_to_weights = os.path.join(save_directory, WEIGHTS_NAME)
|
||||
logger.info(f"Model weights saved in {path_to_weights}")
|
||||
|
||||
def register_save_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle:
|
||||
"""
|
||||
@ -2815,6 +2993,7 @@ class Accelerator:
|
||||
schedulers,
|
||||
dataloaders,
|
||||
self.state.process_index,
|
||||
self.step,
|
||||
self.scaler,
|
||||
save_on_each_node=self.project_configuration.save_on_each_node,
|
||||
safe_serialization=safe_serialization,
|
||||
@ -2955,13 +3134,15 @@ class Accelerator:
|
||||
if map_location is None:
|
||||
if self.num_processes > 1 and self.distributed_type in (
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_MUSA,
|
||||
DistributedType.MULTI_NPU,
|
||||
):
|
||||
map_location = "on_device"
|
||||
else:
|
||||
map_location = "cpu"
|
||||
|
||||
load_accelerator_state(
|
||||
override_attributes = load_accelerator_state(
|
||||
input_dir,
|
||||
models,
|
||||
optimizers,
|
||||
@ -2972,11 +3153,14 @@ class Accelerator:
|
||||
map_location,
|
||||
**load_model_func_kwargs,
|
||||
)
|
||||
self.step = override_attributes["step"]
|
||||
custom_checkpoints = [
|
||||
f for f in os.listdir(input_dir) if re.search(r"^custom_checkpoint_\d+\.pkl$", f) is not None
|
||||
]
|
||||
if len(custom_checkpoints) != len(self._custom_objects):
|
||||
err = "Number of custom checkpoints in folder {input_dir} does not match the number of registered objects:"
|
||||
err = (
|
||||
f"Number of custom checkpoints in folder {input_dir} does not match the number of registered objects:"
|
||||
)
|
||||
err += f"\n\tFound checkpoints: {len(custom_checkpoints)}"
|
||||
err += f"\n\tRegistered objects: {len(self._custom_objects)}\n"
|
||||
err += "Please make sure to only load checkpoints from folders that were created with the same set of registered objects,"
|
||||
@ -2987,7 +3171,7 @@ class Accelerator:
|
||||
for index, obj in enumerate(self._custom_objects):
|
||||
load_custom_state(obj, input_dir, index)
|
||||
|
||||
def free_memory(self):
|
||||
def free_memory(self, *objects):
|
||||
"""
|
||||
Will release all references to the internal objects stored and call the garbage collector. You should call this
|
||||
method between two trainings with different models/optimizers. Also will reset `Accelerator.step` to 0.
|
||||
@ -3000,19 +3184,23 @@ class Accelerator:
|
||||
>>> accelerator = Accelerator()
|
||||
>>> model, optimizer, scheduler = ...
|
||||
>>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
|
||||
>>> accelerator.free_memory()
|
||||
>>> del model, optimizer, scheduler
|
||||
>>> model, optimizer, scheduler = accelerator.free_memory(model, optimizer, scheduler)
|
||||
```
|
||||
"""
|
||||
# Deepspeed needs a bit more prep that should be done first
|
||||
if hasattr(self, "deepspeed_engine_wrapped"):
|
||||
if self.deepspeed_engine_wrapped is not None:
|
||||
self.deepspeed_engine_wrapped.engine.destroy()
|
||||
self.deepspeed_engine_wrapped = None
|
||||
objects = release_memory(*objects)
|
||||
self._schedulers = []
|
||||
self._optimizers = []
|
||||
self._models = []
|
||||
self._dataloaders = []
|
||||
self.deepspeed_engine_wrapped = None
|
||||
self.step = 0
|
||||
release_memory()
|
||||
return objects
|
||||
|
||||
def clear(self):
|
||||
def clear(self, *objects):
|
||||
"""
|
||||
Alias for [`Accelerate.free_memory`], releases all references to the internal objects stored and call the
|
||||
garbage collector. You should call this method between two trainings with different models/optimizers.
|
||||
@ -3025,11 +3213,10 @@ class Accelerator:
|
||||
>>> accelerator = Accelerator()
|
||||
>>> model, optimizer, scheduler = ...
|
||||
>>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
|
||||
>>> accelerator.free_memory()
|
||||
>>> del model, optimizer, scheduler
|
||||
>>> model, optimizer, scheduler = accelerator.clear(model, optimizer, scheduler)
|
||||
```
|
||||
"""
|
||||
self.free_memory()
|
||||
return self.free_memory(*objects)
|
||||
|
||||
def _get_named_parameters(self, *args):
|
||||
named_parameters = {}
|
||||
@ -3185,6 +3372,66 @@ class Accelerator:
|
||||
yield
|
||||
autocast_context.__exit__(*sys.exc_info())
|
||||
|
||||
@contextmanager
|
||||
def profile(self, profile_handler: ProfileKwargs | None = None):
|
||||
"""
|
||||
Will profile the code inside the context manager. The profile will be saved to a Chrome Trace file if
|
||||
`profile_handler.output_trace_dir` is set.
|
||||
|
||||
A different `profile_handler` can be passed in to override the one set in the `Accelerator` object.
|
||||
|
||||
Args:
|
||||
profile_handler (`ProfileKwargs`, *optional*):
|
||||
The profile handler to use for this context manager. If not passed, will use the one set in the
|
||||
`Accelerator` object.
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
# Profile with default settings
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import ProfileKwargs
|
||||
|
||||
accelerator = Accelerator()
|
||||
with accelerator.profile() as prof:
|
||||
train()
|
||||
accelerator.print(prof.key_averages().table())
|
||||
|
||||
|
||||
# Profile with the custom handler
|
||||
def custom_handler(prof):
|
||||
print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10))
|
||||
|
||||
|
||||
kwargs = ProfileKwargs(schedule_option=dict(wait=1, warmup=1, active=1), on_trace_ready=custom_handler)
|
||||
accelerator = Accelerator(kwarg_handler=[kwargs])
|
||||
with accelerator.profile() as prof:
|
||||
for _ in range(10):
|
||||
train_iteration()
|
||||
prof.step()
|
||||
|
||||
|
||||
# Profile and export to Chrome Trace
|
||||
kwargs = ProfileKwargs(output_trace_dir="output_trace")
|
||||
accelerator = Accelerator(kwarg_handler=[kwargs])
|
||||
with accelerator.profile():
|
||||
train()
|
||||
```
|
||||
"""
|
||||
profile_handler = profile_handler or self.profile_handler or ProfileKwargs()
|
||||
|
||||
with profile_handler.build() as profiler:
|
||||
yield profiler
|
||||
|
||||
if profile_handler.output_trace_dir is None:
|
||||
return
|
||||
|
||||
os.makedirs(profile_handler.output_trace_dir, exist_ok=True)
|
||||
profiler.export_chrome_trace(
|
||||
os.path.join(profile_handler.output_trace_dir, PROFILE_PATTERN_NAME.format(suffix=self.process_index))
|
||||
)
|
||||
self.wait_for_everyone()
|
||||
|
||||
@property
|
||||
def optimizer_step_was_skipped(self):
|
||||
"""
|
||||
@ -3242,3 +3489,27 @@ class Accelerator:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def lomo_backward(self, loss: torch.Tensor, learning_rate: float) -> None:
|
||||
"""
|
||||
Runs backward pass on LOMO optimizers.
|
||||
"""
|
||||
if is_lomo_available():
|
||||
# We need to import locally to avoid circular imports since lomo imports stuff from
|
||||
# transformers & accelerate
|
||||
from lomo_optim import AdaLomo, Lomo
|
||||
|
||||
if learning_rate is None:
|
||||
raise ValueError("A learning rate must be passed in order to call backward pass with LOMO optimizers.")
|
||||
|
||||
_backward_called = False
|
||||
|
||||
for optimizer in self._optimizers:
|
||||
if isinstance(optimizer.optimizer, (Lomo, AdaLomo)):
|
||||
optimizer.optimizer.fused_backward(loss, learning_rate)
|
||||
_backward_called = True
|
||||
|
||||
if not _backward_called:
|
||||
raise ValueError(
|
||||
"Backward pass not properly called on LOMO optimizers. Are you sure you passed a LOMO optimizer in accelerator.prepare()?"
|
||||
)
|
||||
|
||||
@ -31,11 +31,14 @@ from .hooks import (
|
||||
)
|
||||
from .utils import (
|
||||
OffloadedWeightsLoader,
|
||||
check_cuda_p2p_ib_support,
|
||||
check_device_map,
|
||||
extract_submodules_state_dict,
|
||||
find_tied_parameters,
|
||||
get_balanced_memory,
|
||||
infer_auto_device_map,
|
||||
is_mlu_available,
|
||||
is_musa_available,
|
||||
is_npu_available,
|
||||
is_torch_version,
|
||||
is_xpu_available,
|
||||
@ -430,7 +433,7 @@ def dispatch_model(
|
||||
[device for device in set(device_map.values()) if device in ("cpu", "disk")]
|
||||
)
|
||||
if len(offloaded_devices_str) > 0:
|
||||
logging.warning(
|
||||
logger.warning(
|
||||
f"Some parameters are on the meta device device because they were offloaded to the {offloaded_devices_str}."
|
||||
)
|
||||
|
||||
@ -455,19 +458,36 @@ def dispatch_model(
|
||||
|
||||
return wrapper
|
||||
|
||||
# Make sure to update _accelerate_added_attributes in hooks.py if you add any hook
|
||||
model.to = add_warning(model.to, model)
|
||||
if is_npu_available():
|
||||
model.npu = add_warning(model.npu, model)
|
||||
elif is_mlu_available():
|
||||
model.mlu = add_warning(model.mlu, model)
|
||||
elif is_musa_available():
|
||||
model.musa = add_warning(model.musa, model)
|
||||
elif is_xpu_available():
|
||||
model.xpu = add_warning(model.xpu, model)
|
||||
else:
|
||||
model.cuda = add_warning(model.cuda, model)
|
||||
|
||||
# Check if we are using multi-gpus with RTX 4000 series
|
||||
use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1
|
||||
if use_multi_gpu and not check_cuda_p2p_ib_support():
|
||||
logger.warning(
|
||||
"We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. "
|
||||
"This can affect the multi-gpu inference when using accelerate device_map."
|
||||
"Please make sure to update your driver to the latest version which resolves this."
|
||||
)
|
||||
else:
|
||||
device = list(device_map.values())[0]
|
||||
# `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
|
||||
if is_npu_available() and isinstance(device, int):
|
||||
device = f"npu:{device}"
|
||||
elif is_mlu_available() and isinstance(device, int):
|
||||
device = f"mlu:{device}"
|
||||
elif is_musa_available() and isinstance(device, int):
|
||||
device = f"musa:{device}"
|
||||
elif is_xpu_available() and isinstance(device, int):
|
||||
device = f"xpu:{device}"
|
||||
if device != "disk":
|
||||
@ -494,6 +514,7 @@ def load_checkpoint_and_dispatch(
|
||||
skip_keys: Optional[Union[str, List[str]]] = None,
|
||||
preload_module_classes: Optional[List[str]] = None,
|
||||
force_hooks: bool = False,
|
||||
strict: bool = False,
|
||||
):
|
||||
"""
|
||||
Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
|
||||
@ -540,6 +561,9 @@ def load_checkpoint_and_dispatch(
|
||||
force_hooks (`bool`, *optional*, defaults to `False`):
|
||||
Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
|
||||
single device.
|
||||
strict (`bool`, *optional*, defaults to `False`):
|
||||
Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's
|
||||
state_dict.
|
||||
|
||||
Example:
|
||||
|
||||
@ -578,7 +602,11 @@ def load_checkpoint_and_dispatch(
|
||||
low_zero=(device_map == "balanced_low_0"),
|
||||
)
|
||||
device_map = infer_auto_device_map(
|
||||
model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype
|
||||
model,
|
||||
max_memory=max_memory,
|
||||
no_split_module_classes=no_split_module_classes,
|
||||
dtype=dtype,
|
||||
offload_buffers=offload_buffers,
|
||||
)
|
||||
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
|
||||
offload_state_dict = True
|
||||
@ -590,6 +618,7 @@ def load_checkpoint_and_dispatch(
|
||||
dtype=dtype,
|
||||
offload_state_dict=offload_state_dict,
|
||||
offload_buffers=offload_buffers,
|
||||
strict=strict,
|
||||
)
|
||||
if device_map is None:
|
||||
return model
|
||||
|
||||
@ -18,7 +18,7 @@ from typing import List
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from safetensors.torch import load_file
|
||||
from safetensors.torch import load_model
|
||||
from torch.cuda.amp import GradScaler
|
||||
|
||||
from .utils import (
|
||||
@ -55,6 +55,7 @@ def save_accelerator_state(
|
||||
schedulers: list,
|
||||
dataloaders: list,
|
||||
process_index: int,
|
||||
step: int,
|
||||
scaler: GradScaler = None,
|
||||
save_on_each_node: bool = False,
|
||||
safe_serialization: bool = True,
|
||||
@ -82,6 +83,8 @@ def save_accelerator_state(
|
||||
A list of dataloader instances to save their sampler states
|
||||
process_index (`int`):
|
||||
The current process index in the Accelerator state
|
||||
step (`int`):
|
||||
The current step in the internal step tracker
|
||||
scaler (`torch.cuda.amp.GradScaler`, *optional*):
|
||||
An optional gradient scaler instance to save
|
||||
save_on_each_node (`bool`, *optional*):
|
||||
@ -120,8 +123,7 @@ def save_accelerator_state(
|
||||
from .data_loader import IterableDatasetShard, SeedableRandomSampler
|
||||
|
||||
if isinstance(dataloader.dataset, IterableDatasetShard):
|
||||
sampler = dataloader.sampler.sampler
|
||||
|
||||
sampler = dataloader.get_sampler()
|
||||
if isinstance(sampler, SeedableRandomSampler):
|
||||
save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
|
||||
logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}")
|
||||
@ -135,6 +137,7 @@ def save_accelerator_state(
|
||||
# Random number generator states
|
||||
states = {}
|
||||
states_name = f"{RNG_STATE_NAME}_{process_index}.pkl"
|
||||
states["step"] = step
|
||||
states["random_state"] = random.getstate()
|
||||
states["numpy_random_seed"] = np.random.get_state()
|
||||
states["torch_manual_seed"] = torch.get_rng_state()
|
||||
@ -181,7 +184,12 @@ def load_accelerator_state(
|
||||
What device to load the optimizer state onto. Should be one of either "cpu" or "on_device".
|
||||
load_model_func_kwargs (`dict`, *optional*):
|
||||
Additional arguments that can be passed to the model's `load_state_dict` method.
|
||||
|
||||
Returns:
|
||||
`dict`: Contains the `Accelerator` attributes to override while loading the state.
|
||||
"""
|
||||
# stores the `Accelerator` attributes to override
|
||||
override_attributes = dict()
|
||||
if map_location not in [None, "cpu", "on_device"]:
|
||||
raise TypeError(
|
||||
"Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`"
|
||||
@ -197,12 +205,12 @@ def load_accelerator_state(
|
||||
ending = f"_{i}" if i > 0 else ""
|
||||
input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors")
|
||||
if input_model_file.exists():
|
||||
state_dict = load_file(input_model_file, device=str(map_location))
|
||||
load_model(model, input_model_file, device=str(map_location), **load_model_func_kwargs)
|
||||
else:
|
||||
# Load with torch
|
||||
input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin")
|
||||
state_dict = torch.load(input_model_file, map_location=map_location)
|
||||
models[i].load_state_dict(state_dict, **load_model_func_kwargs)
|
||||
model.load_state_dict(state_dict, **load_model_func_kwargs)
|
||||
logger.info("All model weights loaded successfully")
|
||||
|
||||
# Optimizer states
|
||||
@ -227,10 +235,9 @@ def load_accelerator_state(
|
||||
from .data_loader import IterableDatasetShard, SeedableRandomSampler
|
||||
|
||||
if isinstance(dataloader.dataset, IterableDatasetShard):
|
||||
sampler = dataloader.sampler.sampler
|
||||
|
||||
sampler = dataloader.get_sampler()
|
||||
if isinstance(sampler, SeedableRandomSampler):
|
||||
dataloader.sampler.sampler = torch.load(input_sampler_file)
|
||||
sampler = dataloader.set_sampler(torch.load(input_sampler_file))
|
||||
logger.info("All dataloader sampler states loaded successfully")
|
||||
|
||||
# GradScaler state
|
||||
@ -242,6 +249,7 @@ def load_accelerator_state(
|
||||
# Random states
|
||||
try:
|
||||
states = torch.load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl"))
|
||||
override_attributes["step"] = states["step"]
|
||||
random.setstate(states["random_state"])
|
||||
np.random.set_state(states["numpy_random_seed"])
|
||||
torch.set_rng_state(states["torch_manual_seed"])
|
||||
@ -255,6 +263,8 @@ def load_accelerator_state(
|
||||
except Exception:
|
||||
logger.info("Could not load random states")
|
||||
|
||||
return override_attributes
|
||||
|
||||
|
||||
def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False):
|
||||
"""
|
||||
|
||||
@ -0,0 +1,13 @@
|
||||
# Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
@ -14,18 +14,18 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from argparse import ArgumentParser
|
||||
|
||||
from accelerate.commands.config import get_config_parser
|
||||
from accelerate.commands.env import env_command_parser
|
||||
from accelerate.commands.estimate import estimate_command_parser
|
||||
from accelerate.commands.launch import launch_command_parser
|
||||
from accelerate.commands.merge import merge_command_parser
|
||||
from accelerate.commands.test import test_command_parser
|
||||
from accelerate.commands.tpu import tpu_command_parser
|
||||
from accelerate.commands.utils import CustomArgumentParser
|
||||
|
||||
|
||||
def main():
|
||||
parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
|
||||
parser = CustomArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
|
||||
subparsers = parser.add_subparsers(help="accelerate command helpers")
|
||||
|
||||
# Register commands
|
||||
@ -33,6 +33,7 @@ def main():
|
||||
estimate_command_parser(subparsers=subparsers)
|
||||
env_command_parser(subparsers=subparsers)
|
||||
launch_command_parser(subparsers=subparsers)
|
||||
merge_command_parser(subparsers=subparsers)
|
||||
tpu_command_parser(subparsers=subparsers)
|
||||
test_command_parser(subparsers=subparsers)
|
||||
|
||||
|
||||
@ -20,7 +20,9 @@ from ...utils import (
|
||||
ComputeEnvironment,
|
||||
DistributedType,
|
||||
is_deepspeed_available,
|
||||
is_mlu_available,
|
||||
is_mps_available,
|
||||
is_musa_available,
|
||||
is_npu_available,
|
||||
is_transformers_available,
|
||||
is_xpu_available,
|
||||
@ -48,7 +50,16 @@ from .config_utils import (
|
||||
def get_cluster_input():
|
||||
distributed_type = _ask_options(
|
||||
"Which type of machine are you using?",
|
||||
["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "TPU"],
|
||||
[
|
||||
"No distributed training",
|
||||
"multi-CPU",
|
||||
"multi-XPU",
|
||||
"multi-GPU",
|
||||
"multi-NPU",
|
||||
"multi-MLU",
|
||||
"multi-MUSA",
|
||||
"TPU",
|
||||
],
|
||||
_convert_distributed_mode,
|
||||
)
|
||||
|
||||
@ -64,6 +75,8 @@ def get_cluster_input():
|
||||
|
||||
if distributed_type in [
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_MUSA,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.MULTI_CPU,
|
||||
@ -116,6 +129,7 @@ def get_cluster_input():
|
||||
use_cpu = False
|
||||
|
||||
ipex_config = {}
|
||||
mpirun_config = {}
|
||||
if use_cpu:
|
||||
ipex_config["ipex"] = _ask_field(
|
||||
"Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:",
|
||||
@ -123,10 +137,32 @@ def get_cluster_input():
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if distributed_type == DistributedType.MULTI_CPU:
|
||||
use_mpirun = _ask_field(
|
||||
"Do you want accelerate to launch mpirun? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if use_mpirun:
|
||||
mpirun_hostfile = _ask_field(
|
||||
"Please enter the path to the hostfile to use with mpirun [~/hostfile]: ",
|
||||
str,
|
||||
default="~/hostfile",
|
||||
)
|
||||
mpirun_config["mpirun_hostfile"] = os.path.expanduser(mpirun_hostfile.strip())
|
||||
mpirun_config["mpirun_ccl"] = _ask_field("Enter the number of oneCCL worker threads [1]: ", default=1)
|
||||
if (
|
||||
not use_cpu
|
||||
and is_xpu_available()
|
||||
and distributed_type not in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.XLA]
|
||||
and distributed_type
|
||||
not in [
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.XLA,
|
||||
DistributedType.MULTI_MUSA,
|
||||
]
|
||||
):
|
||||
ipex_config["use_xpu"] = _ask_field(
|
||||
"Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:",
|
||||
@ -181,7 +217,14 @@ def get_cluster_input():
|
||||
deepspeed_config = {}
|
||||
if (
|
||||
distributed_type
|
||||
in [DistributedType.MULTI_GPU, DistributedType.MULTI_XPU, DistributedType.MULTI_NPU, DistributedType.NO]
|
||||
in [
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_MUSA,
|
||||
DistributedType.NO,
|
||||
]
|
||||
and not use_mps
|
||||
):
|
||||
use_deepspeed = _ask_field(
|
||||
@ -273,6 +316,18 @@ def get_cluster_input():
|
||||
"When `zero3_init_flag` is set, it requires Transformers to be installed. "
|
||||
"Please run `pip3 install transformers`."
|
||||
)
|
||||
use_moe = _ask_field(
|
||||
"Do you want to enable Mixture-of-Experts training (MoE)? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
if use_moe:
|
||||
deepspeed_config["deepspeed_moe_layer_cls_names"] = _ask_field(
|
||||
"Specify the comma-separated list of transformers MoE layer class names (case-sensitive), e.g : "
|
||||
" `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ... : ",
|
||||
str,
|
||||
)
|
||||
|
||||
if num_machines > 1:
|
||||
launcher_query = "Which Type of launcher do you want to use?"
|
||||
@ -317,7 +372,13 @@ def get_cluster_input():
|
||||
)
|
||||
|
||||
fsdp_config = {}
|
||||
if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU]:
|
||||
if distributed_type in [
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_MUSA,
|
||||
DistributedType.MULTI_XPU,
|
||||
]:
|
||||
use_fsdp = _ask_field(
|
||||
"Do you want to use FullyShardedDataParallel? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
@ -404,6 +465,12 @@ def get_cluster_input():
|
||||
default=True,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
fsdp_config["fsdp_activation_checkpointing"] = _ask_field(
|
||||
"Do you want to enable FSDP activation checkpointing? [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
megatron_lm_config = {}
|
||||
if distributed_type in [DistributedType.MULTI_GPU]:
|
||||
@ -480,12 +547,16 @@ def get_cluster_input():
|
||||
DistributedType.MULTI_CPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_MUSA,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.XLA,
|
||||
]:
|
||||
machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "")
|
||||
if machine_type == "TPU":
|
||||
machine_type += " cores"
|
||||
elif machine_type == "CPU":
|
||||
machine_type = "processes"
|
||||
else:
|
||||
machine_type += "(s)"
|
||||
num_processes = _ask_field(
|
||||
@ -513,6 +584,8 @@ def get_cluster_input():
|
||||
distributed_type
|
||||
in [
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_MUSA,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_XPU,
|
||||
DistributedType.NO,
|
||||
@ -522,6 +595,10 @@ def get_cluster_input():
|
||||
):
|
||||
if is_npu_available():
|
||||
machine_type = "NPU(s)"
|
||||
elif is_mlu_available():
|
||||
machine_type = "MLU(s)"
|
||||
elif is_musa_available():
|
||||
machine_type = "MUSA(s)"
|
||||
else:
|
||||
machine_type = "GPU(s)"
|
||||
gpu_ids = _ask_field(
|
||||
@ -529,6 +606,16 @@ def get_cluster_input():
|
||||
default="all",
|
||||
)
|
||||
|
||||
# CPU affinity is only supported on NVIDIA hardware for now
|
||||
enable_cpu_affinity = False
|
||||
if distributed_type in (DistributedType.NO, DistributedType.MULTI_GPU) and not use_cpu and not use_mps:
|
||||
enable_cpu_affinity = _ask_field(
|
||||
"Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: ",
|
||||
_convert_yes_no_to_bool,
|
||||
default=False,
|
||||
error_message="Please enter yes or no.",
|
||||
)
|
||||
|
||||
if distributed_type == DistributedType.XLA:
|
||||
mixed_precision = "no"
|
||||
main_training_function = _ask_field(
|
||||
@ -641,6 +728,7 @@ def get_cluster_input():
|
||||
fsdp_config=fsdp_config,
|
||||
megatron_lm_config=megatron_lm_config,
|
||||
ipex_config=ipex_config,
|
||||
mpirun_config=mpirun_config,
|
||||
use_cpu=use_cpu,
|
||||
rdzv_backend=rdzv_backend,
|
||||
same_network=same_network,
|
||||
@ -654,4 +742,5 @@ def get_cluster_input():
|
||||
tpu_use_cluster=tpu_use_cluster,
|
||||
dynamo_config=dynamo_config,
|
||||
debug=debug,
|
||||
enable_cpu_affinity=enable_cpu_affinity,
|
||||
)
|
||||
|
||||
@ -45,7 +45,7 @@ def load_config_from_file(config_file):
|
||||
if not os.path.isfile(config_file):
|
||||
raise FileNotFoundError(
|
||||
f"The passed configuration file `{config_file}` does not exist. "
|
||||
"Please pass an existing file to `accelerate launch`, or use the the default one "
|
||||
"Please pass an existing file to `accelerate launch`, or use the default one "
|
||||
"created through `accelerate config` and run `accelerate launch` "
|
||||
"without the `--config_file` argument."
|
||||
)
|
||||
@ -109,6 +109,8 @@ class BaseConfig:
|
||||
config_dict["use_cpu"] = False
|
||||
if "debug" not in config_dict:
|
||||
config_dict["debug"] = False
|
||||
if "enable_cpu_affinity" not in config_dict:
|
||||
config_dict["enable_cpu_affinity"] = False
|
||||
extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
|
||||
if len(extra_keys) > 0:
|
||||
raise ValueError(
|
||||
@ -143,6 +145,8 @@ class BaseConfig:
|
||||
config_dict["use_cpu"] = False
|
||||
if "debug" not in config_dict:
|
||||
config_dict["debug"] = False
|
||||
if "enable_cpu_affinity" not in config_dict:
|
||||
config_dict["enable_cpu_affinity"] = False
|
||||
extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
|
||||
if len(extra_keys) > 0:
|
||||
raise ValueError(
|
||||
@ -163,7 +167,7 @@ class BaseConfig:
|
||||
self.distributed_type = SageMakerDistributedType(self.distributed_type)
|
||||
else:
|
||||
self.distributed_type = DistributedType(self.distributed_type)
|
||||
if self.dynamo_config is None:
|
||||
if getattr(self, "dynamo_config", None) is None:
|
||||
self.dynamo_config = {}
|
||||
|
||||
|
||||
@ -178,6 +182,7 @@ class ClusterConfig(BaseConfig):
|
||||
rdzv_backend: Optional[str] = "static"
|
||||
same_network: Optional[bool] = False
|
||||
main_training_function: str = "main"
|
||||
enable_cpu_affinity: bool = False
|
||||
|
||||
# args for deepspeed_plugin
|
||||
deepspeed_config: dict = None
|
||||
@ -187,6 +192,8 @@ class ClusterConfig(BaseConfig):
|
||||
megatron_lm_config: dict = None
|
||||
# args for ipex
|
||||
ipex_config: dict = None
|
||||
# args for mpirun
|
||||
mpirun_config: dict = None
|
||||
# args for TPU
|
||||
downcast_bf16: bool = False
|
||||
|
||||
@ -212,6 +219,8 @@ class ClusterConfig(BaseConfig):
|
||||
self.megatron_lm_config = {}
|
||||
if self.ipex_config is None:
|
||||
self.ipex_config = {}
|
||||
if self.mpirun_config is None:
|
||||
self.mpirun_config = {}
|
||||
return super().__post_init__()
|
||||
|
||||
|
||||
@ -232,3 +241,4 @@ class SageMakerConfig(BaseConfig):
|
||||
sagemaker_metrics_file: str = None
|
||||
additional_args: dict = None
|
||||
dynamo_config: dict = None
|
||||
enable_cpu_affinity: bool = False
|
||||
|
||||
@ -37,6 +37,8 @@ DYNAMO_BACKENDS = [
|
||||
"FX2TRT",
|
||||
"ONNXRT",
|
||||
"TENSORRT",
|
||||
"AOT_TORCHXLA_TRACE_ONCE",
|
||||
"TORHCHXLA_TRACE_ONCE",
|
||||
"IPEX",
|
||||
"TVM",
|
||||
]
|
||||
@ -68,7 +70,9 @@ def _convert_compute_environment(value):
|
||||
|
||||
def _convert_distributed_mode(value):
|
||||
value = int(value)
|
||||
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "XLA"][value])
|
||||
return DistributedType(
|
||||
["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "MULTI_MLU", "MULTI_MUSA", "XLA"][value]
|
||||
)
|
||||
|
||||
|
||||
def _convert_dynamo_backend(value):
|
||||
|
||||
@ -18,7 +18,7 @@ from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from ...utils import is_mlu_available, is_musa_available, is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter

@ -57,7 +57,23 @@ def write_basic_config(mixed_precision="no", save_location: str = default_json_c
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
if is_mlu_available():
num_mlus = torch.mlu.device_count()
config["num_processes"] = num_mlus
config["use_cpu"] = False
if num_mlus > 1:
config["distributed_type"] = "MULTI_MLU"
else:
config["distributed_type"] = "NO"
elif is_musa_available():
num_musas = torch.musa.device_count()
config["num_processes"] = num_musas
config["use_cpu"] = False
if num_musas > 1:
config["distributed_type"] = "MULTI_MUSA"
else:
config["distributed_type"] = "NO"
elif torch.cuda.is_available():
num_gpus = torch.cuda.device_count()
config["num_processes"] = num_gpus
config["use_cpu"] = False

@ -87,6 +103,7 @@ def write_basic_config(mixed_precision="no", save_location: str = default_json_c
config["num_processes"] = 1
config["distributed_type"] = "NO"
config["debug"] = False
config["enable_cpu_affinity"] = False
config = ClusterConfig(**config)
config.to_json_file(path)
return path
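The new MLU and MUSA branches only change which `distributed_type` the helper picks; calling it is unchanged. A minimal sketch, with a made-up save path:

from accelerate.utils import write_basic_config

# Writes a single-machine default config, auto-detecting MLU/MUSA/CUDA as in the hunk above.
path = write_basic_config(mixed_precision="no", save_location="/tmp/accelerate_default_config.json")
print(f"Default config written to {path}")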
@ -17,6 +17,7 @@
import argparse
import os
import platform
import subprocess

import numpy as np
import psutil

@ -25,7 +26,7 @@ import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available
from ..utils import is_mlu_available, is_musa_available, is_npu_available, is_xpu_available

def env_command_parser(subparsers=None):

@ -47,6 +48,8 @@ def env_command(args):
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
pt_xpu_available = is_xpu_available()
pt_mlu_available = is_mlu_available()
pt_musa_available = is_musa_available()
pt_npu_available = is_npu_available()

accelerate_config = "Not found"

@ -54,18 +57,32 @@ def env_command(args):
if args.config_file is not None or os.path.isfile(default_config_file):
accelerate_config = load_config_from_file(args.config_file).to_dict()

# if we can run which, get it
command = None
bash_location = "Not found"
if os.name == "nt":
command = ["where", "accelerate"]
elif os.name == "posix":
command = ["which", "accelerate"]
if command is not None:
bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip()
info = {
"`Accelerate` version": version,
"Platform": platform.platform(),
"`accelerate` bash location": bash_location,
"Python version": platform.python_version(),
"Numpy version": np.__version__,
"PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
"PyTorch XPU available": str(pt_xpu_available),
"PyTorch NPU available": str(pt_npu_available),
"PyTorch MLU available": str(pt_mlu_available),
"PyTorch MUSA available": str(pt_musa_available),
"System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
}
if pt_cuda_available:
info["GPU type"] = torch.cuda.get_device_name()
if pt_npu_available:
info["CANN version"] = torch.version.cann

print("\nCopy-and-paste the text below in your GitHub issue\n")
print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))
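The extra MLU/MUSA entries land in the same report that `accelerate env` prints. As a sketch, the command can also be driven programmatically (equivalent to running the CLI with no flags; assumes the parser's defaults are sufficient):

from accelerate.commands.env import env_command, env_command_parser

args = env_command_parser().parse_args([])  # no --config_file: use the default one if present
env_command(args)  # prints the "- key: value" block shown above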
@ -13,12 +13,11 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
from huggingface_hub import model_info
|
||||
from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
|
||||
|
||||
from accelerate import init_empty_weights
|
||||
from accelerate.commands.utils import CustomArgumentParser
|
||||
from accelerate.utils import (
|
||||
calculate_maximum_sizes,
|
||||
convert_bytes,
|
||||
@ -110,7 +109,6 @@ def create_empty_model(model_name: str, library_name: str, trust_remote_code: bo
|
||||
|
||||
auto_map = model_info.config.get("auto_map", False)
|
||||
config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token)
|
||||
|
||||
with init_empty_weights():
|
||||
# remote code could specify a specific `AutoModel` class in the `auto_map`
|
||||
constructor = AutoModel
|
||||
@ -183,7 +181,7 @@ def estimate_command_parser(subparsers=None):
|
||||
if subparsers is not None:
|
||||
parser = subparsers.add_parser("estimate-memory")
|
||||
else:
|
||||
parser = argparse.ArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
|
||||
parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
|
||||
|
||||
parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
|
||||
parser.add_argument(
|
||||
@ -206,6 +204,7 @@ def estimate_command_parser(subparsers=None):
|
||||
help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag
|
||||
should only be used for repositories you trust and in which you have read the code, as it will execute
|
||||
code present on the Hub on your local machine.""",
|
||||
default=False,
|
||||
)
|
||||
|
||||
if subparsers is not None:
|
||||
@ -213,6 +212,41 @@ def estimate_command_parser(subparsers=None):
return parser

def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict:
"""
Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of
1.

Args:
bytes (`int`):
The size of the model being trained.
mixed_precision (`str`):
The mixed precision that would be ran.
msamp_config (`str`):
The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`.
"""
memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1}
fp32_size = bytes
fp16_size = bytes // 2

if mixed_precision == "float32":
memory_sizes["model"] = fp32_size
memory_sizes["gradients"] = fp32_size
memory_sizes["optimizer"] = fp32_size * 2
memory_sizes["step"] = fp32_size * 4
elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None):
# With native `TransformersEngine`, there is no memory savings with FP8
# With mixed precision training, the model has weights stored
# in FP16 and FP32
memory_sizes["model"] = fp32_size
# 1.5 from weight gradient + computation (GEMM)
memory_sizes["gradients"] = fp32_size + fp16_size
# 2x from optimizer states
memory_sizes["optimizer"] = fp32_size * 2 # Optimizer states
memory_sizes["step"] = memory_sizes["optimizer"]
return memory_sizes

def gather_data(args):
"Creates an empty model and gathers the data for the sizes"
try:
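The mixed-precision branch keeps an FP32 master copy of the weights, so the totals work out roughly as below. The helper and the 1B-parameter figure are only an illustration of the arithmetic, not part of the library:

def sketch_training_usage(fp32_bytes: int) -> dict:
    # Mirrors the float16/bfloat16 branch above: FP32 master weights,
    # FP16 working copy folded into the gradient term, two Adam states.
    fp16_bytes = fp32_bytes // 2
    return {
        "model": fp32_bytes,
        "gradients": fp32_bytes + fp16_bytes,
        "optimizer": fp32_bytes * 2,
        "step": fp32_bytes * 2,
    }

print(sketch_training_usage(4 * 1024**3))  # ~4 GB of FP32 weights, i.e. roughly a 1B-parameter model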
@ -234,6 +268,7 @@ def gather_data(args):
|
||||
for dtype in args.dtypes:
|
||||
dtype_total_size = total_size
|
||||
dtype_largest_layer = largest_layer[0]
|
||||
dtype_training_size = estimate_training_usage(dtype_total_size, dtype)
|
||||
if dtype == "float16":
|
||||
dtype_total_size /= 2
|
||||
dtype_largest_layer /= 2
|
||||
@ -243,7 +278,6 @@ def gather_data(args):
|
||||
elif dtype == "int4":
|
||||
dtype_total_size /= 8
|
||||
dtype_largest_layer /= 8
|
||||
dtype_training_size = dtype_total_size * 4
|
||||
data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size])
|
||||
return data
|
||||
|
||||
@ -254,6 +288,9 @@ def estimate_command(args):
|
||||
for i, item in enumerate(row):
|
||||
if isinstance(item, (int, float)):
|
||||
row[i] = convert_bytes(item)
|
||||
elif isinstance(item, dict):
|
||||
training_usage = max(item.values())
|
||||
row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A"
|
||||
|
||||
headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]
|
||||
|
||||
|
||||
@ -28,6 +28,7 @@ import torch
|
||||
from accelerate.commands.config import default_config_file, load_config_from_file
|
||||
from accelerate.commands.config.config_args import SageMakerConfig
|
||||
from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
|
||||
from accelerate.commands.utils import CustomArgumentParser
|
||||
from accelerate.state import get_int_from_env
|
||||
from accelerate.utils import (
|
||||
ComputeEnvironment,
|
||||
@ -35,8 +36,11 @@ from accelerate.utils import (
|
||||
PrepareForLaunch,
|
||||
_filter_args,
|
||||
check_cuda_p2p_ib_support,
|
||||
convert_dict_to_env_variables,
|
||||
is_bf16_available,
|
||||
is_deepspeed_available,
|
||||
is_mlu_available,
|
||||
is_musa_available,
|
||||
is_npu_available,
|
||||
is_rich_available,
|
||||
is_sagemaker_available,
|
||||
@ -63,80 +67,93 @@ if is_rich_available():
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
options_to_group = {
|
||||
"--multi-gpu": "Distributed GPUs",
|
||||
"--tpu": "TPU",
|
||||
"--use_deepspeed": "DeepSpeed Arguments",
|
||||
"--use_fsdp": "FSDP Arguments",
|
||||
"--use_megatron_lm": "Megatron-LM Arguments",
|
||||
"multi_gpu": "Distributed GPUs",
|
||||
"tpu": "TPU",
|
||||
"use_deepspeed": "DeepSpeed Arguments",
|
||||
"use_fsdp": "FSDP Arguments",
|
||||
"use_megatron_lm": "Megatron-LM Arguments",
|
||||
}
|
||||
|
||||
|
||||
def clean_option(option):
|
||||
"Finds all cases of - after the first two characters and changes them to _"
|
||||
if option.startswith("--"):
|
||||
return option[:3] + option[3:].replace("-", "_")
|
||||
return option[2:].replace("-", "_")
|
||||
|
||||
|
||||
class _CustomHelpAction(argparse._HelpAction):
|
||||
class CustomHelpFormatter(argparse.HelpFormatter):
|
||||
"""
|
||||
This is a custom help action that will hide all arguments that are not used in the command line when the help is
|
||||
This is a custom help formatter that will hide all arguments that are not used in the command line when the help is
|
||||
called. This is useful for the case where the user is using a specific platform and only wants to see the arguments
|
||||
for that platform.
|
||||
"""
|
||||
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
|
||||
args = sys.argv[2:]
|
||||
else:
|
||||
args = sys.argv[1:]
|
||||
opts = parser._actions
|
||||
titles = [
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.titles = [
|
||||
"Hardware Selection Arguments",
|
||||
"Resource Selection Arguments",
|
||||
"Training Paradigm Arguments",
|
||||
"positional arguments",
|
||||
"optional arguments",
|
||||
]
|
||||
if len(args) > 1:
|
||||
used_platforms = [arg for arg in args if arg in options_to_group.keys()]
|
||||
args = list(map(clean_option, args))
|
||||
used_titles = [options_to_group[o] for o in used_platforms]
|
||||
for i, arg in enumerate(opts):
|
||||
# If the argument's container is outside of the used titles, hide it
|
||||
if arg.container.title not in titles + used_titles:
|
||||
opts[i].help = argparse.SUPPRESS
|
||||
# If the argument is hardware selection, but not being passed, hide it
|
||||
elif arg.container.title == "Hardware Selection Arguments":
|
||||
if set(arg.option_strings).isdisjoint(set(args)):
|
||||
opts[i].help = argparse.SUPPRESS
|
||||
else:
|
||||
opts[i].help = arg.help + " (currently selected)"
|
||||
# If the argument is a training paradigm, but not being passed, hide it
|
||||
elif arg.container.title == "Training Paradigm Arguments":
|
||||
if set(arg.option_strings).isdisjoint(set(used_platforms)):
|
||||
opts[i].help = argparse.SUPPRESS
|
||||
else:
|
||||
opts[i].help = arg.help + " (currently selected)"
|
||||
for i, group in enumerate(list(parser._action_groups)):
|
||||
# If all arguments in the group are hidden, hide the group
|
||||
if all([arg.help == argparse.SUPPRESS for arg in group._group_actions]):
|
||||
parser._action_groups.remove(group)
|
||||
|
||||
super().__call__(parser, namespace, values, option_string)
|
||||
def add_argument(self, action: argparse.Action):
|
||||
if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
|
||||
args = sys.argv[2:]
|
||||
else:
|
||||
args = sys.argv[1:]
|
||||
|
||||
if len(args) > 1:
|
||||
args = list(map(clean_option, args))
|
||||
used_platforms = [arg for arg in args if arg in options_to_group.keys()]
|
||||
used_titles = [options_to_group[o] for o in used_platforms]
|
||||
if action.container.title not in self.titles + used_titles:
|
||||
action.help = argparse.SUPPRESS
|
||||
elif action.container.title == "Hardware Selection Arguments":
|
||||
if set(action.option_strings).isdisjoint(set(args)):
|
||||
action.help = argparse.SUPPRESS
|
||||
else:
|
||||
action.help = action.help + " (currently selected)"
|
||||
elif action.container.title == "Training Paradigm Arguments":
|
||||
if set(action.option_strings).isdisjoint(set(args)):
|
||||
action.help = argparse.SUPPRESS
|
||||
else:
|
||||
action.help = action.help + " (currently selected)"
|
||||
|
||||
action.option_strings = [s for s in action.option_strings if "-" not in s[2:]]
|
||||
super().add_argument(action)
|
||||
|
||||
def end_section(self):
|
||||
if len(self._current_section.items) < 2:
|
||||
self._current_section.items = []
|
||||
self._current_section.heading = ""
|
||||
super().end_section()
|
||||
|
||||
|
||||
def launch_command_parser(subparsers=None):
|
||||
description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)"
|
||||
if subparsers is not None:
|
||||
parser = subparsers.add_parser("launch", add_help=False, allow_abbrev=False)
|
||||
parser = subparsers.add_parser(
|
||||
"launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter
|
||||
)
|
||||
else:
|
||||
parser = argparse.ArgumentParser("Accelerate launch command", add_help=False, allow_abbrev=False)
|
||||
parser = CustomArgumentParser(
|
||||
"Accelerate launch command",
|
||||
description=description,
|
||||
add_help=False,
|
||||
allow_abbrev=False,
|
||||
formatter_class=CustomHelpFormatter,
|
||||
)
|
||||
|
||||
parser.register("action", "help", _CustomHelpAction)
|
||||
parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
|
||||
|
||||
parser.add_argument(
|
||||
"--config_file", default=None, help="The config file to use for the default values in the launching script."
|
||||
"--config_file",
|
||||
default=None,
|
||||
help="The config file to use for the default values in the launching script.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--quiet",
|
||||
@ -191,6 +208,12 @@ def launch_command_parser(subparsers=None):
|
||||
default=None,
|
||||
help="The number of CPU threads per process. Can be tuned for optimal performance.",
|
||||
)
|
||||
resource_args.add_argument(
|
||||
"--enable_cpu_affinity",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.",
|
||||
)
|
||||
|
||||
# Dynamo arguments
|
||||
resource_args.add_argument(
|
||||
@ -281,6 +304,15 @@ def launch_command_parser(subparsers=None):
|
||||
type=str,
|
||||
help="Tee std streams into a log file and also to console.",
|
||||
)
|
||||
distributed_args.add_argument(
|
||||
"--log_dir",
|
||||
type=str,
|
||||
default=None,
|
||||
help=(
|
||||
"Base directory to use for log files when using torchrun/torch.distributed.run as launcher. "
|
||||
"Use with --tee to redirect std streams info log files."
|
||||
),
|
||||
)
|
||||
distributed_args.add_argument(
|
||||
"--role",
|
||||
type=str,
|
||||
@ -309,7 +341,7 @@ def launch_command_parser(subparsers=None):
|
||||
distributed_args.add_argument(
|
||||
"--monitor_interval",
|
||||
type=float,
|
||||
default=5,
|
||||
default=0.1,
|
||||
help="Interval, in seconds, to monitor the state of workers.",
|
||||
)
|
||||
parser.add_argument(
|
||||
@ -465,6 +497,13 @@ def launch_command_parser(subparsers=None):
|
||||
type=str,
|
||||
help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.",
|
||||
)
|
||||
deepspeed_args.add_argument(
|
||||
"--deepspeed_moe_layer_cls_names",
|
||||
default=None,
|
||||
type=str,
|
||||
help="comma-separated list of transformer MoE layer class names (case-sensitive) to wrap ,e.g, `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ..."
|
||||
" (useful only when `use_deepspeed` flag is passed).",
|
||||
)
|
||||
|
||||
# fsdp arguments
|
||||
fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Shared Data Parallelism.")
|
||||
@ -546,6 +585,12 @@ def launch_command_parser(subparsers=None):
|
||||
help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0."
|
||||
" (useful only when `use_fsdp` flag is passed).",
|
||||
)
|
||||
fsdp_args.add_argument(
|
||||
"--fsdp_activation_checkpointing",
|
||||
default="false",
|
||||
type=str,
|
||||
help="Decides Whether (true|false) intermediate activations are freed during the forward pass, and a checkpoint is left as a placeholder. (useful only when `use_fsdp` flag is passed).",
|
||||
)
|
||||
|
||||
# megatron_lm args
|
||||
megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.")
|
||||
@ -625,6 +670,22 @@ def launch_command_parser(subparsers=None):
|
||||
),
|
||||
)
|
||||
|
||||
# MPI arguments
|
||||
mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU")
|
||||
mpirun_args.add_argument(
|
||||
"--mpirun_hostfile",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will "
|
||||
"get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.",
|
||||
)
|
||||
mpirun_args.add_argument(
|
||||
"--mpirun_ccl",
|
||||
type=int,
|
||||
default=1,
|
||||
help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.",
|
||||
)
|
||||
|
||||
# Other arguments of the training scripts
|
||||
parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")
|
||||
|
||||
@ -667,6 +728,7 @@ def multi_gpu_launcher(args):
|
||||
distrib_run.get_args_parser(),
|
||||
["--training_script", args.training_script, "--training_script_args", args.training_script_args],
|
||||
)
|
||||
|
||||
with patch_environment(**current_env):
|
||||
try:
|
||||
distrib_run.run(args)
|
||||
@ -684,6 +746,8 @@ def deepspeed_launcher(args):
|
||||
|
||||
if not is_deepspeed_available():
|
||||
raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")
|
||||
else:
|
||||
from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME
|
||||
|
||||
cmd, current_env = prepare_deepspeed_cmd_env(args)
|
||||
if not check_cuda_p2p_ib_support():
|
||||
@ -699,11 +763,10 @@ def deepspeed_launcher(args):
|
||||
logger.warning(message)
|
||||
|
||||
if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
|
||||
with open(".deepspeed_env", "a") as f:
|
||||
for key, value in current_env.items():
|
||||
if ";" in value or " " in value:
|
||||
continue
|
||||
f.write(f"{key}={value}\n")
|
||||
with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f:
|
||||
valid_env_items = convert_dict_to_env_variables(current_env)
|
||||
if len(valid_env_items) > 1:
|
||||
f.writelines(valid_env_items)
|
||||
|
||||
process = subprocess.Popen(cmd, env=current_env)
|
||||
process.wait()
|
||||
@ -868,7 +931,13 @@ def _validate_launch_command(args):
|
||||
args.multi_gpu = (
|
||||
True
|
||||
if defaults.distributed_type
|
||||
in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU)
|
||||
in (
|
||||
DistributedType.MULTI_GPU,
|
||||
DistributedType.MULTI_NPU,
|
||||
DistributedType.MULTI_MLU,
|
||||
DistributedType.MULTI_MUSA,
|
||||
DistributedType.MULTI_XPU,
|
||||
)
|
||||
else False
|
||||
)
|
||||
args.tpu = defaults.distributed_type == DistributedType.XLA
|
||||
@ -906,6 +975,8 @@ def _validate_launch_command(args):
|
||||
setattr(args, k, defaults.dynamo_config[k])
|
||||
for k in defaults.ipex_config:
|
||||
setattr(args, k, defaults.ipex_config[k])
|
||||
for k in defaults.mpirun_config:
|
||||
setattr(args, k, defaults.mpirun_config[k])
|
||||
continue
|
||||
|
||||
# Those args are handled separately
|
||||
@ -942,6 +1013,10 @@ def _validate_launch_command(args):
|
||||
if args.num_processes is None:
|
||||
if args.use_xpu and is_xpu_available():
|
||||
args.num_processes = torch.xpu.device_count()
|
||||
elif is_mlu_available():
|
||||
args.num_processes = torch.mlu.device_count()
|
||||
elif is_musa_available():
|
||||
args.num_processes = torch.musa.device_count()
|
||||
elif is_npu_available():
|
||||
args.num_processes = torch.npu.device_count()
|
||||
else:
|
||||
@ -949,10 +1024,16 @@ def _validate_launch_command(args):
|
||||
warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`")
|
||||
if args.debug is None:
|
||||
args.debug = False
|
||||
if not args.multi_gpu and (
|
||||
(args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1)
|
||||
or (is_npu_available() and torch.npu.device_count() > 1)
|
||||
or (torch.cuda.device_count() > 1)
|
||||
if (
|
||||
not args.multi_gpu
|
||||
and args.num_processes > 1
|
||||
and (
|
||||
(args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1)
|
||||
or (is_mlu_available() and torch.mlu.device_count() > 1)
|
||||
or (is_musa_available() and torch.musa.device_count() > 1)
|
||||
or (is_npu_available() and torch.npu.device_count() > 1)
|
||||
or (torch.cuda.device_count() > 1)
|
||||
)
|
||||
):
|
||||
warned.append(
|
||||
"\t\tMore than one GPU was found, enabling multi-GPU training.\n"
|
||||
@ -977,8 +1058,8 @@ def _validate_launch_command(args):
|
||||
defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER
|
||||
)
|
||||
if is_aws_env_disabled and args.num_cpu_threads_per_process is None:
|
||||
args.num_cpu_threads_per_process = 1
|
||||
if args.use_cpu and args.num_processes >= 1:
|
||||
args.num_cpu_threads_per_process = get_int_from_env(["OMP_NUM_THREADS"], 1)
|
||||
if args.use_cpu and args.num_processes >= 1 and get_int_from_env(["OMP_NUM_THREADS"], 0) == 0:
|
||||
local_size = get_int_from_env(
|
||||
["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1
|
||||
)
|
||||
|
||||
@ -1 +1,14 @@
|
||||
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from .selection_menu import BulletMenu
|
||||
|
||||
@ -16,7 +16,6 @@
|
||||
Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet
|
||||
"""
|
||||
|
||||
|
||||
import os
|
||||
import string
|
||||
import sys
|
||||
|
||||
@ -15,6 +15,7 @@
|
||||
"""
|
||||
Main driver for the selection menu, based on https://github.com/bchao1/bullet
|
||||
"""
|
||||
|
||||
import builtins
|
||||
import sys
|
||||
|
||||
|
||||
src/accelerate/commands/merge.py (new file, 69 lines)
@ -0,0 +1,69 @@
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from accelerate.commands.utils import CustomArgumentParser
from accelerate.utils import merge_fsdp_weights

description = """Utility to merge the weights from multiple FSDP checkpoints into a single combined checkpoint. Should be used if
`SHARDED_STATE_DICT` was used for the model. Weights will be saved to `{output_path}`.

This is a CPU-bound process and requires enough RAM to load the entire model state dict."""

def merge_command(args):
merge_fsdp_weights(
args.checkpoint_directory, args.output_path, not args.unsafe_serialization, args.remove_checkpoint_dir
)

def merge_command_parser(subparsers=None):
if subparsers is not None:
parser = subparsers.add_parser("merge-weights", description=description)
else:
parser = CustomArgumentParser(description=description)

parser.add_argument("checkpoint_directory", type=str, help="A directory containing sharded weights saved by FSDP.")
parser.add_argument(
"output_path",
type=str,
help="The path to save the merged weights. Defaults to the current directory. ",
)
parser.add_argument(
"--unsafe_serialization",
action="store_false",
default=False,
help="Whether to save the merged weights as `.bin` rather than `.safetensors` (not recommended).",
)
parser.add_argument(
"--remove_checkpoint_dir",
action="store_true",
help="Whether to remove the checkpoint directory after merging.",
default=False,
)

if subparsers is not None:
parser.set_defaults(func=merge_command)
return parser

def main():
parser = merge_command_parser()
args = parser.parse_args()
merge_command(args)

if __name__ == "__main__":
main()
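The subcommand is a thin wrapper around `merge_fsdp_weights`, so the same merge can be scripted directly. The checkpoint and output paths below are placeholders:

from accelerate.utils import merge_fsdp_weights

# Positional arguments follow the call in merge_command above:
# checkpoint directory, output path, safe serialization, remove checkpoint dir.
merge_fsdp_weights("ckpt/pytorch_model_fsdp_0", "merged_model", True, False)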
@ -15,7 +15,6 @@
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import os
|
||||
|
||||
from accelerate.test_utils import execute_subprocess_async, path_in_accelerate_package
|
||||
|
||||
@ -51,7 +50,7 @@ def test_command(args):
|
||||
test_args = f"--config_file={args.config_file} {script_name}".split()
|
||||
|
||||
cmd = ["accelerate-launch"] + test_args
|
||||
result = execute_subprocess_async(cmd, env=os.environ.copy())
|
||||
result = execute_subprocess_async(cmd)
|
||||
if result.returncode == 0:
|
||||
print("Test is a success! You are ready for your distributed training!")
|
||||
|
||||
|
||||
src/accelerate/commands/utils.py (new file, 120 lines)
@ -0,0 +1,120 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
|
||||
|
||||
class _StoreAction(argparse.Action):
|
||||
"""
|
||||
Custom action that allows for `-` or `_` to be passed in for an argument.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
new_option_strings = []
|
||||
for option_string in self.option_strings:
|
||||
new_option_strings.append(option_string)
|
||||
if "_" in option_string[2:]:
|
||||
# Add `-` version to the option string
|
||||
new_option_strings.append(option_string.replace("_", "-"))
|
||||
self.option_strings = new_option_strings
|
||||
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
setattr(namespace, self.dest, values)
|
||||
|
||||
|
||||
class _StoreConstAction(_StoreAction):
|
||||
"""
|
||||
Same as `argparse._StoreConstAction` but uses the custom `_StoreAction`.
|
||||
"""
|
||||
|
||||
def __init__(self, option_strings, dest, const, default=None, required=False, help=None):
|
||||
super().__init__(
|
||||
option_strings=option_strings,
|
||||
dest=dest,
|
||||
nargs=0,
|
||||
const=const,
|
||||
default=default,
|
||||
required=required,
|
||||
help=help,
|
||||
)
|
||||
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
setattr(namespace, self.dest, self.const)
|
||||
|
||||
|
||||
class _StoreTrueAction(_StoreConstAction):
|
||||
"""
|
||||
Same as `argparse._StoreTrueAction` but uses the custom `_StoreConstAction`.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
option_strings,
|
||||
dest,
|
||||
default=None,
|
||||
required=False,
|
||||
help=None,
|
||||
):
|
||||
super().__init__(
|
||||
option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help
|
||||
)
|
||||
|
||||
|
||||
class CustomArgumentGroup(argparse._ArgumentGroup):
|
||||
"""
|
||||
Custom argument group that allows for the use of `-` or `_` in arguments passed and overrides the help for each
|
||||
when applicable.
|
||||
"""
|
||||
|
||||
def _add_action(self, action):
|
||||
args = vars(action)
|
||||
if isinstance(action, argparse._StoreTrueAction):
|
||||
action = _StoreTrueAction(
|
||||
args["option_strings"], args["dest"], args["default"], args["required"], args["help"]
|
||||
)
|
||||
elif isinstance(action, argparse._StoreConstAction):
|
||||
action = _StoreConstAction(
|
||||
args["option_strings"],
|
||||
args["dest"],
|
||||
args["const"],
|
||||
args["default"],
|
||||
args["required"],
|
||||
args["help"],
|
||||
)
|
||||
elif isinstance(action, argparse._StoreAction):
|
||||
action = _StoreAction(**args)
|
||||
action = super()._add_action(action)
|
||||
return action
|
||||
|
||||
|
||||
class CustomArgumentParser(argparse.ArgumentParser):
|
||||
"""
|
||||
Custom argument parser that allows for the use of `-` or `_` in arguments passed and overrides the help for each
|
||||
when applicable.
|
||||
"""
|
||||
|
||||
def add_argument(self, *args, **kwargs):
|
||||
if "action" in kwargs:
|
||||
# Translate action -> class
|
||||
if kwargs["action"] == "store_true":
|
||||
kwargs["action"] = _StoreTrueAction
|
||||
else:
|
||||
kwargs["action"] = _StoreAction
|
||||
super().add_argument(*args, **kwargs)
|
||||
|
||||
def add_argument_group(self, *args, **kwargs):
|
||||
group = CustomArgumentGroup(self, *args, **kwargs)
|
||||
self._action_groups.append(group)
|
||||
return group
|
||||
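The custom actions above register a dashed alias for every underscored option, which is what lets `accelerate launch` accept either spelling. A small sketch of the effect (the option name here is made up for illustration):

from accelerate.commands.utils import CustomArgumentParser

parser = CustomArgumentParser()
parser.add_argument("--num_workers", type=int, default=1)

# Both spellings resolve to the same destination.
print(parser.parse_args(["--num_workers", "2"]).num_workers)  # 2
print(parser.parse_args(["--num-workers", "4"]).num_workers)  # 4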
@ -409,7 +409,7 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
|
||||
A random number generator to keep synchronized across processes.
|
||||
skip_batches (`int`, *optional*, defaults to 0):
|
||||
The number of batches to skip at the beginning.
|
||||
kwargs:
|
||||
**kwargs (additional keyword arguments, *optional*):
|
||||
All other keyword arguments to pass to the regular `DataLoader` initialization.
|
||||
|
||||
**Available attributes:**
|
||||
@ -429,6 +429,7 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
|
||||
synchronized_generator=None,
|
||||
skip_batches=0,
|
||||
_drop_last: bool = False,
|
||||
_non_blocking: bool = False,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(dataset, **kwargs)
|
||||
@ -438,6 +439,7 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
|
||||
self.skip_batches = skip_batches
|
||||
self.gradient_state = GradientState()
|
||||
self._drop_last = _drop_last
|
||||
self._non_blocking = _non_blocking
|
||||
self.iteration = 0
|
||||
|
||||
def __iter__(self):
|
||||
@ -458,7 +460,7 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
|
||||
try:
|
||||
# But we still move it to the device so it is done before `StopIteration` is reached
|
||||
if self.device is not None:
|
||||
current_batch = send_to_device(current_batch, self.device)
|
||||
current_batch = send_to_device(current_batch, self.device, non_blocking=self._non_blocking)
|
||||
next_batch = next(dataloader_iter)
|
||||
if batch_index >= self.skip_batches:
|
||||
yield current_batch
|
||||
@ -500,6 +502,18 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
|
||||
else:
|
||||
return len(self.dataset)
|
||||
|
||||
def get_sampler(self):
|
||||
return get_sampler(self)
|
||||
|
||||
def set_sampler(self, sampler):
|
||||
sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler)
|
||||
if sampler_is_batch_sampler:
|
||||
self.sampler.sampler = sampler
|
||||
else:
|
||||
self.batch_sampler.sampler = sampler
|
||||
if hasattr(self.batch_sampler, "batch_sampler"):
|
||||
self.batch_sampler.batch_sampler.sampler = sampler
|
||||
|
||||
|
||||
if is_torch_xla_available():
|
||||
import torch_xla.distributed.parallel_loader as xpl
|
||||
@ -571,7 +585,14 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, dataset, split_batches: bool = False, skip_batches=0, _drop_last: bool = False, slice_fn=None, **kwargs
|
||||
self,
|
||||
dataset,
|
||||
split_batches: bool = False,
|
||||
skip_batches=0,
|
||||
_drop_last: bool = False,
|
||||
_non_blocking: bool = False,
|
||||
slice_fn=None,
|
||||
**kwargs,
|
||||
):
|
||||
shuffle = False
|
||||
if is_torch_version(">=", "1.11.0"):
|
||||
@ -588,6 +609,7 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
|
||||
self.gradient_state = GradientState()
|
||||
self.state = AcceleratorState()
|
||||
self._drop_last = _drop_last
|
||||
self._non_blocking = _non_blocking
|
||||
self.skip_batches = skip_batches
|
||||
|
||||
self.slice_fn = slice_tensors if slice_fn is None else slice_fn
|
||||
@ -660,7 +682,7 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
|
||||
if self.state.process_index != 0:
|
||||
# Initialize tensors on other processes than process 0.
|
||||
batch = initialize_tensors(batch_info[0])
|
||||
batch = send_to_device(batch, self.state.device)
|
||||
batch = send_to_device(batch, self.state.device, non_blocking=self._non_blocking)
|
||||
# Broadcast the batch before splitting it.
|
||||
batch = broadcast(batch, from_process=0)
|
||||
|
||||
@ -741,6 +763,36 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
|
||||
def total_dataset_length(self):
|
||||
return len(self.dataset)
|
||||
|
||||
def get_sampler(self):
|
||||
return get_sampler(self)
|
||||
|
||||
def set_sampler(self, sampler):
|
||||
sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler)
|
||||
if sampler_is_batch_sampler:
|
||||
self.sampler.sampler = sampler
|
||||
else:
|
||||
self.batch_sampler.sampler = sampler
|
||||
if hasattr(self.batch_sampler, "batch_sampler"):
|
||||
self.batch_sampler.batch_sampler.sampler = sampler
|
||||
|
||||
|
||||
def get_sampler(dataloader):
|
||||
"""
|
||||
Get the sampler associated to the dataloader
|
||||
|
||||
Args:
|
||||
dataloader (`torch.utils.data.dataloader.DataLoader`):
|
||||
The data loader to split across several devices.
|
||||
Returns:
|
||||
`torch.utils.data.Sampler`: The sampler associated to the dataloader
|
||||
"""
|
||||
sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
|
||||
if sampler_is_batch_sampler:
|
||||
sampler = getattr(dataloader.sampler, "sampler", None)
|
||||
else:
|
||||
sampler = getattr(dataloader.batch_sampler, "sampler", None)
|
||||
return sampler
|
||||
|
||||
|
||||
def prepare_data_loader(
|
||||
dataloader: DataLoader,
|
||||
@ -754,6 +806,7 @@ def prepare_data_loader(
|
||||
even_batches: bool = True,
|
||||
slice_fn_for_dispatch: Optional[Callable] = None,
|
||||
use_seedable_sampler: bool = False,
|
||||
non_blocking: bool = False,
|
||||
) -> DataLoader:
|
||||
"""
|
||||
Wraps a PyTorch `DataLoader` to generate batches for one of the processes only.
|
||||
@ -812,6 +865,10 @@ def prepare_data_loader(
|
||||
reproducability. Comes at a cost of potentially different performances due to different shuffling
|
||||
algorithms but ensures results will be the *exact* same. Should be paired with `set_seed()` at every
|
||||
`self.set_epoch`
|
||||
non_blocking (`bool`, *optional*, defaults to `False`):
|
||||
If set to `True`, dataloader will utilize non-blocking host-to-device transfers. If the dataloader has
|
||||
`pin_memory` set to `True`, this will help to increase overlap between data transfer and computations.
|
||||
|
||||
|
||||
Returns:
|
||||
`torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches
|
||||
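A sketch of opting into the new non-blocking transfers through `prepare_data_loader`. The toy dataset and the CUDA device are assumptions, the `device`/`put_on_device` parameters come from the existing signature (not shown in this hunk), and `pin_memory=True` is what lets the copies actually overlap with compute:

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate.data_loader import prepare_data_loader

dataloader = DataLoader(TensorDataset(torch.randn(64, 3)), batch_size=8, pin_memory=True)
prepared = prepare_data_loader(
    dataloader,
    device=torch.device("cuda:0"),
    put_on_device=True,  # move batches to the device so non_blocking has an effect
    non_blocking=True,
)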
@ -863,13 +920,10 @@ def prepare_data_loader(
|
||||
new_dataset = dataloader.dataset
|
||||
# Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it
|
||||
new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None
|
||||
sampler_is_batch_sampler = False
|
||||
synchronized_generator = None
|
||||
sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
|
||||
if sampler_is_batch_sampler:
|
||||
sampler = getattr(dataloader.sampler, "sampler", None)
|
||||
else:
|
||||
sampler = getattr(dataloader.batch_sampler, "sampler", None)
|
||||
synchronized_generator = None
|
||||
|
||||
sampler = get_sampler(dataloader)
|
||||
if isinstance(sampler, RandomSampler) and use_seedable_sampler:
|
||||
# When iterating through the dataloader during distributed processes
|
||||
# we want to ensure that on each process we are iterating through the same
|
||||
@ -882,6 +936,11 @@ def prepare_data_loader(
|
||||
generator=getattr(sampler, "generator", torch.Generator()),
|
||||
)
|
||||
|
||||
if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA:
|
||||
# isinstance(dataloader.sampler, RandomSampler) indicates the original dataloader has `shuffle` enabled.
|
||||
generator = torch.Generator().manual_seed(42)
|
||||
dataloader.generator = generator
|
||||
dataloader.sampler.generator = generator
|
||||
# No change if no multiprocess
|
||||
if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:
|
||||
if isinstance(new_dataset, IterableDataset):
|
||||
@ -896,6 +955,10 @@ def prepare_data_loader(
|
||||
split_batches=split_batches,
|
||||
)
|
||||
else:
|
||||
if not use_seedable_sampler and hasattr(sampler, "generator"):
|
||||
if sampler.generator is None:
|
||||
sampler.generator = torch.Generator()
|
||||
synchronized_generator = sampler.generator
|
||||
batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
|
||||
new_batch_sampler = BatchSamplerShard(
|
||||
batch_sampler,
|
||||
@ -936,6 +999,7 @@ def prepare_data_loader(
|
||||
split_batches=split_batches,
|
||||
batch_sampler=new_batch_sampler,
|
||||
_drop_last=dataloader.drop_last,
|
||||
_non_blocking=non_blocking,
|
||||
slice_fn=slice_fn_for_dispatch,
|
||||
**kwargs,
|
||||
)
|
||||
@ -947,6 +1011,7 @@ def prepare_data_loader(
|
||||
batch_size=dataloader.batch_size,
|
||||
rng_types=rng_types,
|
||||
_drop_last=dataloader.drop_last,
|
||||
_non_blocking=non_blocking,
|
||||
synchronized_generator=synchronized_generator,
|
||||
**kwargs,
|
||||
)
|
||||
@ -958,14 +1023,12 @@ def prepare_data_loader(
|
||||
rng_types=rng_types,
|
||||
synchronized_generator=synchronized_generator,
|
||||
_drop_last=dataloader.drop_last,
|
||||
_non_blocking=non_blocking,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler:
|
||||
if sampler_is_batch_sampler:
|
||||
dataloader.sampler.sampler = sampler
|
||||
else:
|
||||
dataloader.batch_sampler.sampler = sampler
|
||||
dataloader.set_sampler(sampler)
|
||||
if state.distributed_type == DistributedType.XLA:
|
||||
return MpDeviceLoaderWrapper(dataloader, device)
|
||||
return dataloader
|
||||
|
||||
@ -30,6 +30,9 @@ from .utils.modeling import get_non_persistent_buffers
from .utils.other import recursive_getattr

_accelerate_added_attributes = ["to", "cuda", "npu", "xpu", "mlu", "musa"]

class ModelHook:
"""
A hook that contains callbacks to be executed just before and after the forward method of a model. The difference

@ -202,6 +205,10 @@ def remove_hook_from_module(module: nn.Module, recurse=False):
module.forward = module._old_forward
delattr(module, "_old_forward")

# Remove accelerate added warning hooks from dispatch_model
for attr in _accelerate_added_attributes:
module.__dict__.pop(attr, None)

if recurse:
for child in module.children():
remove_hook_from_module(child, recurse)
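A sketch of the cleanup this enables: after removal, both the hook and any accelerate-added device shims are gone from the module's `__dict__` (the tiny module is just for illustration):

import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

module = add_hook_to_module(nn.Linear(4, 4), ModelHook())
module = remove_hook_from_module(module)
assert "_hf_hook" not in module.__dict__ and "to" not in module.__dict__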
@ -1,3 +1,16 @@
|
||||
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import math
|
||||
from types import MethodType
|
||||
from typing import Any, Dict, List, Optional, Tuple, Union
|
||||
@ -15,11 +28,6 @@ from .utils import (
|
||||
)
|
||||
|
||||
|
||||
if is_pippy_available():
|
||||
from pippy.IR import Pipe, PipeSplitWrapper, annotate_split_points
|
||||
from pippy.PipelineStage import PipelineStage
|
||||
|
||||
|
||||
def generate_device_map(model, num_processes: int = 1, no_split_module_classes=None, max_memory: dict = None):
|
||||
"""
|
||||
Calculates the device map for `model` with an offset for PiPPy
|
||||
@ -70,6 +78,10 @@ def build_pipeline(model, split_points, args, kwargs, num_chunks):
|
||||
Users can pass in custom `num_chunks` as an optional hyper-parameter. By default will use
|
||||
`AcceleratorState.num_processes`
|
||||
"""
|
||||
# Note: We import here to reduce import time from general modules, and isolate outside dependencies
|
||||
from pippy.IR import Pipe, PipeSplitWrapper, annotate_split_points
|
||||
from pippy.PipelineStage import PipelineStage
|
||||
|
||||
# We need to annotate the split points in the model for PiPPy
|
||||
state = PartialState()
|
||||
annotate_split_points(model, {split_point: PipeSplitWrapper.SplitPoint.BEGINNING for split_point in split_points})
|
||||
|
||||
@ -24,9 +24,12 @@ from .utils import (
|
||||
PrepareForLaunch,
|
||||
are_libraries_initialized,
|
||||
check_cuda_p2p_ib_support,
|
||||
get_gpu_info,
|
||||
is_mps_available,
|
||||
is_torch_version,
|
||||
patch_environment,
|
||||
)
|
||||
from .utils.constants import ELASTIC_LOG_LINE_PREFIX_TEMPLATE_PYTORCH_VERSION
|
||||
|
||||
|
||||
def test_launch():
|
||||
@ -43,6 +46,13 @@ def notebook_launcher(
|
||||
master_addr="127.0.0.1",
|
||||
node_rank=0,
|
||||
num_nodes=1,
|
||||
rdzv_backend="static",
|
||||
rdzv_endpoint="",
|
||||
rdzv_conf=None,
|
||||
rdzv_id="none",
|
||||
max_restarts=0,
|
||||
monitor_interval=0.1,
|
||||
log_line_prefix_template=None,
|
||||
):
|
||||
"""
|
||||
Launches a training function, using several processes or multiple nodes if it's possible in the current environment
|
||||
@ -77,6 +87,20 @@ def notebook_launcher(
|
||||
The rank of the current node.
|
||||
num_nodes (`int`, *optional*, defaults to 1):
|
||||
The number of nodes to use for training.
|
||||
rdzv_backend (`str`, *optional*, defaults to `"static"`):
|
||||
The rendezvous method to use, such as 'static' (the default) or 'c10d'
|
||||
rdzv_endpoint (`str`, *optional*, defaults to `""`):
|
||||
The endpoint of the rdzv sync. storage.
|
||||
rdzv_conf (`Dict`, *optional*, defaults to `None`):
|
||||
Additional rendezvous configuration.
|
||||
rdzv_id (`str`, *optional*, defaults to `"none"`):
|
||||
The unique run id of the job.
|
||||
max_restarts (`int`, *optional*, defaults to 0):
|
||||
The maximum amount of restarts that elastic agent will conduct on workers before failure.
|
||||
monitor_interval (`float`, *optional*, defaults to 0.1):
|
||||
The interval in seconds that is used by the elastic_agent as a period of monitoring workers.
|
||||
log_line_prefix_template (`str`, *optional*, defaults to `None`):
|
||||
The prefix template for elastic launch logging. Available from PyTorch 2.2.0.
|
||||
|
||||
Example:
|
||||
|
||||
@ -124,7 +148,7 @@ def notebook_launcher(
|
||||
launcher = PrepareForLaunch(function, distributed_type="TPU")
|
||||
print(f"Launching a training on {num_processes} TPU cores.")
|
||||
xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
|
||||
elif in_colab:
|
||||
elif in_colab and get_gpu_info()[1] < 2:
|
||||
# No need for a distributed launch otherwise as it's either CPU or one GPU.
|
||||
if torch.cuda.is_available():
|
||||
print("Launching training on one GPU.")
|
||||
@ -140,6 +164,7 @@ def notebook_launcher(
|
||||
raise ValueError("The node_rank must be less than the number of nodes.")
|
||||
if num_processes > 1:
|
||||
# Multi-GPU launch
|
||||
from torch.distributed.launcher.api import LaunchConfig, elastic_launch
|
||||
from torch.multiprocessing import start_processes
|
||||
from torch.multiprocessing.spawn import ProcessRaisedException
|
||||
|
||||
@ -197,7 +222,27 @@ def notebook_launcher(
launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
print(f"Launching training on {num_processes} GPUs.")
try:
start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
if rdzv_conf is None:
rdzv_conf = {}
if rdzv_backend == "static":
rdzv_conf["rank"] = node_rank
if not rdzv_endpoint:
rdzv_endpoint = f"{master_addr}:{use_port}"
launch_config_kwargs = dict(
min_nodes=num_nodes,
max_nodes=num_nodes,
nproc_per_node=num_processes,
run_id=rdzv_id,
rdzv_endpoint=rdzv_endpoint,
rdzv_backend=rdzv_backend,
rdzv_configs=rdzv_conf,
max_restarts=max_restarts,
monitor_interval=monitor_interval,
start_method="fork",
)
if is_torch_version(">=", ELASTIC_LOG_LINE_PREFIX_TEMPLATE_PYTORCH_VERSION):
launch_config_kwargs["log_line_prefix_template"] = log_line_prefix_template
elastic_launch(config=LaunchConfig(**launch_config_kwargs), entrypoint=function)(*args)
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
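With the rendezvous parameters exposed, a multi-node launch from a notebook might look like the sketch below; the addresses, process counts and the training function are placeholders:

from accelerate import notebook_launcher

def training_loop():
    ...  # placeholder training function

notebook_launcher(
    training_loop,
    num_processes=8,      # processes per node
    num_nodes=2,
    node_rank=0,          # set to 1 on the second node
    master_addr="10.0.0.1",
    rdzv_backend="static",
    max_restarts=0,
    monitor_interval=0.1,
)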
@ -69,6 +69,8 @@ class LocalSGD:
DistributedType.NO,
DistributedType.MULTI_CPU,
DistributedType.MULTI_GPU,
DistributedType.MULTI_MLU,
DistributedType.MULTI_MUSA,
DistributedType.MULTI_NPU,
]:
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
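For context, the guard above sits in `LocalSGD.__init__`; typical usage wraps the training loop as in this sketch (toy model and data, single process is fine since `DistributedType.NO` is allowed):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator()
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = DataLoader(TensorDataset(torch.randn(32, 4), torch.randn(32, 1)), batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=4, enabled=True) as local_sgd:
    for x, y in dataloader:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        local_sgd.step()  # averages parameters every `local_sgd_steps` optimizer steps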
Some files were not shown because too many files have changed in this diff.