Mirror of https://github.com/huggingface/accelerate.git (synced 2025-11-13 07:45:32 +08:00)

Compare commits: check-docs...v0.34.0 (448 commits)
| SHA1 | Author | Date | |
|---|---|---|---|
| 159c0dd02a | |||
| 8931e5e48c | |||
| a84859242d | |||
| 758d6243a7 | |||
| b07ad2adf2 | |||
| 1d09a20fc1 | |||
| 3fcc9461c4 | |||
| 939ce400cb | |||
| c2120927b0 | |||
| 654e1d9984 | |||
| 8c3aded21a | |||
| 2789933938 | |||
| 726140cad2 | |||
| 2d4f1dda7e | |||
| c0cf860dc6 | |||
| ad3f574a3b | |||
| 1a6af0bd6d | |||
| 52fae0960c | |||
| 7ffe7662ca | |||
| 5536a3a893 | |||
| 7ec8eab955 | |||
| 589fddd317 | |||
| 99c69aaf73 | |||
| 00785cd9fc | |||
| a452327e8e | |||
| 851cf34351 | |||
| cd5698bb32 | |||
| 90d5023901 | |||
| 3bde615607 | |||
| dc3b5ad82e | |||
| 12a5befdd6 | |||
| 79ca85c27d | |||
| 13d93c4f50 | |||
| d982751aec | |||
| 95edc68cb3 | |||
| 288accc0ec | |||
| 83b0610155 | |||
| 386f7d2825 | |||
| 308a8e9689 | |||
| f35cbd1f02 | |||
| a14260c9da | |||
| 32f368ec3f | |||
| 415eddf1be | |||
| 230857691a | |||
| a5a3e57125 | |||
| 0af1d8b8de | |||
| d16d7371a1 | |||
| 7a5c231b9e | |||
| 4f02bb764a | |||
| 709fd1e42b | |||
| f4f1260a0e | |||
| c6da9f8693 | |||
| 3ebbe573ad | |||
| 24bf5ec546 | |||
| e1247de01e | |||
| 12a007d559 | |||
| 5bdcd7e169 | |||
| 2471eacdd6 | |||
| 167cb5eb20 | |||
| 947f64ee62 | |||
| 8330b375d4 | |||
| 92404fbf5f | |||
| 3a02754915 | |||
| fec1170e35 | |||
| eac206f063 | |||
| 6882ff2bea | |||
| 57a4c7465e | |||
| 404510a5ec | |||
| 3086e26db9 | |||
| 5d5d07abfc | |||
| 5a0b7dc597 | |||
| c799c198e9 | |||
| 1f7a79b428 | |||
| 4cc3530b64 | |||
| 5d4a3beb01 | |||
| 0284f9a9f6 | |||
| 573d22d48f | |||
| 13ca7dccb6 | |||
| 3b5a00e048 | |||
| 3c4eaedd46 | |||
| c0faec766c | |||
| 91a2599f93 | |||
| 5f9235a731 | |||
| 7a36a75c7c | |||
| f62854a281 | |||
| a9869ea0dc | |||
| 6d59614603 | |||
| 2d74c0c077 | |||
| 40007b4e97 | |||
| 7141881b1f | |||
| f0049b2cfb | |||
| 83bad87559 | |||
| 24d8b63fc3 | |||
| 4a83ee5382 | |||
| 05d240af95 | |||
| bad2ce42ed | |||
| 30cb7ece76 | |||
| b7fa2fa956 | |||
| d5d378d64e | |||
| 065e74d11a | |||
| 86b6deaea1 | |||
| b24a0ef5db | |||
| e061edc6e7 | |||
| c3f422699a | |||
| 0553483638 | |||
| 415789d0e4 | |||
| ae472bac48 | |||
| 4f2c2ba45c | |||
| e26065a265 | |||
| 1cb6fdcf7b | |||
| 4ba436eccc | |||
| 91e8a3ced4 | |||
| 4ad4d28c49 | |||
| befd87f043 | |||
| abce3604f0 | |||
| 27a607ea90 | |||
| aa21174de9 | |||
| 6cf1cc0a39 | |||
| bb465a9cf0 | |||
| 67308ca6ef | |||
| 63772f6ac2 | |||
| 8798cf06ab | |||
| 47bb2dd53e | |||
| 724824abbe | |||
| afc2c99e6a | |||
| 0fb95a2d3b | |||
| 7ac153f404 | |||
| 0f1b91bb74 | |||
| d1eb44c856 | |||
| 11a363287a | |||
| 5cfe409443 | |||
| 5b3a7f3892 | |||
| 060361fca3 | |||
| 6ac27e2383 | |||
| ba5f49219f | |||
| 2c767338f2 | |||
| 234a85506d | |||
| 232ebd159a | |||
| 4d3d4bc88f | |||
| 2b1e7bd462 | |||
| c7e5e41b8c | |||
| 9557598c45 | |||
| 156331aecd | |||
| cd7df4117d | |||
| 6af157ea93 | |||
| 83317b3081 | |||
| e831bcb3b1 | |||
| 092c3af0c4 | |||
| 3e944c5583 | |||
| f67737363c | |||
| f7daaaa305 | |||
| 3dc131cd8d | |||
| ef0f62c12a | |||
| baafaf4a6e | |||
| abc86c0e35 | |||
| 4450cb3132 | |||
| fd0dcd1c45 | |||
| f478201c28 | |||
| c7046845e7 | |||
| 701e24c539 | |||
| 37da848e6c | |||
| c470a1336a | |||
| 581a390e2f | |||
| 2fc48c7eee | |||
| 1024231133 | |||
| 5ca095a34f | |||
| b77c65398c | |||
| a91691463b | |||
| 5056d327f8 | |||
| c0a37015e3 | |||
| e9b9c7d022 | |||
| 6c09584f73 | |||
| b8c8583953 | |||
| df485ae1e3 | |||
| 6386f70103 | |||
| 6d92198ef4 | |||
| 16488be9a4 | |||
| 685bd3a439 | |||
| 2e69948c1a | |||
| 7531e8c13e | |||
| 8e439de744 | |||
| d96a5aa730 | |||
| d7bcd85d4d | |||
| d927b8f3a2 | |||
| f579d9550d | |||
| bbecad4e8e | |||
| b82999a84b | |||
| 11568e562c | |||
| d9a1b8f975 | |||
| b634388ef1 | |||
| 4d415f2129 | |||
| 829171a9a4 | |||
| 5a232de2fa | |||
| 5f8048cd04 | |||
| 4378b560e8 | |||
| 8644e23b71 | |||
| b2fc3a3b0e | |||
| 290446d446 | |||
| 85a75d4c3d | |||
| f94f0ff912 | |||
| 1b2e634970 | |||
| dd62fc90ce | |||
| 10b418495e | |||
| c2f193a25c | |||
| 1812152392 | |||
| b8b353b7a7 | |||
| f2778d6502 | |||
| 2ad42e77c3 | |||
| e8aaee5d9b | |||
| 910c1b6a8f | |||
| 92d3240bb5 | |||
| 02a8a9a3a7 | |||
| ee163b66fb | |||
| 354db5b5f7 | |||
| 92b1ad01f3 | |||
| 60bfdaa934 | |||
| 16eb6d76bf | |||
| c8acfa700b | |||
| e70e3c87de | |||
| bc8dfe3caf | |||
| e3d324240f | |||
| 10882eeddd | |||
| 145a98fc12 | |||
| 64ae9ea3fe | |||
| 8aa72b9748 | |||
| 97d115a266 | |||
| 63cfd9efdc | |||
| 6cf8221a09 | |||
| 7a2feecad4 | |||
| ee004674b9 | |||
| 65544d8fe9 | |||
| 5fce525f90 | |||
| ca37b0e471 | |||
| 82a1258ffc | |||
| 21b225e8d5 | |||
| 25ee6ab3b7 | |||
| 2d3e822d11 | |||
| 811dc1e464 | |||
| c59c6c9bff | |||
| 422bd23f3f | |||
| c0b16b684f | |||
| 78b15561a1 | |||
| 8f9673f509 | |||
| 9c071103f0 | |||
| 1127e670ca | |||
| fa83efc33e | |||
| 4aa71049c3 | |||
| c0b441f6be | |||
| 34fdddd7df | |||
| 3fb9a3a231 | |||
| 065d88729b | |||
| 67e698cf4d | |||
| 46ac6c9bba | |||
| 9b24f56e42 | |||
| f20445d4ac | |||
| 97d2168e59 | |||
| 79016eb163 | |||
| 164193fa7e | |||
| 482a9f9fa4 | |||
| d7de8d1794 | |||
| b443be70fb | |||
| 613ad7089a | |||
| 13e79ccfab | |||
| aba3b8c72f | |||
| 70cdf5fe52 | |||
| b38590a28a | |||
| 5318bc7733 | |||
| ef68b4655c | |||
| ecebfa19c9 | |||
| 5a39359fb2 | |||
| b3d2111708 | |||
| f75c6245ba | |||
| 9c1d5bac15 | |||
| b0b867da85 | |||
| 433d693b70 | |||
| c3aec59b12 | |||
| 9467a62744 | |||
| 86228e321d | |||
| 06b138d845 | |||
| 0867c09318 | |||
| 0e1ee4b92d | |||
| d8a64cb79d | |||
| b703efdcc3 | |||
| 68f54720dc | |||
| 46f1391b79 | |||
| cd7ff5e137 | |||
| f4b411f84b | |||
| 7ba64e632c | |||
| 8b770a7dab | |||
| 3d8b998fbb | |||
| 03365a3d17 | |||
| 7aafa25673 | |||
| f88661b5d9 | |||
| 581fabba48 | |||
| e909eb34e2 | |||
| 7644a02e6b | |||
| 162a82164e | |||
| 0d6a5fa8ee | |||
| 53845d2596 | |||
| 5ec00da2be | |||
| 649e65b542 | |||
| 14d7c3fca6 | |||
| c7d11d7e40 | |||
| ec4f01a099 | |||
| f5c01eeb63 | |||
| 20ff458d80 | |||
| 6719cb6db3 | |||
| 31fd2b1ad6 | |||
| fce61a99ec | |||
| 6ec92cf06b | |||
| 2a4037322f | |||
| f823404f69 | |||
| ef2fe912c5 | |||
| e3e9b87592 | |||
| 456afd92ce | |||
| 0d2280dadc | |||
| 55d4a496dd | |||
| 2a8829d9a5 | |||
| 3969731ce8 | |||
| 411aa58a77 | |||
| 4420ec641d | |||
| 2241725ad6 | |||
| 5cac878984 | |||
| 5d31423308 | |||
| 2721387b98 | |||
| 2cfa88bdf1 | |||
| 102caf4fab | |||
| 07df5d268f | |||
| 68b3dbf666 | |||
| 403c0714d1 | |||
| 848ed800fa | |||
| ad957ce556 | |||
| 3db088f5d6 | |||
| d1abd59114 | |||
| ceb7c699bc | |||
| c5baa055c0 | |||
| 349be97ccb | |||
| b60061dfd2 | |||
| b565a6c58a | |||
| a03c361ffb | |||
| b0528392c8 | |||
| 060678415a | |||
| 6b2d968897 | |||
| ad3a5bc920 | |||
| eafcea07f6 | |||
| eff30e2130 | |||
| 694f2e2c12 | |||
| 9964f90fd7 | |||
| f86876d56d | |||
| 0a37e2042e | |||
| 54d670be41 | |||
| 339854a9a4 | |||
| 5296419df4 | |||
| 6a4857fec2 | |||
| 9569150174 | |||
| 8f871f41f1 | |||
| 47e6c36155 | |||
| 47c144570c | |||
| 6a54d0781b | |||
| 0482548363 | |||
| 0e48b2358d | |||
| 3499cf25aa | |||
| 68d63ee15f | |||
| 151637920d | |||
| 0ba3e9bb50 | |||
| b04d36c75f | |||
| 5fc1b230d3 | |||
| 244122c736 | |||
| d25efa71ce | |||
| 1aeb1e8997 | |||
| 0e51680994 | |||
| 7d430cf8de | |||
| b8ca803f98 | |||
| 1243191ecb | |||
| 2b25b8b3c5 | |||
| ca300c0a04 | |||
| 427ef8bd00 | |||
| 35b0206353 | |||
| fbe00d7897 | |||
| 62af737219 | |||
| cd51581248 | |||
| a5a7c039a0 | |||
| cf745c936d | |||
| 99877f56d6 | |||
| 0f2686c8d3 | |||
| a912b2ee09 | |||
| e9fd72a613 | |||
| 8dedb140ef | |||
| b55855a3d4 | |||
| 2b53a9089c | |||
| 39d255b3d0 | |||
| 99dff1a167 | |||
| a0a16e118a | |||
| 15458c5737 | |||
| fc0a43c3c1 | |||
| 8256a9c2d4 | |||
| 6727ac4394 | |||
| 9674b40580 | |||
| 0b0d9215a9 | |||
| e638b1e21a | |||
| 76de60dbdc | |||
| 217e1a248c | |||
| 5e0eb0d750 | |||
| 183c9dd3ce | |||
| 4f100318f4 | |||
| fa6f43033c | |||
| 820fc4ca7a | |||
| bd72a5f1a8 | |||
| 55088a2cf5 | |||
| c2d8e245e9 | |||
| d8e1285409 | |||
| 5b3f3b99d6 | |||
| 2935057606 | |||
| bb6759d634 | |||
| 55747318a0 | |||
| 217faafe08 | |||
| 5440387529 | |||
| e1fab05ce7 | |||
| c3ec7ff5a9 | |||
| d8535921ad | |||
| eb8c535c17 | |||
| b7686ccb44 | |||
| f3229872bc | |||
| 7843286f2e | |||
| 11e2e99cfc | |||
| 07e745f1c4 | |||
| c7c99a30ea | |||
| 8f45a2eae8 | |||
| 9fd64b7ea9 | |||
| 5be16ad90b | |||
| dab62832de | |||
| caa9f9bcbb | |||
| 943efedb88 | |||
| 50acb0c2ec | |||
| e6d96e5f70 | |||
| 1dfb6e9304 | |||
| 4bef6bc511 | |||
| 73640d0463 | |||
| 7a1159143e | |||
| cbb0b82fa2 | |||
| 5ae6111180 | |||
| 230a5f541b | |||
| 956114ac92 | |||
| 76ee7f211d | |||
| 420743af22 | |||
| 206ab491ed | |||
| 936d2f4f5c | |||
| da98d601b5 |
.github/PULL_REQUEST_TEMPLATE.md (8 changes)

@@ -37,11 +37,11 @@ members/contributors who may be interested in your PR.
If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.

- Big modeling: @SunMarc
- Fully-Sharded Data Parallism: @pacman100
- DeepSpeed: @pacman100
- Fully-Sharded Data Parallism: @muellerzr
- DeepSpeed: @muellerzr
- Command Line Interface: @muellerzr
- Documentation: @muellerzr
- Core parts of the library: @muellerzr @BenjaminBossan
- Maintained examples: @muellerzr or @pacman100
- Core parts of the library: @muellerzr @BenjaminBossan @SunMarc
- Maintained examples: @muellerzr or @SunMarc

-->
@@ -15,13 +15,14 @@ jobs:
outputs:
version: ${{ steps.step1.outputs.version }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v3.1.0
- id: step1
run: echo "version=$(python setup.py --version)" >> $GITHUB_OUTPUT

version-cpu:
name: "Latest Accelerate CPU [version]"
runs-on: [self-hosted, docker-gpu, multi-gpu]
runs-on:
group: aws-general-8-plus
needs: get-version
steps:
- name: Set up Docker Buildx
@@ -37,11 +38,12 @@ jobs:
with:
file: docker/accelerate-cpu/Dockerfile
push: true
tags: huggingface/accelerate-cpu:${{needs.get-version.outputs.version}}
tags: huggingface/accelerate:cpu-release-${{ needs.get-version.outputs.version }}

version-cuda:
name: "Latest Accelerate GPU [version]"
runs-on: [self-hosted, docker-gpu, multi-gpu]
runs-on:
group: aws-g6-4xlarge-plus
needs: get-version
steps:
- name: Set up Docker Buildx
@@ -57,4 +59,46 @@ jobs:
with:
file: docker/accelerate-gpu/Dockerfile
push: true
tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}
tags: huggingface/accelerate:gpu-release-${{needs.get-version.outputs.version}}

version-cuda-deepspeed:
name: "Latest Accelerate GPU DeepSpeed [version]"
runs-on:
group: aws-g6-4xlarge-plus
needs: get-version
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}

- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-gpu-deepspeed/Dockerfile
push: true
tags: huggingface/accelerate:gpu-deepspeed-release-${{needs.get-version.outputs.version}}

version-cuda-fp8-transformerengine:
name: "Latest Accelerate GPU FP8 TransformerEngine [version]"
runs-on:
group: aws-g6-4xlarge-plus
needs: get-version
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}

- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-gpu/Dockerfile
push: true
tags: huggingface/accelerate:gpu-fp8-transformerengine-release-${{needs.get-version.outputs.version}}
.github/workflows/build_and_run_tests.yml (4 changes)

@@ -22,7 +22,7 @@ jobs:

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v22.2
uses: tj-actions/changed-files@v41

- name: Was setup changed
id: was_changed
@@ -45,6 +45,6 @@ jobs:
uses: ./.github/workflows/run_merge_tests.yml

run-integration-tests:
needs: run-merge-tests
needs: build-docker-containers
if: always()
uses: ./.github/workflows/self_hosted_integration_tests.yml
.github/workflows/build_docker_images.yml (86 changes)

@@ -11,19 +11,10 @@ concurrency:
cancel-in-progress: false

jobs:
clean-storage:
name: "Clean docker image storage"
runs-on: [self-hosted, docker-gpu, multi-gpu]
steps:
- name: Clean storage
run: |
docker image prune --all -f --filter "until=48h"
docker system prune --all -f --filter "until=48h"

latest-cpu:
name: "Latest Accelerate CPU [dev]"
runs-on: [self-hosted, docker-gpu, multi-gpu]
needs: clean-storage
runs-on:
group: aws-general-8-plus
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
@@ -32,17 +23,23 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Get current date
id: date
run: |
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
- name: Build and Push CPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-cpu/Dockerfile
push: true
tags: huggingface/accelerate-cpu
tags: |
huggingface/accelerate:cpu-nightly
huggingface/accelerate:cpu-nightly-${{ env.date }}

latest-cuda:
name: "Latest Accelerate GPU [dev]"
runs-on: [self-hosted, docker-gpu, multi-gpu]
needs: clean-storage
runs-on:
group: aws-g6-4xlarge-plus
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
@@ -51,10 +48,63 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}

- name: Get current date
id: date
run: |
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-gpu/Dockerfile
push: true
tags: huggingface/accelerate-gpu
tags: |
huggingface/accelerate:gpu-nightly
huggingface/accelerate:gpu-nightly-${{ env.date }}

latest-cuda-deepspeed:
name: "Latest Accelerate GPU DeepSpeed [dev]"
runs-on:
group: aws-g6-4xlarge-plus
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Get current date
id: date
run: |
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: docker/accelerate-gpu-deepspeed/Dockerfile
push: true
tags: |
huggingface/accelerate:gpu-deepspeed-nightly
huggingface/accelerate:gpu-deepspeed-nightly-${{ env.date }}

latest-cuda-fp8-transformerengine:
name: "Latest Accelerate GPU FP8 TransformerEngine [dev]"
runs-on:
group: aws-g6-4xlarge-plus
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Get current date
id: date
run: |
echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
- name: Build and Push GPU
uses: docker/build-push-action@v4
with:
file: benchmarks/fp8/Dockerfile
push: true
tags: huggingface/accelerate:gpu-fp8-transformerengine-nightly-${{ env.date }}
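With these two workflows, every image now lives under the single `huggingface/accelerate` Docker Hub repository, with one tag per flavor. A minimal sketch of pulling the published images, assuming the tags are pushed exactly as configured above (the date and version values in the last two commands are illustrative, not taken from the page):

```bash
# Nightly images built by the dev workflow above
docker pull huggingface/accelerate:cpu-nightly
docker pull huggingface/accelerate:gpu-nightly
docker pull huggingface/accelerate:gpu-deepspeed-nightly

# Date-stamped nightlies follow the <flavor>-nightly-YYYY-MM-DD pattern (date below is illustrative)
docker pull huggingface/accelerate:gpu-nightly-2024-09-01

# Release builds from the version workflow use <flavor>-release-<version> (version below is an assumption)
docker pull huggingface/accelerate:gpu-release-0.34.0
```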
.github/workflows/build_documentation.yml (2 changes)

@@ -13,6 +13,6 @@ jobs:
with:
commit_sha: ${{ github.sha }}
package: accelerate
custom_container: huggingface/transformers-doc-builder
secrets:
token: ${{ secrets.HUGGINGFACE_PUSH }}
hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
.github/workflows/build_pr_documentation.yml (1 change)

@@ -14,3 +14,4 @@ jobs:
commit_sha: ${{ github.event.pull_request.head.sha }}
pr_number: ${{ github.event.number }}
package: accelerate
custom_container: huggingface/transformers-doc-builder
.github/workflows/delete_doc_comment.yml (14 changes, file deleted)

@@ -1,14 +0,0 @@
name: Delete doc comment

on:
workflow_run:
workflows: ["Delete doc comment trigger"]
types:
- completed

jobs:
delete:
uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main
secrets:
comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
.github/workflows/delete_doc_comment_trigger.yml (12 changes, file deleted)

@@ -1,12 +0,0 @@
name: Delete doc comment trigger

on:
pull_request:
types: [ closed ]

jobs:
delete:
uses: huggingface/doc-builder/.github/workflows/delete_doc_comment_trigger.yml@main
with:
pr_number: ${{ github.event.number }}
.github/workflows/integration_tests.yml (10 changes)

@@ -25,17 +25,14 @@ jobs:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
transformers-version: [
pypi,
github
]
steps:
- uses: actions/checkout@v3.1.0
- name: Set up python 3.8
uses: actions/setup-python@v3
with:
python-version: 3.8
cache: 'pip'
cache-dependency-path: 'setup.py'

- name: Install Accelerate from source
run: |
@@ -47,9 +44,6 @@ jobs:
cd ..
git clone https://github.com/huggingface/transformers
cd transformers
if [[ ${{ matrix.transformers-version }} = pypi ]]; then
git checkout $(git describe --tags `git rev-list --tags --max-count=1`)
fi
pip install .[torch,testing]

- name: Show installed libraries
.github/workflows/nightly.yml (162 changes)

@@ -12,67 +12,138 @@ env:

jobs:
run_all_tests_single_gpu:
runs-on: [self-hosted, docker-gpu, multi-gpu]
run_core_tests_single_gpu:
runs-on:
group: aws-g6-4xlarge-plus
env:
CUDA_VISIBLE_DEVICES: "0"
TEST_TYPE: "single_gpu"
container:
image: huggingface/accelerate-gpu:latest
image: huggingface/accelerate:gpu-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
working-directory: accelerate/
shell: bash
steps:
- name: Update clone & pip install
run: |
source activate accelerate
git config --global --add safe.directory '*'
git fetch && git checkout ${{ github.sha }}
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e . --no-deps
pip install pytest-reportlog tabulate

- name: Show installed libraries
run: |
source activate accelerate;
pip freeze

- name: Run test on GPUs
working-directory: accelerate
run: |
source activate accelerate
make test

- name: Run examples on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
make test_examples

- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install slack_sdk tabulate
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

run_all_tests_multi_gpu:
runs-on: [self-hosted, docker-gpu, multi-gpu]
run_deepspeed_tests_single_gpu:
runs-on:
group: aws-g6-4xlarge-plus
env:
CUDA_VISIBLE_DEVICES: "0"
TEST_TYPE: "single_gpu_deepspeed"
container:
image: huggingface/accelerate:gpu-deepspeed-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
shell: bash
steps:
- name: Update clone & pip install
run: |
source activate accelerate
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e . --no-deps
pip install pytest-reportlog tabulate

- name: Show installed libraries
run: |
source activate accelerate;
pip freeze

- name: Run test on GPUs
working-directory: accelerate
run: |
source activate accelerate
make test_deepspeed

- name: Run Integration tests on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
make test_integrations

- name: Run examples on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
make test_examples

- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install slack_sdk tabulate
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

run_core_tests_multi_gpu:
runs-on:
group: aws-g6-12xlarge-plus
env:
CUDA_VISIBLE_DEVICES: "0,1"
TEST_TYPE: "multi_gpu"
container:
image: huggingface/accelerate-gpu:latest
image: huggingface/accelerate:gpu-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
working-directory: accelerate/
shell: bash
steps:
- name: Update clone
run: |
source activate accelerate
git config --global --add safe.directory '*'
git fetch && git checkout ${{ github.sha }}
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e . --no-deps
pip install pytest-reportlog tabulate

- name: Show installed libraries
run: |
source activate accelerate;
pip freeze

- name: Run core and big modeling tests on GPUs
working-directory: accelerate
run: |
source activate accelerate
make test_core
@@ -80,12 +151,14 @@ jobs:
make test_cli

- name: Run Integration tests on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
make test_integrations

- name: Run examples on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
@@ -93,13 +166,68 @@ jobs:
make test_examples

- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install slack_sdk tabulate
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

run_deepspeed_tests_multi_gpu:
runs-on:
group: aws-g6-12xlarge-plus
env:
CUDA_VISIBLE_DEVICES: "0,1"
TEST_TYPE: "multi_gpu_deepspeed"
container:
image: huggingface/accelerate:gpu-deepspeed-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
shell: bash
steps:
- name: Update clone
run: |
source activate accelerate
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e . --no-deps
pip install pytest-reportlog tabulate

- name: Show installed libraries
run: |
source activate accelerate;
pip freeze

- name: Run DeepSpeed tests
working-directory: accelerate
run: |
source activate accelerate
make test_deepspeed

- name: Run Integration tests on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
make test_integrations

- name: Run examples on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
make test_examples

- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install slack_sdk tabulate
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

run-integration-tests:
needs: [run_all_tests_single_gpu, run_all_tests_multi_gpu]
if: always()
uses: ./.github/workflows/self_hosted_integration_tests.yml
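The nightly matrix above can be approximated locally by reusing the same containers the jobs run in. A rough sketch, assuming a machine with NVIDIA GPUs and the `accelerate` conda environment that these images ship with (the checkout of `main` instead of a specific SHA is for illustration only):

```bash
# Start the same container the single-GPU nightly job uses
docker run --gpus all --shm-size 16gb -it huggingface/accelerate:gpu-nightly bash

# Inside the container, mirror the "Update clone & pip install" step
source activate accelerate
git clone https://github.com/huggingface/accelerate
cd accelerate && git checkout main          # CI checks out the triggering SHA instead
pip install -e . --no-deps
pip install pytest-reportlog tabulate

# Run the same make targets as the jobs above
make test               # core suite (single-GPU job)
# make test_deepspeed   # needs the huggingface/accelerate:gpu-deepspeed-nightly image instead
```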
.github/workflows/quality.yml (4 changes)

@@ -6,11 +6,13 @@ jobs:
quality:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3.1.0
- name: Set up Python 3.8
uses: actions/setup-python@v3
with:
python-version: 3.8
cache: 'pip'
cache-dependency-path: 'setup.py'
- name: Install Python dependencies
run: pip install -e .[quality]
- name: Run Quality check
.github/workflows/run_merge_tests.yml (161 changes)

@@ -9,81 +9,180 @@ env:
IS_GITHUB_CI: "1"

jobs:
run_all_tests_single_gpu:
runs-on: [self-hosted, docker-gpu, multi-gpu]
run_core_tests_single_gpu:
runs-on:
group: aws-g6-4xlarge-plus
env:
CUDA_VISIBLE_DEVICES: "0"
container:
image: huggingface/accelerate-gpu:latest
image: huggingface/accelerate:gpu-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
working-directory: accelerate/
shell: bash
steps:
- name: Update clone & pip install
- name: Install accelerate
run: |
source activate accelerate
git config --global --add safe.directory '*'
git fetch && git checkout ${{ github.sha }}
pip install -e .[testing,test_trackers] -U
pip install pytest-reportlog tabulate
source activate accelerate;
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e .[testing,test_trackers] -U;
pip install pytest-reportlog tabulate ;

- name: Run CLI tests
- name: Show installed libraries
run: |
source activate accelerate
source activate accelerate;
pip freeze

- name: Run CLI tests (use make cli)
working-directory: accelerate
run: |
source activate accelerate;
make test_cli

- name: Run test on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
source activate accelerate;
make test
- name: Run examples on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
source activate accelerate;
pip uninstall comet_ml -y;
make test_examples

- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install tabulate
pip install tabulate;
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

run_all_tests_multi_gpu:
runs-on: [self-hosted, docker-gpu, multi-gpu]
run_deepspeed_tests_single_gpu:
runs-on:
group: aws-g6-4xlarge-plus
env:
CUDA_VISIBLE_DEVICES: "0"
container:
image: huggingface/accelerate-gpu:latest
image: huggingface/accelerate:gpu-deepspeed-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
shell: bash
steps:
- name: Install accelerate
run: |
source activate accelerate;
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e .[testing,test_trackers] -U;
pip install pytest-reportlog tabulate ;

- name: Show installed libraries
run: |
source activate accelerate;
pip freeze

- name: Run test on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate;
make test_deepspeed

- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install tabulate;
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

run_core_tests_multi_gpu:
runs-on:
group: aws-g6-12xlarge-plus
env:
CUDA_VISIBLE_DEVICES: 0,1
container:
image: huggingface/accelerate:gpu-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
working-directory: accelerate/
shell: bash
steps:
- name: Update clone
run: |
source activate accelerate
git config --global --add safe.directory '*'
git fetch && git checkout ${{ github.sha }}
pip install -e .[testing,test_trackers] -U
source activate accelerate;
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e .[testing,test_trackers] -U;
pip install pytest-reportlog tabulate

- name: Run test on GPUs
- name: Show installed libraries
run: |
source activate accelerate
source activate accelerate;
pip freeze

- name: Run test on GPUs
working-directory: accelerate
run: |
source activate accelerate;
make test

- name: Run examples on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate
pip uninstall comet_ml -y
source activate accelerate;
pip uninstall comet_ml -y;
make test_examples

- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install tabulate
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
source activate accelerate;
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

run_deepspeed_tests_multi_gpu:
runs-on:
group: aws-g6-12xlarge-plus
container:
image: huggingface/accelerate:gpu-deepspeed-nightly
options: --gpus all --shm-size "16gb"
defaults:
run:
shell: bash
steps:
- name: Install accelerate
run: |
source activate accelerate;
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e .[testing,test_trackers] -U;
pip install pytest-reportlog tabulate ;

- name: Show installed libraries
run: |
source activate accelerate;
pip freeze

- name: Run test on GPUs
working-directory: accelerate
if: always()
run: |
source activate accelerate;
make test_deepspeed

- name: Generate Report
working-directory: accelerate
if: always()
run: |
pip install tabulate;
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
.github/workflows/self_hosted_integration_tests.yml (107 changes)

@@ -1,7 +1,7 @@
# CI for specifically ensuring integrations work fine (`transformers` mainly) on GPUs
# Useful tips:
# - `working-directory` should be set to the root of the repo, which is cloned on the actual CI runner.
# It follows the directory structure of `actions-runner/_work/{repo_name}/{repo_name}/{cloned_repo} on
# prem, but in Actions setting `working-directory` looks just in the `{repo_name}` level.
# - New integrations to test should have its own job, and follow a strategy method where we check both
# the pypi and github versions.
@@ -23,40 +23,36 @@ defaults:
jobs:
run-trainer-tests:
container:
image: huggingface/accelerate-gpu:latest
image: huggingface/accelerate:gpu-deepspeed-nightly
options: --gpus all --shm-size "16gb"
runs-on: [self-hosted, docker-gpu, multi-gpu]
runs-on:
group: aws-g6-12xlarge-plus
strategy:
fail-fast: false
matrix:
transformers-version: [
pypi,
github
]
cuda_visible_devices: [
"0",
"0,1"
]
steps:
- name: Update accelerate clone and pip install
working-directory: accelerate/
run:
source activate accelerate;
git config --global --add safe.directory '*';
git checkout main && git fetch && git checkout ${{ github.sha }};
pip install -e .;

- name: Update transformers clone & pip install
working-directory: transformers/
- name: Install transformers
run: |
source activate accelerate
git config --global --add safe.directory '*'
git checkout main && git pull
if [[ ${{ matrix.transformers-version }} = pypi ]]; then
git checkout $(git describe --tags `git rev-list --tags --max-count=1`)
fi
pip install .[torch,deepspeed-testing]

source activate accelerate;
git clone https://github.com/huggingface/transformers --depth 1;
cd transformers;
pip install .[torch,deepspeed-testing];
cd ..;

- name: Install accelerate
run: |
source activate accelerate;
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }} ;
pip install -e .[testing];
pip uninstall comet_ml wandb dvclive -y
cd ..;

- name: Show installed libraries
run: |
source activate accelerate;
@@ -81,36 +77,41 @@ jobs:
source activate accelerate;
pytest -sv tests/deepspeed

run-skorch-tests:
container:
image: huggingface/accelerate-gpu:latest
options: --gpus all --shm-size "16gb"
runs-on: [self-hosted, docker-gpu, multi-gpu]
strategy:
fail-fast: false
matrix:
skorch-version: [
pypi,
github
]
steps:
- name: Update accelerate clone and pip install
working-directory: accelerate/
run:
source activate accelerate;
git config --global --add safe.directory '*';
git checkout main && git fetch && git checkout ${{ github.sha }};
pip install -e .;

- name: Update skorch clone & pip install
working-directory: skorch/
- name: Run transformers examples tests
working-directory: transformers/
env:
CUDA_VISIBLE_DEVICES: ${{ matrix.cuda_visible_devices }}
WANDB_DISABLED: true
run: |
source activate accelerate
pip install -r examples/pytorch/_tests_requirements.txt
pytest -sv examples/pytorch/test_accelerate_examples.py examples/pytorch/test_pytorch_examples.py

run-skorch-tests:
container:
image: huggingface/accelerate:gpu-nightly
options: --gpus all --shm-size "16gb"
runs-on:
group: aws-g6-12xlarge-plus
strategy:
fail-fast: false
steps:
- name: Install accelerate
run:
source activate accelerate;
git clone https://github.com/huggingface/accelerate;
cd accelerate;
git checkout ${{ github.sha }};
pip install -e .[testing];
cd ..

- name: Install skorch
run: |
source activate accelerate
git clone https://github.com/skorch-dev/skorch;
cd skorch;
git config --global --add safe.directory '*'
git checkout master && git pull
if [[ ${{ matrix.skorch-version }} = pypi ]]; then
git checkout $(git describe --tags `git rev-list --tags --max-count=1`)
fi
pip install .[testing]
pip install flaky

@@ -123,4 +124,4 @@ jobs:
working-directory: skorch/
run: |
source activate accelerate;
pytest -sv -k TestAccelerate
.github/workflows/stale.yml (8 changes)

@@ -13,16 +13,18 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3.1.0

- name: Setup Python
uses: actions/setup-python@v1
uses: actions/setup-python@v3
with:
python-version: 3.8
cache: 'pip'
cache-dependency-path: 'setup.py'

- name: Install requirements
run: |
pip install PyGithub
- name: Close stale issues
run: |
python utils/stale.py
.github/workflows/test.yml (21 changes)

@@ -43,23 +43,20 @@ jobs:
uses: actions/setup-python@v3
with:
python-version: 3.8

- name: Activate python cache
uses: actions/cache@v3
with:
path: |
${{ env.pythonLocation }}
${{ env.HF_HOME }}
key: ${{ env.pythonLocation }}-${{ matrix.pytorch-version }}-${{ matrix.test-kind }}-${{ hashFiles('setup.py') }}
cache: 'pip'
cache-dependency-path: 'setup.py'

- name: Install the library
run: |
pip install --upgrade pip
if [[ ${{ matrix.test-kind }} = test_prod ]]; then pip install -e .[test_prod]; fi
if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi
if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi
if [[ ${{ matrix.test-kind }} = minimum ]]; then pip install torch==1.10.0; fi
pip install pytest-reportlog tabulate
if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torchvision==0.18.1 torch==2.3.1; fi
pip install pytest-reportlog tabulate setuptools

- name: Show installed libraries
run: |
pip freeze

- name: Run Tests
env:
@@ -70,4 +67,4 @@ jobs:
- name: Generate Report
if: always()
run: |
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
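The `minimum` matrix entry above now pins the oldest supported PyTorch to 2.3.1 (previously 1.10.0). Reproducing that environment locally is just the same install step the workflow runs, sketched here from the diff above:

```bash
pip install --upgrade pip
# "minimum" PyTorch pins used by the CI matrix above
pip install torch==2.3.1 torchvision==0.18.1
pip install -e .[testing,test_trackers]
pip install pytest-reportlog tabulate setuptools
```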
.github/workflows/test_imports.yml (55 changes, new file)

@@ -0,0 +1,55 @@
name: Run Import Tests

on:
pull_request:
paths:
- "src/**"
- "tests/**"
- ".github/**"
- "examples/**"
- "setup.py"
types: [opened, synchronize, reopened]

env:
HF_HOME: ~/hf_cache
TESTING_MOCKED_DATALOADERS: "1"
IS_GITHUB_CI: "1"

jobs:
run-tests:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
pytorch-version: [
latest,
minimum,
]
steps:
- uses: actions/checkout@v3.1.0
- name: Set up python 3.8
uses: actions/setup-python@v3
with:
python-version: 3.8
cache: 'pip'
cache-dependency-path: 'setup.py'

- name: Install the library
run: |
pip install -e .
pip install pytest-reportlog tabulate setuptools git+https://github.com/muellerzr/import-timer

- name: Show installed libraries
run: |
pip freeze

- name: Run Import Tests
env:
PYTORCH_VERSION: ${{ matrix.pytorch-version }}
run: |
pytest -sv tests/test_imports.py

- name: Generate Report
if: always()
run: |
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
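Running the same import-time check locally boils down to the two install lines plus the pytest invocation from the workflow above (the `import-timer` package URL is taken directly from it; the `PYTORCH_VERSION` value mirrors the CI matrix):

```bash
pip install -e .
pip install pytest-reportlog tabulate setuptools git+https://github.com/muellerzr/import-timer

# Use "latest" or "minimum", matching the matrix above
PYTORCH_VERSION=latest pytest -sv tests/test_imports.py
```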
.github/workflows/trufflehog.yml (15 changes, new file)

@@ -0,0 +1,15 @@
on:
push:

name: Secret Leaks

jobs:
trufflehog:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Secret Scanning
uses: trufflesecurity/trufflehog@main
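The workflow delegates the scan entirely to the `trufflesecurity/trufflehog` action. A roughly equivalent local check, assuming the standalone TruffleHog v3 CLI is installed (the subcommands below are part of that CLI, not of this workflow):

```bash
# Scan the full git history, analogous to fetch-depth: 0 in the checkout step
trufflehog git https://github.com/huggingface/accelerate.git

# Or scan a local checkout of the repository
trufflehog filesystem .
```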
.pre-commit-config.yaml (13 changes, new file)

@@ -0,0 +1,13 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.2.1
hooks:
- id: ruff
args:
- --fix
- id: ruff-format
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: check-merge-conflict
- id: check-yaml
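With this config in place, the hooks can be exercised locally before pushing; a minimal sketch (the install commands also appear in the CONTRIBUTING diff that follows):

```bash
pip install pre-commit
pre-commit install            # register the hooks as a git commit hook
pre-commit run --all-files    # run ruff, ruff-format, and the misc checks once over the whole repo
```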
@@ -123,12 +123,15 @@ Follow these steps to start contributing:
4. Set up a development environment by running the following command in a conda or a virtual environment you've created for working on this library:

```bash
$ pip install -e ".[quality]"
$ pip install -e ".[dev]"
```

This will install all testing and linting/code quality dependencies for the library (see `quality`, `test_dev`,
`test_prod` targets in [`setup.py`](./setup.py)).

(If accelerate was already installed in the virtual environment, remove
it with `pip uninstall accelerate` before reinstalling it in editable
mode with the `-e` flag.)
mode with the `-e` flag).

Alternatively, if you are using [Visual Studio Code](https://code.visualstudio.com/Download), the fastest way to get set up is by using
the provided Dev Container. Documentation on how to get started with dev containers is available [here](https://code.visualstudio.com/docs/remote/containers).
@@ -152,7 +155,7 @@ Follow these steps to start contributing:
$ make test
```

`accelerate` relies on `black` and `ruff` to format its source code
`accelerate` relies on `ruff` to format its source code
consistently. After you make changes, apply automatic style corrections and code verifications
that can't be automated in one go with:

@@ -172,6 +175,14 @@ Follow these steps to start contributing:
$ make quality
```

You can also set up [`pre-commit`](https://pre-commit.com/) to run these checks
automatically as Git commit hooks.

```bash
$ pip install pre-commit
$ pre-commit install
```

Once you're happy with your changes, add changed files using `git add` and
make a commit with `git commit` to record your changes locally:

@@ -235,4 +246,4 @@ $ python -m pytest -sv ./tests
In fact, that's how `make test` is implemented (sans the `pip install` line)!

You can specify a smaller set of tests in order to test only the feature
you're working on.
Makefile (22 changes)

@@ -1,6 +1,6 @@
.PHONY: quality style test docs utils

check_dirs := tests src examples benchmarks utils
check_dirs := .

# Check that source code meets quality standards

@@ -12,20 +12,17 @@ extra_quality_checks:

# this target runs checks on all files
quality:
black --required-version 23 --check $(check_dirs)
ruff $(check_dirs)
ruff check $(check_dirs)
ruff format --check $(check_dirs)
doc-builder style src/accelerate docs/source --max_len 119 --check_only

# Format source code automatically and check is there are any problems left that need manual fixing
style:
black --required-version 23 $(check_dirs)
ruff $(check_dirs) --fix
ruff check $(check_dirs) --fix
ruff format $(check_dirs)
doc-builder style src/accelerate docs/source --max_len 119

# Run tests for the library
test:
python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_all.log",)

test_big_modeling:
python -m pytest -s -v ./tests/test_big_modeling.py ./tests/test_modeling_utils.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_big_modeling.log",)

@@ -42,6 +39,15 @@ test_deepspeed:
test_fsdp:
python -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_fsdp.log",)

# Since the new version of pytest will *change* how things are collected, we need `deepspeed` to
# run after test_core and test_cli
test:
$(MAKE) test_core
$(MAKE) test_cli
$(MAKE) test_big_modeling
$(MAKE) test_deepspeed
$(MAKE) test_fsdp

test_examples:
python -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_examples.log",)
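The quality tooling therefore moves from `black` plus `ruff` to `ruff` alone, and `make test` becomes an ordered sequence of smaller targets. Locally that looks like the following, taken from the Makefile targets above:

```bash
# Lint and format with ruff only (black is no longer required)
make style     # ruff check . --fix, ruff format ., doc-builder style
make quality   # check-only variants of the same commands

# The aggregate test target now fans out into ordered sub-targets
make test_core
make test_cli
make test_big_modeling
make test_deepspeed
make test_fsdp
```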
README.md (37 changes)

@@ -22,22 +22,12 @@ limitations under the License.

<p align="center">
<!-- Uncomment when CircleCI is set up
<a href="https://circleci.com/gh/huggingface/accelerate">
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master">
</a>
<a href="https://circleci.com/gh/huggingface/accelerate"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master"></a>
-->
<a href="https://github.com/huggingface/accelerate/blob/main/LICENSE">
<img alt="License" src="https://img.shields.io/github/license/huggingface/accelerate.svg?color=blue">
</a>
<a href="https://huggingface.co/docs/accelerate/index.html">
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/accelerate/index.html.svg?down_color=red&down_message=offline&up_message=online">
</a>
<a href="https://github.com/huggingface/accelerate/releases">
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/accelerate.svg">
</a>
<a href="https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md">
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
</a>
<a href="https://github.com/huggingface/accelerate/blob/main/LICENSE"><img alt="License" src="https://img.shields.io/github/license/huggingface/accelerate.svg?color=blue"></a>
<a href="https://huggingface.co/docs/accelerate/index.html"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/accelerate/index.html.svg?down_color=red&down_message=offline&up_message=online"></a>
<a href="https://github.com/huggingface/accelerate/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/accelerate.svg"></a>
<a href="https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
</p>

<h3 align="center">
@@ -167,11 +157,21 @@ accelerate launch --multi_gpu --num_processes 2 examples/nlp_example.py

To learn more, check the CLI documentation available [here](https://huggingface.co/docs/accelerate/package_reference/cli).

Or view the configuration zoo [here](https://github.com/huggingface/accelerate/blob/main/examples/config_yaml_templates/)

## Launching multi-CPU run using MPI

🤗 Here is another way to launch multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well.
Once you have MPI setup on your cluster, just run:

```bash
accelerate config
```
Answer the questions that are asked, selecting to run using multi-CPU, and answer "yes" when asked if you want accelerate to launch mpirun.
Then, use `accelerate launch` with your script like:
```bash
accelerate launch examples/nlp_example.py
```
Alternatively, you can use mpirun directly, without using the CLI like:
```bash
mpirun -np 2 python examples/nlp_example.py
```
@@ -220,6 +220,7 @@ You shouldn't use 🤗 Accelerate if you don't want to write a training loop you

If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around its capabilities, some frameworks and libraries that are built on top of 🤗 Accelerate are listed below:

* [Amphion](https://github.com/open-mmlab/Amphion) is a toolkit for Audio, Music, and Speech Generation. Its purpose is to support reproducible research and help junior researchers and engineers get started in the field of audio, music, and speech generation research and development.
* [Animus](https://github.com/Scitator/animus) is a minimalistic framework to run machine learning experiments. Animus highlights common "breakpoints" in ML experiments and provides a unified interface for them within [IExperiment](https://github.com/Scitator/animus/blob/main/animus/core.py#L76).
* [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model training, and inference logic.
* [fastai](https://github.com/fastai/fastai#installing) is a PyTorch framework for Deep Learning that simplifies training fast and accurate neural nets using modern best practices. fastai provides a [Learner](https://docs.fast.ai/learner.html#Learner) to handle the training, fine-tuning, and inference of deep learning algorithms.
@@ -257,7 +258,7 @@ pip install accelerate
- multi-GPU on several nodes (machines)
- TPU
- FP16/BFloat16 mixed precision
- FP8 mixed precision with [Transformer Engine](https://github.com/NVIDIA/TransformerEngine)
- FP8 mixed precision with [Transformer Engine](https://github.com/NVIDIA/TransformerEngine) or [MS-AMP](https://github.com/Azure/MS-AMP/)
- DeepSpeed support (Experimental)
- PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental)
- Megatron-LM support (Experimental)
@@ -269,7 +270,7 @@ If you use 🤗 Accelerate in your publication, please cite it by using the foll
```bibtex
@Misc{accelerate,
title = {Accelerate: Training and inference at scale made simple, efficient and adaptable.},
author = {Sylvain Gugger, Lysandre Debut, Thomas Wolf, Philipp Schmid, Zachary Mueller, Sourab Mangrulkar, Marc Sun, Benjamin Bossan},
author = {Sylvain Gugger and Lysandre Debut and Thomas Wolf and Philipp Schmid and Zachary Mueller and Sourab Mangrulkar and Marc Sun and Benjamin Bossan},
howpublished = {\url{https://github.com/huggingface/accelerate}},
year = {2022}
}
@@ -1,46 +1,5 @@
# Big model inference benchmarks
# Benchmarks

Running inference with Accelerate on big models.
The folders below contain suites to test various functionalities in Accelerate.

## Setup

These benchmarks use the `transformers` library:

```bash
pip install transformers
```

To reproduce or test a new setup, run

```py
python inference_acc.py model_name
```

This script supports `gpt-j-6b`, `gpt-neox`, `opt` (30B version) and `T0pp` out of the box, but you can specify any valid checkpoint for `model_name`.

To force a different `torch_dtype` than the one in the config: `--torch_dtype xxx`.

If you get an error linked to disk offload, you need to add the option `--disk-offload`

## Results

On a setup with two Titan RTXs (24GB of RAM) and 32GB of RAM, we get the following benchmarks (T0pp does not run in float16, which is why it's not included).

| Model | Model load time | Generation time | dtype | GPU 0 use | GPU 1 use | CPU use | Disk offload |
|:-----:|:---------------:|:---------------:|:-----:|:---------:|:---------:|:-------:|:------------:|
| GPT-J-6B | 8.7s | 0.05s per token | float16 | 11.7GB | 0GB | 0GB | no |
| GPT-J-6B | 12.4s | 0.06s per token | float32 | 21.9GB | 1.5GB | 0GB | no |
| GPT-Neo-X-20B | 30.9s | 0.08s per token | float16 | 21.5GB | 18GB | 0GB | no |
| GPT-Neo-X-20B | 78.2s | 10.72s per token | float32 | 20.3GB | 22.7 GB | 24.4GB | yes |
| T0pp (11B) | 29.4s | 0.05s per token | float32 | 21.1GB | 21.3GB | 0GB | no |
| OPT-30B | 34.5s | 2.37s per token | float16 | 20.7GB | 22.3GB | 14.1GB | no |
| OPT-30B | 112.3s | 33.9s per token | float32 | 20.2GB | 21.2GB | 23.5GB | yes |

Note on the results:
- using two GPUs instead of one does not slow down generation
- using CPU offload slows down a bit (see OPT-30b)
- using disk offload slows down a lot (need to implement prefetching)

You will also note that Accelerate does not use anymore GPU and CPU RAM than necessary:
- peak GPU memory is exactly the size of the model put on a given GPU
- peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded on CPU, whichever is bigger.
See their relevant README.md's for more information.
46
benchmarks/big_model_inference/README.md
Normal file
46
benchmarks/big_model_inference/README.md
Normal file
@ -0,0 +1,46 @@
|
||||
# Big model inference benchmarks
|
||||
|
||||
Running inference with Accelerate on big models.
|
||||
|
||||
## Setup
|
||||
|
||||
These benchmarks use the `transformers` library:
|
||||
|
||||
```bash
|
||||
pip install transformers
|
||||
```
|
||||
|
||||
To reproduce or test a new setup, run
|
||||
|
||||
```py
|
||||
python inference_acc.py model_name
|
||||
```
|
||||
|
||||
This script supports `gpt-j-6b`, `gpt-neox`, `opt` (30B version) and `T0pp` out of the box, but you can specify any valid checkpoint for `model_name`.
|
||||
|
||||
To force a different `torch_dtype` than the one in the config: `--torch_dtype xxx`.
|
||||
|
||||
If you get an error linked to disk offload, you need to add the option `--disk-offload`
|
||||
|
||||
## Results
|
||||
|
||||
On a setup with two Titan RTX GPUs (24GB of memory each) and 32GB of CPU RAM, we get the following benchmarks (T0pp does not run in float16, which is why it's not included).
|
||||
|
||||
| Model | Model load time | Generation time | dtype | GPU 0 use | GPU 1 use | CPU use | Disk offload |
|
||||
|:-----:|:---------------:|:---------------:|:-----:|:---------:|:---------:|:-------:|:------------:|
|
||||
| GPT-J-6B | 8.7s | 0.05s per token | float16 | 11.7GB | 0GB | 0GB | no |
|
||||
| GPT-J-6B | 12.4s | 0.06s per token | float32 | 21.9GB | 1.5GB | 0GB | no |
|
||||
| GPT-Neo-X-20B | 30.9s | 0.08s per token | float16 | 21.5GB | 18GB | 0GB | no |
|
||||
| GPT-Neo-X-20B | 78.2s | 10.72s per token | float32 | 20.3GB | 22.7 GB | 24.4GB | yes |
|
||||
| T0pp (11B) | 29.4s | 0.05s per token | float32 | 21.1GB | 21.3GB | 0GB | no |
|
||||
| OPT-30B | 34.5s | 2.37s per token | float16 | 20.7GB | 22.3GB | 14.1GB | no |
|
||||
| OPT-30B | 112.3s | 33.9s per token | float32 | 20.2GB | 21.2GB | 23.5GB | yes |
|
||||
|
||||
Note on the results:
|
||||
- using two GPUs instead of one does not slow down generation
|
||||
- using CPU offload slows generation down a bit (see OPT-30B in float16)
|
||||
- using disk offload slows down a lot (need to implement prefetching)
|
||||
|
||||
You will also note that Accelerate does not use any more GPU and CPU RAM than necessary:
|
||||
- peak GPU memory is exactly the size of the model put on a given GPU
|
||||
- peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded on CPU, whichever is bigger.
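
For reference, the core Accelerate pattern these benchmarks exercise looks roughly like the sketch below. This is not `inference_acc.py` itself; the checkpoint name, dtype, and offload folder are illustrative stand-ins for the options described above.

```py
# Illustrative sketch only -- not the benchmark script itself.
# Loads a large checkpoint with an automatically inferred device map,
# spilling weights to CPU (and disk, if needed) as Big Model Inference does.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "EleutherAI/gpt-j-6b"  # stand-in for any of the checkpoints above
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(
    checkpoint,
    device_map="auto",           # let Accelerate place weights across GPUs/CPU/disk
    torch_dtype=torch.float16,   # counterpart of the --torch_dtype flag
    offload_folder="offload",    # only used when disk offload is required
)

inputs = tokenizer("Hello, my name is", return_tensors="pt").to(0)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0]))
```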
|
||||
@ -1,3 +1,16 @@
|
||||
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import gc
|
||||
import threading
|
||||
import time
|
||||
benchmarks/fp8/Dockerfile (new file, 12 lines)
@ -0,0 +1,12 @@
|
||||
FROM nvcr.io/nvidia/pytorch:24.07-py3
|
||||
|
||||
RUN pip install transformers evaluate datasets
|
||||
RUN git clone https://github.com/huggingface/accelerate.git
|
||||
|
||||
RUN cd accelerate && \
|
||||
pip install -e . && \
|
||||
cd benchmarks/fp8
|
||||
|
||||
RUN /bin/bash
|
||||
|
||||
|
||||
benchmarks/fp8/README.md (new file, 32 lines)
@ -0,0 +1,32 @@
|
||||
# FP8 Benchmarks
|
||||
|
||||
Comparing and running [TransformerEngine](https://github.com/NVIDIA/TransformerEngine) FP8 with accelerate
|
||||
|
||||
## Overview
|
||||
|
||||
This repo provides scripts which compare native TransformerEngine model training against `accelerate`'s own integration. Each parallelism setup is segmented out via its own script, supporting the following (see the sketch after this list):
|
||||
|
||||
* Single GPU training (`non_distributed.py`)
|
||||
* Multi-GPU training via DistributedDataParallel (`ddp.py`)
|
||||
* Fully Sharded Data Parallelism (`fsdp.py`)
|
||||
* DeepSpeed ZeRO 1-3 (`deepspeed.py`)
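
As a rough sketch, the `accelerate` side of each comparison enables FP8 through the `Accelerator`, mirroring the `train_integration` functions in these scripts. The tiny model and optimizer below are placeholders; the scripts themselves train `bert-base-cased` on MRPC.

```py
# Sketch of the Accelerate FP8 setup these scripts benchmark (placeholder model/optimizer).
import torch

from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

kwargs_handlers = [
    FP8RecipeKwargs(backend="TE", fp8_format="HYBRID", amax_history_len=32, amax_compute_algo="max")
]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)

model = torch.nn.Linear(64, 64)  # placeholder for the real model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
model, optimizer = accelerator.prepare(model, optimizer)
```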
|
||||
|
||||
To run them, it's recommended to use a docker image (see the attached `Dockerfile`) and not install `TransformerEngine` manually.
|
||||
|
||||
## Running
|
||||
|
||||
There are official Docker images located at `huggingface/accelerate:gpu-fp8-transformerengine-nightly` which can be used.
|
||||
|
||||
You can run all scripts using the core `accelerate launch` command without any `accelerate config` being needed.
|
||||
|
||||
For single GPU, run it via `python`:
|
||||
|
||||
```bash
|
||||
python non_distributed.py
|
||||
```
|
||||
|
||||
For the rest, run it via `accelerate launch`:
|
||||
|
||||
```bash
|
||||
accelerate launch ddp.py # or distrib_deepspeed.py, fsdp.py
|
||||
```
|
||||
benchmarks/fp8/ddp.py (new file, 143 lines)
@ -0,0 +1,143 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
This script tests to ensure that `accelerate` performs at the same level as raw `TransformerEngine`.
|
||||
|
||||
This particular script verifies this for DDP training.
|
||||
"""
|
||||
import evaluate
|
||||
import torch
|
||||
import transformer_engine.common.recipe as te_recipe
|
||||
import transformer_engine.pytorch as te
|
||||
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
|
||||
from torch.nn.parallel import DistributedDataParallel as DDP
|
||||
from transformer_engine.common.recipe import DelayedScaling
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate.state import AcceleratorState
|
||||
from accelerate.utils import FP8RecipeKwargs, set_seed
|
||||
from accelerate.utils.transformer_engine import convert_model
|
||||
|
||||
|
||||
MODEL_NAME = "bert-base-cased"
|
||||
METRIC = evaluate.load("glue", "mrpc")
|
||||
|
||||
|
||||
def train_baseline():
|
||||
set_seed(42)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
|
||||
accelerator = Accelerator()
|
||||
device = accelerator.device
|
||||
model.to(device)
|
||||
|
||||
# Convert the model to TE
|
||||
old_named_params = get_named_parameters(model)
|
||||
|
||||
with torch.no_grad():
|
||||
convert_model(model)
|
||||
|
||||
FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
|
||||
fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)
|
||||
|
||||
new_named_params = get_named_parameters(model)
|
||||
|
||||
# Convert the model to DDP
|
||||
device_ids, output_device = [accelerator.local_process_index], accelerator.local_process_index
|
||||
model = DDP(model, device_ids=device_ids, output_device=output_device)
|
||||
|
||||
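# `convert_model` swapped in TransformerEngine layers with new parameter objects,
# so re-point the optimizer's param groups at the converted parameters.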
mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
|
||||
for param_group in optimizer.param_groups:
|
||||
param_group["params"] = [mapping[p] for p in param_group["params"]]
|
||||
|
||||
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
model.train()
|
||||
|
||||
for _ in range(2):
|
||||
for batch in train_dataloader:
|
||||
with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
|
||||
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
|
||||
batch = batch.to(device)
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
lr_scheduler.step()
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
|
||||
def train_integration():
|
||||
FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
|
||||
kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
|
||||
AcceleratorState()._reset_state(True)
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)
|
||||
set_seed(42)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
|
||||
MODEL_NAME, accelerator=accelerator
|
||||
)
|
||||
|
||||
model, optimizer = accelerator.prepare(model, optimizer)
|
||||
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
model.train()
|
||||
|
||||
for _ in range(2):
|
||||
for batch in train_dataloader:
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
lr_scheduler.step()
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
baseline_not_trained, baseline_trained = train_baseline()
|
||||
accelerator_not_trained, accelerator_trained = train_integration()
|
||||
|
||||
assert (
|
||||
baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_not_trained["f1"] == accelerator_not_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
|
||||
assert (
|
||||
baseline_trained["accuracy"] == accelerator_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_trained["f1"] == accelerator_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
|
||||
|
||||
torch.distributed.destroy_process_group()
|
||||
benchmarks/fp8/distrib_deepspeed.py (new file, 189 lines)
@ -0,0 +1,189 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
This script tests to ensure that `accelerate` performs at the same level as raw `TransformerEngine`.

This particular script verifies this for DeepSpeed training.
|
||||
"""
|
||||
from unittest.mock import patch
|
||||
|
||||
import deepspeed
|
||||
import evaluate
|
||||
import torch
|
||||
import transformer_engine.common.recipe as te_recipe
|
||||
import transformer_engine.pytorch as te
|
||||
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
|
||||
from transformer_engine.common.recipe import DelayedScaling
|
||||
|
||||
from accelerate import Accelerator, DeepSpeedPlugin
|
||||
from accelerate.state import AcceleratorState
|
||||
from accelerate.utils import FP8RecipeKwargs, set_seed
|
||||
from accelerate.utils.transformer_engine import convert_model
|
||||
|
||||
|
||||
MODEL_NAME = "bert-base-cased"
|
||||
METRIC = evaluate.load("glue", "mrpc")
|
||||
|
||||
|
||||
def train_baseline(zero_stage: int = 1):
|
||||
# This forces transformers to think Zero-3 Init should be used
|
||||
with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock:
|
||||
mock.return_value = zero_stage == 3
|
||||
set_seed(42)
|
||||
|
||||
accelerator = Accelerator()
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
|
||||
MODEL_NAME, accelerator=accelerator
|
||||
)
|
||||
|
||||
# Convert the model to TE
|
||||
old_named_params = get_named_parameters(model)
|
||||
|
||||
with torch.no_grad():
|
||||
convert_model(model)
|
||||
new_named_params = get_named_parameters(model)
|
||||
|
||||
mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
|
||||
for param_group in optimizer.param_groups:
|
||||
param_group["params"] = [mapping[p] for p in param_group["params"]]
|
||||
|
||||
FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
|
||||
fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)
|
||||
|
||||
import numpy as np
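# (numpy is only needed for `np.inf` below, which effectively disables DeepSpeed's periodic step logging)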
|
||||
|
||||
config = {
|
||||
"train_batch_size": 32,
|
||||
"train_micro_batch_size_per_gpu": 16,
|
||||
"gradient_accumulation_steps": 1,
|
||||
"zero_optimization": {
|
||||
"stage": zero_stage,
|
||||
"offload_optimizer": {"device": "none", "nvme_path": None},
|
||||
"offload_param": {"device": "none", "nvme_path": None},
|
||||
"stage3_gather_16bit_weights_on_model_save": False,
|
||||
},
|
||||
"gradient_clipping": 1.0,
|
||||
"steps_per_print": np.inf,
|
||||
"bf16": {"enabled": True},
|
||||
"fp16": {"enabled": False},
|
||||
"zero_allow_untested_optimizer": True,
|
||||
}
|
||||
|
||||
(
|
||||
model,
|
||||
optimizer,
|
||||
_,
|
||||
_,
|
||||
) = deepspeed.initialize(
|
||||
model=model,
|
||||
optimizer=optimizer,
|
||||
config_params=config,
|
||||
)
|
||||
|
||||
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
model.train()
|
||||
|
||||
model_outputs = []
|
||||
data = []
|
||||
|
||||
for _ in range(2):
|
||||
for batch in train_dataloader:
|
||||
with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
|
||||
outputs = model(**batch)
|
||||
data.append(batch.to("cpu"))
|
||||
model_outputs.append(outputs.logits.to("cpu"))
|
||||
loss = outputs.loss
|
||||
model.backward(loss)
|
||||
model.step()
|
||||
for _ in range(accelerator.num_processes):
|
||||
lr_scheduler.step()
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
model.destroy()
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results, model_outputs, data
|
||||
|
||||
|
||||
def train_integration(zero_stage: int = 1):
|
||||
set_seed(42)
|
||||
FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
|
||||
kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
|
||||
AcceleratorState()._reset_state(True)
|
||||
deepspeed_plugin = DeepSpeedPlugin(
|
||||
zero_stage=zero_stage,
|
||||
zero3_init_flag=zero_stage == 3,
|
||||
)
|
||||
accelerator = Accelerator(
|
||||
mixed_precision="fp8", kwargs_handlers=kwargs_handlers, deepspeed_plugin=deepspeed_plugin
|
||||
)
|
||||
accelerator.state.deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = 16
|
||||
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
|
||||
MODEL_NAME, accelerator=accelerator
|
||||
)
|
||||
|
||||
model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
|
||||
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
model.train()
|
||||
model_outputs = []
|
||||
data = []
|
||||
for _ in range(2):
|
||||
for batch in train_dataloader:
|
||||
outputs = model(**batch)
|
||||
data.append(batch.to("cpu"))
|
||||
model_outputs.append(outputs.logits.to("cpu"))
|
||||
loss = outputs.loss
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
model.destroy()
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results, model_outputs, data
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# for zero_stage in [1, 2, 3]:
|
||||
zero_stage = 1
|
||||
baseline_not_trained, baseline_trained, baseline_outputs, baseline_data = train_baseline(zero_stage)
|
||||
accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration(zero_stage)
|
||||
assert (
|
||||
baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
|
||||
), f'ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_not_trained["f1"] == accelerator_not_trained["f1"]
|
||||
), f'ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
|
||||
assert (
|
||||
baseline_trained["accuracy"] == accelerator_trained["accuracy"]
|
||||
), f'ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_trained["f1"] == accelerator_trained["f1"]
|
||||
), f'ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
|
||||
|
||||
torch.distributed.destroy_process_group()
|
||||
benchmarks/fp8/fp8_utils.py (new file, 116 lines)
@ -0,0 +1,116 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import torch
|
||||
|
||||
|
||||
def get_dataloaders(model_name: str, batch_size: int = 16):
|
||||
from datasets import load_dataset
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoTokenizer
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||
datasets = load_dataset("glue", "mrpc")
|
||||
|
||||
def tokenize_function(examples):
|
||||
# max_length=None => use the model max length (it's actually the default)
|
||||
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
|
||||
return outputs
|
||||
|
||||
# Apply the method we just defined to all the examples in all the splits of the dataset
|
||||
# starting with the main process first:
|
||||
tokenized_datasets = datasets.map(
|
||||
tokenize_function,
|
||||
batched=True,
|
||||
remove_columns=["idx", "sentence1", "sentence2"],
|
||||
)
|
||||
|
||||
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
|
||||
# transformers library
|
||||
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
|
||||
|
||||
def collate_fn(examples):
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
pad_to_multiple_of=16, # Specific for FP8
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
|
||||
)
|
||||
eval_dataloader = DataLoader(
|
||||
tokenized_datasets["validation"],
|
||||
shuffle=False,
|
||||
collate_fn=collate_fn,
|
||||
batch_size=16,
|
||||
drop_last=True,
|
||||
)
|
||||
|
||||
return train_dataloader, eval_dataloader
|
||||
|
||||
|
||||
def get_training_utilities(model_name: str, batch_size: int = 16, accelerator=None):
|
||||
"""
|
||||
Returns a tuple of:
|
||||
- Model
|
||||
- Optimizer
|
||||
- Train dataloader (prepared)
|
||||
- Eval dataloader (prepared)
|
||||
- LR Scheduler
|
||||
Suitable for training on the MRPC dataset
|
||||
"""
|
||||
from torch.optim import AdamW
|
||||
from transformers import AutoModelForSequenceClassification, get_linear_schedule_with_warmup
|
||||
|
||||
from accelerate import Accelerator
|
||||
|
||||
if accelerator is None:
|
||||
accelerator = Accelerator()
|
||||
model = AutoModelForSequenceClassification.from_pretrained(model_name)
|
||||
train_dataloader, eval_dataloader = get_dataloaders(model_name, batch_size)
|
||||
optimizer = AdamW(model.parameters(), lr=0.0001)
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=100,
|
||||
num_training_steps=len(train_dataloader) * 2,
|
||||
)
|
||||
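# Only the dataloaders are prepared here; each benchmark script wraps the model and optimizer itself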
train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader)
|
||||
return model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
|
||||
|
||||
def get_named_parameters(model):
|
||||
"""
|
||||
Same thing as `Accelerator.get_named_parameters`: returns a dict of the model's named parameters (extracted from any parallel wrapper)
|
||||
"""
|
||||
from accelerate.utils import extract_model_from_parallel
|
||||
|
||||
model = extract_model_from_parallel(model)
|
||||
return {n: p for n, p in model.named_parameters()}
|
||||
|
||||
|
||||
def evaluate_model(model, dataloader, metric, accelerator=None):
|
||||
"Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
|
||||
model.eval()
|
||||
for step, batch in enumerate(dataloader):
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch)
|
||||
predictions = outputs.logits.argmax(dim=-1)
|
||||
references = batch["labels"]
|
||||
if accelerator is not None and accelerator.num_processes > 1:
|
||||
predictions, references = accelerator.gather_for_metrics((predictions, references))
|
||||
metric.add_batch(predictions=predictions, references=references)
|
||||
return metric.compute()
|
||||
benchmarks/fp8/fsdp.py (new file, 160 lines)
@ -0,0 +1,160 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
This script tests to ensure that `accelerate` performs at the same level as raw `TransformerEngine`.
|
||||
|
||||
This particular script verifies this for FSDP training.
|
||||
"""
|
||||
from functools import partial
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
import transformer_engine.common.recipe as te_recipe
|
||||
import transformer_engine.pytorch as te
|
||||
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
|
||||
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
|
||||
from torch.distributed.fsdp import MixedPrecision
|
||||
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
|
||||
from transformer_engine.common.recipe import DelayedScaling
|
||||
from transformers.models.bert import BertLayer
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate import FullyShardedDataParallelPlugin as FSDPPlugin
|
||||
from accelerate.state import AcceleratorState
|
||||
from accelerate.utils import FP8RecipeKwargs, set_seed
|
||||
from accelerate.utils.transformer_engine import convert_model
|
||||
|
||||
|
||||
MODEL_NAME = "bert-base-cased"
|
||||
METRIC = evaluate.load("glue", "mrpc")
|
||||
|
||||
FSDP_WRAP_POLICY = partial(transformer_auto_wrap_policy, transformer_layer_cls={BertLayer})
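# Shared auto-wrap policy (one FSDP unit per BertLayer) so the manual FSDP baseline and the Accelerate integration shard the model identically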
|
||||
|
||||
|
||||
def train_baseline():
|
||||
set_seed(42)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
|
||||
accelerator = Accelerator()
|
||||
device = accelerator.device
|
||||
model.to(device)
|
||||
|
||||
# Convert the model to TE
|
||||
old_named_params = get_named_parameters(model)
|
||||
|
||||
with torch.no_grad():
|
||||
convert_model(model)
|
||||
|
||||
FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
|
||||
fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)
|
||||
|
||||
new_named_params = get_named_parameters(model)
|
||||
|
||||
# Convert the model to FSDP
|
||||
model = FSDP(
|
||||
model,
|
||||
use_orig_params=True,
|
||||
mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
|
||||
auto_wrap_policy=FSDP_WRAP_POLICY,
|
||||
)
|
||||
|
||||
mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
|
||||
for param_group in optimizer.param_groups:
|
||||
param_group["params"] = [mapping[p] for p in param_group["params"]]
|
||||
|
||||
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
model.train()
|
||||
|
||||
for _ in range(2):
|
||||
for batch in train_dataloader:
|
||||
with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
|
||||
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
|
||||
batch = batch.to(device)
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
lr_scheduler.step()
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
|
||||
def train_integration():
|
||||
FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
|
||||
kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
|
||||
AcceleratorState()._reset_state(True)
|
||||
fsdp_plugin = FSDPPlugin(
|
||||
auto_wrap_policy=FSDP_WRAP_POLICY,
|
||||
use_orig_params=True,
|
||||
mixed_precision_policy=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
|
||||
)
|
||||
accelerator = Accelerator(mixed_precision="fp8", fsdp_plugin=fsdp_plugin, kwargs_handlers=kwargs_handlers)
|
||||
set_seed(42)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
|
||||
MODEL_NAME, accelerator=accelerator
|
||||
)
|
||||
|
||||
model, optimizer = accelerator.prepare(model, optimizer)
|
||||
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
model.train()
|
||||
|
||||
for _ in range(2):
|
||||
for batch in train_dataloader:
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
lr_scheduler.step()
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
|
||||
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
baseline_not_trained, baseline_trained = train_baseline()
|
||||
accelerator_not_trained, accelerator_trained = train_integration()
|
||||
|
||||
assert (
|
||||
baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_not_trained["f1"] == accelerator_not_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
|
||||
assert (
|
||||
baseline_trained["accuracy"] == accelerator_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_trained["f1"] == accelerator_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
|
||||
|
||||
torch.distributed.destroy_process_group()
|
||||
benchmarks/fp8/non_distributed.py (new file, 131 lines)
@ -0,0 +1,131 @@
|
||||
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
This script tests to ensure that `accelerate` performs at the same level as raw `TransformerEngine`.
|
||||
|
||||
This particular script verifies this for single GPU training.
|
||||
"""
|
||||
import evaluate
|
||||
import torch
|
||||
import transformer_engine.common.recipe as te_recipe
|
||||
import transformer_engine.pytorch as te
|
||||
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
|
||||
from transformer_engine.common.recipe import DelayedScaling
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate.state import AcceleratorState
|
||||
from accelerate.utils import FP8RecipeKwargs, set_seed
|
||||
from accelerate.utils.transformer_engine import convert_model
|
||||
|
||||
|
||||
MODEL_NAME = "bert-base-cased"
|
||||
METRIC = evaluate.load("glue", "mrpc")
|
||||
|
||||
|
||||
def train_baseline():
|
||||
set_seed(42)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
|
||||
|
||||
# Convert the model to TE
|
||||
old_named_params = get_named_parameters(model)
|
||||
|
||||
with torch.no_grad():
|
||||
convert_model(model)
|
||||
|
||||
new_named_params = get_named_parameters(model)
|
||||
mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
|
||||
for param_group in optimizer.param_groups:
|
||||
param_group["params"] = [mapping[p] for p in param_group["params"]]
|
||||
|
||||
FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
|
||||
fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)
|
||||
|
||||
model.to("cuda")
|
||||
base_model_results = evaluate_model(model, eval_dataloader, METRIC)
|
||||
model.train()
|
||||
|
||||
for batch in train_dataloader:
|
||||
with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
|
||||
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
|
||||
batch = batch.to("cuda")
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
lr_scheduler.step()
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
|
||||
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
|
||||
def train_integration():
|
||||
FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
|
||||
kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
|
||||
AcceleratorState()._reset_state(True)
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)
|
||||
set_seed(42)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
|
||||
MODEL_NAME, accelerator=accelerator
|
||||
)
|
||||
|
||||
model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
|
||||
base_model_results = evaluate_model(model, eval_dataloader, METRIC)
|
||||
model.train()
|
||||
|
||||
for batch in train_dataloader:
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
lr_scheduler.step()
|
||||
|
||||
trained_model_results = evaluate_model(model, eval_dataloader, METRIC)
|
||||
|
||||
assert (
|
||||
trained_model_results["accuracy"] > base_model_results["accuracy"]
|
||||
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
|
||||
assert (
|
||||
trained_model_results["f1"] > base_model_results["f1"]
|
||||
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
|
||||
|
||||
return base_model_results, trained_model_results
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
baseline_not_trained, baseline_trained = train_baseline()
|
||||
accelerator_not_trained, accelerator_trained = train_integration()
|
||||
|
||||
assert (
|
||||
baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_not_trained["f1"] == accelerator_not_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
|
||||
assert (
|
||||
baseline_trained["accuracy"] == accelerator_trained["accuracy"]
|
||||
), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
|
||||
assert (
|
||||
baseline_trained["f1"] == accelerator_trained["f1"]
|
||||
), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
|
||||
docker/README.md (new file, 74 lines)
@ -0,0 +1,74 @@
|
||||
<!---
|
||||
Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
|
||||
# Official Hugging Face Accelerate Docker Images
|
||||
|
||||
Accelerate publishes a variety of docker images as part of our CI that users can also use. These are stable images that Accelerate can run off of, available in a variety of different setup configurations, all of which are officially hosted on [Docker Hub](https://hub.docker.com/r/huggingface/accelerate).
|
||||
|
||||
A breakdown of each is given below.
|
||||
|
||||
## Naming Conventions
|
||||
|
||||
Accelerate docker images follow a tagging convention of:
|
||||
|
||||
```bash
|
||||
huggingface/accelerate:{accelerator}-{nightly,release}
|
||||
```
|
||||
|
||||
`accelerator` in this instance is one of several applicable pre-configured backends:
|
||||
* `gpu`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes`. Runs on Python 3.9.
|
||||
* `cpu`: Comes compiled off of `python:3.9-slim` and is designed for non-CUDA based workloads.
|
||||
* More to come soon
|
||||
* `gpu-deepspeed`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes` as well as the latest `deepspeed` version. Runs on Python 3.10.
|
||||
* `gpu-fp8-transformerengine`: Comes compiled off of `nvcr.io/nvidia/pytorch` and is specifically for running the `benchmarks/fp8` scripts on devices which support FP8 operations using the `TransformerEngine` library (RTX 4090, H100, etc)
|
||||
|
||||
## Nightlies vs Releases
|
||||
|
||||
With each release, a new build is pushed with the version number included in the name. For instance, the GPU-supported image for version 0.28.0 looks like the following:
|
||||
|
||||
```bash
|
||||
huggingface/accelerate:gpu-release-0.28.0
|
||||
```
|
||||
|
||||
Nightlies contain two different image tags. There is a general `nightly` tag which is built each night, and a `nightly-YYYY-MM-DD` which corresponds to a build from a particular date.
|
||||
|
||||
For instance, here is an example nightly CPU image from 3/14/2024
|
||||
|
||||
```bash
|
||||
huggingface/accelerate:cpu-nightly-2024-03-14
|
||||
```
|
||||
|
||||
## Running the images
|
||||
|
||||
Each image comes compiled with `conda` and an `accelerate` environment that contains all of the installed dependencies.
|
||||
|
||||
To pull down the latest nightly, run:
|
||||
|
||||
```bash
|
||||
docker pull huggingface/accelerate:gpu-nightly
|
||||
```
|
||||
|
||||
To then run it in interactive mode with GPU memory available, run:
|
||||
|
||||
```bash
|
||||
docker container run --gpus all -it huggingface/accelerate:gpu-nightly
|
||||
```
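
Once inside, you can sanity-check the install from the bundled `accelerate` environment (a quick check, not part of the image docs):

```py
# Quick sanity check from inside the container
import accelerate

print(accelerate.__version__)
```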
|
||||
|
||||
## Deprecated images
|
||||
|
||||
CPU and GPU docker images were previously hosted at `huggingface/accelerate-gpu` and `huggingface/accelerate-cpu`. These builds are now outdated and will no longer receive updates.
|
||||
|
||||
The builds at the corresponding `huggingface/accelerate:{gpu,cpu}` tags contain the same `Dockerfile`s, so migrating is as simple as changing the docker image to the desired one from above. For posterity, we will not be deleting these images, but they will not receive updates going forward.
|
||||
docker/accelerate-gpu-deepspeed/Dockerfile (new file, 46 lines)
@ -0,0 +1,46 @@
|
||||
# Builds GPU docker image of PyTorch specifically
|
||||
# Uses multi-staged approach to reduce size
|
||||
# Stage 1
|
||||
# Use base conda image to reduce time
|
||||
FROM continuumio/miniconda3:latest AS compile-image
|
||||
# Specify py version
|
||||
# Note: DeepSpeed beyond v0.12.6 requires py 3.10
|
||||
ENV PYTHON_VERSION=3.10
|
||||
# Install apt libs
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl git wget && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists*
|
||||
|
||||
# Create our conda env
|
||||
RUN conda create --name accelerate python=${PYTHON_VERSION} ipython jupyter pip
|
||||
# We don't install pytorch here yet since CUDA isn't available
|
||||
# instead we use the direct torch wheel
|
||||
ENV PATH /opt/conda/envs/accelerate/bin:$PATH
|
||||
# Activate our bash shell
|
||||
RUN chsh -s /bin/bash
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
# Activate the conda env, install mpi4py, and install torch + accelerate
|
||||
RUN source activate accelerate && conda install -c conda-forge mpi4py
|
||||
RUN source activate accelerate && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers,deepspeed] \
|
||||
--extra-index-url https://download.pytorch.org/whl/cu117
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir bitsandbytes
|
||||
|
||||
# Stage 2
|
||||
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 AS build-image
|
||||
COPY --from=compile-image /opt/conda /opt/conda
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
|
||||
# Install apt libs
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl git wget && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists*
|
||||
|
||||
RUN echo "source activate accelerate" >> ~/.profile
|
||||
|
||||
# Activate the virtualenv
|
||||
CMD ["/bin/bash"]
|
||||
@ -1,10 +1,10 @@
|
||||
# Builds GPU docker image of PyTorch
|
||||
# Builds GPU docker image of PyTorch specifically
|
||||
# Uses multi-staged approach to reduce size
|
||||
# Stage 1
|
||||
# Use base conda image to reduce time
|
||||
FROM continuumio/miniconda3:latest AS compile-image
|
||||
# Specify py version
|
||||
ENV PYTHON_VERSION=3.8
|
||||
ENV PYTHON_VERSION=3.9
|
||||
# Install apt libs
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl git wget && \
|
||||
@ -19,7 +19,8 @@ ENV PATH /opt/conda/envs/accelerate/bin:$PATH
|
||||
# Activate our bash shell
|
||||
RUN chsh -s /bin/bash
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
# Activate the conda env and install torch + accelerate
|
||||
# Activate the conda env, install mpi4py, and install torch + accelerate
|
||||
RUN source activate accelerate && conda install -c conda-forge mpi4py
|
||||
RUN source activate accelerate && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \
|
||||
@ -28,7 +29,7 @@ RUN source activate accelerate && \
|
||||
RUN python3 -m pip install --no-cache-dir bitsandbytes
|
||||
|
||||
# Stage 2
|
||||
FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04 AS build-image
|
||||
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 AS build-image
|
||||
COPY --from=compile-image /opt/conda /opt/conda
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
|
||||
|
||||
@ -10,51 +10,70 @@
|
||||
- local: basic_tutorials/overview
|
||||
title: Overview
|
||||
- local: basic_tutorials/migration
|
||||
title: Migrating to 🤗 Accelerate
|
||||
title: Add Accelerate to your code
|
||||
- local: basic_tutorials/execution
|
||||
title: Execution process
|
||||
- local: basic_tutorials/tpu
|
||||
title: TPU training
|
||||
- local: basic_tutorials/launch
|
||||
title: Launching distributed code
|
||||
- local: basic_tutorials/notebook
|
||||
title: Launching distributed training from Jupyter Notebooks
|
||||
title: Tutorials
|
||||
- sections:
|
||||
- local: usage_guides/explore
|
||||
title: Start Here!
|
||||
- local: usage_guides/training_zoo
|
||||
title: Example Zoo
|
||||
- local: usage_guides/big_modeling
|
||||
title: How to perform inference on large models with small resources
|
||||
- local: usage_guides/model_size_estimator
|
||||
title: Knowing how big of a model you can fit into memory
|
||||
- local: usage_guides/quantization
|
||||
title: How to quantize model
|
||||
- local: usage_guides/distributed_inference
|
||||
title: How to perform distributed inference with normal resources
|
||||
- local: usage_guides/gradient_accumulation
|
||||
title: Performing gradient accumulation
|
||||
- local: usage_guides/local_sgd
|
||||
title: Accelerating training with local SGD
|
||||
- local: usage_guides/checkpoint
|
||||
title: Saving and loading training states
|
||||
- local: usage_guides/tracking
|
||||
title: Using experiment trackers
|
||||
- local: usage_guides/debug
|
||||
title: Debugging timeout errors
|
||||
- local: usage_guides/memory
|
||||
title: How to avoid CUDA Out-of-Memory
|
||||
- local: usage_guides/mps
|
||||
title: How to use Apple Silicon M1 GPUs
|
||||
- local: usage_guides/deepspeed
|
||||
title: How to use DeepSpeed
|
||||
- local: usage_guides/fsdp
|
||||
title: How to use Fully Sharded Data Parallelism
|
||||
- local: usage_guides/megatron_lm
|
||||
title: How to use Megatron-LM
|
||||
- local: usage_guides/sagemaker
|
||||
title: How to use 🤗 Accelerate with SageMaker
|
||||
- local: usage_guides/ipex
|
||||
title: How to use 🤗 Accelerate with Intel® Extension for PyTorch for cpu
|
||||
title: How-To Guides
|
||||
- isExpanded: true
|
||||
sections:
|
||||
- local: usage_guides/explore
|
||||
title: Start Here!
|
||||
- local: usage_guides/model_size_estimator
|
||||
title: Model memory estimator
|
||||
- local: usage_guides/quantization
|
||||
title: Model quantization
|
||||
- local: usage_guides/tracking
|
||||
title: Experiment trackers
|
||||
- local: usage_guides/profiler
|
||||
title: Profiler
|
||||
- local: usage_guides/checkpoint
|
||||
title: Save and load training states
|
||||
- local: basic_tutorials/troubleshooting
|
||||
title: Troubleshoot
|
||||
- local: usage_guides/training_zoo
|
||||
title: Example Zoo
|
||||
title: Accelerate
|
||||
- isExpanded: true
|
||||
sections:
|
||||
- local: usage_guides/gradient_accumulation
|
||||
title: Gradient accumulation
|
||||
- local: usage_guides/local_sgd
|
||||
title: Local SGD
|
||||
- local: usage_guides/low_precision_training
|
||||
title: Low precision (FP8) training
|
||||
- local: usage_guides/deepspeed
|
||||
title: DeepSpeed
|
||||
- local: usage_guides/ddp_comm_hook
|
||||
title: DDP Communication Hooks
|
||||
- local: usage_guides/fsdp
|
||||
title: Fully Sharded Data Parallelism
|
||||
- local: usage_guides/megatron_lm
|
||||
title: Megatron-LM
|
||||
- local: usage_guides/sagemaker
|
||||
title: Amazon SageMaker
|
||||
- local: usage_guides/mps
|
||||
title: Apple M1 GPUs
|
||||
- local: usage_guides/ipex
|
||||
title: IPEX training with CPU
|
||||
title: Training
|
||||
- isExpanded: true
|
||||
sections:
|
||||
- local: usage_guides/big_modeling
|
||||
title: Big Model Inference
|
||||
- local: usage_guides/distributed_inference
|
||||
title: Distributed inference
|
||||
title: Inference
|
||||
title: How to guides
|
||||
- sections:
|
||||
- local: concept_guides/internal_mechanism
|
||||
title: 🤗 Accelerate's internal mechanism
|
||||
- local: concept_guides/big_model_inference
|
||||
title: Loading big models into memory
|
||||
- local: concept_guides/performance
|
||||
@ -63,12 +82,16 @@
|
||||
title: Executing and deferring jobs
|
||||
- local: concept_guides/gradient_synchronization
|
||||
title: Gradient synchronization
|
||||
- local: concept_guides/fsdp_and_deepspeed
|
||||
title: FSDP vs DeepSpeed
|
||||
- local: concept_guides/low_precision_training
|
||||
title: How training in low-precision environments is possible (FP8)
|
||||
- local: concept_guides/training_tpu
|
||||
title: TPU best practices
|
||||
title: Concepts and fundamentals
|
||||
- sections:
|
||||
- local: package_reference/accelerator
|
||||
title: Main Accelerator class
|
||||
title: Accelerator
|
||||
- local: package_reference/state
|
||||
title: Stateful configuration classes
|
||||
- local: package_reference/cli
|
||||
@ -85,8 +108,12 @@
|
||||
title: Logging
|
||||
- local: package_reference/big_modeling
|
||||
title: Working with large models
|
||||
- local: package_reference/inference
|
||||
title: Distributed inference with big models
|
||||
- local: package_reference/kwargs
|
||||
title: Kwargs handlers
|
||||
- local: package_reference/fp8
|
||||
title: FP8 Functionality
|
||||
- local: package_reference/utilities
|
||||
title: Utility functions and classes
|
||||
- local: package_reference/megatron_lm
|
||||
|
||||
docs/source/basic_tutorials/execution.md (new file, 128 lines)
@ -0,0 +1,128 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Execution process
|
||||
|
||||
When working with distributed training systems, it is important to manage how and when processes are executed across GPUs. Some processes are completed faster than others, and some processes shouldn't begin if others haven't finished yet. Accelerate provides tools for orchestrating when processes are executed to ensure everything remains synchronized across all devices.
|
||||
|
||||
This tutorial will teach you how to execute a process on only one machine and how to delay execution until all processes have reached a certain point.
|
||||
|
||||
## Execute on one process
|
||||
|
||||
Certain code only needs to be run once on a given machine, such as printing a log statement or only displaying one progress bar on the local main process.
|
||||
|
||||
<hfoptions id="local-execution">
|
||||
<hfoption id="statements">
|
||||
|
||||
You should use `accelerator.is_local_main_process` to indicate code that should only be executed once.
|
||||
|
||||
```py
|
||||
from tqdm.auto import tqdm
|
||||
|
||||
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
|
||||
```
|
||||
|
||||
You could also wrap a statement with `accelerator.is_local_main_process`.
|
||||
|
||||
> [!TIP]
|
||||
> For standalone `print` statements that aren't wrapped in `accelerator.is_local_main_process`, replace `print` with Accelerate's [`~Accelerator.print`] method to only print once per process.
|
||||
|
||||
```py
|
||||
if accelerator.is_local_main_process:
|
||||
print("Accelerate is the best")
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="function">
|
||||
|
||||
For a function that should only be executed once, use [`~Accelerator.on_local_main_process`].
|
||||
|
||||
```py
|
||||
@accelerator.on_local_main_process
|
||||
def do_my_thing():
|
||||
"Something done once per server"
|
||||
do_thing_once_per_server()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
You could also direct Accelerate to execute code once across *all processes* regardless of the number of machines. This is useful if you're uploading a final model to the Hub.
|
||||
|
||||
<hfoptions id="main-execution">
|
||||
<hfoption id="statement">
|
||||
|
||||
You should use `accelerator.is_main_process` to indicate code that should only be executed once across all processes.
|
||||
|
||||
```py
|
||||
if accelerator.is_main_process:
|
||||
repo.push_to_hub()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="function">
|
||||
|
||||
For a function that should only be executed once across all processes, use [`~Accelerator.on_main_process`].
|
||||
|
||||
```py
|
||||
@accelerator.on_main_process
|
||||
def do_my_thing():
|
||||
"Something done once per server"
|
||||
do_thing_once()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## Execute on a specific process
|
||||
|
||||
Accelerate can also help you execute functions that should only be executed on a specific process or a local process index.
|
||||
|
||||
<hfoptions id="specific-execution">
|
||||
<hfoption id="specific process">
|
||||
|
||||
Use the [`~Accelerator.on_process`] method and specify the process index to execute a function on.
|
||||
|
||||
```py
|
||||
@accelerator.on_process(process_index=0)
|
||||
def do_my_thing():
|
||||
"Something done on process index 0"
|
||||
do_thing_on_index_zero()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="local process">
|
||||
|
||||
Use the [`~Accelerator.on_local_process`] method and specify the local process index to execute a function on.
|
||||
|
||||
```py
|
||||
@accelerator.on_local_process(local_process_idx=0)
|
||||
def do_my_thing():
|
||||
"Something done on process index 0 on each server"
|
||||
do_thing_on_index_zero_on_each_server()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## Defer execution
|
||||
|
||||
When you run your script on several GPUs at the same time, some code may be executed faster than others. You might need to wait for all processes to reach a certain point before executing the next set of instructions. For instance, you shouldn’t save a model before making sure every process is done with training.
|
||||
|
||||
To do this, add [`~Accelerator.wait_for_everyone`] in your code. This blocks all processes that have finished first from continuing until all remaining processes have reached the same point (this has no effect if you're running on a single GPU or CPU).
|
||||
|
||||
```py
|
||||
accelerator.wait_for_everyone()
|
||||
```
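
A common pattern, sketched below, is to block right before saving so that every process has finished training before the checkpoint is written. This assumes `model` was prepared earlier with this `accelerator`.

```py
# Wait for all processes to finish training, then save from the unwrapped model
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
accelerator.save(unwrapped_model.state_dict(), "model.pt")
```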
|
||||
@ -153,6 +153,15 @@ the below example enabling unbuffered stdout and stderr:
|
||||
python -u -m accelerate.commands.launch --num_processes=2 {script_name.py} {--arg1} {--arg2}
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
You can run your code on CPU as well! This is helpful for debugging and testing purposes on toy models and datasets.
|
||||
|
||||
```bash
|
||||
accelerate launch --cpu {script_name.py} {--arg1} {--arg2}
|
||||
```
|
||||
|
||||
</Tip>
|
||||
|
||||
## Why you should always use `accelerate config`
|
||||
|
||||
@ -200,3 +209,24 @@ Launching a script from the location of that custom yaml file looks like the fol
|
||||
```bash
|
||||
accelerate launch --config_file {path/to/config/my_config_file.yaml} {script_name.py} {--arg1} {--arg2} ...
|
||||
```
|
||||
|
||||
## Multi-node training
|
||||
Multi-node training with 🤗 Accelerate is similar to [multi-node training with torchrun](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). The simplest way to launch a multi-node training run is to do the following:
|
||||
|
||||
- Copy your codebase and data to all nodes (or place them on a shared filesystem).
|
||||
- Set up your Python packages on all nodes.
|
||||
- Run `accelerate config` on the main node first. After specifying the number of nodes, you will be asked for the rank of each node (0 for the main/master node), along with the IP address and port for the main process; the worker nodes need these to communicate with the main process. Afterwards, copy or send this config file to all of your nodes, changing `machine_rank` to 1, 2, 3, etc. so you don't have to rerun `accelerate config` on each one (or follow the equivalent directions for launching with `torchrun` instead).
|
||||
|
||||
Once you have done this, you can start your multi-node training run by running `accelerate launch` (or `torchrun`) on all nodes.
|
||||
|
||||
<Tip>
|
||||
The command must be run on all nodes for everything to start, not just from the main node. You can use something like SLURM or a different process executor to satisfy this requirement and call everything from a single command.
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
It is recommended to use the intranet IP of your main node over the public IP for better latency. This is the `192.168.x.x` or the `172.x.x.x` address you see when you run `hostname -I` on the main node.
|
||||
|
||||
</Tip>
|
||||
|
||||
To get a better idea about multi-node training, check out our example for [multi-node training with FSDP](https://huggingface.co/blog/ram-efficient-pytorch-fsdp).
|
||||
|
||||
@ -13,21 +13,11 @@ specific language governing permissions and limitations under the License.
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Migrating your code to 🤗 Accelerate
|
||||
# Add Accelerate to your code
|
||||
|
||||
This tutorial will detail how to easily convert existing PyTorch code to use 🤗 Accelerate!
|
||||
You'll see that by just changing a few lines of code, 🤗 Accelerate can perform its magic and get you on
|
||||
your way toward running your code on distributed systems with ease!
|
||||
Each distributed training framework has its own way of doing things, which can require writing a lot of custom code to adapt it to your PyTorch training code and training environment. Accelerate offers a friendly way to interface with these distributed training frameworks without having to learn the specific details of each one. Accelerate takes care of those details for you, so you can focus on the training code and scale it to any distributed training environment.
|
||||
|
||||
## The base training loop
|
||||
|
||||
To begin, write out a very basic PyTorch training loop.
|
||||
|
||||
<Tip>
|
||||
|
||||
We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand.
|
||||
|
||||
</Tip>
|
||||
In this tutorial, you'll learn how to adapt your existing PyTorch code with Accelerate and get on your way toward training on distributed systems with ease! You'll start with a basic PyTorch training loop (it assumes all the training objects like `model` and `optimizer` have been set up already) and progressively integrate Accelerate into it.
|
||||
|
||||
```python
|
||||
device = "cuda"
|
||||
@ -45,50 +35,44 @@ for batch in training_dataloader:
|
||||
scheduler.step()
|
||||
```
|
||||
|
||||
## Add in 🤗 Accelerate
|
||||
## Accelerator
|
||||
|
||||
The [`Accelerator`] is the main class for adapting your code to work with Accelerate. It knows about the distributed setup you're using such as the number of different processes and your hardware type. This class also provides access to many of the necessary methods for enabling your PyTorch code to work in any distributed training environment and for managing and executing processes across devices.
|
||||
|
||||
That's why you should always start by importing and creating an [`Accelerator`] instance in your script.
|
||||
|
||||
To start using 🤗 Accelerate, first import and create an [`Accelerator`] instance:
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
|
||||
accelerator = Accelerator()
|
||||
```
|
||||
[`Accelerator`] is the main force behind utilizing all the possible options for distributed training!
|
||||
|
||||
### Setting the right device
|
||||
|
||||
The [`Accelerator`] class knows the right device to move any PyTorch object to at any time, so you should
|
||||
change the definition of `device` to come from [`Accelerator`]:
|
||||
The [`Accelerator`] also knows which device to move your PyTorch objects to, so it is recommended to let Accelerate handle this for you.
|
||||
|
||||
```diff
|
||||
- device = 'cuda'
|
||||
- device = "cuda"
|
||||
+ device = accelerator.device
|
||||
model.to(device)
|
||||
```
|
||||
|
||||
### Preparing your objects
|
||||
## Prepare PyTorch objects
|
||||
|
||||
Next, you need to pass all of the important objects related to training into [`~Accelerator.prepare`]. 🤗 Accelerate will
|
||||
make sure everything is setup in the current environment for you to start training:
|
||||
Next, you need to prepare your PyTorch objects (model, optimizer, scheduler, etc.) for distributed training. The [`~Accelerator.prepare`] method takes care of placing your model in the appropriate container (like single GPU or multi-GPU) for your training setup, adapting the optimizer and scheduler to use Accelerate's [`~optimizer.AcceleratedOptimizer`] and [`~scheduler.AcceleratedScheduler`], and creating a new dataloader that can be sharded across processes.
|
||||
|
||||
```
|
||||
> [!TIP]
|
||||
> Accelerate only prepares objects that inherit from their respective PyTorch classes such as `torch.optim.Optimizer`.
|
||||
|
||||
The PyTorch objects are returned in the same order they're sent.
|
||||
|
||||
```py
|
||||
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
|
||||
model, optimizer, training_dataloader, scheduler
|
||||
)
|
||||
```
|
||||
These objects are returned in the same order they were sent in. By default when using `device_placement=True`, all of the objects that can be sent to the right device will be.
|
||||
If you need to work with data that isn't passed to [~Accelerator.prepare] but should be on the active device, you should pass in the `device` you made earlier.
|
||||
|
||||
<Tip warning={true}>
|
||||
## Training loop
|
||||
|
||||
Accelerate will only prepare objects that inherit from their respective PyTorch classes (such as `torch.optim.Optimizer`).
|
||||
|
||||
</Tip>
|
||||
|
||||
### Modifying the training loop
|
||||
|
||||
Finally, three lines of code need to be changed in the training loop. 🤗 Accelerate's DataLoader classes will automatically handle the device placement by default,
|
||||
and [`~Accelerator.backward`] should be used for performing the backward pass:
|
||||
Finally, remove the `to(device)` calls to the inputs and targets in the training loop because Accelerate's DataLoader classes automatically place them on the right device. You should also replace the usual `backward()` pass with Accelerate's [`~Accelerator.backward`] method which scales the gradients for you and uses the appropriate `backward()` method depending on your distributed setup (for example, DeepSpeed or Megatron).
|
||||
|
||||
```diff
|
||||
- inputs = inputs.to(device)
|
||||
@ -99,17 +83,13 @@ and [`~Accelerator.backward`] should be used for performing the backward pass:
|
||||
+ accelerator.backward(loss)
|
||||
```
|
||||
|
||||
With that, your training loop is now ready to use 🤗 Accelerate!
|
||||
|
||||
## The finished code
|
||||
|
||||
Below is the final version of the converted code:
|
||||
Put everything together and your new Accelerate training loop should now look like this!
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
|
||||
accelerator = Accelerator()
|
||||
|
||||
device = accelerator.device
|
||||
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
|
||||
model, optimizer, training_dataloader, scheduler
|
||||
)
|
||||
@ -124,6 +104,124 @@ for batch in training_dataloader:
|
||||
scheduler.step()
|
||||
```
|
||||
|
||||
## More Resources
|
||||
## Training features
|
||||
|
||||
To check out more ways on how to migrate to 🤗 Accelerate, check out our [interactive migration tutorial](https://huggingface.co/docs/accelerate/usage_guides/explore) which showcases other items that need to be watched for when using Accelerate and how to do so quickly.
|
||||
Accelerate offers additional features, like gradient accumulation, gradient clipping, mixed precision training and more, that you can add to your script to improve your training run. Let's explore these three features.
|
||||
|
||||
### Gradient accumulation
|
||||
|
||||
Gradient accumulation enables you to train with larger effective batch sizes by accumulating the gradients over multiple batches before updating the weights. This can be useful for getting around memory limitations. To enable this feature in Accelerate, specify the `gradient_accumulation_steps` parameter in the [`Accelerator`] class and add the [`~Accelerator.accumulate`] context manager to your script.
|
||||
|
||||
```diff
|
||||
+ accelerator = Accelerator(gradient_accumulation_steps=2)
|
||||
model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader)
|
||||
|
||||
for input, label in training_dataloader:
|
||||
+ with accelerator.accumulate(model):
|
||||
predictions = model(input)
|
||||
loss = loss_function(predictions, label)
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
### Gradient clipping
|
||||
|
||||
Gradient clipping is a technique to prevent "exploding gradients", and Accelerate offers:
|
||||
|
||||
* [`~Accelerator.clip_grad_value_`] to clip gradients to a minimum and maximum value
* [`~Accelerator.clip_grad_norm_`] to clip the total gradient norm to a maximum value (see the sketch below)
|
||||
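For instance, a minimal sketch building on the accumulation loop above (the `max_norm` value here is illustrative, not a recommendation):

```py
for input, label in training_dataloader:
    with accelerator.accumulate(model):
        predictions = model(input)
        loss = loss_function(predictions, label)
        accelerator.backward(loss)
        # only clip when gradients are actually being synchronized and applied
        if accelerator.sync_gradients:
            accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
```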
|
||||
### Mixed precision
|
||||
|
||||
Mixed precision accelerates training by using a lower precision data type like fp16 (half-precision) to calculate the gradients. For the best performance with Accelerate, the loss should be computed inside your model (like in Transformers models) because computations outside of the model are computed in full precision.
|
||||
|
||||
Set the mixed precision type to use in the [`Accelerator`], and then use the [`~Accelerator.autocast`] context manager to automatically cast the values to the specified data type.
|
||||
|
||||
> [!WARNING]
|
||||
> Accelerate enables automatic mixed precision, so [`~Accelerator.autocast`] is only needed if there are other mixed precision operations besides those performed on loss by [`~Accelerator.backward`] which already handles the scaling.
|
||||
|
||||
```diff
|
||||
+ accelerator = Accelerator(mixed_precision="fp16")
|
||||
+ with accelerator.autocast():
|
||||
    loss = complex_loss_function(outputs, target)
|
||||
```
|
||||
|
||||
## Save and load
|
||||
|
||||
Accelerate can also save and load a *model* once training is complete, or it can save the model and optimizer *state*, which can be useful for resuming training.
|
||||
|
||||
### Model
|
||||
|
||||
Once all processes are complete, unwrap the model with the [`~Accelerator.unwrap_model`] method before saving it because the [`~Accelerator.prepare`] method wrapped your model into the proper interface for distributed training. If you don't unwrap the model, saving the model state dictionary also saves any potential extra layers from the larger model and you won't be able to load the weights back into your base model.
|
||||
|
||||
You should use the [`~Accelerator.save_model`] method to unwrap and save the model state dictionary. This method can also save a model into sharded checkpoints or into the [safetensors](https://hf.co/docs/safetensors/index) format.
|
||||
|
||||
<hfoptions id="save">
|
||||
<hfoption id="single checkpoint">
|
||||
|
||||
```py
|
||||
accelerator.wait_for_everyone()
|
||||
accelerator.save_model(model, save_directory)
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
For models from the [Transformers](https://hf.co/docs/transformers/index) library, save the model with the [`~transformers.PreTrainedModel.save_pretrained`] method so that it can be reloaded with the [`~transformers.PreTrainedModel.from_pretrained`] method.
|
||||
|
||||
```py
|
||||
from transformers import AutoModel
|
||||
|
||||
unwrapped_model = accelerator.unwrap_model(model)
|
||||
unwrapped_model.save_pretrained(
|
||||
"path/to/my_model_directory",
|
||||
is_main_process=accelerator.is_main_process,
|
||||
save_function=accelerator.save,
|
||||
)
|
||||
|
||||
model = AutoModel.from_pretrained("path/to/my_model_directory")
|
||||
```
|
||||
|
||||
</Tip>
|
||||
|
||||
To load your weights, use the [`~Accelerator.unwrap_model`] method to unwrap the model first before loading the weights. All model parameters are references to tensors, so this loads your weights inside `model`.
|
||||
|
||||
```py
|
||||
unwrapped_model = accelerator.unwrap_model(model)
|
||||
path_to_checkpoint = os.path.join(save_directory, "pytorch_model.bin")
|
||||
unwrapped_model.load_state_dict(torch.load(path_to_checkpoint))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="sharded checkpoint">
|
||||
|
||||
Set `safe_serialization=True` to save the model in the safetensors format.
|
||||
|
||||
```py
|
||||
accelerator.wait_for_everyone()
|
||||
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
|
||||
```
|
||||
|
||||
To load a sharded checkpoint or a safetensors-formatted checkpoint, use the [`~accelerate.load_checkpoint_in_model`] method. This method allows you to load a checkpoint onto a specific device.
|
||||
|
||||
```py
|
||||
load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"": device})
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
### State
|
||||
|
||||
During training, you may want to save the current state of the model, optimizer, random generators, and potentially learning rate schedulers so they can be restored in the *same script*. You should add the [`~Accelerator.save_state`] and [`~Accelerator.load_state`] methods to your script to save and load states.
|
||||
|
||||
To further customize where and how states are saved through [`~Accelerator.save_state`], use the [`~utils.ProjectConfiguration`] class. For example, if `automatic_checkpoint_naming` is enabled, each saved checkpoint is stored at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.
|
||||
|
||||
Any other stateful items to be stored should be registered with the [`~Accelerator.register_for_checkpointing`] method so they can be saved and loaded. Every object passed to this method to be stored must have a `load_state_dict` and `state_dict` function.
|
||||
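A minimal sketch of how these pieces fit together (the directory name and the `StepCounter` object are made up for illustration):

```py
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

accelerator = Accelerator(
    project_dir="my_project",
    project_config=ProjectConfiguration(automatic_checkpoint_naming=True),
)

# any custom object works as long as it exposes `state_dict` and `load_state_dict`
class StepCounter:
    def __init__(self):
        self.step = 0

    def state_dict(self):
        return {"step": self.step}

    def load_state_dict(self, state):
        self.step = state["step"]

counter = StepCounter()
accelerator.register_for_checkpointing(counter)

accelerator.save_state()  # stored under my_project/checkpoints/checkpoint_0
accelerator.load_state("my_project/checkpoints/checkpoint_0")  # restores RNG states, prepared objects and `counter`
```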
|
||||
<Note>
|
||||
|
||||
If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, you can additionally pass `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`]. This extends Accelerate's DataLoader classes with a `load_state_dict` and `state_dict` function, and makes it so `Accelerator.save_state` and `Accelerator.load_state` also track how far into the training dataset it has read when persisting the model.
|
||||
|
||||
</Note>
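
A sketch of what enabling this looks like (assuming `torchdata>=0.8.0` is installed):

```py
from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

# prepared dataloaders become stateful, so save_state/load_state also track
# how far into the training dataset each epoch has progressed
dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=True)
accelerator = Accelerator(dataloader_config=dataloader_config)
```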
|
||||
|
||||
@ -186,7 +186,7 @@ Here is a basic training loop for the animal classification problem:
|
||||
|
||||
<Tip>
|
||||
|
||||
The code has been split up to allow for explainations on each section. A full version that can be copy and pasted will be available at the end
|
||||
The code has been split up to allow for explanations on each section. A full version that can be copy and pasted will be available at the end
|
||||
|
||||
</Tip>
|
||||
|
||||
@ -344,7 +344,7 @@ def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64):
|
||||
mean = mean.to(accelerator.device)
|
||||
std = std.to(accelerator.device)
|
||||
|
||||
# Intantiate the optimizer
|
||||
# Instantiate the optimizer
|
||||
optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25)
|
||||
|
||||
# Instantiate the learning rate scheduler
|
||||
@ -430,6 +430,17 @@ args = (model, "fp16", 42, 64)
|
||||
notebook_launcher(training_loop, args, num_processes=8)
|
||||
```
|
||||
|
||||
To launch the training process with elasticity, enabling fault tolerance, you can use the `elastic_launch` feature provided by PyTorch. This requires setting additional parameters such as `rdzv_backend` and `max_restarts`. Here is an example of how to use `notebook_launcher` with elastic capabilities:
|
||||
|
||||
```python
|
||||
notebook_launcher(
|
||||
training_loop,
|
||||
args,
|
||||
num_processes=2,
|
||||
max_restarts=3
|
||||
)
|
||||
```
|
||||
|
||||
As it's running, it will print the progress as well as state how many devices you ran on. This tutorial was run with two GPUs:
|
||||
|
||||
```python out
|
||||
@ -443,6 +454,12 @@ epoch 4: 94.71
|
||||
|
||||
And that's it!
|
||||
|
||||
Please note that [`notebook_launcher`] ignores the 🤗 Accelerate config file. To launch based on the config, use:
|
||||
|
||||
```bash
|
||||
accelerate launch
|
||||
```
|
||||
|
||||
## Debugging
|
||||
|
||||
A common issue when running the `notebook_launcher` is receiving a "CUDA has already been initialized" error. This usually stems
|
||||
|
||||
docs/source/basic_tutorials/tpu.md (new file)
@ -0,0 +1,38 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# TPU training
|
||||
|
||||
A [TPU (Tensor Processing Unit)](https://cloud.google.com/tpu/docs/intro-to-tpu) is a type of hardware specifically designed for training models efficiently. Accelerate supports TPU training, but there are a few things you should be aware of, namely graph compilation. This tutorial briefly discusses compilation, and for more details, take a look at the [Training on TPUs with Accelerate](../concept_guides/training_tpu) guide.
|
||||
|
||||
## Compilation
|
||||
|
||||
A TPU creates a graph of all the operations in the training step such as the forward pass, backward pass and optimizer step. This is why the first training step always takes a while because building and compiling this graph takes time. But once compilation is complete, it is cached and all subsequent steps are much faster.
|
||||
|
||||
The key is to avoid compiling your code again or else training is super slow. This means all your operations must be exactly the same:
|
||||
|
||||
* all tensors in your batches must have the same length (for example, no dynamic padding for NLP tasks; see the padding sketch after this list)
* your code must be static (for example, no layers with for loops that have different lengths depending on the input, such as an LSTM)
|
||||
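For example, a sketch of static padding for an NLP task, assuming a Hugging Face `tokenizer` and a list of strings `texts` (the `max_length` value is illustrative):

```py
batch = tokenizer(
    texts,
    padding="max_length",  # always pad to `max_length`, never to the longest sample in the batch
    max_length=128,
    truncation=True,
    return_tensors="pt",
)
```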
|
||||
## Weight tying
|
||||
|
||||
A common language model design is to tie the weights of the embedding and softmax layers. However, moving the model to a TPU (either yourself or passing it to the [`~Accelerator.prepare`] method) breaks the weight tying and you'll need to retie the weights.
|
||||
|
||||
To add special behavior (like weight tying) in your script for TPUs, check whether [`~Accelerator.distributed_type`] is `DistributedType.TPU` first. Then you can use the [`~transformers.PreTrainedModel.tie_weights`] method to retie the weights.
|
||||
|
||||
```py
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
model.tie_weights()
|
||||
```
|
||||
docs/source/basic_tutorials/troubleshooting.md (new file)
@ -0,0 +1,211 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Troubleshoot
|
||||
|
||||
This guide provides solutions to some issues you might encounter when using Accelerate. Not all errors are covered because Accelerate is an active library that is continuously evolving and there are many different use cases and distributed training setups. If the solutions described here don't help with your specific error, please take a look at the [Ask for help](#ask-for-help) section to learn where and how to get help.
|
||||
|
||||
## Logging
|
||||
|
||||
Logging can help you identify where an error is coming from. In a distributed setup with multiple processes, logging can be a challenge, but Accelerate provides the [`~accelerate.logging`] utility to ensure logs are synchronized.
|
||||
|
||||
To troubleshoot an issue, use [`~accelerate.logging`] instead of the standard Python [`logging`](https://docs.python.org/3/library/logging.html#module-logging) module. Set the verbosity level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`) with the `log_level` parameter, and then you can either:
|
||||
|
||||
1. Export the `log_level` as the `ACCELERATE_LOG_LEVEL` environment variable.
|
||||
2. Pass the `log_level` directly to `get_logger`.
|
||||
|
||||
For example, to set `log_level="INFO"`:
|
||||
|
||||
```py
|
||||
from accelerate.logging import get_logger
|
||||
|
||||
logger = get_logger(__name__, log_level="INFO")
|
||||
```
|
||||
|
||||
By default, the log is called on main processes only. To call it on all processes, pass `main_process_only=False`.
|
||||
If a log should be called on all processes and in order, also pass `in_order=True`.
|
||||
|
||||
```py
|
||||
from accelerate.logging import get_logger
|
||||
|
||||
logger = get_logger(__name__, log_level="DEBUG")
|
||||
# log all processes
|
||||
logger.debug("thing_to_log", main_process_only=False)
|
||||
# log all processes in order
|
||||
logger.debug("thing_to_log", main_process_only=False, in_order=True)
|
||||
```
|
||||
|
||||
## Hanging code and timeout errors
|
||||
|
||||
There can be many reasons why your code is hanging. Let's take a look at how to solve some of the most common issues that can cause your code to hang.
|
||||
|
||||
### Mismatched tensor shapes
|
||||
|
||||
Mismatched tensor shapes is a common issue that can cause your code to hang for a significant amount of time on a distributed setup.
|
||||
|
||||
When running scripts in a distributed setup, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are necessary to grab tensors across devices to collectively perform operations on them. These (and other) functions rely on `torch.distributed` to perform a `gather` operation, which requires tensors to have the **exact same shape** across all processes. When the tensor shapes don't match, your code hangs and you'll eventually hit a timeout exception.
|
||||
|
||||
You can use Accelerate's operational debug mode to immediately catch this issue. We recommend enabling this mode during the `accelerate config` setup, but you can also enable it from the CLI, as an environment variable, or by manually editing the `config.yaml` file.
|
||||
|
||||
<hfoptions id="mismatch">
|
||||
<hfoption id="CLI">
|
||||
|
||||
```bash
|
||||
accelerate launch --debug {my_script.py} --arg1 --arg2
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="environment variable">
|
||||
|
||||
If enabling debug mode as an environment variable, you don't need to call `accelerate launch`.
|
||||
|
||||
```bash
|
||||
ACCELERATE_DEBUG_MODE="1" torchrun {my_script.py} --arg1 --arg2
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="config.yaml">
|
||||
|
||||
Add `debug: true` to your `config.yaml` file.
|
||||
|
||||
```yaml
|
||||
compute_environment: LOCAL_MACHINE
|
||||
debug: true
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
Once you enable debug mode, you should get a traceback that points to the tensor shape mismatch issue.
|
||||
|
||||
```py
|
||||
Traceback (most recent call last):
|
||||
File "/home/zach_mueller_huggingface_co/test.py", line 18, in <module>
|
||||
main()
|
||||
File "/home/zach_mueller_huggingface_co/test.py", line 15, in main
|
||||
broadcast_tensor = broadcast(tensor)
|
||||
File "/home/zach_mueller_huggingface_co/accelerate/src/accelerate/utils/operations.py", line 303, in wrapper
|
||||
accelerate.utils.operations.DistributedOperationException:
|
||||
|
||||
Cannot apply desired operation due to shape mismatches. All shapes across devices must be valid.
|
||||
|
||||
Operation: `accelerate.utils.operations.broadcast`
|
||||
Input shapes:
|
||||
- Process 0: [1, 5]
|
||||
- Process 1: [1, 2, 5]
|
||||
```
|
||||
|
||||
### Early stopping
|
||||
|
||||
For early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss), it may not be synchronized across all processes. As a result, a break can happen on process 0 but not on process 1 which will cause your code to hang indefinitely until a timeout occurs.
|
||||
|
||||
If you have early stopping conditionals, use the `set_trigger` and `check_trigger` methods to make sure all the processes are ended correctly.
|
||||
|
||||
```py
|
||||
# Assume `should_do_breakpoint` is a custom defined function that returns a conditional,
# and that conditional might be true only on process 1
if should_do_breakpoint(loss):
    accelerator.set_trigger()

# Later in the training script when we need to check for the breakpoint
if accelerator.check_trigger():
    break
|
||||
```
|
||||
|
||||
### Low kernel versions on Linux
|
||||
|
||||
On Linux with kernel version < 5.5, hanging processes have been reported. To avoid this problem, upgrade your system to a later kernel version.
|
||||
|
||||
### MPI
|
||||
|
||||
If your distributed CPU training job using MPI is hanging, ensure that you have
|
||||
[passwordless SSH](https://www.open-mpi.org/faq/?category=rsh#ssh-keys) setup (using keys) between the nodes. This means
|
||||
that for all nodes in your hostfile, you should be able to SSH from one node to another without being prompted for a password.
|
||||
|
||||
Next, try to run the `mpirun` command as a sanity check. For example, the command below should print out the
|
||||
hostnames for each of the nodes.
|
||||
|
||||
```bash
|
||||
mpirun -f hostfile -n {number of nodes} -ppn 1 hostname
|
||||
```
|
||||
|
||||
## CUDA Out-of-Memory
|
||||
|
||||
One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory". The entire script needs to be restarted and any progress is lost.
|
||||
|
||||
To address this problem, Accelerate provides the [`find_executable_batch_size`] utility that is heavily based on [toma](https://github.com/BlackHC/toma).
|
||||
This utility retries code that fails due to OOM (out-of-memory) conditions and automatically lowers batch sizes. For each OOM condition, the algorithm decreases the batch size by half and retries the code until it succeeds.
|
||||
|
||||
To use [`find_executable_batch_size`], restructure your training function to include an inner function with `find_executable_batch_size` and build your dataloaders inside it. At a minimum, this only takes 4 new lines of code.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
The inner function **must** take the batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for you. Any object (models, optimizers) that consumes CUDA memory and is passed to the [`Accelerator`] also **must** be declared inside the inner function.
|
||||
|
||||
</Tip>
|
||||
|
||||
```diff
|
||||
def training_function(args):
|
||||
accelerator = Accelerator()
|
||||
|
||||
+ @find_executable_batch_size(starting_batch_size=args.batch_size)
|
||||
+ def inner_training_loop(batch_size):
|
||||
+ nonlocal accelerator # Ensure they can be used in our context
|
||||
+ accelerator.free_memory() # Free all lingering references
|
||||
model = get_model()
|
||||
model.to(accelerator.device)
|
||||
optimizer = get_optimizer()
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
lr_scheduler = get_scheduler(
|
||||
optimizer,
|
||||
num_training_steps=len(train_dataloader)*num_epochs
|
||||
)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
train(model, optimizer, train_dataloader, lr_scheduler)
|
||||
validate(model, eval_dataloader)
|
||||
+ inner_training_loop()
|
||||
```
|
||||
|
||||
## Non-reproducible results between device setups
|
||||
|
||||
If you changed the device setup and observe different model performance, it is likely you didn't update your script when moving from one setup to another. Even if you're using the same script with the same batch size, the results will still be different on a TPU, multi-GPU, and single GPU.
|
||||
|
||||
For example, if you were training on a single GPU with a batch size of 16 and you move to a dual GPU setup, you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**.
|
||||
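A quick sanity check of that arithmetic (plain Python, not part of the Accelerate API; values are illustrative):

```py
single_gpu_batch_size = 16
num_processes = 2  # e.g. moving to a dual-GPU setup

per_device_batch_size = single_gpu_batch_size // num_processes  # 8
effective_batch_size = per_device_batch_size * num_processes    # still 16
```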
|
||||
To make sure you can reproduce the results between the setups, make sure to use the same seed, adjust the batch size accordingly, and consider scaling the learning rate.
|
||||
|
||||
For more details and a quick reference for batch sizes, check out the [Comparing performance between different device setups](../concept_guides/performance) guide.
|
||||
|
||||
## Performance issues on different GPUs
|
||||
|
||||
If your multi-GPU setup consists of different GPUs, you may encounter some performance issues:
|
||||
|
||||
- There may be an imbalance in GPU memory between the GPUs. In this case, the GPU with the smaller memory will limit the batch size or the size of the model that can be loaded onto the GPUs.
|
||||
- If you are using GPUs with different performance profiles, the performance will be driven by the slowest GPU you are using because the other GPUs will have to wait for it to complete its workload.
|
||||
|
||||
Vastly different GPUs within the same setup can lead to performance bottlenecks.
|
||||
|
||||
## Ask for help
|
||||
|
||||
If none of the solutions and advice here helped resolve your issue, you can always reach out to the community and Accelerate team for help.
|
||||
|
||||
- Ask for help on the Hugging Face forums by posting your question in the [🤗 Accelerate category](https://discuss.huggingface.co/c/accelerate/18). Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved!
|
||||
|
||||
- Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you.
|
||||
|
||||
- Create an Issue on the 🤗 Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you think you've found a bug related to the library. Include context regarding the bug and details about your distributed setup to help us better figure out what's wrong and how we can fix it.
|
||||
@ -154,7 +154,7 @@ By passing `device_map="auto"`, we tell 🤗 Accelerate to determine automatical
|
||||
#### `no_split_module_classes`
|
||||
|
||||
This parameter will indicate that some of the modules with the name `"Block"` should not be split across different devices. You should set here all blocks that
|
||||
include a residutal connection of some kind.
|
||||
include a residual connection of some kind.
|
||||
|
||||
|
||||
#### The `device_map`
|
||||
@ -295,11 +295,44 @@ device_map = {"block1": 0, "block2.linear1": 1, "block2.linear2": 1}
|
||||
|
||||
</Tip>
|
||||
|
||||
## CPU offload only
|
||||
|
||||
If you want to offload your model on CPU, you can use [`cpu_offload`]. As a result, all parameters of the model will be offloaded and only one copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that state dict and put on the execution device and passed as they are needed, then offloaded again.
|
||||
|
||||
```python
|
||||
cpu_offload(model, execution_device)
|
||||
```
|
||||
|
||||
You can also use [`cpu_offload_with_hook`]. This function offloads a model to the CPU and puts it back on the execution device when executed. The difference with [`cpu_offload`] is that the model stays on the execution device after the forward pass and is only offloaded again when the `offload` method of the returned `hook` is called. Furthermore, [`cpu_offload_with_hook`] is more performant but saves less memory. It is useful for pipelines running a model in a loop:
|
||||
|
||||
```python
|
||||
model_1, hook_1 = cpu_offload_with_hook(model_1, execution_device)
|
||||
model_2, hook_2 = cpu_offload_with_hook(model_2, execution_device, prev_module_hook=hook_1)
|
||||
model_3, hook_3 = cpu_offload_with_hook(model_3, execution_device, prev_module_hook=hook_2)
|
||||
|
||||
hid_1 = model_1(input)
|
||||
for i in range(50):
    # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
    hid_2 = model_2(hid_1)
    # model2 is offloaded to the CPU just before this forward.
    hid_3 = model_3(hid_2)
|
||||
|
||||
# For model3, you need to manually call the hook offload method.
|
||||
hook_3.offload()
|
||||
```
|
||||
|
||||
## Disk offload only
|
||||
|
||||
To perform disk offload, you can use [`disk_offload`]. As a result, all parameters of the model will be offloaded as memory-mapped arrays in a given folder. During the forward pass, parameters will be accessed from that folder and put on the execution device as they are needed, then offloaded again.
|
||||
|
||||
```python
|
||||
disk_offload(model, offload_dir, execution_device)
|
||||
```
|
||||
|
||||
## Limits and further development
|
||||
|
||||
We are aware of the current limitations in the API:
|
||||
|
||||
- While this could theoretically work on just one CPU with potential disk offload, you need at least one GPU to run this API. This will be fixed in further development.
|
||||
- [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) tries to maximize GPU and CPU RAM it sees available when you execute it. While PyTorch is very good at managing GPU RAM efficiently (and giving it back when not needed), it's not entirely true with Python and CPU RAM. Therefore, an automatically computed device map might be too intense on the CPU. Move a few modules to the disk device if you get crashes due to a lack of RAM.
|
||||
- [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) attributes devices sequentially (to avoid moving things back and forth) so if your first layer is bigger than the size of the GPU you have, it will end up with everything on the CPU/Disk.
|
||||
- [`load_checkpoint_and_dispatch`] and [`load_checkpoint_in_model`] do not perform any check on the correctness of your state dict compared to your model at the moment (this will be fixed in a future version), so you may get some weird errors if trying to load a checkpoint with mismatched or missing keys.
|
||||
|
||||
docs/source/concept_guides/fsdp_and_deepspeed.md (new file)
@ -0,0 +1,192 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Moving between FSDP And DeepSpeed
|
||||
|
||||
🤗 Accelerate offers flexibility of training frameworks, by integrating two extremely powerful tools for distributed training, namely [PyTorch FSDP](../usage_guides/fsdp) and [Microsoft DeepSpeed](../usage_guides/deepspeed). The aim of this tutorial is to draw parallels, as well as to outline potential differences, to empower the user to switch seamlessly between these two frameworks.
|
||||
|
||||
<Tip>
|
||||
|
||||
To switch between the frameworks, we recommend launching code with 🤗 `accelerate launch`, passing in the correct config file with `--config_file`, or passing in the respective arguments directly for [FSDP and DeepSpeed](../package_reference/cli#accelerate-launch).
|
||||
|
||||
Example 🤗 Accelerate configurations can be found here for [DeepSpeed](../usage_guides/deepspeed#accelerate-deepspeed-plugin) and [FSDP](../usage_guides/fsdp#how-it-works-out-of-the-box), or in the [example zoo under "Launch Configurations"](../usage_guides/explore)
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
This tutorial is for single-node, multi-GPU, scenarios only.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Configuring Functionalities
|
||||
|
||||
Model tensors are split into different GPUs in an attempt to scale up model sizes; this is termed *sharding* in FSDP, and *partitioning* in DeepSpeed. FSDP sharding and DeepSpeed ZeRO (partitioning) stages are configured by `--fsdp_sharding_strategy`, and `--zero_stage`, respectively. In particular, FSDP `FULL_SHARD` maps to DeepSpeed ZeRO stage `3`; see this [comprehensive mapping between FSDP sharding and DeepSpeed ZeRO settings](../usage_guides/fsdp#mapping-between-fsdp-sharding-strategies-and-deepspeed-zero-stages). The below table summarizes and groups similar settings:
|
||||
|
||||
Group | Framework | Configuration | Example | Restrictions (if any)
|
||||
--|--|--|--|--
|
||||
sharding / partitioning | FSDP<br>DeepSpeed | `--fsdp_sharding_strategy`<br>`--zero_stage` | `1` (`FULL_SHARD`) <br>`3` |
|
||||
offload | FSDP<br>DeepSpeed | `--fsdp_offload_params`<br>`--offload_param_device`<br>`--offload_optimizer_device` | `true`<br>`cpu`<br>`cpu` | all or nothing <br><br>
|
||||
model loading | FSDP<br>DeepSpeed | <span style="white-space:nowrap;">`--fsdp_cpu_ram_efficient_loading`</span><br>`--zero3_init_flag` | `true`<br>`true` | <br>only ZeRO 3
|
||||
efficient checkpointing | FSDP<br>DeepSpeed | `--fsdp_state_dict_type`<br>`--zero3_save_16bit_model` | `SHARDED_STATE_DICT`<br>`true` | <br>only ZeRO 3
|
||||
weights prefetching | FSDP<br><br>DeepSpeed | `--fsdp_forward_prefetch`<br>`--fsdp_backward_prefetch`<br>None | `true`<br>`BACKWARD_PRE` | <br><br>
|
||||
model | FSDP<br><br>DeepSpeed | `--fsdp_auto_wrap_policy`<br><span style="white-space:nowrap;">`--fsdp_transformer_layer_cls_to_wrap`</span><br>None | `TRANSFORMER_BASED_WRAP`<br><Layer Class> |<br>Usually not needed <br>Transparent to user.
|
||||
parameters summoning | FSDP<br>DeepSpeed | `--fsdp_use_orig_params`<br>None | `true` | required for `torch.compile`<br>Transparent to user
|
||||
parameters syncing | FSDP<br>DeepSpeed | `--fsdp_sync_module_states`<br>None | `true` |
|
||||
training | FSDP<br>DeepSpeed | None<br>`--gradient_accumulation_steps`<br>`--gradient_clipping` | <br>`auto`<br>`auto` | Transparent to user
|
||||
|
||||
For detailed descriptions of the above, refer to [🤗 `Accelerate` launch documentation](../package_reference/cli#accelerate-launch).
|
||||
|
||||
<Tip>
|
||||
|
||||
To access other DeepSpeed configurations, such as mixed precision settings,
|
||||
you need to pass in a `--deepspeed_config_file`, see the [documentation](../usage_guides/deepspeed#deepspeed-config-file).
|
||||
|
||||
DeepSpeed can also be configured via [`DeepSpeedPlugin`], e.g., `DeepSpeedPlugin.zero_stage` is the equivalent of `--zero_stage`, and `DeepSpeedPlugin.hf_ds_config` can be used to pass `--deepspeed_config_file`.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
FSDP can also be configured via [`FullyShardedDataParallelPlugin`], e.g., `FullyShardedDataParallelPlugin.sharding_strategy` is the equivalent of `--fsdp_sharding_strategy`; a short sketch of the plugin route follows these tips.
|
||||
|
||||
</Tip>
|
||||
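As a rough sketch of that plugin route (argument values are illustrative, and DeepSpeed must be installed for the DeepSpeed path):

```py
from accelerate import Accelerator, DeepSpeedPlugin

# DeepSpeedPlugin.zero_stage mirrors the --zero_stage launch flag
deepspeed_plugin = DeepSpeedPlugin(zero_stage=3, gradient_accumulation_steps=2)
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)

# the FSDP equivalent would be constructed with FullyShardedDataParallelPlugin
# and passed to Accelerator(fsdp_plugin=...) instead
```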
|
||||
### Checkpointing
|
||||
|
||||
Do note that FSDP can be configured via `--fsdp_state_dict_type` to save either full or sharded checkpoints.
|
||||
|
||||
<Tip>
|
||||
|
||||
For DeepSpeed Zero3, one could pass a `--zero3_save_16bit_model true`, which conveniently consolidates the model to a single rank and saves; this is the FSDP equivalent of `fsdp_state_dict_type: FULL_STATE_DICT`.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
For large models, consolidating the model to a single rank can be very slow.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
For quicker checkpointing, for FSDP use `fsdp_state_dict_type: SHARDED_STATE_DICT`, and for DeepSpeed Zero3 [use the `zero_to_fp32.py` script to post-convert sharded checkpoints](https://www.deepspeed.ai/tutorials/zero/#extracting-weights).
|
||||
|
||||
|
||||
</Tip>
|
||||
|
||||
### Offloading
|
||||
|
||||
FSDP only allows *all-or-nothing* offload (i.e., either offload parameters, gradients, and optimizer, or keep them all in GPU), but DeepSpeed can offload parameters and optimizer differently. Furthermore, DeepSpeed also supports [offloading to NVME](https://www.deepspeed.ai/docs/config-json/#parameter-offloading).
|
||||
|
||||
### Prefetching
|
||||
|
||||
FSDP allows two prefetching configurations `--fsdp_forward_prefetch` and `--fsdp_backward_prefetch` to improve overlap of comms / computation at a cost of extra memory, see [FSDP documentation](https://pytorch.org/docs/stable/fsdp.html).
|
||||
For DeepSpeed, prefetching is turned on when needed, depending on certain hyper-params like `stage3_param_persistence_threshold`, `stage3_max_reuse_distance`, etc., [that can be configured for Zero3](https://www.deepspeed.ai/docs/config-json/#parameter-offloading); 🤗 `accelerate` may set these hyper-params automatically if you don't set them explicitly in the DeepSpeed config file.
|
||||
|
||||
<Tip>
|
||||
|
||||
For FSDP set `fsdp_backward_prefetch: BACKWARD_PRE` for improved throughputs if memory allows.
|
||||
|
||||
</Tip>
|
||||
|
||||
### Model Loading
|
||||
|
||||
While FSDP requires an explicit `--fsdp_cpu_ram_efficient_loading true` to activate efficient model loading, 🤗 `transformers` activates a similar feature whenever DeepSpeed Zero3 is used.
|
||||
|
||||
<Tip>
|
||||
|
||||
For FSDP, whenever setting `--fsdp_cpu_ram_efficient_loading true`, 🤗 `accelerate` will automatically set `sync_module_states` to true.
|
||||
For RAM-efficient loading, the weights will be loaded only on a single rank, and thus require `sync_module_states` to broadcast the weights to the other ranks.
|
||||
|
||||
</Tip>
|
||||
|
||||
### Model
|
||||
|
||||
FSDP requires an explicit `--fsdp_auto_wrap_policy` for the algorithm to decide how to schedule the all-gather and reduce-scatter operations. But for DeepSpeed this is transparent to the user.
|
||||
|
||||
<Tip>
|
||||
|
||||
For FSDP, simply set `fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP`. With the latest [`transformers`] versions, we try our best to figure out the suitable `fsdp_transformer_layer_cls_to_wrap` for HF transformers models. However, if you get an error regarding it, please specify this.
|
||||
|
||||
</Tip>
|
||||
|
||||
### Parameters Summoning
|
||||
|
||||
FSDP requires an explicit `--fsdp_use_orig_params` flag if using `torch.compile`, see [the PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html#module-torch.distributed.fsdp). For DeepSpeed this is transparent to the user.
|
||||
|
||||
<Tip>
|
||||
|
||||
For FSDP, when using `torch.compile` please set `fsdp_use_orig_params: True`.
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
## Training
|
||||
|
||||
Deepspeed requires explicit `--gradient_accumulation_steps` and `--gradient_clipping` flags. For FSDP this is transparent to the user.
|
||||
|
||||
<Tip>
|
||||
|
||||
When using DeepSpeed, set `gradient_accumulation_steps: "auto"` and `gradient_clipping: "auto"` to automatically pick up values set in the [`Accelerator`] or [`TrainingArguments`] (if using `transformers`).
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
## On Differences in Data Precision Handling
|
||||
|
||||
To discuss how data precision is handled in both FSDP and DeepSpeed, it is instructive to first give an overview of how model parameters are handled in these frameworks. Before the model / optimizer parameters are distributed across GPUs, parameter preparation is involved to first "flatten" them to one-dimensional [`torch.Tensor`](https://pytorch.org/docs/stable/tensors.html#torch-tensor). The implementation of FSDP / DeepSpeed varies with respect to the `dtype` in which these "flattened" parameters are stored, and there are ramifications with regard to how [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) allocates its `dtype`s. The table below outlines the processes for both frameworks; the "Local" column indicates the process occurring at a per-GPU level, therefore any memory overheads from upcasting should be understood to be amortized by the number of GPUs used.
|
||||
|
||||
<Tip>
|
||||
|
||||
As a rule of thumb, for stable training with automatic mixed precision, all the trainable parameters have to be in `torch.float32`.
|
||||
|
||||
</Tip>
|
||||
|
||||
Process | Local | Framework | Details
|
||||
--|--|--|--
|
||||
Loading, i.e., [`AutoModel.from_pretrained(..., torch_dtype=torch_dtype)`] |
|
||||
Preparation, i.e., creation of "flat params" | ✅ | FSDP<br>DeepSpeed | created in `torch_dtype`.<br> disregards `torch_dtype`, created in `float32`.
|
||||
Optimizer initialization | ✅ | FSDP<br>DeepSpeed | creates parameters in `torch_dtype`<br> creates parameters in `float32`
|
||||
Training Step, i.e, forward, backward, reduction | | FSDP<br>DeepSpeed | follows [`MixedPrecision`](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.MixedPrecision)<br> follows `deepspeed_config_file` mixed precision settings.
|
||||
Optimizer (Pre-Step) | ✅ | FSDP<br>DeepSpeed | upcasting (if any) to `torch_dtype`<br>upcasted to `float32`
|
||||
Optimizer (Actual Step) | ✅ | FSDP<br>DeepSpeed | occurs in `torch_dtype` <br> occurs in `float32`.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Therefore, when using DeepSpeed with a small number of GPUs, be aware of potentially significant memory overheads due to the upcasting during preparation.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
With FSDP, in the absence of mixed precision, it is possible to operate the [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) in low precision `torch_dtype`, which may be helpful when using a small number of GPUs.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
With mixed precision, FSDP and DeepSpeed will upcast in the model preparation step (c.f. table above). But do note that FSDP will then save checkpoints in the upcasted precision; Deepspeed may still save low precision checkpoints if `--zero3_save_16bit_model` is specified.
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
To clarify the above table, consider the concrete examples below; the optimizer pre-step and actual step are combined for brevity. With FSDP it is possible to operate in the two modes shown below, but DeepSpeed can only operate in one.
|
||||
|
||||
Framework | Model Loading (`torch_dtype`) | Mixed Precision | Preparation (Local) | Training | Optimizer (Local)
|
||||
--|--|--|--|--|--
|
||||
FSDP | bf16 | default (none) | bf16 | bf16 | bf16
|
||||
FSDP | bf16 | bf16 | fp32 | bf16 | fp32
|
||||
DeepSpeed | bf16 | bf16 | fp32 | bf16 | fp32
|
||||
@ -55,8 +55,8 @@ their gradients computed, collated, and updated before moving on to the next
|
||||
batch of data.
|
||||
When performing gradient accumulation, you accumulate `n` loss gradients and
|
||||
skip `optimizer.step()` until `n` batches have been reached. As all training
|
||||
processes only need to sychronize by the time `optimizer.step()` is called,
|
||||
without any modification to your training step, this neededless inter-process
|
||||
processes only need to synchronize by the time `optimizer.step()` is called,
|
||||
without any modification to your training step, this needless inter-process
|
||||
communication can cause a significant slowdown.
|
||||
|
||||
How can you avoid this overhead?
|
||||
@ -167,3 +167,18 @@ As you can see, if you are not careful about how you set up your gradient synchr
|
||||
|
||||
If you are worried about making sure everything is done properly, we highly recommend utilizing the [`~Accelerator.accumulate`] function and passing in
|
||||
`gradient_accumulation_steps` or `gradient_accumulation_plugin` to the [`Accelerator`] object so Accelerate can handle this for you.
|
||||
|
||||
### `no_sync` requires additional GPU memory when using FSDP
|
||||
|
||||
Be aware that not syncing gradients can have adverse effects while performing FSDP training. As it has been warned in `torch`, the [`no_sync` context manager for FSDP](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.no_sync) will require additional memory.
|
||||
|
||||
Therefore, in memory-intensive situations while using FSDP, we recommend setting `sync_each_batch` to `True` in the [`~utils.GradientAccumulationPlugin`] to disable `no_sync`, as sketched below.
|
||||
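A minimal sketch of that configuration (`num_steps=16` mirrors the table below and is purely illustrative):

```py
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# trade no_sync's memory overhead for extra gradient communication on every batch
plugin = GradientAccumulationPlugin(num_steps=16, sync_each_batch=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
```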
|
||||
See the example below where we fine-tune Mixtral (47B parameters) on 8 A100-80GB GPUs. We see that even for a modest `gradient_accumulation_steps=2` we quickly go out-of-memory (OOM) if `no_sync` is enabled. Again, this is due to the additional memory overheads of FSDP's `no_sync`. However, if `no_sync` is disabled via `sync_each_batch=True`, then the memory consumption for `gradient_accumulation_steps=16` reverts to that of `gradient_accumulation_steps=1`.
|
||||
|
||||
| Model | `no_sync` (accum=1) | `no_sync` (accum=2) | `no_sync` disabled (accum=16)
|
||||
| :-------------: | :-----------------: | :-----------------: | :-----------------:
|
||||
mixtral 8x7B | 69G | OOM | 69G
|
||||
|
||||
> [!WARNING]
|
||||
> Disabling `no_sync` means there _will be slowdown_ due to the extra data syncs, as explained by the earlier sections of this guide.
|
||||
docs/source/concept_guides/internal_mechanism.md (new file)
@ -0,0 +1,78 @@
|
||||
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# 🤗 Accelerate's internal mechanisms
|
||||
|
||||
Internally, 🤗 Accelerate works by first analyzing the environment in which the script is launched to determine which
|
||||
kind of distributed setup is used, how many different processes there are and which one the current script is in. All
|
||||
that information is stored in the [`~AcceleratorState`].
|
||||
|
||||
This class is initialized the first time you instantiate an [`~Accelerator`] as well as performs any specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of [`~state.AcceleratorState`]. (The same can also be done with the [`PartialState`], a more barebones version it inherits from.)
|
||||
|
||||
Then, when calling [`~Accelerator.prepare`], the library:
|
||||
|
||||
- wraps your model(s) in the container adapted for the distributed setup,
|
||||
- wraps your optimizer(s) in an [`~optimizer.AcceleratedOptimizer`],
|
||||
- wraps your scheduler(s) in an [`~scheduler.AcceleratedScheduler`]
|
||||
- creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`] or [`~data_loader.DataLoaderDispatcher`]
|
||||
|
||||
While the model(s), optimizer(s), and scheduler(s) are just put in simple wrappers, the dataloader(s) are re-created. This is mostly
|
||||
because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created and the
|
||||
library handles the sharding of your data between processes by changing that `batch_sampler` to yield every other
|
||||
`num_processes` batches (if enabled).
|
||||
|
||||
The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality:
|
||||
|
||||
- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any
|
||||
randomization (like shuffling) is done the exact same way across processes.
|
||||
- it puts the batches on the proper device before yielding them (unless you have opted out of
|
||||
`device_placement=True`).
|
||||
|
||||
The [`~data_loader.DataLoaderDispatcher`] subclass differs from the [`~data_loader.DataLoaderShard`] in that when iterating through the `DataLoader`, the data all starts from process 0 and is *then* split and sent off to each process rather than this happening at the dataset level.
|
||||
|
||||
The random number generator synchronization will by default synchronize:
|
||||
|
||||
- the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6
|
||||
- the main random number generator in PyTorch <=1.5.1
|
||||
|
||||
You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main
|
||||
[`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid
|
||||
setting the same seed in the main random number generator in all processes.
|
||||
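For example, a minimal sketch that only synchronizes the samplers' local `torch.Generator` objects:

```py
from accelerate import Accelerator

# skip seeding the global torch RNG identically on every process
accelerator = Accelerator(rng_types=["generator"])
```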
|
||||
<Tip warning={true}>
|
||||
|
||||
Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random
|
||||
artifacts you could have in your dataset (like random data augmentation) in the sense that all processes will get
|
||||
the same random numbers from the torch random modules (so will apply the same random data augmentation if it's
|
||||
controlled by torch).
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local `torch.Generator` object (in PyTorch >= 1.6); see the traditional `RandomSampler` as an example.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Note>
|
||||
|
||||
If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, and you have passed `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`], these classes will directly inherit from `StatefulDataLoader` instead, and maintain a `state_dict`.
|
||||
|
||||
</Note>
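A minimal sketch of opting in, assuming `torchdata>=0.8.0` is installed:

```python
from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=True)
accelerator = Accelerator(dataloader_config=dataloader_config)

# Dataloaders returned by `accelerator.prepare(...)` will then expose
# `state_dict()` / `load_state_dict()` for mid-epoch checkpointing.
```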
|
||||
|
||||
For more details about the internals, see the [Internals page](package_reference/torch_wrappers).
|
||||
docs/source/concept_guides/low_precision_training.md (new file, 74 lines)
@ -0,0 +1,74 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Low Precision Training Methods
|
||||
|
||||
The release of new kinds of hardware led to the emergence of new training paradigms that better utilize them. Currently, this is in the form of training
|
||||
in 8-bit precision using packages such as [TransformersEngine](https://github.com/NVIDIA/TransformerEngine) (TE) or [MS-AMP](https://github.com/Azure/MS-AMP/tree/main).
|
||||
|
||||
For an introduction to the topics discussed here, we recommend reviewing the [low-precision usage guide](../usage_guides/low_precision_training), as this documentation will reference it regularly.
|
||||
|
||||
## A Quick Chart
|
||||
|
||||
Below is a quick chart from the MS-AMP documentation showing the different bit-precisions for each solution during training:
|
||||
|
||||
Optimization Level | Computation(GEMM) | Comm | Weight | Master Weight | Weight Gradient | Optimizer States
|
||||
-- | -- | -- | -- | -- | -- | --
|
||||
FP16 AMP | FP16 | FP32 | FP32 | N/A | FP32 | FP32+FP32
|
||||
Nvidia TE | FP8 | FP32 | FP32 | N/A | FP32 | FP32+FP32
|
||||
MS-AMP O1 | FP8 | FP8 | FP16 | N/A | FP8 | FP32+FP32
|
||||
MS-AMP O2 | FP8 | FP8 | FP16 | N/A | FP8 | FP8+FP16
|
||||
MS-AMP O3 | FP8 | FP8 | FP8 | FP16 | FP8 | FP8+FP16
|
||||
|
||||
## `TransformersEngine`
|
||||
|
||||
`TransformersEngine` is the first solution for training in 8-bit floating point. It works by using drop-in replacements for certain layers in a model, which utilize its FP8 engine to reduce the number of bits (such as from 32 to 8) without degrading the final accuracy of the model.
|
||||
|
||||
Specifically, 🤗 Accelerate will find and replace the following layers with `TransformersEngine` versions:
|
||||
|
||||
* `nn.LayerNorm` with `te.LayerNorm`
* `nn.Linear` with `te.Linear`
|
||||
|
||||
As a result, we wind up with a model that has most of its layers in BF16, while some layers are in FP8, reducing some of the memory usage.
|
||||
|
||||
Anecdotally, we have noticed that performance gains don't really start showing when using `TransformerEngine` until a large majority of the layers
in the model are made up of those two replaceable layers. As a result, only larger models, with parameter counts of around a few billion and upwards, have shown performance improvements.
|
||||
|
||||
`TransformerEngine` can receive many different arguments that customize how it performs FP8 calculations. A full list of the arguments and what they do is available below:
|
||||
|
||||
* `margin`: The margin to use for the gradient scaling.
|
||||
* `interval`: The interval to use for how often the scaling factor is recomputed.
|
||||
* `fp8_format`: The format to use for the FP8 recipe. Must be one of `HYBRID` or `E4M3`. (Generally `HYBRID` for training, `E4M3` for evaluation)
* `amax_history_len`: The length of the history to use for the scaling factor computation.
|
||||
* `amax_compute_algo`: The algorithm to use for the scaling factor computation. Must be one of `max` or `most_recent`.
|
||||
* `override_linear_precision`: Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision.
|
||||
|
||||
You can customize each of these as part of [`utils.FP8RecipeKwargs`] to help optimize performance of your models.
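For example, a sketch of passing a customized TE recipe through the [`Accelerator`]; the specific values below are illustrative, not recommendations:

```python
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

fp8_kwargs = FP8RecipeKwargs(
    backend="TE",
    fp8_format="HYBRID",        # HYBRID for training, E4M3 for evaluation
    amax_history_len=32,
    amax_compute_algo="max",
)
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[fp8_kwargs])
```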
|
||||
|
||||
As the chart mentioned earlier shows, TE simply casts the computation layers into FP8, while everything else is kept in FP32. As a result, this winds up utilizing the most memory, but does so with the benefit of guaranteeing the least amount of loss in end accuracy during training.
|
||||
|
||||
## `MS-AMP`
|
||||
|
||||
MS-AMP takes a different approach from `TransformersEngine` by providing three different optimization levels that convert more operations to FP8 or FP16.
|
||||
|
||||
* The base optimization level (`O1`) passes communication of the weights (such as in DDP) in FP8, stores the weights of the model in FP16, and leaves the optimizer states in FP32. The main benefit of this optimization level is that the communication bandwidth is essentially halved. Additionally, more GPU memory is saved since half of everything is cast to FP8 and the weights are cast to FP16. Notably, both optimizer states remain in FP32.
|
||||
|
||||
* The second optimization level (`O2`) improves upon this by also reducing the precision of the optimizer states: one is kept in FP8 while the other is in FP16. Generally, this has been shown to provide a net gain of no degradation in end accuracy, increased training speed, and reduced memory, as now every state is either in FP16 or FP8.
|
||||
|
||||
* Finally, MS-AMP has a third optimization level (`O3`) which helps during DDP scenarios such as DeepSpeed. The weights of the model in memory are fully cast to FP8, and the master weights are now stored in FP16. This reduces memory by the highest factor, as now not only is almost everything in FP8, only two states are left in FP16. Currently, only DeepSpeed versions up through 0.9.2 are supported, so this capability is not included in the 🤗 Accelerate integration.
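A sketch of selecting an MS-AMP optimization level through Accelerate; the `O2` choice below is illustrative:

```python
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

fp8_kwargs = FP8RecipeKwargs(backend="MSAMP", opt_level="O2")
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[fp8_kwargs])
```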
|
||||
|
||||
## Combining the two
|
||||
|
||||
More experiments need to be performed but it's been noted that combining both MS-AMP and TransformersEngine can lead to the highest throughput by relying on NVIDIA's optimized FP8 operators and utilizing how MS-AMP reduces the memory overhead.
|
||||
@ -45,7 +45,7 @@ Why is this important? Under the hood this will set **5** different seed setting
|
||||
torch.manual_seed(seed)
|
||||
torch.cuda.manual_seed_all(seed)
|
||||
# ^^ safe to call this function even if cuda is not available
|
||||
if is_tpu_available():
|
||||
if is_torch_xla_available():
|
||||
xm.set_rng_state(seed)
|
||||
```
|
||||
|
||||
@ -74,7 +74,7 @@ In this example, there are two GPUs for "Multi-GPU" and a TPU pod with 8 workers
|
||||
|
||||
## Learning Rates
|
||||
|
||||
As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/tlt-mi_archive/clara-train-sdk-v2.0/nvmidl/appendix/training_with_multiple_gpus.html)], the learning rate should be scaled *linearly* based on the number of devices present. The below
|
||||
As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/clara-train-sdk/pt/model.html#classification-models-multi-gpu-training)], the learning rate should be scaled *linearly* based on the number of devices present. The below
|
||||
snippet shows doing so with Accelerate:
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -36,7 +36,7 @@ Below is an example of a training function passed to the [`notebook_launcher`] i
|
||||
|
||||
<Tip>
|
||||
|
||||
This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate/simple_nlp_example.ipynb) with slight
|
||||
This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb) with slight
|
||||
modifications for the sake of simplicity
|
||||
|
||||
</Tip>
|
||||
|
||||
docs/source/imgs/profile_export.png (new binary file, 105 KiB, not shown)
@ -15,197 +15,12 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# Accelerator
|
||||
|
||||
The [`Accelerator`] is the main class provided by 🤗 Accelerate.
|
||||
It serves at the main entry point for the API.
|
||||
The [`Accelerator`] is the main class for enabling distributed training on any type of training setup. Read the [Add Accelerator to your code](../basic_tutorials/migration) tutorial to learn more about how to add the [`Accelerator`] to your script.
|
||||
|
||||
## Quick adaptation of your code
|
||||
|
||||
To quickly adapt your script to work on any kind of setup with 🤗 Accelerate just:
|
||||
|
||||
1. Initialize an [`Accelerator`] object (that we will call `accelerator` throughout this page) as early as possible in your script.
|
||||
2. Pass your dataloader(s), model(s), optimizer(s), and scheduler(s) to the [`~Accelerator.prepare`] method.
|
||||
3. Remove all the `.cuda()` or `.to(device)` calls from your code and let the `accelerator` handle the device placement for you.
|
||||
|
||||
<Tip>
|
||||
|
||||
Step three is optional, but considered a best practice.
|
||||
|
||||
</Tip>
|
||||
|
||||
4. Replace `loss.backward()` in your code with `accelerator.backward(loss)`
|
||||
5. Gather your predictions and labels before storing them or using them for metric computation using [`~Accelerator.gather`]
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Step five is mandatory when using distributed evaluation
|
||||
|
||||
</Tip>
|
||||
|
||||
In most cases this is all that is needed. The next section lists a few more advanced use cases and nice features;
for these, search your code for the corresponding statements and replace them with the matching methods of your `accelerator`:
|
||||
|
||||
## Advanced recommendations
|
||||
|
||||
### Printing
|
||||
|
||||
`print` statements should be replaced by [`~Accelerator.print`] to be printed once per process:
|
||||
|
||||
```diff
|
||||
- print("My thing I want to print!")
|
||||
+ accelerator.print("My thing I want to print!")
|
||||
```
|
||||
|
||||
### Executing processes
|
||||
|
||||
#### Once on a single server
|
||||
|
||||
For statements that should be executed once per server, use [`~Accelerator.is_local_main_process`]:
|
||||
|
||||
```python
|
||||
if accelerator.is_local_main_process:
|
||||
do_thing_once_per_server()
|
||||
```
|
||||
|
||||
A function can be wrapped with [`~Accelerator.on_local_main_process`] to achieve the same
behavior when the function is executed:
|
||||
|
||||
```python
|
||||
@accelerator.on_local_main_process
|
||||
def do_my_thing():
|
||||
"Something done once per server"
|
||||
do_thing_once_per_server()
|
||||
```
|
||||
|
||||
#### Only ever once across all servers
|
||||
|
||||
For statements that should only ever be executed once, use [`~Accelerator.is_main_process`]:
|
||||
|
||||
```python
|
||||
if accelerator.is_main_process:
|
||||
do_thing_once()
|
||||
```
|
||||
|
||||
A function can be wrapped with [`~Accelerator.on_main_process`] to achieve the same
behavior when the function is executed:
|
||||
|
||||
```python
|
||||
@accelerator.on_main_process
|
||||
def do_my_thing():
|
||||
"Something done once per server"
|
||||
do_thing_once()
|
||||
```
|
||||
|
||||
#### On specific processes
|
||||
|
||||
If a function should be run on a specific overall or local process index, there are similar decorators
to achieve this:
|
||||
|
||||
```python
|
||||
@accelerator.on_local_process(local_process_idx=0)
|
||||
def do_my_thing():
|
||||
"Something done on process index 0 on each server"
|
||||
do_thing_on_index_zero_on_each_server()
|
||||
```
|
||||
|
||||
```python
|
||||
@accelerator.on_process(process_index=0)
|
||||
def do_my_thing():
|
||||
"Something done on process index 0"
|
||||
do_thing_on_index_zero()
|
||||
```
|
||||
|
||||
### Synchronicity control
|
||||
|
||||
Use [`~Accelerator.wait_for_everyone`] to make sure all processes join that point before continuing (useful before a model save, for instance), as in the sketch below.
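A minimal sketch; the checkpoint directory name is illustrative:

```python
# Make sure every process has finished its work before saving.
accelerator.wait_for_everyone()
if accelerator.is_main_process:
    accelerator.save_model(model, "checkpoint_dir")
```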
|
||||
|
||||
### Saving and loading
|
||||
|
||||
```python
|
||||
model = MyModel()
|
||||
model = accelerator.prepare(model)
|
||||
```
|
||||
|
||||
Use [`~Accelerator.save_model`] instead of `torch.save` to save a model. It will remove all model wrappers added during the distributed process, get the state_dict of the model and save it. The state_dict will be in the same precision as the model being trained.
|
||||
|
||||
```diff
|
||||
- torch.save(state_dict, "my_state.pkl")
|
||||
+ accelerator.save_model(model, save_directory)
|
||||
```
|
||||
|
||||
[`~Accelerator.save_model`] can also save a model into sharded checkpoints or with safetensors format.
|
||||
Here is an example:
|
||||
|
||||
```python
|
||||
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
|
||||
```
|
||||
|
||||
#### 🤗 Transformers models
|
||||
|
||||
If you are using models from the [🤗 Transformers](https://huggingface.co/docs/transformers/) library, you can use the `.save_pretrained()` method.
|
||||
|
||||
```python
|
||||
from transformers import AutoModel
|
||||
|
||||
model = AutoModel.from_pretrained("bert-base-cased")
|
||||
model = accelerator.prepare(model)
|
||||
|
||||
# ...fine-tune with PyTorch...
|
||||
|
||||
unwrapped_model = accelerator.unwrap_model(model)
|
||||
unwrapped_model.save_pretrained(
|
||||
"path/to/my_model_directory",
|
||||
is_main_process=accelerator.is_main_process,
|
||||
save_function=accelerator.save,
|
||||
)
|
||||
```
|
||||
|
||||
This will ensure your model stays compatible with other 🤗 Transformers functionality like the `.from_pretrained()` method.
|
||||
|
||||
```python
|
||||
from transformers import AutoModel
|
||||
|
||||
model = AutoModel.from_pretrained("path/to/my_model_directory")
|
||||
```
|
||||
|
||||
### Operations
|
||||
|
||||
Use [`~Accelerator.clip_grad_norm_`] instead of `torch.nn.utils.clip_grad_norm_` and [`~Accelerator.clip_grad_value_`] instead of `torch.nn.utils.clip_grad_value_`, for example:
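A minimal sketch of clipping inside the training loop; the `max_norm` value is illustrative:

```python
accelerator.backward(loss)
# Only clip when gradients are actually being synchronized (e.g. outside
# of gradient accumulation's no-sync steps).
if accelerator.sync_gradients:
    accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
```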
|
||||
|
||||
### Gradient Accumulation
|
||||
|
||||
To perform gradient accumulation, use [`~Accelerator.accumulate`] and specify `gradient_accumulation_steps`.
This will also automatically ensure the gradients are synced or unsynced when training on
multiple devices, check if the step should actually be performed, and auto-scale the loss:
|
||||
|
||||
```diff
|
||||
- accelerator = Accelerator()
|
||||
+ accelerator = Accelerator(gradient_accumulation_steps=2)
|
||||
|
||||
for (input, label) in training_dataloader:
|
||||
+ with accelerator.accumulate(model):
|
||||
predictions = model(input)
|
||||
loss = loss_function(predictions, label)
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
#### GradientAccumulationPlugin
|
||||
[[autodoc]] utils.GradientAccumulationPlugin
|
||||
|
||||
|
||||
Instead of passing `gradient_accumulation_steps`, you can instantiate a `GradientAccumulationPlugin` and pass it to the [`Accelerator`]'s `__init__`
as `gradient_accumulation_plugin`. You can only pass one of `gradient_accumulation_plugin` or `gradient_accumulation_steps`; passing both will raise an error.
|
||||
```diff
|
||||
from accelerate.utils import GradientAccumulationPlugin
|
||||
|
||||
gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2)
|
||||
- accelerator = Accelerator()
|
||||
+ accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin)
|
||||
```
|
||||
|
||||
In addition to the number of steps, this also lets you configure whether or not to adjust your learning rate scheduler to account for the change in steps due to accumulation, as in the sketch below.
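For example, a sketch of also adjusting the scheduler; the number of steps is illustrative:

```python
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# `adjust_scheduler=True` steps the LR scheduler so it accounts for the
# reduced number of optimizer steps caused by accumulation.
plugin = GradientAccumulationPlugin(num_steps=4, adjust_scheduler=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
```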
|
||||
|
||||
## Overall API documentation:
|
||||
## Accelerator[[api]]
|
||||
|
||||
[[autodoc]] Accelerator
|
||||
|
||||
## Utilities
|
||||
|
||||
[[autodoc]] accelerate.utils.gather_object
|
||||
|
||||
@ -19,6 +19,7 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
[[autodoc]] big_modeling.init_empty_weights
|
||||
[[autodoc]] big_modeling.cpu_offload
|
||||
[[autodoc]] big_modeling.cpu_offload_with_hook
|
||||
[[autodoc]] big_modeling.disk_offload
|
||||
[[autodoc]] big_modeling.dispatch_model
|
||||
[[autodoc]] big_modeling.load_checkpoint_and_dispatch
|
||||
|
||||
@ -145,10 +145,11 @@ values. They can also be passed in manually.
|
||||
|
||||
The following arguments are useful for fine-tuning how available hardware should be used
|
||||
|
||||
* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.
|
||||
* `--mixed_precision {no,fp16,bf16,fp8}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.
|
||||
* `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel.
|
||||
* `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.
|
||||
* `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. Can be tuned for optimal performance.
|
||||
* `--enable_cpu_affinity` (`bool`) -- Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.
|
||||
|
||||
**Training Paradigm Arguments**:
|
||||
|
||||
@ -165,19 +166,26 @@ The following arguments are only useful when `multi_gpu` is passed or multi-gpu
|
||||
|
||||
* `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-separated list
|
||||
* `--same_network` (`bool`) -- Whether all machines used for multinode training exist on the same local network.
|
||||
* `--machine_rank MACHINE_RANK` (`int`) -- The rank of the machine on which this script is launched.
|
||||
* `--main_process_ip MAIN_PROCESS_IP` (`str`) -- The IP address of the machine of rank 0.
|
||||
* `--main_process_port MAIN_PROCESS_PORT` (`int`) -- The port to use to communicate with the machine of rank 0.
|
||||
* `--rdzv_backend` (`str`) -- The rendezvous method to use, such as "static" or "c10d"
|
||||
* `--machine_rank` (`int`) -- The rank of the machine on which this script is launched.
|
||||
* `--main_process_ip` (`str`) -- The IP address of the machine of rank 0.
|
||||
* `--main_process_port` (`int`) -- The port to use to communicate with the machine of rank 0.
|
||||
* `-t`, `--tee` (`str`) -- Tee std streams into a log file and also to console.
|
||||
* `--log_dir` (`str`) -- Base directory to use for log files when using torchrun/torch.distributed.run as launcher. Use with --tee to redirect std streams info log files.
|
||||
* `--role` (`str`) -- User-defined role for the workers.
|
||||
* `--rdzv_backend` (`str`) -- The rendezvous method to use, such as 'static' (the default) or 'c10d'
|
||||
* `--rdzv_conf` (`str`) -- Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).
|
||||
* `--max_restarts` (`int`) -- Maximum number of worker group restarts before failing.
|
||||
* `--monitor_interval` (`float`) -- Interval, in seconds, to monitor the state of workers.
|
||||
* `--monitor_interval` (`int`) -- Interval, in seconds, to monitor the state of workers.
|
||||
|
||||
**TPU Arguments**:
|
||||
|
||||
The following arguments are only useful when `tpu` is passed or TPU training is configured through `accelerate config`:
|
||||
|
||||
* `--main_training_function MAIN_TRAINING_FUNCTION` (`str`) -- The name of the main function to be executed in your script.
|
||||
* `--tpu_cluster` (`bool`) -- Whether to use a GCP TPU pod for training.
|
||||
* `--tpu_use_sudo` (`bool`) -- Whether to use `sudo` when running the TPU training script in each pod.
|
||||
* `--vm` (`str`) -- List of single Compute VM instance names. If not provided we assume usage of instance groups. For TPU pods.
|
||||
* `--env` (`str`) -- List of environment variables to set on the Compute VM instances. For TPU pods.
|
||||
* `--main_training_function` (`str`) -- The name of the main function to be executed in your script (only for TPU training).
|
||||
* `--downcast_bf16` (`bool`) -- Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.
|
||||
|
||||
**DeepSpeed Arguments**:
|
||||
@ -188,6 +196,7 @@ The following arguments are only useful when `use_deepspeed` is passed or `deeps
|
||||
* `--zero_stage` (`int`) -- DeepSpeed's ZeRO optimization stage.
|
||||
* `--offload_optimizer_device` (`str`) -- Decides where (none|cpu|nvme) to offload optimizer states.
|
||||
* `--offload_param_device` (`str`) -- Decides where (none|cpu|nvme) to offload parameters.
|
||||
* `--offload_optimizer_nvme_path` (`str`) -- Decides Nvme Path to offload optimizer states.
|
||||
* `--gradient_accumulation_steps` (`int`) -- No of gradient_accumulation_steps used in your training script.
|
||||
* `--gradient_clipping` (`float`) -- Gradient clipping value used in your training script.
|
||||
* `--zero3_init_flag` (`str`) -- Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with DeepSpeed ZeRO Stage-3.
|
||||
@ -196,10 +205,11 @@ The following arguments are only useful when `use_deepspeed` is passed or `deeps
|
||||
* `--deepspeed_exclusion_filter` (`str`) -- DeepSpeed exclusion filter string when using multi-node setup.
* `--deepspeed_inclusion_filter` (`str`) -- DeepSpeed inclusion filter string when using multi-node setup.
|
||||
* `--deepspeed_multinode_launcher` (`str`) -- DeepSpeed multi-node launcher to use.
|
||||
* `--deepspeed_moe_layer_cls_names` (`str`) -- comma-separated list of transformer MoE layer class names (case-sensitive) to wrap, e.g., `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock`
|
||||
|
||||
**Fully Sharded Data Parallelism Arguments**:
|
||||
|
||||
The following arguments are only useful when `use_fdsp` is passed or Fully Sharded Data Parallelism is configured through `accelerate config`:
|
||||
The following arguments are only useful when `use_fsdp` is passed or Fully Sharded Data Parallelism is configured through `accelerate config`:
|
||||
|
||||
* `--fsdp_offload_params` (`str`) -- Decides Whether (true|false) to offload parameters and gradients to CPU.
|
||||
* `--fsdp_min_num_params` (`int`) -- FSDP's minimum number of parameters for Default Auto Wrapping.
|
||||
@ -208,6 +218,11 @@ The following arguments are only useful when `use_fdsp` is passed or Fully Shard
|
||||
* `--fsdp_transformer_layer_cls_to_wrap` (`str`) -- Transformer layer class name (case-sensitive) to wrap, e.g, `BertLayer`, `GPTJBlock`, `T5Block` ...
|
||||
* `--fsdp_backward_prefetch_policy` (`str`) -- FSDP's backward prefetch policy.
|
||||
* `--fsdp_state_dict_type` (`str`) -- FSDP's state dict type.
|
||||
* `--fsdp_forward_prefetch` (`str`) -- FSDP forward prefetch.
|
||||
* `--fsdp_use_orig_params` (`str`) -- If True, allows non-uniform `requires_grad` mixed in a FSDP unit.
|
||||
* `--fsdp_cpu_ram_efficient_loading` (`str`) -- If true, only the first process loads the pretrained model checkpoint while all other processes have empty weights. When using this, `--fsdp_sync_module_states` needs to be True.
|
||||
* `--fsdp_sync_module_states` (`str`) -- If true, each individually wrapped FSDP unit will broadcast module parameters from rank 0.
|
||||
* `--fsdp_activation_checkpointing` (`bool`) -- Decides Whether intermediate activations are freed during the forward pass, and a checkpoint is left as a placeholder
|
||||
|
||||
**Megatron-LM Arguments**:
|
||||
|
||||
@ -218,9 +233,21 @@ The following arguments are only useful when `use_megatron_lm` is passed or Mega
|
||||
* `--megatron_lm_num_micro_batches` (``) -- Megatron-LM's number of micro batches when PP degree > 1.
|
||||
* `--megatron_lm_sequence_parallelism` (``) -- Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1.
|
||||
* `--megatron_lm_recompute_activations` (``) -- Decides Whether (true|false) to enable Selective Activation Recomputation.
|
||||
* `--megatron_lm_use_distributed_optimizer` (``) -- Decides Whether (true|false) to use distributed optimizer which shards optimizer state and gradients across Data Pralellel (DP) ranks.
|
||||
* `--megatron_lm_use_distributed_optimizer` (``) -- Decides Whether (true|false) to use distributed optimizer which shards optimizer state and gradients across Data Parallel (DP) ranks.
|
||||
* `--megatron_lm_gradient_clipping` (``) -- Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable).
|
||||
|
||||
**FP8 Arguments**:
|
||||
|
||||
* `--fp8_backend` (`str`) -- Choose a backend to train with FP8 (`te` or `msamp`)
|
||||
* `--fp8_use_autocast_during_eval` (`bool`) -- Whether to use FP8 autocast during eval mode (useful only when `--fp8_backend=te` is passed). Generally better metrics are found when this is not passed.
|
||||
* `--fp8_margin` (`int`) -- The margin to use for the gradient scaling (useful only when `--fp8_backend=te` is passed).
|
||||
* `--fp8_interval` (`int`) -- The interval to use for how often the scaling factor is recomputed (useful only when `--fp8_backend=te` is passed).
|
||||
* `--fp8_format` (`str`) -- The format to use for the FP8 recipe (useful only when `--fp8_backend=te` is passed).
|
||||
* `--fp8_amax_history_len` (`int`) -- The length of the history to use for the scaling factor computation (useful only when `--fp8_backend=te` is passed).
|
||||
* `--fp8_amax_compute_algo` (`str`) -- The algorithm to use for the scaling factor computation. (useful only when `--fp8_backend=te` is passed).
|
||||
* `--fp8_override_linear_precision` (`Tuple[bool, bool, bool]`) -- Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision.
|
||||
* `--fp8_opt_level` (`str`) -- What level of 8-bit collective communication should be used with MS-AMP (useful only when `--fp8_backend=msamp` is passed)
|
||||
|
||||
**AWS SageMaker Arguments**:
|
||||
|
||||
The following arguments are only useful when training in SageMaker
|
||||
|
||||
@ -17,12 +17,12 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
[[autodoc]] utils.DeepSpeedPlugin
|
||||
|
||||
[[autodoc]] utils.DummyOptim
|
||||
[[autodoc]] utils.deepspeed.DummyOptim
|
||||
|
||||
[[autodoc]] utils.DummyScheduler
|
||||
[[autodoc]] utils.deepspeed.DummyScheduler
|
||||
|
||||
[[autodoc]] utils.DeepSpeedEngineWrapper
|
||||
[[autodoc]] utils.deepspeed.DeepSpeedEngineWrapper
|
||||
|
||||
[[autodoc]] utils.DeepSpeedOptimizerWrapper
|
||||
[[autodoc]] utils.deepspeed.DeepSpeedOptimizerWrapper
|
||||
|
||||
[[autodoc]] utils.DeepSpeedSchedulerWrapper
|
||||
[[autodoc]] utils.deepspeed.DeepSpeedSchedulerWrapper
|
||||
|
||||
docs/source/package_reference/fp8.md (new file, 28 lines)
@ -0,0 +1,28 @@
|
||||
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# FP8 Functionality
|
||||
|
||||
Below are the functions and classes related to the underlying FP8 implementation.
|
||||
|
||||
[[autodoc]] utils.FP8RecipeKwargs
|
||||
|
||||
[[autodoc]] utils.convert_model
|
||||
|
||||
[[autodoc]] utils.has_transformer_engine_layers
|
||||
|
||||
[[autodoc]] utils.contextual_fp8_autocast
|
||||
|
||||
[[autodoc]] utils.apply_fp8_autowrap
|
||||
@ -15,4 +15,10 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# Utilities for Fully Sharded Data Parallelism
|
||||
|
||||
[[autodoc]] utils.FullyShardedDataParallelPlugin
|
||||
[[autodoc]] utils.enable_fsdp_ram_efficient_loading
|
||||
|
||||
[[autodoc]] utils.disable_fsdp_ram_efficient_loading
|
||||
|
||||
[[autodoc]] utils.merge_fsdp_weights
|
||||
|
||||
[[autodoc]] utils.FullyShardedDataParallelPlugin
|
||||
|
||||
docs/source/package_reference/inference.md (new file, 20 lines)
@ -0,0 +1,20 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# The inference API
|
||||
|
||||
These docs refer to the [PiPPy](https://github.com/PyTorch/PiPPy) integration.
|
||||
|
||||
[[autodoc]] inference.prepare_pippy
|
||||
@ -30,6 +30,10 @@ related to distributed training or mixed precision are created.
|
||||
|
||||
[[autodoc]] utils.FP8RecipeKwargs
|
||||
|
||||
## ProfileKwargs
|
||||
|
||||
[[autodoc]] utils.ProfileKwargs
|
||||
|
||||
## GradScalerKwargs
|
||||
|
||||
[[autodoc]] GradScalerKwargs
|
||||
@ -37,3 +41,7 @@ related to distributed training or mixed precision are created.
|
||||
## InitProcessGroupKwargs
|
||||
|
||||
[[autodoc]] InitProcessGroupKwargs
|
||||
|
||||
## KwargsHandler
|
||||
|
||||
[[autodoc]] utils.KwargsHandler
|
||||
|
||||
@ -15,23 +15,7 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# Logging with Accelerate
|
||||
|
||||
Accelerate has its own logging utility to handle logging while in a distributed system.
|
||||
To utilize this replace cases of `logging` with `accelerate.logging`:
|
||||
```diff
|
||||
- import logging
|
||||
+ from accelerate.logging import get_logger
|
||||
- logger = logging.getLogger(__name__)
|
||||
+ logger = get_logger(__name__)
|
||||
```
|
||||
|
||||
## Setting the log level
|
||||
|
||||
The log level can be set with the `ACCELERATE_LOG_LEVEL` environment variable or by passing
|
||||
`log_level` to `get_logger`:
|
||||
```python
|
||||
from accelerate.logging import get_logger
|
||||
|
||||
logger = get_logger(__name__, log_level="INFO")
|
||||
```
|
||||
Refer to the [Troubleshooting guide](../usage_guides/troubleshooting#logging) or to the example below to learn
|
||||
how to use 🤗 Accelerate's logger.
|
||||
|
||||
[[autodoc]] logging.get_logger
|
||||
@ -31,3 +31,5 @@ rendered properly in your Markdown viewer.
|
||||
- __init__
|
||||
[[autodoc]] tracking.MLflowTracker
|
||||
- __init__
|
||||
[[autodoc]] tracking.ClearMLTracker
|
||||
- __init__
|
||||
|
||||
@ -40,6 +40,12 @@ The following are constants used when utilizing [`Accelerator.save_model`]
|
||||
|
||||
These are basic dataclasses used throughout 🤗 Accelerate and they can be passed in as parameters.
|
||||
|
||||
### Standalone
|
||||
|
||||
These are standalone dataclasses used for checks, such as the type of distributed system being used
|
||||
|
||||
[[autodoc]] utils.ComputeEnvironment
|
||||
|
||||
[[autodoc]] utils.DistributedType
|
||||
|
||||
[[autodoc]] utils.DynamoBackend
|
||||
@ -48,12 +54,30 @@ These are basic dataclasses used throughout 🤗 Accelerate and they can be pass
|
||||
|
||||
[[autodoc]] utils.PrecisionType
|
||||
|
||||
[[autodoc]] utils.ProjectConfiguration
|
||||
[[autodoc]] utils.RNGType
|
||||
|
||||
[[autodoc]] utils.SageMakerDistributedType
|
||||
|
||||
### Kwargs
|
||||
|
||||
These are configurable arguments for specific interactions throughout the PyTorch ecosystem that Accelerate handles under the hood.
|
||||
|
||||
[[autodoc]] utils.AutocastKwargs
|
||||
|
||||
[[autodoc]] utils.DistributedDataParallelKwargs
|
||||
|
||||
[[autodoc]] utils.FP8RecipeKwargs
|
||||
|
||||
[[autodoc]] utils.GradScalerKwargs
|
||||
|
||||
[[autodoc]] utils.InitProcessGroupKwargs
|
||||
|
||||
[[autodoc]] utils.KwargsHandler
|
||||
|
||||
## Plugins
|
||||
|
||||
These are plugins that can be passed to the [`Accelerator`] object. While they are defined elsewhere in the documentation,
|
||||
for convience all of them are available to see here:
|
||||
for convenience all of them are available to see here:
|
||||
|
||||
[[autodoc]] utils.DeepSpeedPlugin
|
||||
|
||||
@ -65,6 +89,24 @@ for convience all of them are available to see here:
|
||||
|
||||
[[autodoc]] utils.TorchDynamoPlugin
|
||||
|
||||
## Configurations
|
||||
|
||||
These are classes which can be configured and passed through to the appropriate integration
|
||||
|
||||
[[autodoc]] utils.BnbQuantizationConfig
|
||||
|
||||
[[autodoc]] utils.DataLoaderConfiguration
|
||||
|
||||
[[autodoc]] utils.ProjectConfiguration
|
||||
|
||||
## Environmental Variables
|
||||
|
||||
These are environmental variables that can be enabled for different use cases
|
||||
|
||||
* `ACCELERATE_DEBUG_MODE` (`str`): Whether to run accelerate in debug mode. More info available [here](../usage_guides/debug.md).
|
||||
|
||||
|
||||
|
||||
|
||||
## Data Manipulation and Operations
|
||||
|
||||
@ -72,16 +114,30 @@ These include data operations that mimic the same `torch` ops but can be used on
|
||||
|
||||
[[autodoc]] utils.broadcast
|
||||
|
||||
[[autodoc]] utils.broadcast_object_list
|
||||
|
||||
[[autodoc]] utils.concatenate
|
||||
|
||||
[[autodoc]] utils.convert_outputs_to_fp32
|
||||
|
||||
[[autodoc]] utils.convert_to_fp32
|
||||
|
||||
[[autodoc]] utils.gather
|
||||
|
||||
[[autodoc]] utils.gather_object
|
||||
|
||||
[[autodoc]] utils.listify
|
||||
|
||||
[[autodoc]] utils.pad_across_processes
|
||||
|
||||
[[autodoc]] utils.recursively_apply
|
||||
|
||||
[[autodoc]] utils.reduce
|
||||
|
||||
[[autodoc]] utils.send_to_device
|
||||
|
||||
[[autodoc]] utils.slice_tensors
|
||||
|
||||
## Environment Checks
|
||||
|
||||
These functionalities check the state of the current working environment including information about the operating system itself, what it can support, and if particular dependencies are installed.
|
||||
@ -96,7 +152,7 @@ These functionalities check the state of the current working environment includi
|
||||
|
||||
[[autodoc]] utils.is_torch_version
|
||||
|
||||
[[autodoc]] utils.is_tpu_available
|
||||
[[autodoc]] utils.is_torch_xla_available
|
||||
|
||||
[[autodoc]] utils.is_xpu_available
|
||||
|
||||
@ -110,9 +166,11 @@ These functionalities check the state of the current working environment includi
|
||||
|
||||
When setting up 🤗 Accelerate for the first time, rather than running `accelerate config`, [`~utils.write_basic_config`] can be used as an alternative for quick configuration, as in the sketch below.
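A minimal sketch; the mixed-precision choice is illustrative:

```python
from accelerate.utils import write_basic_config

# Writes a default config file to the Accelerate cache folder.
write_basic_config(mixed_precision="fp16")
```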
|
||||
|
||||
## Memory
|
||||
[[autodoc]] utils.set_numa_affinity
|
||||
|
||||
[[autodoc]] utils.get_max_memory
|
||||
[[autodoc]] utils.environment.override_numa_affinity
|
||||
|
||||
## Memory
|
||||
|
||||
[[autodoc]] utils.find_executable_batch_size
|
||||
|
||||
@ -120,12 +178,32 @@ When setting up 🤗 Accelerate for the first time, rather than running `acceler
|
||||
|
||||
These utilities relate to interacting with PyTorch models
|
||||
|
||||
[[autodoc]] utils.calculate_maximum_sizes
|
||||
|
||||
[[autodoc]] utils.compute_module_sizes
|
||||
|
||||
[[autodoc]] utils.extract_model_from_parallel
|
||||
|
||||
[[autodoc]] utils.get_balanced_memory
|
||||
|
||||
[[autodoc]] utils.get_max_layer_size
|
||||
|
||||
[[autodoc]] utils.infer_auto_device_map
|
||||
|
||||
[[autodoc]] utils.load_checkpoint_in_model
|
||||
|
||||
[[autodoc]] utils.load_offloaded_weights
|
||||
|
||||
[[autodoc]] utils.load_state_dict
|
||||
|
||||
[[autodoc]] utils.offload_state_dict
|
||||
|
||||
[[autodoc]] utils.retie_parameters
|
||||
|
||||
[[autodoc]] utils.set_module_tensor_to_device
|
||||
|
||||
[[autodoc]] utils.shard_checkpoint
|
||||
|
||||
|
||||
## Parallel
|
||||
|
||||
@ -166,5 +244,3 @@ These include utilities that are useful to load checkpoints.
|
||||
These include utilities that are useful to quantize model.
|
||||
|
||||
[[autodoc]] utils.load_and_quantize_model
|
||||
|
||||
[[autodoc]] utils.BnbQuantizationConfig
|
||||
@ -9,19 +9,80 @@ Unless required by applicable law or agreed to in writing, software distributed
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Quick tour
|
||||
# Quicktour
|
||||
|
||||
Let's have a look at the 🤗 Accelerate main features and traps to avoid.
|
||||
There are many ways to launch and run your code depending on your training environment ([torchrun](https://pytorch.org/docs/stable/elastic/run.html), [DeepSpeed](https://www.deepspeed.ai/), etc.) and available hardware. Accelerate offers a unified interface for launching and training on different distributed setups, allowing you to focus on your PyTorch training code instead of the intricacies of adapting your code to these different setups. This allows you to easily scale your PyTorch code for training and inference on distributed setups with hardware like GPUs and TPUs. Accelerate also provides Big Model Inference to make loading and running inference with really large models that usually don't fit in memory more accessible.
|
||||
|
||||
## Main use
|
||||
This quicktour introduces the three main features of Accelerate:
|
||||
|
||||
To use 🤗 Accelerate in your own script, you have to change four things:
|
||||
* a unified command line launching interface for distributed training scripts
|
||||
* a training library for adapting PyTorch training code to run on different distributed setups
|
||||
* Big Model Inference
|
||||
|
||||
1. Import the [`Accelerator`] main class and instantiate one in an `accelerator` object:
|
||||
## Unified launch interface
|
||||
|
||||
Accelerate automatically selects the appropriate configuration values for any given distributed training framework (DeepSpeed, FSDP, etc.) through a unified configuration file generated from the [`accelerate config`](package_reference/cli#accelerate-config) command. You could also pass the configuration values explicitly to the command line which is helpful in certain situations like if you're using SLURM.
|
||||
|
||||
|
||||
But in most cases, you should always run [`accelerate config`](package_reference/cli#accelerate-config) first to help Accelerate learn about your training setup.
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
The [`accelerate config`](package_reference/cli#accelerate-config) command creates and saves a `default_config.yaml` file in Accelerate's cache folder. This file stores the configuration for your training environment, which helps Accelerate correctly launch your training script based on your machine.
|
||||
|
||||
After you've configured your environment, you can test your setup with [`accelerate test`](package_reference/cli#accelerate-test), which launches a short script to test the distributed environment.
|
||||
|
||||
```bash
|
||||
accelerate test
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> Add `--config_file` to the `accelerate test` or `accelerate launch` command to specify the location of the configuration file if it is saved in a non-default location like the cache.
|
||||
|
||||
Once your environment is set up, launch your training script with [`accelerate launch`](package_reference/cli#accelerate-launch)!
|
||||
|
||||
```bash
|
||||
accelerate launch path_to_script.py --args_for_the_script
|
||||
```
|
||||
|
||||
To learn more, check out the [Launch distributed code](basic_tutorials/launch) tutorial for more information about launching your scripts.
|
||||
|
||||
We also have a [configuration zoo](https://github.com/huggingface/accelerate/blob/main/examples/config_yaml_templates) which showcases a number of premade **minimal** example configurations for a variety of setups you can run.
|
||||
|
||||
## Adapt training code
|
||||
|
||||
The next main feature of Accelerate is the [`Accelerator`] class which adapts your PyTorch code to run on different distributed setups.
|
||||
|
||||
You only need to add a few lines of code to your training script to enable it to run on multiple GPUs or TPUs.
|
||||
|
||||
```diff
|
||||
+ from accelerate import Accelerator
|
||||
+ accelerator = Accelerator()
|
||||
|
||||
+ device = accelerator.device
|
||||
+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(
|
||||
+ model, optimizer, training_dataloader, scheduler
|
||||
+ )
|
||||
|
||||
for batch in training_dataloader:
|
||||
optimizer.zero_grad()
|
||||
inputs, targets = batch
|
||||
- inputs = inputs.to(device)
|
||||
- targets = targets.to(device)
|
||||
outputs = model(inputs)
|
||||
loss = loss_function(outputs, targets)
|
||||
+ accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
scheduler.step()
|
||||
```
|
||||
|
||||
1. Import and instantiate the [`Accelerator`] class at the beginning of your training script. The [`Accelerator`] class initializes everything necessary for distributed training, and it automatically detects your training environment (a single machine with a GPU, a machine with several GPUs, several machines with multiple GPUs or a TPU, etc.) based on how the code was launched.
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
@ -29,27 +90,19 @@ from accelerate import Accelerator
|
||||
accelerator = Accelerator()
|
||||
```
|
||||
|
||||
This should happen as early as possible in your training script as it will initialize everything necessary for
|
||||
distributed training. You don't need to indicate the kind of environment you are in (just one machine with a GPU, one
|
||||
machines with several GPUs, several machines with multiple GPUs or a TPU), the library will detect this automatically.
|
||||
2. Remove calls like `.cuda()` on your model and input data. The [`Accelerator`] class automatically places these objects on the appropriate device for you.
|
||||
|
||||
2. Remove the call `.to(device)` or `.cuda()` for your model and input data. The `accelerator` object
|
||||
will handle this for you and place all those objects on the right device for you. If you know what you're doing, you
|
||||
can leave those `.to(device)` calls but you should use the device provided by the `accelerator` object:
|
||||
`accelerator.device`.
|
||||
> [!WARNING]
|
||||
> This step is *optional* but it is considered best practice to allow Accelerate to handle device placement. You could also deactivate automatic device placement by passing `device_placement=False` when initializing the [`Accelerator`]. If you want to explicitly place objects on a device with `.to(device)`, make sure you use `accelerator.device` instead. For example, if you create an optimizer before placing a model on `accelerator.device`, training fails on a TPU.
|
||||
|
||||
To fully deactivate the automatic device placement, pass along `device_placement=False` when initializing your
|
||||
[`Accelerator`].
|
||||
> [!WARNING]
|
||||
> Accelerate does not use non-blocking transfers by default for its automatic device placement, which can result in potentially unwanted CUDA synchronizations. You can enable non-blocking transfers by passing a [`~utils.dataclasses.DataLoaderConfiguration`] with `non_blocking=True` set as the `dataloader_config` when initializing the [`Accelerator`]. As usual, non-blocking transfers will only work if the dataloader also has `pin_memory=True` set. Be wary that using non-blocking transfers from GPU to CPU may cause incorrect results if it results in CPU operations being performed on non-ready tensors.
|
||||
|
||||
<Tip warning={true}>
|
||||
```py
|
||||
device = accelerator.device
|
||||
```
|
||||
|
||||
If you place your objects manually on the proper device, be careful to create your optimizer after putting your
|
||||
model on `accelerator.device` or your training will fail on TPU.
|
||||
|
||||
</Tip>
|
||||
|
||||
3. Pass all objects relevant to training (optimizer, model, training dataloader, learning rate scheduler) to the
|
||||
[`~Accelerator.prepare`] method. This will make sure everything is ready for training.
|
||||
3. Pass all relevant PyTorch objects for training (optimizer, model, dataloader(s), learning rate scheduler) to the [`~Accelerator.prepare`] method as soon as they're created. This method wraps the model in a container optimized for your distributed setup, uses Accelerate's version of the optimizer and scheduler, and creates a sharded version of your dataloader for distribution across GPUs or TPUs.
|
||||
|
||||
```python
|
||||
model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
||||
@ -57,73 +110,23 @@ model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
||||
)
|
||||
```
|
||||
|
||||
In particular, your training dataloader will be sharded across all GPUs/TPU cores available so that each one sees a
|
||||
different portion of the training dataset. Also, the random states of all processes will be synchronized at the
|
||||
beginning of each iteration through your dataloader, to make sure the data is shuffled the same way (if you decided to
|
||||
use `shuffle=True` or any kind of random sampler).
|
||||
4. Replace `loss.backward()` with [`~Accelerator.backward`] to use the correct `backward()` method for your training setup.
|
||||
|
||||
<Tip>
|
||||
```py
|
||||
accelerator.backward(loss)
|
||||
```
|
||||
|
||||
The actual batch size for your training will be the number of devices used multiplied by the batch size you set in
|
||||
your script: for instance training on 4 GPUs with a batch size of 16 set when creating the training dataloader will
|
||||
train at an actual batch size of 64.
|
||||
Read [Accelerate’s internal mechanisms](concept_guides/internal_mechanism) guide to learn more details about how Accelerate adapts your code.
|
||||
|
||||
</Tip>
|
||||
### Distributed evaluation
|
||||
|
||||
Alternatively, you can use the option `split_batches=True` when creating and initializing your
|
||||
[`Accelerator`], in which case the batch size will always stay the same, whether you run your
|
||||
script on 1, 2, 4, or 64 GPUs.
|
||||
|
||||
You should execute this instruction as soon as all objects for training are created, before starting your actual
|
||||
training loop.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
You should only pass the learning rate scheduler to [`~Accelerator.prepare`] when the scheduler needs to be stepped
|
||||
at each optimizer step.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Your training dataloader may change length when going through this method: if you run on X GPUs, it will have its
|
||||
length divided by X (since your actual batch size will be multiplied by X), unless you set
|
||||
`split_batches=True`.
|
||||
|
||||
</Tip>
|
||||
|
||||
Any instruction using your training dataloader length (for instance if you want to log the number of total training
|
||||
steps) should go after the call to [`~Accelerator.prepare`].
|
||||
|
||||
You can perfectly send your dataloader to [`~Accelerator.prepare`] on its own, but it's best to send the
|
||||
model and optimizer to [`~Accelerator.prepare`] together.
|
||||
|
||||
You may or may not want to send your validation dataloader to [`~Accelerator.prepare`], depending on
|
||||
whether you want to run distributed evaluation or not (see below).
|
||||
|
||||
4. Replace the line `loss.backward()` by `accelerator.backward(loss)`.
|
||||
|
||||
And you're all set! With all these changes, your script will run on your local machine as well as on multiple GPUs or a
|
||||
TPU! You can either use your favorite tool to launch the distributed training, or you can use the 🤗 Accelerate
|
||||
launcher.
|
||||
|
||||
|
||||
## Distributed evaluation
|
||||
|
||||
You can perform regular evaluation in your training script, if you leave your validation dataloader out of the
|
||||
[`~Accelerator.prepare`] method. In this case, you will need to put the input data on the
|
||||
`accelerator.device` manually.
|
||||
|
||||
To perform distributed evaluation, send along your validation dataloader to the [`~Accelerator.prepare`]
|
||||
method:
|
||||
To perform distributed evaluation, pass your validation dataloader to the [`~Accelerator.prepare`] method:
|
||||
|
||||
```python
|
||||
validation_dataloader = accelerator.prepare(validation_dataloader)
|
||||
```
|
||||
|
||||
As for your training dataloader, it will mean that (should you run your script on multiple devices) each device will
|
||||
only see part of the evaluation data. This means you will need to group your predictions together. This is very easy to
|
||||
do with the [`~Accelerator.gather_for_metrics`] method.
|
||||
Each device in your distributed setup only receives a part of the evaluation data, which means you should group your predictions together with the [`~Accelerator.gather_for_metrics`] method. This method requires all tensors to be the same size on each process, so if your tensors have different sizes on each process (for instance when dynamically padding to the maximum length in a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad your tensors to the largest size across processes. Note that the tensors need to be 1D and that they are concatenated along the first dimension.
|
||||
|
||||
```python
|
||||
for inputs, targets in validation_dataloader:
|
||||
@ -134,387 +137,52 @@ for inputs, targets in validation_dataloader:
|
||||
metric.add_batch(all_predictions, all_targets)
|
||||
```
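If your per-process tensors have different shapes, a sketch of padding before gathering; the `dim` and `pad_index` values are illustrative:

```python
# Pad variable-length predictions to a common size across processes
# before gathering them for metric computation.
predictions = accelerator.pad_across_processes(predictions, dim=1, pad_index=0)
all_predictions = accelerator.gather_for_metrics(predictions)
```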
|
||||
|
||||
<Tip warning={true}>
|
||||
For more complex cases (e.g., 2D tensors, when you don't want to concatenate tensors, dicts of 3D tensors), you can pass `use_gather_object=True` to `gather_for_metrics`. This returns the list of objects after gathering. Note that using it with GPU tensors is not well supported and is inefficient.
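A minimal sketch; the `batch_outputs` object here is illustrative:

```python
# Gather arbitrary (possibly nested) Python objects from all processes.
all_outputs = accelerator.gather_for_metrics(batch_outputs, use_gather_object=True)
```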
|
||||
|
||||
Similar to the training dataloader, passing your validation dataloader through
|
||||
[`~Accelerator.prepare`] may change it: if you run on X GPUs, it will have its length divided by X
|
||||
(since your actual batch size will be multiplied by X), unless you set `split_batches=True`.
|
||||
> [!TIP]
|
||||
> Data at the end of a dataset may be duplicated so the batch can be equally divided among all workers. The [`~Accelerator.gather_for_metrics`] method automatically removes the duplicated data to calculate a more accurate metric.
|
||||
|
||||
</Tip>
|
||||
## Big Model Inference
|
||||
|
||||
Any instruction using your training dataloader length (for instance if you need the number of total training steps
|
||||
to create a learning rate scheduler) should go after the call to [`~Accelerator.prepare`].
|
||||
Accelerate's Big Model Inference has two main features, [`~accelerate.init_empty_weights`] and [`~accelerate.load_checkpoint_and_dispatch`], to load large models for inference that typically don't fit into memory.
|
||||
|
||||
Some data at the end of the dataset may be duplicated so the batch can be divided equally among all workers. As a result, metrics
|
||||
should be calculated through the [`~Accelerator.gather_for_metrics`] method to automatically remove the duplicated data while gathering.
|
||||
> [!TIP]
|
||||
> Take a look at the [Handling big models for inference](concept_guides/big_model_inference) guide for a better understanding of how Big Model Inference works under the hood.
|
||||
|
||||
<Tip>
|
||||
### Empty weights initialization
|
||||
|
||||
If for some reason you don't wish to have this automatically done, [`~Accelerator.gather`] can be used instead to gather
|
||||
the data across all processes and this can manually be done instead.
|
||||
The [`~accelerate.init_empty_weights`] context manager initializes models of any size by creating a *model skeleton* and placing parameters on PyTorch's [**meta**](https://pytorch.org/docs/main/meta.html) device each time they're created. This way, not all weights are immediately loaded and only a small part of the model is loaded into memory at a time.
|
||||
|
||||
</Tip>
|
||||
For example, loading an empty [Mixtral-8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) model takes significantly less memory than fully loading the model and its weights on the CPU.
|
||||
|
||||
```py
|
||||
from accelerate import init_empty_weights
|
||||
from transformers import AutoConfig, AutoModelForCausalLM
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
The [`~Accelerator.gather`] and [`~Accelerator.gather_for_metrics`] methods require the tensors to be all the same size on each process. If
|
||||
you have tensors of different sizes on each process (for instance when dynamically padding to the maximum length in
|
||||
a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad you tensor to the
|
||||
biggest size across processes.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Launching your distributed script
|
||||
|
||||
You can use the regular commands to launch your distributed training (like `torch.distributed.run` for
|
||||
PyTorch), they are fully compatible with 🤗 Accelerate.
|
||||
|
||||
🤗 Accelerate also provides a CLI tool that unifies all launchers, so you only have to remember one command. To use it,
|
||||
just run:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
config = AutoConfig.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
||||
with init_empty_weights():
|
||||
model = AutoModelForCausalLM.from_config(config)
|
||||
```
|
||||
|
||||
on your machine and reply to the questions asked. This will save a *default_config.yaml* file in your cache folder for
|
||||
🤗 Accelerate. That cache folder is (with decreasing order of priority):
|
||||
### Load and dispatch weights
|
||||
|
||||
- The content of your environment variable `HF_HOME` suffixed with *accelerate*.
|
||||
- If it does not exist, the content of your environment variable `XDG_CACHE_HOME` suffixed with
|
||||
*huggingface/accelerate*.
|
||||
- If this does not exist either, the folder *~/.cache/huggingface/accelerate*
|
||||
The [`~accelerate.load_checkpoint_and_dispatch`] function loads full or sharded checkpoints into the empty model, and automatically distributes weights across all available devices.
|
||||
|
||||
You can also specify with the flag `--config_file` the location of the file you want to save.
|
||||
The `device_map` parameter determines where to place each model layer, and specifying `"auto"` places them on the GPU first, then the CPU, and finally the hard drive as memory-mapped tensors if there's still not enough memory. Use the `no_split_module_classes` parameter to indicate which modules shouldn't be split across devices (typically those with a residual connection).
|
||||
|
||||
Once this is done, you can test everything is going well on your setup by running:
|
||||
```py
|
||||
from accelerate import load_checkpoint_and_dispatch
|
||||
|
||||
```bash
|
||||
accelerate test
|
||||
model = load_checkpoint_and_dispatch(
|
||||
model, checkpoint="mistralai/Mixtral-8x7B-Instruct-v0.1", device_map="auto", no_split_module_classes=['Block']
|
||||
)
|
||||
```
|
||||
|
||||
This will launch a short script that will test the distributed environment. If it runs fine, you are ready for the next
|
||||
step!
|
||||
## Next steps
|
||||
|
||||
Note that if you specified a location for the config file in the previous step, you need to pass it here as well:
|
||||
Now that you've been introduced to the main Accelerate features, your next steps could include:
|
||||
|
||||
```bash
|
||||
accelerate test --config_file path_to_config.yaml
|
||||
```
|
||||
|
||||
Now that this is done, you can run your script with the following command:
|
||||
|
||||
```bash
|
||||
accelerate launch path_to_script.py --args_for_the_script
|
||||
```
|
||||
|
||||
If you stored the config file in a non-default location, you can indicate it to the launcher like this:
|
||||
|
||||
```bash
|
||||
accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script
|
||||
```
|
||||
|
||||
You can also override any of the arguments determined by your config file.
|
||||
To see the complete list of parameters that you can pass in, run `accelerate launch -h`.
|
||||
|
||||
Check out the [Launch tutorial](basic_tutorials/launch) for more information about launching your scripts.
|
||||
|
||||
|
||||

## Launching training from a notebook

In Accelerate 0.3.0, a new [`notebook_launcher`] has been introduced to help you launch your training
function from a notebook. This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training
on several GPUs (if the machine on which you are running your notebook has them).

Just define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a
cell with the following code:

```python
from accelerate import notebook_launcher

notebook_launcher(training_function)
```
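
The launcher also accepts arguments for your training function and the number of processes to use. As a minimal sketch (the argument values here are illustrative):

```python
from accelerate import notebook_launcher

# Launch `training_function(lr, batch_size)` on 2 GPUs of the current machine
args = (1e-4, 64)
notebook_launcher(training_function, args, num_processes=2)
```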

<Tip warning={true}>

Your [`Accelerator`] object should only be defined inside the training function. This is because the
initialization should be done inside the launcher only.

</Tip>

Check out the [Notebook Launcher tutorial](basic_tutorials/notebook) for more information about training on TPUs.

## Training on TPU

If you want to launch your script on TPUs, there are a few caveats you should be aware of. Behind the scenes, the TPUs
will create a graph of all the operations happening in your training step (forward pass, backward pass and optimizer
step). This is why your first step of training will always be very long as building and compiling this graph for
optimizations takes some time.

The good news is that this compilation will be cached so the second step and all the following will be much faster. The
bad news is that it only applies if all of your steps do exactly the same operations, which implies:

- having all tensors of the same length in all your batches
- having static code (i.e., not a for loop whose length could change from step to step)

Having any of the things above change between two steps will trigger a new compilation which will, once again, take a
lot of time. In practice, that means you must take special care to have all your tensors in your inputs of the same
shape (so no dynamic padding, for instance, if you are working on an NLP problem) and should not use layers with for loops that
have different lengths depending on the inputs (such as an LSTM) or the training will be excruciatingly slow.

To introduce special behavior in your script for TPUs you can check the `distributed_type` of your
`accelerator`:

```python docstyle-ignore
from accelerate import DistributedType

if accelerator.distributed_type == DistributedType.TPU:
    # do something of static shape
else:
    # go crazy and be dynamic
```

The [NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py) shows an example in a
situation with dynamic padding.

One last thing to pay close attention to: if your model has tied weights (such as language models which tie the weights
of the embedding matrix with the weights of the decoder), moving this model to the TPU (either yourself or after you
passed your model to [`~Accelerator.prepare`]) will break the tying. You will need to retie the weights
afterwards. You can find an example of this in the [run_clm_no_trainer](https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py) script in
the Transformers repository.
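
With a Transformers model, retying can be as simple as calling its `tie_weights` method again. A minimal sketch:

```python
if accelerator.distributed_type == DistributedType.TPU:
    # Re-tie the embedding and decoder weights after the model has been moved/prepared
    model.tie_weights()
```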

Check out the [TPU tutorial](concept_guides/training_tpu) for more information about training on TPUs.

## Other caveats

We list here all smaller issues you could have in your script conversion and how to resolve them.

### Execute a statement only on one process

Some of your instructions only need to run for one process on a given server: for instance a data download or a log
statement. To do this, wrap the statement in a test like this:

```python docstyle-ignore
if accelerator.is_local_main_process:
    # Is executed once per server
```

Another example is progress bars: to avoid having multiple progress bars in your output, you should only display one on
the local main process:

```python
from tqdm.auto import tqdm

progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
```

The *local* means per machine: if you are running your training on two servers with several GPUs, the instruction will
be executed once on each of those servers. If you need to execute something only once for all processes (and not once per
machine), for instance uploading the final model to the 🤗 model hub, wrap it in a test like this:

```python docstyle-ignore
if accelerator.is_main_process:
    # Is executed once only
```

For printing statements you only want executed once per machine, you can just replace the `print` function with
`accelerator.print`.

### Defer execution

When you run your usual script, instructions are executed in order. Using 🤗 Accelerate to deploy your script on several
GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be
faster than others.

You might need to wait for all processes to have reached a certain point before executing a given instruction. For
instance, you shouldn't save a model before being sure every process is done with training. To do this, just write the
following line in your code:

```
accelerator.wait_for_everyone()
```

This instruction will block all the processes that arrive first until all the other processes have reached that
point (if you run your script on just one GPU or CPU, this won't do anything).

### Saving/loading a model

Saving the model you trained might need a bit of adjustment: first you should wait for all processes to reach that
point in the script as shown above, and then, you should unwrap your model before saving it. This is because when going
through the [`~Accelerator.prepare`] method, your model may have been placed inside a bigger model,
which deals with the distributed training. This in turn means that saving your model state dictionary without taking
any precaution will take that potential extra layer into account, and you will end up with weights you can't load back
in your base model. The [`~Accelerator.save_model`] method will help you to achieve that. It will unwrap your model and save
the model state dictionary.

Here is an example:

```
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory)
```

The [`~Accelerator.save_model`] method can also save a model into sharded checkpoints or with safetensors format.
Here is an example:

```python
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
```

If your script contains logic to load a checkpoint, we also recommend you load your weights in the unwrapped model
(this is only useful if you use the load function after making your model go through
[`~Accelerator.prepare`]). Here is an example:

```python
import os

import torch

unwrapped_model = accelerator.unwrap_model(model)
path_to_checkpoint = os.path.join(save_directory, "pytorch_model.bin")
unwrapped_model.load_state_dict(torch.load(path_to_checkpoint))
```

Note that since all the model parameters are references to tensors, this will load your weights inside `model`.

If you want to load a sharded checkpoint or a checkpoint with safetensors format into the model with a specific `device`, we recommend loading it with the [`~utils.load_checkpoint_in_model`] function. Here's an example:

```python
from accelerate.utils import load_checkpoint_in_model

load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"": device})
```

## Saving/loading entire states

When training your model, you may want to save the current state of the model, optimizer, random generators, and potentially LR schedulers to be restored in the _same script_.
You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so.

To further customize where and how states are saved through [`~Accelerator.save_state`], the [`~utils.ProjectConfiguration`] class can be used. For example,
if `automatic_checkpoint_naming` is enabled, each saved checkpoint will be located at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.

If you have registered any other stateful items to be stored through [`~Accelerator.register_for_checkpointing`], they will also be saved and/or loaded.

<Tip>

Every object passed to [`~Accelerator.register_for_checkpointing`] must have a `load_state_dict` and `state_dict` function to be stored.

</Tip>
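
Here is a minimal sketch putting these pieces together (the project directory name and the scheduler are illustrative):

```python
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

# Checkpoints will be saved under my_project/checkpoints/checkpoint_0, checkpoint_1, ...
config = ProjectConfiguration(automatic_checkpoint_naming=True)
accelerator = Accelerator(project_dir="my_project", project_config=config)

# Any custom object with `state_dict`/`load_state_dict` can be tracked as well
accelerator.register_for_checkpointing(lr_scheduler)

accelerator.save_state()                                       # save everything
accelerator.load_state("my_project/checkpoints/checkpoint_0")  # restore it later
```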

### Gradient clipping

If you are using gradient clipping in your script, you should replace the calls to
`torch.nn.utils.clip_grad_norm_` or `torch.nn.utils.clip_grad_value_` with [`~Accelerator.clip_grad_norm_`]
and [`~Accelerator.clip_grad_value_`] respectively.
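
For example, a minimal sketch of where the call fits in a training loop (the maximum norm of 1.0 is illustrative):

```python
for batch in training_dataloader:
    outputs = model(**batch)
    loss = outputs.loss
    accelerator.backward(loss)
    # Clip after the backward pass, before the optimizer step
    accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
    optimizer.step()
    optimizer.zero_grad()
```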

### Mixed Precision training

If you are running your training in Mixed Precision with 🤗 Accelerate, you will get the best result with your loss being
computed inside your model (like in Transformer models for instance). Every computation outside of the model will be
executed in full precision (which is generally what you want for loss computation, especially if it involves a
softmax). However, you might want to put your loss computation inside the [`~Accelerator.autocast`] context manager:

```
with accelerator.autocast():
    loss = complex_loss_function(outputs, target)
```

Another caveat with Mixed Precision training is that a few gradient updates will be skipped at the beginning and
sometimes during training: because of the dynamic loss scaling strategy, there are points during training where the
gradients have overflown, and the loss scaling factor is reduced to avoid this happening again at the next step.

This means that you may update your learning rate scheduler when there was no update, which is fine in general, but may
have an impact when you have very little training data, or if the first learning rate values of your scheduler are very
important. In this case, you can skip the learning rate scheduler updates when the optimizer step was not done like
this:

```
if not accelerator.optimizer_step_was_skipped:
    lr_scheduler.step()
```

### Gradient Accumulation

To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a `gradient_accumulation_steps`.
This will also automatically ensure the gradients are synced or unsynced when on multi-device training, check if the step should
actually be performed, and auto-scale the loss:

```python
accelerator = Accelerator(gradient_accumulation_steps=2)
model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader)

for input, label in training_dataloader:
    with accelerator.accumulate(model):
        predictions = model(input)
        loss = loss_function(predictions, label)
        accelerator.backward(loss)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
```

### DeepSpeed

DeepSpeed support is experimental, so the underlying API will evolve in the near future and may have some slight
breaking changes. In particular, 🤗 Accelerate does not yet support a DeepSpeed config you have written yourself; this
will be added in a future version.

<Tip warning={true}>

The [`notebook_launcher`] does not support the DeepSpeed integration yet.

</Tip>

## Internal mechanism

Internally, the library works by first analyzing the environment in which the script is launched to determine which
kind of distributed setup is used, how many different processes there are and which one the current script is in. All
that information is stored in the [`~AcceleratorState`].

This class is initialized the first time you instantiate an [`Accelerator`], and it also performs any
specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of
[`~state.AcceleratorState`].

Then, when calling [`~Accelerator.prepare`], the library:

- wraps your model(s) in the container adapted for the distributed setup,
- wraps your optimizer(s) in an [`~optimizer.AcceleratedOptimizer`],
- creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`].

While the model(s) and optimizer(s) are just put in simple wrappers, the dataloader(s) are re-created. This is mostly
because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created and the
library handles the sharding of your data between processes by changing that `batch_sampler` to yield every other
`num_processes` batches.

The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality:

- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any
  randomization (like shuffling) is done the exact same way across processes.
- it puts the batches on the proper device before yielding them (unless you have opted out of
  `device_placement=True`).

The random number generator synchronization will by default synchronize:

- the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6
- the main random number generator in PyTorch <= 1.5.1

You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main
[`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid
setting the same seed in the main random number generator in all processes.
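
For instance, a minimal sketch restricting synchronization to the samplers' local generators only:

```python
from accelerate import Accelerator

# Only synchronize the `generator` attribute of the samplers, not the global torch RNG
accelerator = Accelerator(rng_types=["generator"])
```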

<Tip warning={true}>

Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random
artifacts you could have in your dataset (like random data augmentation) in the sense that all processes will get
the same random numbers from the torch random modules (so will apply the same random data augmentation if it's
controlled by torch).

</Tip>

<Tip>

The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local
`torch.Generator` object (in PyTorch >= 1.6), see the traditional `RandomSampler` as an example.

</Tip>

For more details about the internals, see the [Internals page](package_reference/torch_wrappers).

## Next steps

Now that you've been introduced to the main Accelerate features, your next steps could include:

* Check out the [tutorials](basic_tutorials/overview) for a gentle walkthrough of Accelerate. This is especially useful if you're new to distributed training and the library.
* Dive into the [guides](usage_guides/explore) to see how to use Accelerate for specific use-cases.
* Deepen your conceptual understanding of how Accelerate works internally by reading the [concept guides](concept_guides/internal_mechanism).
* Look up classes and commands in the [API reference](package_reference/accelerator) to see what parameters and options are available.

<Tip>

For more details on designing your own device map, see this section of the [concept guide](../concept_guides/big_model_inference#designing-a-device-map).

</Tip>

<Tip>

Multiple GPUs can be utilized, however this is considered "model parallelism" and as a result only one GPU will be active at a given moment, waiting for the prior one to send it the output. You should launch your script normally with `python`
and not need `torchrun`, `accelerate launch`, etc.

</Tip>

<!--
Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# DDP Communication Hooks

Distributed Data Parallel (DDP) communication hooks provide a generic interface to control how gradients are communicated across workers by overriding the vanilla allreduce in `DistributedDataParallel`. A few built-in communication hooks are provided, and users can easily apply any of these hooks to optimize communication.

- **FP16 Compression Hook**: Compresses gradients by casting them to half-precision floating-point format (`torch.float16`), reducing communication overhead.
- **BF16 Compression Hook**: Similar to FP16, but uses the Brain Floating Point format (`torch.bfloat16`), which can be more efficient on certain hardware.
- **PowerSGD Hook**: An advanced gradient compression algorithm that provides high compression rates and can accelerate bandwidth-bound distributed training.

In this tutorial, you will see how to quickly set up DDP communication hooks and perform training with the utilities provided in 🤗 Accelerate, which can be as simple as adding just one new line of code! This demonstrates how to use DDP communication hooks to optimize gradient communication in distributed training.

## FP16 Compression Hook

<hfoptions id="fp16">
<hfoption id="PyTorch">

```python
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks

class MyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(10, 10)

    def forward(self, x):
        return self.layer(x)

model = MyModel()
model = DDP(model, device_ids=[torch.cuda.current_device()])
model.register_comm_hook(state=None, hook=default_hooks.fp16_compress_hook)

# Training loop
for data, targets in data_loader:
    outputs = model(data)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
```

</hfoption>
<hfoption id="Accelerate">

```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
from torch.utils.data import DataLoader

class MyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(10, 10)

    def forward(self, x):
        return self.layer(x)

# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.FP16)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])

model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)

model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)

# Training loop
for data, targets in data_loader:
    outputs = model(data)
    loss = criterion(outputs, targets)
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()
```

</hfoption>
</hfoptions>

### BF16 Compression Hook

<Tip warning={true}>

BF16 Compression Hook API is experimental, and it requires NCCL version later than 2.9.6.

</Tip>

<hfoptions id="bf16">
<hfoption id="PyTorch">

```python
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks

class MyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(10, 10)

    def forward(self, x):
        return self.layer(x)

model = MyModel()
model = DDP(model, device_ids=[torch.cuda.current_device()])
model.register_comm_hook(state=None, hook=default_hooks.bf16_compress_hook)

# Training loop
for data, targets in data_loader:
    outputs = model(data)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
```

</hfoption>
<hfoption id="Accelerate">

```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
from torch.utils.data import DataLoader

class MyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(10, 10)

    def forward(self, x):
        return self.layer(x)

# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.BF16)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])

model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)

model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)

# Training loop
for data, targets in data_loader:
    outputs = model(data)
    loss = criterion(outputs, targets)
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()
```

</hfoption>
</hfoptions>

### PowerSGD Hook

<Tip warning={true}>

PowerSGD typically requires extra memory of the same size as the model’s gradients to enable error feedback, which can compensate for biased compressed communication and improve accuracy.

</Tip>

<hfoptions id="powerSGD">
<hfoption id="PyTorch">

```python
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook

class MyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(10, 10)

    def forward(self, x):
        return self.layer(x)

model = MyModel()
model = DDP(model, device_ids=[torch.cuda.current_device()])
state = powerSGD_hook.PowerSGDState(process_group=None)
model.register_comm_hook(state=state, hook=powerSGD_hook.powerSGD_hook)

# Training loop
for data, targets in data_loader:
    outputs = model(data)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
```

</hfoption>
<hfoption id="Accelerate">

```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
from torch.utils.data import DataLoader

class MyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(10, 10)

    def forward(self, x):
        return self.layer(x)

# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.POWER_SGD)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])

model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)

model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)

# Training loop
for data, targets in data_loader:
    outputs = model(data)
    loss = criterion(outputs, targets)
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()
```

</hfoption>
</hfoptions>

## DDP Communication Hooks utilities

There are two additional utilities for supporting optional functionalities with the communication hooks.

### comm_wrapper

`comm_wrapper` is an option to wrap a communication hook with additional functionality. For example, it can be used to combine FP16 compression with other communication strategies. Currently supported wrappers are `no`, `fp16`, and `bf16`.

```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
from torch.utils.data import DataLoader

class MyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(10, 10)

    def forward(self, x):
        return self.layer(x)

# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(
    comm_hook=DDPCommunicationHookType.POWER_SGD,
    comm_wrapper=DDPCommunicationHookType.FP16
)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])

model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)

model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)

# Training loop
for data, targets in data_loader:
    outputs = model(data)
    loss = criterion(outputs, targets)
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()
```

### comm_state_option

`comm_state_option` allows you to pass additional state information required by certain communication hooks. This is particularly useful for stateful hooks like `PowerSGD`, which require maintaining hyperparameters and internal states across training steps. Below is an example showcasing the use of `comm_state_option` with the `PowerSGD` hook.

```python
from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs
import torch
from torch.utils.data import DataLoader

class MyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(10, 10)

    def forward(self, x):
        return self.layer(x)

# DDP Communication Hook setup
ddp_kwargs = DistributedDataParallelKwargs(
    comm_hook=DDPCommunicationHookType.POWER_SGD,
    comm_state_option={"matrix_approximation_rank": 2}
)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])

model = MyModel()
optimizer = torch.optim.Adam(model.parameters())
data_loader = DataLoader(dataset, batch_size=16)

model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader)

# Training loop
for data, targets in data_loader:
    outputs = model(data)
    loss = criterion(outputs, targets)
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()
```

For more advanced usage and additional hooks, refer to the [PyTorch DDP Communication Hooks documentation](https://pytorch.org/docs/stable/ddp_comm_hooks.html).

<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Debugging Distributed Operations

When running scripts in a distributed fashion, often functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] (and others) are necessary to grab tensors across devices and perform certain operations on them. However, if the tensors being grabbed are not the proper shapes then this will result in your code hanging forever. The only sign that this is truly happening is hitting a timeout exception from `torch.distributed`, but this can get quite costly as usually the timeout is 10 minutes.

Accelerate now has a `debug` mode which adds a negligible amount of time to each operation, but allows it to verify that the inputs you are bringing in can *actually* perform the operation you want **without** hitting this timeout problem!

## Visualizing the problem

To have a tangible example of this issue, let's take the following setup (on 2 GPUs):

```python
import torch

from accelerate import PartialState
from accelerate.utils import broadcast

state = PartialState()
if state.process_index == 0:
    tensor = torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)
else:
    tensor = torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)

broadcast_tensor = broadcast(tensor)
print(broadcast_tensor)
```

We've created a single tensor on each device, with two radically different shapes. With this setup if we want to perform an operation such as [`utils.broadcast`], we would forever hit a timeout because `torch.distributed` requires that these operations have the **exact same shape** across all processes for it to work.

If you run this yourself, you will find that `broadcast_tensor` can be printed on the main process, but its results won't quite be right, and then it will just hang, never printing it on any of the other processes:

```
>>> tensor([[0, 1, 2, 3, 4]], device='cuda:0')
```

## The solution

By enabling Accelerate's operational debug mode, Accelerate will properly find and catch errors such as this and provide a very clear traceback immediately:

```
Traceback (most recent call last):
  File "/home/zach_mueller_huggingface_co/test.py", line 18, in <module>
    main()
  File "/home/zach_mueller_huggingface_co/test.py", line 15, in main
    broadcast_tensor = broadcast(tensor)
  File "/home/zach_mueller_huggingface_co/accelerate/src/accelerate/utils/operations.py", line 303, in wrapper
accelerate.utils.operations.DistributedOperationException: Cannot apply desired operation due to shape mismatches. All shapes across devices must be valid.

Operation: `accelerate.utils.operations.broadcast`
Input shapes:
  - Process 0: [1, 5]
  - Process 1: [1, 2, 5]
```

This explains that the shapes across our devices were *not* the same, and that we should ensure that they match properly to be compatible. Typically this means that there is either an extra dimension, or certain dimensions are incompatible with the operation.

To enable this, please do one of the following:

Enable it through the questionnaire during `accelerate config` (recommended)

From the CLI:

```
accelerate launch --debug {my_script.py} --arg1 --arg2
```

As an environment variable (which avoids the need for `accelerate launch`):

```
ACCELERATE_DEBUG_MODE="1" accelerate launch {my_script.py} --arg1 --arg2
```

Manually changing the `config.yaml` file:

```diff
compute_environment: LOCAL_MACHINE
+debug: true
```

# DeepSpeed

[DeepSpeed](https://github.com/microsoft/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Some of the salient optimizations are:

1. Optimizer state partitioning (ZeRO stage 1)
2. Gradient partitioning (ZeRO stage 2)
3. Parameter partitioning (ZeRO stage 3)
4. Custom mixed precision training handling
5. A range of fast CUDA-extension-based optimizers
6. ZeRO-Offload to CPU and Disk/NVMe
7. Hierarchical partitioning of model parameters (ZeRO++)

ZeRO-Offload has its own dedicated paper: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840). And NVMe-support is described in the paper [ZeRO-Infinity: Breaking the GPU
Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857).

🤗 Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:

1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config`. You just supply your custom config file or use our template. Most of
   this document is focused on this feature. This supports all the core features of DeepSpeed and gives the user a lot of flexibility.
   The user may have to change a few lines of code depending on the config.
2. Integration via `deepspeed_plugin`. This supports a subset of the DeepSpeed features and uses default options for the rest of the configurations.
   The user need not change any code and this is good for those who are fine with most of the default settings of DeepSpeed.

## What is integrated?

Training:

1. 🤗 Accelerate integrates all features of DeepSpeed ZeRO. This includes all the ZeRO stages 1, 2 and 3 as well as ZeRO-Offload, ZeRO-Infinity (which can offload to disk/NVMe) and ZeRO++.
Below is a short description of Data Parallelism using ZeRO - Zero Redundancy Optimizer along with diagram from this [blog post](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/)



e. **Param Offload**: Offloads the model parameters to CPU/Disk building on top of ZERO Stage 3

f. **Hierarchical Partitioning**: Enables efficient multi-node training with data-parallel training across nodes and ZeRO-3 sharding within a node, built on top of ZeRO Stage 3.

<u>Note</u>: With respect to Disk Offload, the disk should be an NVMe for decent speed but it technically works on any Disk

Inference:

**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Installation details](https://github.com/microsoft/DeepSpeed#installation)
for more information.

We will first look at the easy-to-use integration via `accelerate config`,
followed by the more flexible and feature-rich `deepspeed config file` integration.

### Accelerate DeepSpeed Plugin
On your machine(s) just run:

```bash
accelerate config
```

Currently, `Accelerate` supports the following config through the CLI:

```
`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them.
`gradient_clipping`: Enable gradient clipping with value.
`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2.
`offload_optimizer_nvme_path`: Decides Nvme Path to offload optimizer states. If unspecified, will default to 'none'.
`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.
`offload_param_nvme_path`: Decides Nvme Path to offload parameters. If unspecified, will default to 'none'.
`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3.
`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.
`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training.
`deepspeed_moe_layer_cls_names`: Comma-separated list of transformer Mixture-of-Experts (MoE) layer class names (case-sensitive) to wrap, e.g., `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ...
`deepspeed_hostfile`: DeepSpeed hostfile for configuring multi-node compute resources.
`deepspeed_exclusion_filter`: DeepSpeed exclusion filter string when using multi-node setup.
`deepspeed_inclusion_filter`: DeepSpeed inclusion filter string when using multi-node setup.
`deepspeed_multinode_launcher`: DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.
`deepspeed_config_file`: path to the DeepSpeed config file in `json` format. See the next section for more details on this.
```
To be able to tweak more options, you will need to use a DeepSpeed config file.

On your machine(s) just run:

```bash
accelerate config
```

and answer the questions asked. It will ask whether you want to use a config file for deepspeed to which you answer yes
and provide the path to the deepspeed config file.
This will generate a config file that will be used automatically to properly set the
default options when doing `accelerate launch`.

**ZeRO++ Config Example**
You can use the features of ZeRO++ by using the appropriate config parameters. Note that ZeRO++ is an extension for ZeRO Stage 3. Here is how the config file can be modified, from [DeepSpeed's ZeRO++ tutorial](https://www.deepspeed.ai/tutorials/zeropp/):

```json
{
    "zero_optimization": {
        "stage": 3,
        "reduce_bucket_size": "auto",

        "zero_quantized_weights": true,
        "zero_hpz_partition_size": 8,
        "zero_quantized_gradients": true,

        "contiguous_gradients": true,
        "overlap_comm": true
    }
}
```

For hierarchical partitioning, the partition size `zero_hpz_partition_size` should ideally be set to the number of GPUs per node. (For example, the above config file assumes 8 GPUs per node.)

**Important code changes when using DeepSpeed Config File**

1. DeepSpeed Optimizers and Schedulers. For more information on these,
see the [DeepSpeed Optimizers](https://deepspeed.readthedocs.io/en/latest/optimizers.html) and [DeepSpeed Schedulers](https://deepspeed.readthedocs.io/en/latest/schedulers.html) documentation.
We will look at the changes needed in the code when using these.

a. DS Optim + DS Scheduler: The case when both `optimizer` and `scheduler` keys are present in the DeepSpeed config file.
In this situation, those will be used and the user has to use `accelerate.utils.DummyOptim` and `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom optimizers and schedulers in their code.
Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
```python
# Creates Dummy Optimizer if `optimizer` was specified in the config file else creates Adam Optimizer
optimizer_cls = (
    torch.optim.AdamW
    if accelerator.state.deepspeed_plugin is None
    or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
    else DummyOptim
)
optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate)

# Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler
if (
    accelerator.state.deepspeed_plugin is None
    or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
    ...  # create the regular scheduler here
else:
    ...  # create a `DummyScheduler` here
```

b. Custom Optim + Custom Scheduler: The case when both `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.
In this situation, no code changes are needed from the user and this is the case when using integration via DeepSpeed Plugin.
In the above example we can see that the code remains unchanged if the `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.

c. Custom Optim + DS Scheduler: The case when only the `scheduler` key is present in the DeepSpeed config file.
In this situation, the user has to use `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom scheduler in their code.

d. DS Optim + Custom Scheduler: The case when only the `optimizer` key is present in the DeepSpeed config file.
This will result in an error because you can only use DS Scheduler when using DS Optim.

2. Notice the `auto` values in the above example DeepSpeed config files. These are automatically handled by the `prepare` method
based on the model, dataloaders, dummy optimizer and dummy schedulers provided to the `prepare` method.
Only the `auto` fields specified in above examples are handled by the `prepare` method and the rest have to be explicitly specified by the user.

The `auto` values are calculated as:

- `reduce_bucket_size`: `hidden_size * hidden_size`
- `stage3_prefetch_bucket_size`: `int(0.9 * hidden_size * hidden_size)`
- `stage3_param_persistence_threshold`: `10 * hidden_size`

For the `auto` feature to work for these 3 config entries - Accelerate will use `model.config.hidden_size` or `max(model.config.hidden_sizes)` as `hidden_size`. If neither of these is available, the launching will fail and you will have to set these 3 config entries manually. Remember the first 2 config entries are the communication buffers - the larger they are the more efficient the comms will be, and the larger they are the more GPU memory they will consume, so it's a tunable performance trade-off.
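
For illustration, if `hidden_size` were 4096, applying the formulas above gives the following values, which you could set explicitly in your DeepSpeed config (a hypothetical snippet, not a recommended tuning):

```json
{
    "zero_optimization": {
        "reduce_bucket_size": 16777216,
        "stage3_prefetch_bucket_size": 15099494,
        "stage3_param_persistence_threshold": 40960
    }
}
```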

**Things to note when using DeepSpeed Config File**

Below is a sample script using `deepspeed_config_file` in different scenarios.

3. Output of `accelerate launch test.py`:

```bash
ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored:
['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device',
'zero3_save_16bit_model', 'mixed_precision'].
Please specify them appropriately in the DeepSpeed config file.
If you are using an accelerate config file, remove other config variables mentioned in the above specified list.
The easiest method is to create a new config following the questionnaire via `accelerate config`.
It will only ask for the necessary config variables when using `deepspeed_config_file`.
```

```bash
$ accelerate config
-------------------------------------------------------------------------------------------------------------------------------
In which compute environment are you running?
This machine
-------------------------------------------------------------------------------------------------------------------------------
Which type of machine are you using?
multi-GPU
How many different machines will you use (use more than 1 for multi-node training)? [1]:
Do you wish to optimize your script with torch dynamo?[yes/NO]:
Do you want to use DeepSpeed? [yes/NO]: yes
Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes
Please enter the path to the json DeepSpeed config file: ds_config.json
Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes
How many GPU(s) should be used for distributed training? [1]:4
accelerate configuration saved at ds_config_sample.yaml
```

```bash
Mixed precision type: fp16
ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}}
```

**Note**:
1. Remaining `"auto"` values are handled in the `accelerator.prepare()` call as explained in point 2 of
`Important code changes when using DeepSpeed Config File`.
2. Only when `gradient_accumulation_steps` is `auto`, the value passed while creating the `Accelerator` object via `Accelerator(gradient_accumulation_steps=k)` will be used. When using the DeepSpeed Plugin, the value from it will be used and it will overwrite the value passed while creating the Accelerator object.

## Saving and loading

ZeRO Stage-3 has 2 options:

a. Saving the entire 16bit model weights to directly load later on using `model.load_state_dict(torch.load(pytorch_model.bin))`.
For this, either set `zero_optimization.stage3_gather_16bit_weights_on_model_save` to True in the DeepSpeed Config file or set
`zero3_save_16bit_model` to True in the DeepSpeed Plugin.
**Note that this option requires consolidation of the weights on one GPU; it can be slow and memory demanding, so only use this feature when needed.**

b. Alternatively, the model can save ZeRO partitions with `model.save_checkpoint()`, and the full weights can then be consolidated offline.
Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
```python
success = model.save_checkpoint(PATH, ckpt_id, checkpoint_state_dict)
status_msg = f"checkpointing: PATH={PATH}, ckpt_id={ckpt_id}"
if success:
    logging.info(f"Success {status_msg}")
else:
    logging.warning(f"Failure {status_msg}")
```
This will create ZeRO model and optimizer partitions along with the `zero_to_fp32.py` script in the checkpoint directory.
You can use this script to do offline consolidation.
It requires no configuration files or GPUs. Here is an example of its usage:
```bash
$ cd /path/to/checkpoint_dir
$ ./zero_to_fp32.py . pytorch_model.bin
```

Note that all these functions require ~2x memory (general RAM) of the size of the final checkpoint.

## ZeRO Inference
DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity.
It uses the same ZeRO protocol as training, but it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant.
With accelerate integration, you just need to prepare the model and dataloader as shown below:

```python
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
```

## Few caveats to be aware of

1. Current integration doesn't support Pipeline Parallelism of DeepSpeed.
2. Current integration doesn't support `mpu`, limiting the tensor parallelism which is supported in Megatron-LM.
3. Current integration doesn't support multiple models.

## DeepSpeed Resources

Papers:

- [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054)
- [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840)
- [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857)
- [ZeRO++: Extremely Efficient Collective Communication for Giant Model Training](https://arxiv.org/abs/2306.10209)

Finally, please remember that 🤗 `Accelerate` only integrates DeepSpeed, therefore if you
have any problems or questions with regards to DeepSpeed usage, please file an issue with [DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues).

<Tip>

For those interested in the similarities and differences between FSDP and DeepSpeed, please check out the [concept guide here](../concept_guides/fsdp_and_deepspeed)!

</Tip>
@ -15,12 +15,18 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# Distributed Inference with 🤗 Accelerate
|
||||
|
||||
Distributed inference is a common use case, especially with natural language processing (NLP) models. Users often want to
|
||||
send a number of different prompts, each to a different GPU, and then get the results back. This also has other cases
|
||||
outside of just NLP, however for this tutorial we will focus on just this idea of each GPU receiving a different prompt,
|
||||
and then returning the results.
|
||||
Distributed inference can fall into three brackets:
|
||||
|
||||
## The Problem
|
||||
1. Loading an entire model onto each GPU and sending chunks of a batch through each GPU's model copy at a time
|
||||
2. Loading parts of a model onto each GPU and processing a single input at one time
|
||||
3. Loading parts of a model onto each GPU and using what is called scheduled Pipeline Parallelism to combine the two prior techniques.
|
||||
|
||||
We're going to go through the first and the last bracket, showcasing how to do each as they are more realistic scenarios.
|
||||
|
||||
|
||||
## Sending chunks of a batch automatically to each loaded model
|
||||
|
||||
This is the most memory-intensive solution, as it requires each GPU to keep a full copy of the model in memory at a given time.
|
||||
|
||||
Normally when doing this, users send the model to a specific device to load it from the CPU, and then move each prompt to a different device.
|
||||
|
||||
@ -51,11 +57,10 @@ def run_inference(rank, world_size):
|
||||
One will notice how we have to check the rank to know what prompt to send, which can be a bit tedious.
|
||||
|
||||
A user might then also think that with 🤗 Accelerate, using the `Accelerator` to prepare a dataloader for such a task might also be
|
||||
a simple way to manage this. (To learn more, check out the relvent section in the [Quick Tour](../quicktour#distributed-evaluation))
|
||||
a simple way to manage this. (To learn more, check out the relevant section in the [Quick Tour](../quicktour#distributed-evaluation))
|
||||
|
||||
Can it manage it? Yes. Does it add unneeded extra code however: also yes.
|
||||
|
||||
## The Solution
|
||||
|
||||
With 🤗 Accelerate, we can simplify this process by using the [`Accelerator.split_between_processes`] context manager (which also exists in `PartialState` and `AcceleratorState`).
|
||||
This function will automatically split whatever data you pass to it (be it a prompt, a set of tensors, a dictionary of the prior data, etc.) across all the processes (with a potential
|
||||
@ -134,3 +139,99 @@ with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"],
|
||||
|
||||
On the first GPU, the prompts will be `["a dog", "a cat"]`, and on the second GPU it will be `["a chicken", "a chicken"]`.
|
||||
Make sure to drop the final sample, as it will be a duplicate of the previous one.
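For instance, here is a minimal sketch of gathering everything back while discarding that padded duplicate (assuming two processes and the `distributed_state` from the example above; the use of `gather_object` here is illustrative):

```python
from accelerate.utils import gather_object

prompts_to_run = ["a dog", "a cat", "a chicken"]
with distributed_state.split_between_processes(prompts_to_run, apply_padding=True) as prompts:
    results = [f"A picture of {prompt}" for prompt in prompts]

# Every process produced the same number of results thanks to the padding, so they can be
# gathered; slicing to the original length drops the duplicate introduced by `apply_padding`.
all_results = gather_object(results)[: len(prompts_to_run)]
```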
|
||||
|
||||
You can find more complex examples [here](https://github.com/huggingface/accelerate/tree/main/examples/inference/distributed) such as how to use it with LLMs.
|
||||
|
||||
## Memory-efficient pipeline parallelism (experimental)
|
||||
|
||||
This next part will discuss using *pipeline parallelism*. This is an **experimental** API utilizing the [PiPPy library by PyTorch](https://github.com/pytorch/PiPPy/) as a native solution.
|
||||
|
||||
The general idea with pipeline parallelism is: say you have 4 GPUs and a model big enough it can be *split* on four GPUs using `device_map="auto"`. With this method you can send in 4 inputs at a time (for example here, any amount works) and each model chunk will work on an input, then receive the next input once the prior chunk finished, making it *much* more efficient **and faster** than the method described earlier. Here's a visual taken from the PyTorch repository:
|
||||
|
||||

|
||||
|
||||
To illustrate how you can use this with Accelerate, we have created an [example zoo](https://github.com/huggingface/accelerate/tree/main/examples/inference) showcasing a number of different models and situations. In this tutorial, we'll show this method for GPT2 across two GPUs.
|
||||
|
||||
Before you proceed, please make sure you have the latest pippy installed by running the following:
|
||||
|
||||
```bash
|
||||
pip install torchpippy
|
||||
```
|
||||
|
||||
We require at least version 0.2.0. To confirm that you have the correct version, run `pip show torchpippy`.
|
||||
|
||||
Start by creating the model on the CPU:
|
||||
|
||||
```{python}
|
||||
from transformers import GPT2ForSequenceClassification, GPT2Config
|
||||
|
||||
config = GPT2Config()
|
||||
model = GPT2ForSequenceClassification(config)
|
||||
model.eval()
|
||||
```
|
||||
|
||||
Next you'll need to create some example inputs to use. These help PiPPy trace the model.
|
||||
|
||||
<Tip warning={true}>
|
||||
Whatever you use for this example input will determine the relative batch size used/passed through the model at a given time, so make sure to remember how many items there are!
|
||||
</Tip>
|
||||
|
||||
```{python}
|
||||
input = torch.randint(
|
||||
low=0,
|
||||
high=config.vocab_size,
|
||||
size=(2, 1024), # bs x seq_len
|
||||
device="cpu",
|
||||
dtype=torch.int64,
|
||||
requires_grad=False,
|
||||
)
|
||||
```
|
||||
Next we need to actually perform the tracing and get the model ready. To do so, use the [`inference.prepare_pippy`] function and it will fully wrap the model for pipeline parallelism automatically:
|
||||
|
||||
```{python}
|
||||
from accelerate.inference import prepare_pippy
|
||||
example_inputs = {"input_ids": input}
|
||||
model = prepare_pippy(model, example_args=(input,))
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
There are a variety of parameters you can pass through to `prepare_pippy`:
|
||||
|
||||
* `split_points` lets you determine what layers to split the model at. By default we use wherever `device_map="auto"` declares, such as `fc` or `conv1`.
|
||||
|
||||
* `num_chunks` determines how the batch will be split and sent to the model itself (so `num_chunks=1` with four split points/four GPUs will have a naive MP where a single input gets passed between the four layer split points); a short sketch follows this tip.
|
||||
|
||||
</Tip>
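For example, here is a sketch that sets both options explicitly for the GPT2 model traced above (the values are illustrative):

```{python}
model = prepare_pippy(
    model,
    example_args=(input,),
    split_points="auto",  # or a list of layer names to split the model at
    num_chunks=2,         # split the incoming batch into 2 micro-batches
)
```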
|
||||
|
||||
From here, all that's left is to actually perform the distributed inference!
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
When passing inputs, we highly recommend passing them in as a tuple of arguments. Using `kwargs` is supported, however, this approach is experimental.
|
||||
</Tip>
|
||||
|
||||
```{python}
|
||||
args = (input,)  # the positional forward arguments, e.g. the example token tensor created earlier
|
||||
with torch.no_grad():
|
||||
output = model(*args)
|
||||
```
|
||||
|
||||
When finished all the data will be on the last process only:
|
||||
|
||||
```{python}
|
||||
from accelerate import PartialState
|
||||
if PartialState().is_last_process:
|
||||
print(output)
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
If you pass in `gather_output=True` to [`inference.prepare_pippy`], the output will be sent
|
||||
across to all the GPUs afterwards without needing the `is_last_process` check. This is
|
||||
`False` by default as it incurs a communication call.
|
||||
|
||||
</Tip>
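For example, a sketch with `gather_output=True` (reusing the GPT2 setup from above):

```{python}
model = prepare_pippy(model, example_args=(input,), gather_output=True)

with torch.no_grad():
    output = model(input)

# `output` is now available on every process, so no `is_last_process` check is needed
print(output)
```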
|
||||
|
||||
And that's it! To explore more, please check out the inference examples in the [Accelerate repo](https://github.com/huggingface/accelerate/tree/main/examples/inference/pippy) and our [documentation](../package_reference/inference) as we work on improving this integration.
|
||||
|
||||
@ -16,7 +16,7 @@ rendered properly in your Markdown viewer.
|
||||
# Learning how to incorporate 🤗 Accelerate features quickly!
|
||||
|
||||
Please use the interactive tool below to help you get started with learning about a particular
|
||||
feature of 🤗 Accelerate and how to utilize it! It will provide you with a code diff, an explaination
|
||||
feature of 🤗 Accelerate and how to utilize it! It will provide you with a code diff, an explanation
|
||||
towards what is going on, as well as provide you with some useful links to explore more within
|
||||
the documentation!
|
||||
|
||||
|
||||
@ -36,27 +36,34 @@ default options when doing
|
||||
accelerate launch my_script.py --args_to_my_script
|
||||
```
|
||||
|
||||
For instance, here is how you would run the NLP example (from the root of the repo) with FSDP enabled:
|
||||
For instance, here is how you would run `examples/nlp_example.py` (from the root of the repo) with FSDP enabled:
|
||||
|
||||
```bash
|
||||
compute_environment: LOCAL_MACHINE
|
||||
deepspeed_config: {}
|
||||
debug: false
|
||||
distributed_type: FSDP
|
||||
downcast_bf16: 'no'
|
||||
fsdp_config:
|
||||
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
||||
fsdp_backward_prefetch_policy: BACKWARD_PRE
|
||||
fsdp_forward_prefetch: false
|
||||
fsdp_cpu_ram_efficient_loading: true
|
||||
fsdp_offload_params: false
|
||||
fsdp_sharding_strategy: 1
|
||||
fsdp_state_dict_type: FULL_STATE_DICT
|
||||
fsdp_sharding_strategy: FULL_SHARD
|
||||
fsdp_state_dict_type: SHARDED_STATE_DICT
|
||||
fsdp_sync_module_states: true
|
||||
fsdp_transformer_layer_cls_to_wrap: BertLayer
|
||||
fsdp_use_orig_params: true
|
||||
machine_rank: 0
|
||||
main_process_ip: null
|
||||
main_process_port: null
|
||||
main_training_function: main
|
||||
mixed_precision: 'no'
|
||||
mixed_precision: bf16
|
||||
num_machines: 1
|
||||
num_processes: 2
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_env: []
|
||||
tpu_use_cluster: false
|
||||
tpu_use_sudo: false
|
||||
use_cpu: false
|
||||
```
|
||||
|
||||
@ -66,40 +73,30 @@ accelerate launch examples/nlp_example.py
|
||||
|
||||
Currently, `Accelerate` supports the following config through the CLI:
|
||||
|
||||
```bash
|
||||
`Sharding Strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has full copy)
|
||||
`fsdp_sharding_strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has full copy). For more information, please refer the official [PyTorch docs](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.ShardingStrategy).
|
||||
|
||||
`Offload Params`: Decides Whether to offload parameters and gradients to CPU
|
||||
`fsdp_offload_params` : Decides Whether to offload parameters and gradients to CPU
|
||||
|
||||
`Auto Wrap Policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP
|
||||
`fsdp_auto_wrap_policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP
|
||||
|
||||
`Transformer Layer Class to Wrap`: When using `TRANSFORMER_BASED_WRAP`, user specifies comma-separated string of transformer layer class names (case-sensitive) to wrap ,e.g,
|
||||
`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput`...
|
||||
This is important because submodules that share weights (e.g., embedding layer) should not end up in different FSDP wrapped units.
|
||||
Using this policy, wrapping happens for each block containing Multi-Head Attention followed by couple of MLP layers.
|
||||
Remaining layers including the shared embeddings are conveniently wrapped in same outermost FSDP unit.
|
||||
Therefore, use this for transformer based models.
|
||||
You can use the `model._no_split_modules` for 🤗 Transformer models by answering `yes` to
|
||||
`Do you want to use the model's `_no_split_modules` to wrap. Only applicable for 🤗 Transformers`.
|
||||
It will try to use `model._no_split_modules` when available.
|
||||
`fsdp_transformer_layer_cls_to_wrap`: Only applicable for 🤗 Transformers. When using `fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP`, a user may provide a comma-separated string of transformer layer class names (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput`. This is important because submodules that share weights (e.g., embedding layers) should not end up in different FSDP wrapped units. Using this policy, wrapping happens for each block containing Multi-Head Attention followed by a couple of MLP layers. Remaining layers including the shared embeddings are conveniently wrapped in the same outermost FSDP unit. Therefore, use this for transformer-based models. You can rely on `model._no_split_modules` for 🤗 Transformers models by answering `yes` to the prompt `Do you want to use the model's _no_split_modules to wrap?`. It will try to use `model._no_split_modules` when possible.
|
||||
|
||||
`Min Num Params`: minimum number of parameters when using `SIZE_BASED_WRAP`
|
||||
`fsdp_min_num_params`: minimum number of parameters when using `fsdp_auto_wrap_policy=SIZE_BASED_WRAP`.
|
||||
|
||||
`Backward Prefetch`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH
|
||||
`fsdp_backward_prefetch_policy`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH
|
||||
|
||||
`State Dict Type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT
|
||||
`fsdp_forward_prefetch`: if True, then FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass. Should only be used for static-graph models since the prefetching follows the first iteration’s execution order. i.e., if the sub-modules' order changes dynamically during the model's execution do not enable this feature.
|
||||
|
||||
`Forward Prefetch`: if True, then FSDP explicitly prefetches the next upcoming
|
||||
all-gather while executing in the forward pass. only use with Static graphs.
|
||||
`fsdp_state_dict_type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT
|
||||
|
||||
`Use Orig Params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres.
|
||||
Useful in cases such as parameter-efficient fine-tuning.
|
||||
Please refer this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019)
|
||||
`fsdp_use_orig_params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. This setting is useful in cases such as parameter-efficient fine-tuning as discussed in [this post](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). This option also allows one to have multiple optimizer param groups. This should be `True` when creating an optimizer before preparing/wrapping the model with FSDP.
|
||||
|
||||
`Sync Module States`: If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0
|
||||
```
|
||||
`fsdp_cpu_ram_efficient_loading`: Only applicable for 🤗 Transformers models. If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. This should be set to False if you experience errors when loading the pretrained 🤗 Transformers model via the `from_pretrained` method. When this setting is True, `fsdp_sync_module_states` must also be True, otherwise all the processes except the main process would have random weights, leading to unexpected behaviour during training. For this to work, make sure the distributed process group is initialized before calling the Transformers `from_pretrained` method. When using the 🤗 Trainer API, the distributed process group is initialized when you create an instance of the `TrainingArguments` class.
|
||||
|
||||
|
||||
`fsdp_sync_module_states`: If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0.
|
||||
|
||||
|
||||
For additional and more nuanced control, you can specify other FSDP parameters via `FullyShardedDataParallelPlugin`.
|
||||
When creating `FullyShardedDataParallelPlugin` object, pass it the parameters that weren't part of the accelerate config or if you want to override them.
|
||||
The FSDP parameters will be picked based on the accelerate config file or launch command arguments and other parameters that you will pass directly through the `FullyShardedDataParallelPlugin` object will set/override that.
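For example, a minimal sketch of overriding a couple of state-dict options in code (the exact values are illustrative):

```py
from torch.distributed.fsdp.fully_sharded_data_parallel import FullOptimStateDictConfig, FullStateDictConfig

from accelerate import Accelerator, FullyShardedDataParallelPlugin

fsdp_plugin = FullyShardedDataParallelPlugin(
    state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),
    optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False),
)
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```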
|
||||
|
||||
@ -126,9 +123,9 @@ Below is the code snippet to save using `save_state` utility of accelerate.
|
||||
accelerator.save_state("ckpt")
|
||||
```
|
||||
|
||||
Inspect the ckeckpoint folder to see model and optimizer as shards per process:
|
||||
Inspect the checkpoint folder to see model and optimizer as shards per process:
|
||||
```
|
||||
ls ckpt
|
||||
|
||||
# optimizer_0 pytorch_model_0 random_states_0.pkl random_states_1.pkl scheduler.bin
|
||||
|
||||
cd ckpt
|
||||
@ -146,7 +143,7 @@ To load them back for resuming the training, use the `load_state` utility of acc
|
||||
accelerator.load_state("ckpt")
|
||||
```
|
||||
|
||||
When using transformers `save_pretrained`, pass `state_dict=accelerator.get_state_dict(model)` to save the model state dict.
|
||||
|
||||
Below is an example:
|
||||
|
||||
```diff
|
||||
@ -160,67 +157,44 @@ When using transformers `save_pretrained`, pass `state_dict=accelerator.get_stat
|
||||
|
||||
### State Dict
|
||||
|
||||
`accelerator.get_state_dict` will call the underlying `model.state_dict` implementation. With a model wrapped by FSDP, the default behavior of `state_dict` is to gather all of the state in the rank 0 device. This can cause CUDA out of memory errors if the parameters don't fit on a single GPU.
|
||||
|
||||
To avoid this, PyTorch provides a context manager that adjusts the behavior of `state_dict`. To offload some of the state dict onto CPU, you can use the following code:
|
||||
|
||||
```
|
||||
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
|
||||
|
||||
full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
|
||||
with FSDP.state_dict_type(unwrapped_model, StateDictType.FULL_STATE_DICT, full_state_dict_config):
|
||||
state = accelerator.get_state_dict(unwrapped_model)
|
||||
```
|
||||
`accelerator.get_state_dict` will call the underlying `model.state_dict` implementation using `FullStateDictConfig(offload_to_cpu=True, rank0_only=True)` context manager to get the state dict only for rank 0 and it will be offloaded to CPU.
|
||||
|
||||
You can then pass `state` into the `save_pretrained` method. There are several modes for `StateDictType` and `FullStateDictConfig` that you can use to control the behavior of `state_dict`. For more information, see the [PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html).
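For instance, a minimal sketch (assuming a 🤗 Transformers model prepared with FSDP; the output directory name is illustrative):

```py
state_dict = accelerator.get_state_dict(model)  # full state dict, gathered on rank 0 and offloaded to CPU

unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
    "fsdp_output",  # hypothetical output directory
    is_main_process=accelerator.is_main_process,
    save_function=accelerator.save,
    state_dict=state_dict,
)
```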
|
||||
|
||||
## A few caveats to be aware of
|
||||
If you choose to use `StateDictType.SHARDED_STATE_DICT`, the weights of the model during `Accelerator.save_state` will be split into `n` files for each sub-split on the model. To merge them back into
|
||||
a single dictionary to load back into the model later after training you can use the `merge_weights` utility:
|
||||
|
||||
- PyTorch FSDP auto wraps sub-modules, flattens the parameters and shards the parameters in place.
|
||||
Due to this, any optimizer created before model wrapping gets broken and occupies more memory.
|
||||
Hence, it is highly recommended and efficient to prepare the model before creating the optimizer.
|
||||
`Accelerate` will automatically wrap the model and create an optimizer for you in case of single model with a warning message.
|
||||
> FSDP Warning: When using FSDP, it is efficient and recommended to call prepare for the model before creating the optimizer
|
||||
```py
|
||||
from accelerate.utils import merge_fsdp_weights
|
||||
|
||||
However, below is the recommended way to prepare model and optimizer while using FSDP:
|
||||
# Our weights are saved usually in a `pytorch_model_fsdp_{model_number}` folder
|
||||
merge_fsdp_weights("pytorch_model_fsdp_0", "output_path", safe_serialization=True)
|
||||
```
|
||||
The final output will then either be saved to `model.safetensors` or `pytorch_model.bin` (if `safe_serialization=False` is passed).
|
||||
|
||||
```diff
|
||||
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
|
||||
+ model = accelerator.prepare(model)
|
||||
|
||||
optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)
|
||||
|
||||
- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
- )
|
||||
|
||||
+ optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
+ optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
+ )
|
||||
This can also be called using the CLI:
|
||||
```bash
|
||||
accelerate merge-weights pytorch_model_fsdp_0/ output_path
|
||||
```
|
||||
|
||||
- In case of a single model, if you have created the optimizer with multiple parameter groups and called prepare with them together,
|
||||
then the parameter groups will be lost and the following warning is displayed:
|
||||
> FSDP Warning: When using FSDP, several parameter groups will be conflated into
|
||||
> a single one due to nested module wrapping and parameter flattening.
|
||||
|
||||
This is because parameter groups created before wrapping will have no meaning post wrapping due to parameter flattening of nested FSDP modules into 1D arrays (which can consume many layers).
|
||||
For instance, below are the named parameters of an FSDP model on GPU 0 (When using 2 GPUs. Around 55M (110M/2) params in 1D arrays as this will have the 1st shard of the parameters).
|
||||
Here, if one has applied no weight decay for [bias, LayerNorm.weight] the named parameters of an unwrapped BERT model,
|
||||
it can't be applied to the below FSDP wrapped model as there are no named parameters with either of those strings and
|
||||
the parameters of those layers are concatenated with parameters of various other layers.
|
||||
```
|
||||
{
|
||||
'_fsdp_wrapped_module.flat_param': torch.Size([494209]),
|
||||
'_fsdp_wrapped_module._fpw_module.bert.embeddings.word_embeddings._fsdp_wrapped_module.flat_param': torch.Size([11720448]),
|
||||
'_fsdp_wrapped_module._fpw_module.bert.encoder._fsdp_wrapped_module.flat_param': torch.Size([42527232])
|
||||
}
|
||||
```
|
||||
|
||||
## Mapping between FSDP sharding strategies and DeepSpeed ZeRO Stages
|
||||
* `FULL_SHARD` maps to the DeepSpeed `ZeRO Stage-3`. Shards optimizer states, gradients and parameters.
|
||||
* `SHARD_GRAD_OP` maps to the DeepSpeed `ZeRO Stage-2`. Shards optimizer states and gradients.
|
||||
* `NO_SHARD` maps to `ZeRO Stage-0`. No sharding wherein each GPU has full copy of model, optimizer states and gradients.
|
||||
* `HYBRID_SHARD` maps to `ZeRO++ Stage-3` wherein `zero_hpz_partition_size=<num_gpus_per_node>`. Here, this will shard optimizer states, gradients and parameters within each node while each node has full copy.
|
||||
|
||||
- In case of multiple models, it is necessary to prepare the models before creating optimizers or else it will throw an error.
|
||||
Then pass the optimizers to the prepare call in the same order as corresponding models else `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour.
|
||||
## A few caveats to be aware of
|
||||
|
||||
- In case of multiple models, pass the optimizers to the prepare call in the same order as the corresponding models, otherwise `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour.
|
||||
- This feature is incompatible with `--predict_with_generate` in the `run_translation.py` script of 🤗 `Transformers` library.
|
||||
|
||||
For more control, users can leverage the `FullyShardedDataParallelPlugin`. After creating an instance of this class, users can pass it to the Accelerator class instantiation.
|
||||
For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.
|
||||
|
||||
|
||||
<Tip>
|
||||
|
||||
For those interested in the similarities and differences between FSDP and DeepSpeed, please check out the [concept guide here](../concept_guides/fsdp_and_deepspeed)!
|
||||
|
||||
</Tip>
|
||||
@ -118,8 +118,24 @@ You can remove all the special checks for the step number and the loss adjustmen
|
||||
As you can see the [`Accelerator`] is able to keep track of the batch number you are on and it will automatically know whether to step through the prepared optimizer and how to adjust the loss.
|
||||
|
||||
<Tip>
|
||||
|
||||
Typically with gradient accumulation, you would need to adjust the number of steps to reflect the change in total batches you are
|
||||
training on. 🤗 Accelerate automagically does this for you by default. Behind the scenes we instantiate a GradientAccumulationPlugin configured to do this.
|
||||
training on. 🤗 Accelerate automagically does this for you by default. Behind the scenes we instantiate a [`GradientAccumulationPlugin`] configured to do this.
|
||||
|
||||
</Tip>
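For instance, here is a sketch of configuring the plugin yourself instead of passing `gradient_accumulation_steps` (argument names assume the current [`GradientAccumulationPlugin`] API):

```{python}
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# accumulate over 4 steps and let the plugin adjust the scheduler accordingly
plugin = GradientAccumulationPlugin(num_steps=4, adjust_scheduler=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
```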
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
The [`state.GradientState`] is sync'd with the active dataloader being iterated upon. As such it assumes naively that when we have reached the end of the dataloader everything will sync and a step will be performed. To disable this, set `sync_with_dataloader` to be `False` in the [`GradientAccumulationPlugin`]:
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import GradientAccumulationPlugin
|
||||
|
||||
plugin = GradientAccumulationPlugin(sync_with_dataloader=False)
|
||||
accelerator = Accelerator(..., gradient_accumulation_plugin=plugin)
|
||||
```
|
||||
|
||||
</Tip>
|
||||
|
||||
## The finished code
|
||||
|
||||
@ -115,8 +115,11 @@ What is the IP address of the machine that will host the main process? 36.112.23
|
||||
What is the port you will use to communicate with the main process? 29500
|
||||
Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: yes
|
||||
Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:yes
|
||||
Do you want accelerate to launch mpirun? [yes/NO]: yes
|
||||
Please enter the path to the hostfile to use with mpirun [~/hostfile]: ~/hostfile
|
||||
Enter the number of oneCCL worker threads [1]: 1
|
||||
Do you wish to optimize your script with torch dynamo?[yes/NO]:NO
|
||||
How many CPU(s) should be used for distributed training? [1]:16
|
||||
How many processes should be used for distributed training? [1]:16
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
Do you wish to use FP16 or BF16 (mixed precision)?
|
||||
bf16
|
||||
@ -135,6 +138,9 @@ main_process_ip: 36.112.23.24
|
||||
main_process_port: 29500
|
||||
main_training_function: main
|
||||
mixed_precision: bf16
|
||||
mpirun_config:
|
||||
mpirun_ccl: '1'
|
||||
mpirun_hostfile: /home/user/hostfile
|
||||
num_machines: 4
|
||||
num_processes: 16
|
||||
rdzv_backend: static
|
||||
@ -148,6 +154,7 @@ use_cpu: true
|
||||
Set following env and using intel MPI to launch the training
|
||||
|
||||
In node0, you need to create a configuration file which contains the IP addresses of each node (for example hostfile) and pass that configuration file path as an argument.
|
||||
If you selected to have Accelerate launch `mpirun`, ensure that the location of your hostfile matches the path in the config.
|
||||
```bash
|
||||
$ cat hostfile
|
||||
xxx.xxx.xxx.xxx #node0 ip
|
||||
@ -155,7 +162,18 @@ xxx.xxx.xxx.xxx #node1 ip
|
||||
xxx.xxx.xxx.xxx #node2 ip
|
||||
xxx.xxx.xxx.xxx #node3 ip
|
||||
```
|
||||
Now, run the following command in node0 and **16DDP** will be enabled in node0,node1,node2,node3 with BF16 mixed precision:
|
||||
When Accelerate is launching `mpirun`, source the oneCCL bindings setvars.sh to get your Intel MPI environment, and then
|
||||
run your script using `accelerate launch`. Note that the python script and environment need to exist on all of the
|
||||
machines being used for multi-CPU training.
|
||||
```bash
|
||||
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
|
||||
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
|
||||
|
||||
accelerate launch examples/nlp_example.py
|
||||
```
|
||||
Otherwise, if you selected not to have Accelerate launch `mpirun`, run the following command in node0 and **16DDP** will
|
||||
be enabled in node0,node1,node2,node3 with BF16 mixed precision. When using this method, the python script, python
|
||||
environment, and accelerate config file need to be present on all of the machines used for multi-CPU training.
|
||||
```bash
|
||||
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
|
||||
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
|
||||
|
||||
@ -88,7 +88,7 @@ achieved by adding one `with LocalSGD` statement and one call `local_sgd.step()`
|
||||
+ local_sgd.step()
|
||||
```
|
||||
|
||||
Under the hood, the Local SGD code **disables** automatic gradient synchornization (but accumulation still works as expected!). Instead it averages model parameters every `local_sgd_steps` steps (as well as in the end of the training loop).
|
||||
Under the hood, the Local SGD code **disables** automatic gradient synchronization (but accumulation still works as expected!). Instead it averages model parameters every `local_sgd_steps` steps (as well as at the end of the training loop).
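Putting it together, here is a minimal sketch of a Local SGD training loop (assuming `model`, `optimizer` and `train_dataloader` have already been passed through `accelerator.prepare`):

```python
from accelerate.local_sgd import LocalSGD

with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for batch in train_dataloader:
        with accelerator.accumulate(model):
            output = model(**batch)
            loss = output.loss
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            # averages the model parameters every `local_sgd_steps` optimizer steps
            local_sgd.step()
```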
|
||||
|
||||
## Limitations
|
||||
|
||||
|
||||
145
docs/source/usage_guides/low_precision_training.md
Normal file
@ -0,0 +1,145 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Low Precision Training Methods
|
||||
|
||||
🤗 Accelerate provides integrations to train on lower precision methods using specified supported hardware through the `TransformersEngine` and `MS-AMP` packages. This documentation will help guide you through what hardware is supported, how to configure your [`Accelerator`] to leverage the low precision methods, and what you can expect when training.
|
||||
|
||||
## What training on FP8 means
|
||||
|
||||
To explore more of the nitty-gritty in training in FP8 with PyTorch and 🤗 Accelerate, check out the [concept_guide](../concept_guides/low_precision_training) on why this can be difficult. But essentially rather than training in BF16, some (or all) aspects of training a model can be performed using 8 bits instead of 16. The challenge is doing so without degrading final performance.
|
||||
|
||||
This is only enabled on specific NVIDIA hardware, namely:
|
||||
|
||||
* Anything after the 3000 series consumer graphics cards (such as the 4090)
|
||||
* Hopper-based GPU architectures (such as the `H100` and `H200`)
|
||||
|
||||
This results in some savings in the memory used (as we've cut the needed memory in half for some parts of training), and an increase in throughput *should* also be seen for larger models that can replace certain layers with FP8-enabled ones.
|
||||
|
||||
## Configuring the Accelerator
|
||||
|
||||
Currently two different backends for FP8 are supported (`TransformersEngine` and `MS-AMP`), each with different capabilities and configurations.
|
||||
|
||||
To use either, the same core API is used. Just pass `mixed_precision="fp8"` to either the [`Accelerator`], during `accelerate config` when prompted about mixed precision, or as part of your `config.yaml` file in the `mixed_precision` key:
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
accelerator = Accelerator(mixed_precision="fp8")
|
||||
```
|
||||
|
||||
By default, if `MS-AMP` is available in your environment, 🤗 Accelerate will automatically utilize it as a backend. To specify it yourself (and customize other parts of the FP8 mixed precision setup), you can utilize the [`utils.FP8RecipeKwargs`] or clarify it in your config `yaml`/during `accelerate launch`:
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import FP8RecipeKwargs
|
||||
kwargs = [FP8RecipeKwargs(backend="msamp")]
|
||||
# Or to specify the backend as `TransformersEngine` even if MS-AMP is installed
|
||||
# kwargs = [FP8RecipeKwargs(backend="te")]
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
|
||||
```
|
||||
|
||||
```{yaml}
|
||||
mixed_precision: fp8
|
||||
fp8_config:
|
||||
amax_compute_algorithm: max
|
||||
amax_history_length: 1024
|
||||
backend: TE
|
||||
fp8_format: HYBRID
|
||||
interval: 1
|
||||
margin: 0
|
||||
override_linear_precision: false
|
||||
use_autocast_during_eval: false
|
||||
```
|
||||
|
||||
## Configuring MS-AMP
|
||||
|
||||
Of the two, `MS-AMP` is traditionally the easier one to configure as there is only a single argument: the optimization level.
|
||||
|
||||
Currently two levels of optimization are supported in the 🤗 Accelerate integration, `"O1"` and `"O2"` (using the letter 'o', not zero).
|
||||
|
||||
* `"O1"` will cast the weight gradients and `all_reduce` communications to happen in 8-bit, while the rest are done in 16 bit. This reduces the general GPU memory usage and speeds up communication bandwidths.
|
||||
* `"O2"` will also cast first-order optimizer states into 8 bit, while the second order states are in FP16. (Currently just the `Adam` optimizer is supported). This tries its best to minimize final accuracy degradation and will save the highest potential memory.
|
||||
|
||||
To specify an optimization level, pass it to the [`FP8RecipeKwargs`] handler by setting the `optimization_level` argument:
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import FP8RecipeKwargs
|
||||
kwargs = [FP8RecipeKwargs(backend="msamp", optimization_level="O2")]
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
|
||||
```
|
||||
|
||||
Or during `accelerate launch` via `--fp8_backend=msamp --fp8_opt_level=O2`
|
||||
|
||||
Similarly this can be set in your `config.yaml`:
|
||||
|
||||
```{yaml}
|
||||
mixed_precision: fp8
|
||||
fp8_config:
|
||||
backend: MSAMP
|
||||
opt_level: O2
|
||||
```
|
||||
|
||||
## Configuring TransformersEngine
|
||||
|
||||
TransformersEngine has much more available for customizing how and what FP8 calculations are performed. A full list of supported arguments and what they mean is available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html), however they are restated as part of [`FP8RecipeKwargs`]'s docstring for your convenience.
|
||||
|
||||
🤗 Accelerate tries to set sensible defaults, but exploring and tweaking the various parameters yourself can potentially lead to better performance.
|
||||
|
||||
To use it, specify `backend="te"` and modify any of the arguments you want as part of your kwarg handler:
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import FP8RecipeKwargs
|
||||
kwargs = [FP8RecipeKwargs(backend="te", ...)]
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
|
||||
```
|
||||
|
||||
Or during `accelerate launch` via `--fp8_backend=te ...`. Use `accelerate launch --fp8_backend=te -h` to see relevant arguments.
|
||||
|
||||
Similarly this can be set in your `config.yaml`:
|
||||
|
||||
```{yaml}
|
||||
mixed_precision: fp8
|
||||
fp8_config:
|
||||
amax_compute_algorithm: max
|
||||
amax_history_length: 1024
|
||||
backend: TE
|
||||
fp8_format: HYBRID
|
||||
interval: 1
|
||||
margin: 0
|
||||
override_linear_precision: false
|
||||
use_autocast_during_eval: false
|
||||
```
|
||||
|
||||
## Example Zoo
|
||||
|
||||
We have examples showcasing training with FP8 both with accelerate and its underlying implementation available in the accelerate repo.
|
||||
Currently we support scripts showcasing:
|
||||
|
||||
* Single GPU
|
||||
* Distributed Data Parallelism (Multi-GPU)
|
||||
* Fully Sharded Data Parallelism
|
||||
* DeepSpeed ZeRO 1 through 3
|
||||
|
||||
Find out more [here](https://github.com/huggingface/accelerate/tree/main/benchmarks/fp8)
|
||||
|
||||
## Further Reading
|
||||
|
||||
To learn more about training in FP8 please check out the following resources:
|
||||
|
||||
* [Our concept guide](../concept_guides/low_precision_training) detailing more about both TransformersEngine and MS-AMP
|
||||
* [The `transformers-engine` documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html)
|
||||
* [The `MS-AMP` documentation](https://azure.github.io/MS-AMP/docs/)
|
||||
@ -9,7 +9,7 @@ Unless required by applicable law or agreed to in writing, software distributed
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
@ -107,13 +107,16 @@ cd ..
|
||||
4. Installing Megatron-LM
|
||||
|
||||
```
|
||||
pip install git+https://github.com/huggingface/Megatron-LM.git
|
||||
git clone https://github.com/NVIDIA/Megatron-LM.git
|
||||
cd Megatron-LM
|
||||
git checkout core_r0.5.0
|
||||
pip install --no-use-pep517 -e .
|
||||
```
|
||||
|
||||
## Accelerate Megatron-LM Plugin
|
||||
|
||||
Important features are directly supported via the `accelerate config` command.
|
||||
An example of thr corresponding questions for using Megatron-LM features is shown below:
|
||||
An example of the corresponding questions for using Megatron-LM features is shown below:
|
||||
|
||||
```bash
|
||||
:~$ accelerate config --config_file "megatron_gpt_config.yaml"
|
||||
@ -128,7 +131,7 @@ Do you want to enable Sequence Parallelism? [YES/no]:
|
||||
What is the Pipeline Parallelism degree/size? [1]:2
|
||||
What is the number of micro-batches? [1]:2
|
||||
Do you want to enable selective activation recomputation? [YES/no]:
|
||||
Do you want to use distributed optimizer which shards optimizer state and gradients across data pralellel ranks? [YES/no]:
|
||||
Do you want to use distributed optimizer which shards optimizer state and gradients across data parallel ranks? [YES/no]:
|
||||
What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]:
|
||||
How many GPU(s) should be used for distributed training? [1]:4
|
||||
Do you wish to use FP16 or BF16 (mixed precision)? [NO/fp16/bf16]: bf16
|
||||
@ -355,8 +358,8 @@ def main():
|
||||
|
||||
2. For using the Megatron-LM datasets, a few more changes are required. Dataloaders for these datasets
|
||||
are available only on rank 0 of each tensor parallel group. As such, there are ranks where the dataloader won't be
|
||||
avaiable and this requires tweaks to the training loop. Being able to do all this shows how
|
||||
felixble and extensible 🤗 Accelerate is. The changes required are as follows.
|
||||
available and this requires tweaks to the training loop. Being able to do all this shows how
|
||||
flexible and extensible 🤗 Accelerate is. The changes required are as follows.
|
||||
|
||||
a. For Megatron-LM indexed datasets, we need to use `MegatronLMDummyDataLoader`
|
||||
and pass the required dataset args to it such as `data_path`, `seq_length` etc.
|
||||
@ -542,12 +545,12 @@ megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args)
|
||||
This covers Decoder only, Encode only and Encoder-Decoder model classes.
|
||||
|
||||
2. Only loss is returned from model forward pass as
|
||||
there is quite complex interplay of pipeline, tensor and data parallelsim behind the scenes.
|
||||
there is quite complex interplay of pipeline, tensor and data parallelism behind the scenes.
|
||||
The `model(**batch_data)` call returns loss(es) averaged across the data parallel ranks.
|
||||
This is fine for most cases wherein pre-training jobs are run using Megatron-LM features and
|
||||
you can easily compute the `perplexity` using the loss.
|
||||
For GPT model, returning logits in addition to loss(es) is supported.
|
||||
These logits aren't gathered across data prallel ranks. Use `accelerator.utils.gather_across_data_parallel_groups`
|
||||
These logits aren't gathered across data parallel ranks. Use `accelerator.utils.gather_across_data_parallel_groups`
|
||||
to gather logits across data parallel ranks. These logits along with labels can be used for computing various
|
||||
performance metrics.
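A rough sketch of what that can look like (the import path for `gather_across_data_parallel_groups` and the way the output is unpacked are assumptions here):

```python
from accelerate.utils import gather_across_data_parallel_groups  # import path assumed

loss, logits = model(**batch_data)  # GPT model returning logits alongside the loss (assumed unpacking)
logits = gather_across_data_parallel_groups(logits)
# `logits` can now be compared against labels to compute metrics such as accuracy or perplexity
```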
|
||||
|
||||
@ -580,4 +583,4 @@ b. Megatron-LM [GPTModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatr
|
||||
c. Megatron-LM [T5Model](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/t5_model.py) :
|
||||
🤗 transformers models with `t5` in config's model type, e.g.,
|
||||
[T5](https://huggingface.co/docs/transformers/model_doc/t5) and
|
||||
[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)
|
||||
|
||||
|
||||
@ -1,58 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Memory Utilities
|
||||
|
||||
One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory",
|
||||
as the entire script needs to be restarted, progress is lost, and typically a developer would want to simply
|
||||
start their script and let it run.
|
||||
|
||||
`Accelerate` provides a utility heavily based on [toma](https://github.com/BlackHC/toma) to give this capability.
|
||||
|
||||
## find_executable_batch_size
|
||||
|
||||
This algorithm operates with exponential decay, decreasing the batch size in half after each failed run on some
|
||||
training script. To use it, restructure your training function to include an inner function that includes this wrapper,
|
||||
and build your dataloaders inside it. At a minimum, this could look like 4 new lines of code.
|
||||
> Note: The inner function *must* take in the batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for us
|
||||
|
||||
It should also be noted that anything which will consume CUDA memory and passed to the `accelerator` **must** be declared inside the inner function,
|
||||
such as models and optimizers.
|
||||
|
||||
```diff
|
||||
def training_function(args):
|
||||
accelerator = Accelerator()
|
||||
|
||||
+ @find_executable_batch_size(starting_batch_size=args.batch_size)
|
||||
+ def inner_training_loop(batch_size):
|
||||
+ nonlocal accelerator # Ensure they can be used in our context
|
||||
+ accelerator.free_memory() # Free all lingering references
|
||||
model = get_model()
|
||||
model.to(accelerator.device)
|
||||
optimizer = get_optimizer()
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
lr_scheduler = get_scheduler(
|
||||
optimizer,
|
||||
num_training_steps=len(train_dataloader)*num_epochs
|
||||
)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
train(model, optimizer, train_dataloader, lr_scheduler)
|
||||
validate(model, eval_dataloader)
|
||||
+ inner_training_loop()
|
||||
```
|
||||
|
||||
To find out more, check the documentation [here](../package_reference/utilities#accelerate.find_executable_batch_size).
|
||||
@ -32,6 +32,27 @@ Currently we support searching for models that can be used in `timm` and `transf
|
||||
|
||||
</Tip>
|
||||
|
||||
## Gradio Demos
|
||||
|
||||
Below are a few gradio demos related to what was described above. The first is the official Hugging Face memory estimation space, utilizing Accelerate directly:
|
||||
|
||||
<div class="block dark:hidden">
|
||||
<iframe
|
||||
src="https://hf-accelerate-model-memory-usage.hf.space?__theme=light"
|
||||
width="850"
|
||||
height="1600"
|
||||
></iframe>
|
||||
</div>
|
||||
<div class="hidden dark:block">
|
||||
<iframe
|
||||
src="https://hf-accelerate-model-memory-usage.hf.space?__theme=dark"
|
||||
width="850"
|
||||
height="1600"
|
||||
></iframe>
|
||||
</div>
|
||||
|
||||
A community member has taken the idea and expanded it further, allowing you to filter models directly and see if you can run a particular LLM given GPU constraints and LoRA configurations. To play with it, see [here](https://huggingface.co/spaces/Vokturz/can-it-run-llm) for more details.
|
||||
|
||||
## The Command
|
||||
|
||||
When using `accelerate estimate-memory`, you need to pass in the name of the model you want to use, potentially the framework
|
||||
@ -114,8 +135,3 @@ This calculation is accurate within a few % of the actual value, so it is a very
|
||||
|
||||
When performing inference you can expect to add up to an additional 20% as found by [EleutherAI](https://blog.eleuther.ai/transformer-math/). We'll be conducting research into finding a more accurate estimate to these values, and will update
|
||||
this calculator once done.
|
||||
|
||||
## Live Gradio Demo
|
||||
|
||||
Lastly, we invite you to try the [live Gradio demo](https://huggingface.co/spaces/hf-accelerate/model-memory-usage) of this utility,
|
||||
which includes an option to post a discussion thread on a models repository with this data. Doing so will help provide access to these numbers in the community faster and help users know what you've learned!
|
||||
334
docs/source/usage_guides/profiler.md
Normal file
@ -0,0 +1,334 @@
|
||||
<!--
|
||||
Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Profiler
|
||||
|
||||
Profiler is a tool that allows the collection of performance metrics during training and inference. Profiler’s context manager API can be used to better understand what model operators are the most expensive, examine their input shapes and stack traces, study device kernel activity, and visualize the execution trace. It provides insights into the performance of your model, allowing you to optimize and improve it.
|
||||
|
||||
This guide explains how to use PyTorch Profiler to measure the time and memory consumption of the model’s operators and how to integrate this with 🤗 Accelerate. We will cover various use cases and provide examples for each.
|
||||
|
||||
## Using profiler to analyze execution time
|
||||
|
||||
Profiler allows one to check which operators were called during the execution of a code range wrapped with a profiler context manager.
|
||||
|
||||
Let’s see how we can use profiler to analyze the execution time:
|
||||
|
||||
<hfoptions id="cpu execution time">
|
||||
<hfoption id="PyTorch">
|
||||
|
||||
```python
|
||||
import torch
|
||||
import torchvision.models as models
|
||||
from torch.profiler import profile, record_function, ProfilerActivity
|
||||
|
||||
model = models.resnet18()
|
||||
inputs = torch.randn(5, 3, 224, 224)
|
||||
|
||||
with profile(activities=[ProfilerActivity.CPU], record_shapes=True) as prof:
|
||||
model(inputs)
|
||||
|
||||
print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Accelerate">
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator, ProfileKwargs
|
||||
import torch
|
||||
import torchvision.models as models
|
||||
|
||||
model = models.resnet18()
|
||||
inputs = torch.randn(5, 3, 224, 224)
|
||||
|
||||
profile_kwargs = ProfileKwargs(
|
||||
activities=["cpu"],
|
||||
record_shapes=True
|
||||
)
|
||||
|
||||
accelerator = Accelerator(cpu=True, kwargs_handlers=[profile_kwargs])
|
||||
model = accelerator.prepare(model)
|
||||
|
||||
with accelerator.profile() as prof:
|
||||
with torch.no_grad():
|
||||
model(inputs)
|
||||
|
||||
print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
The resulting table output (omitting some columns):
|
||||
|
||||
```
|
||||
--------------------------------- ------------ ------------ ------------ ------------
|
||||
Name Self CPU CPU total CPU time avg # of Calls
|
||||
--------------------------------- ------------ ------------ ------------ ------------
|
||||
aten::conv2d 171.000us 52.260ms 2.613ms 20
|
||||
aten::convolution 227.000us 52.089ms 2.604ms 20
|
||||
aten::_convolution 270.000us 51.862ms 2.593ms 20
|
||||
aten::mkldnn_convolution 51.273ms 51.592ms 2.580ms 20
|
||||
aten::batch_norm 118.000us 7.059ms 352.950us 20
|
||||
aten::_batch_norm_impl_index 315.000us 6.941ms 347.050us 20
|
||||
aten::native_batch_norm 6.305ms 6.599ms 329.950us 20
|
||||
aten::max_pool2d 40.000us 4.008ms 4.008ms 1
|
||||
aten::max_pool2d_with_indices 3.968ms 3.968ms 3.968ms 1
|
||||
aten::add_ 780.000us 780.000us 27.857us 28
|
||||
--------------------------------- ------------ ------------ ------------ ------------
|
||||
Self CPU time total: 67.016ms
|
||||
```
|
||||
|
||||
To get a finer granularity of results and include operator input shapes, pass `group_by_input_shape=True` (note: this requires running the profiler with `record_shapes=True`):
|
||||
|
||||
```python
|
||||
print(prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total", row_limit=10))
|
||||
```
|
||||
|
||||
## Using profiler to analyze memory consumption
|
||||
|
||||
Profiler can also show the amount of memory (used by the model’s tensors) that was allocated (or released) during the execution of the model’s operators. To enable memory profiling functionality pass `profile_memory=True`.
|
||||
|
||||
<hfoptions id="memory consumption">
|
||||
<hfoption id="PyTorch">
|
||||
|
||||
```python
|
||||
model = models.resnet18()
|
||||
inputs = torch.randn(5, 3, 224, 224)
|
||||
|
||||
with profile(activities=[ProfilerActivity.CPU],
|
||||
profile_memory=True, record_shapes=True) as prof:
|
||||
model(inputs)
|
||||
|
||||
print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Accelerate">
|
||||
|
||||
```python
|
||||
model = models.resnet18()
|
||||
inputs = torch.randn(5, 3, 224, 224)
|
||||
|
||||
profile_kwargs = ProfileKwargs(
|
||||
activities=["cpu"],
|
||||
profile_memory=True,
|
||||
record_shapes=True
|
||||
)
|
||||
|
||||
accelerator = Accelerator(cpu=True, kwargs_handlers=[profile_kwargs])
|
||||
model = accelerator.prepare(model)
|
||||
|
||||
with accelerator.profile() as prof:
|
||||
model(inputs)
|
||||
|
||||
print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
The resulting table output (omitting some columns):
|
||||
|
||||
```
|
||||
--------------------------------- ------------ ------------ ------------
|
||||
Name CPU Mem Self CPU Mem # of Calls
|
||||
--------------------------------- ------------ ------------ ------------
|
||||
aten::empty 94.85 Mb 94.85 Mb 205
|
||||
aten::max_pool2d_with_indices 11.48 Mb 11.48 Mb 1
|
||||
aten::addmm 19.53 Kb 19.53 Kb 1
|
||||
aten::mean 10.00 Kb 10.00 Kb 1
|
||||
aten::empty_strided 492 b 492 b 5
|
||||
aten::cat 240 b 240 b 6
|
||||
aten::abs 480 b 240 b 4
|
||||
aten::masked_select 120 b 112 b 1
|
||||
aten::ne 61 b 53 b 3
|
||||
aten::eq 30 b 30 b 1
|
||||
--------------------------------- ------------ ------------ ------------
|
||||
Self CPU time total: 69.332ms
|
||||
```
|
||||
|
||||
|
||||
## Exporting chrome trace

You can examine the sequence of profiled operators and CUDA kernels in Chrome trace viewer (`chrome://tracing`):



<hfoptions id="exporting chrome trace">
<hfoption id="PyTorch">

```python
model = models.resnet18().cuda()
inputs = torch.randn(5, 3, 224, 224).cuda()

with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
    model(inputs)

prof.export_chrome_trace("trace.json")
```

</hfoption>
<hfoption id="Accelerate">

```python
profile_kwargs = ProfileKwargs(
    activities=["cpu", "cuda"],
    output_trace_dir="trace"
)

accelerator = Accelerator(kwargs_handlers=[profile_kwargs])
model = accelerator.prepare(model)

with accelerator.profile() as prof:
    model(inputs)

# The trace will be saved to the specified directory
```

</hfoption>
</hfoptions>

## Using Profiler to Analyze Long-Running Jobs

Profiler offers an additional API to handle long-running jobs (such as training loops). Tracing all of the execution can be slow and result in very large trace files. To avoid this, use optional arguments:

- `schedule_option`: Scheduling options allow you to control when profiling is active. This is useful for long-running jobs to avoid collecting too much data. Available keys are `wait`, `warmup`, `active`, `repeat` and `skip_first`. The profiler will skip the first `skip_first` steps, then wait for `wait` steps, then do the warmup for the next `warmup` steps, then do the active recording for the next `active` steps, and then repeat the cycle starting with `wait` steps. The optional number of cycles is specified with the `repeat` parameter; a value of zero means the cycles will continue until profiling is finished.
- `on_trace_ready`: specifies a function that takes a reference to the profiler as an input and is called by the profiler each time a new trace is ready.

To illustrate how the API works, consider the following example:

<hfoptions id="custom handler">
<hfoption id="PyTorch">

```python
from torch.profiler import schedule

my_schedule = schedule(
    skip_first=10,
    wait=5,
    warmup=1,
    active=3,
    repeat=2
)

def trace_handler(p):
    output = p.key_averages().table(sort_by="self_cuda_time_total", row_limit=10)
    print(output)
    p.export_chrome_trace("/tmp/trace_" + str(p.step_num) + ".json")

with profile(
    activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
    schedule=my_schedule,
    on_trace_ready=trace_handler
) as p:
    for idx in range(8):
        model(inputs)
        p.step()
```

</hfoption>
<hfoption id="Accelerate">

```python
def trace_handler(p):
    output = p.key_averages().table(sort_by="self_cuda_time_total", row_limit=10)
    print(output)
    p.export_chrome_trace("/tmp/trace_" + str(p.step_num) + ".json")

profile_kwargs = ProfileKwargs(
    activities=["cpu", "cuda"],
    schedule_option={"wait": 5, "warmup": 1, "active": 3, "repeat": 2, "skip_first": 10},
    on_trace_ready=trace_handler
)

accelerator = Accelerator(kwargs_handlers=[profile_kwargs])
model = accelerator.prepare(model)

with accelerator.profile() as prof:
    for idx in range(8):
        model(inputs)
        prof.step()
```

</hfoption>
</hfoptions>

## FLOPS

The profiler uses a formula to estimate the FLOPs (floating point operations) of specific operators (matrix multiplication and 2D convolution).

To measure floating-point operations (FLOPS):

<hfoptions id="FLOPS">
<hfoption id="PyTorch">

```python
with profile(
    activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
    with_flops=True
) as prof:
    model(inputs)

print(prof.key_averages().table(sort_by="flops", row_limit=10))
```

</hfoption>
<hfoption id="Accelerate">

```python
profile_kwargs = ProfileKwargs(
    with_flops=True
)
accelerator = Accelerator(kwargs_handlers=[profile_kwargs])

with accelerator.profile() as prof:
    model(inputs)

print(prof.key_averages().table(sort_by="flops", row_limit=10))
```

</hfoption>
</hfoptions>

The resulting table output (omitting some columns):

```
-------------------------------------------------------  ------------  ------------  ---------------
                                                   Name      Self CPU     Self CUDA      Total FLOPs
-------------------------------------------------------  ------------  ------------  ---------------
                                           aten::conv2d     197.000us       0.000us  18135613440.000
                                            aten::addmm     103.000us      17.000us      5120000.000
                                              aten::mul      29.000us       2.000us           30.000
                                      aten::convolution     409.000us       0.000us               --
                                     aten::_convolution     253.000us       0.000us               --
                                aten::cudnn_convolution       5.465ms       2.970ms               --
                                        cudaEventRecord     138.000us       0.000us               --
                                  cudaStreamIsCapturing      43.000us       0.000us               --
                                  cudaStreamGetPriority      40.000us       0.000us               --
                       cudaDeviceGetStreamPriorityRange      10.000us       0.000us               --
-------------------------------------------------------  ------------  ------------  ---------------
Self CPU time total: 21.938ms
Self CUDA time total: 4.165ms
```

## Conclusion and Further Information

PyTorch Profiler is a powerful tool for analyzing the performance of your models. By integrating it with 🤗 Accelerate, you can easily profile your models and gain insights into their performance, helping you to optimize and improve them.

For more detailed information, refer to the [PyTorch Profiler documentation](https://pytorch.org/docs/stable/profiler.html).

@ -20,12 +20,15 @@ There are a large number of experiment tracking API's available, however getting
|
||||
|
||||
## Integrated Trackers
|
||||
|
||||
Currently `Accelerate` supports four trackers out-of-the-box:
|
||||
Currently `Accelerate` supports seven trackers out-of-the-box:
|
||||
|
||||
- TensorBoard
|
||||
- WandB
|
||||
- CometML
|
||||
- Aim
|
||||
- MLFlow
|
||||
- ClearML
|
||||
- DVCLive
|
||||
|
||||
To use any of them, pass in the selected type(s) to the `log_with` parameter in [`Accelerator`]:
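A minimal sketch of that pattern (the tracker selection, project name, and logged values below are illustrative) is:

```python
from accelerate import Accelerator

# Select one or several of the supported trackers
accelerator = Accelerator(log_with=["tensorboard", "wandb"])

# Initialize the trackers once, then log scalars during training
accelerator.init_trackers("my_project", config={"lr": 2e-5})
accelerator.log({"train_loss": 0.42}, step=10)
accelerator.end_training()
```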
|
||||
```python
|
||||
@ -195,7 +198,7 @@ achieve the same outcome with:
|
||||
|
||||
```python
|
||||
wandb_tracker = accelerator.get_tracker("wandb", unwrap=True)
|
||||
with accelerator.on_main_process:
|
||||
if accelerator.is_main_process:
|
||||
wandb_tracker.log_artifact(some_artifact_to_log)
|
||||
```
|
||||
|
||||
|
||||
@ -15,7 +15,7 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# Example Zoo
|
||||
|
||||
Below contains a non-exhuastive list of tutorials and scripts showcasing 🤗 Accelerate
|
||||
Below contains a non-exhaustive list of tutorials and scripts showcasing 🤗 Accelerate
|
||||
|
||||
## Official Accelerate Examples:
|
||||
|
||||
@ -72,6 +72,11 @@ These are tutorials from libraries that integrate with 🤗 Accelerate:
|
||||
|
||||
> Don't find your integration here? Make a PR to include it!
|
||||
|
||||
### Amphion
|
||||
- [Training Text-to-Speech Models with Amphion](https://github.com/open-mmlab/Amphion/blob/main/egs/tts/README.md)
|
||||
- [Training Singing Voice Conversion Models with Amphion](https://github.com/open-mmlab/Amphion/blob/main/egs/svc/README.md)
|
||||
- [Training Vocoders with Amphion](https://github.com/open-mmlab/Amphion/blob/main/egs/vocoder/README.md)
|
||||
|
||||
### Catalyst
|
||||
|
||||
- [Distributed training tutorial with Catalyst](https://catalyst-team.github.io/catalyst/tutorials/ddp.html)
|
||||
@ -154,12 +159,12 @@ Below contains a non-exhaustive list of papers utilizing 🤗 Accelerate.
|
||||
* Puijin Cheng, Li Lin, Yijin Huang, Huaqing He, Wenhan Luo, Xiaoying Tang: “Learning Enhancement From Degradation: A Diffusion Model For Fundus Image Enhancement”, 2023; [arXiv:2303.04603](http://arxiv.org/abs/2303.04603).
|
||||
* Shun Shao, Yftah Ziser, Shay Cohen: “Erasure of Unaligned Attributes from Neural Representations”, 2023; [arXiv:2302.02997](http://arxiv.org/abs/2302.02997).
|
||||
* Seonghyeon Ye, Hyeonbin Hwang, Sohee Yang, Hyeongu Yun, Yireun Kim, Minjoon Seo: “In-Context Instruction Learning”, 2023; [arXiv:2302.14691](http://arxiv.org/abs/2302.14691).
|
||||
* Shikun Liu, Linxi Fan, Edward Johns, Zhiding Yu, Chaowei Xiao, Anima Anandkumar: “Prismer: A Vision-Language Model with An Ensemble of Experts”, 2023; [arXiv:2303.02506](http://arxiv.org/abs/2303.02506 ).
|
||||
* Shikun Liu, Linxi Fan, Edward Johns, Zhiding Yu, Chaowei Xiao, Anima Anandkumar: “Prismer: A Vision-Language Model with An Ensemble of Experts”, 2023; [arXiv:2303.02506](http://arxiv.org/abs/2303.02506).
|
||||
* Haoyu Chen, Zhihua Wang, Yang Yang, Qilin Sun, Kede Ma: “Learning a Deep Color Difference Metric for Photographic Images”, 2023; [arXiv:2303.14964](http://arxiv.org/abs/2303.14964).
|
||||
* Van-Hoang Le, Hongyu Zhang: “Log Parsing with Prompt-based Few-shot Learning”, 2023; [arXiv:2302.07435](http://arxiv.org/abs/2302.07435).
|
||||
* Keito Kudo, Yoichi Aoki, Tatsuki Kuribayashi, Ana Brassard, Masashi Yoshikawa, Keisuke Sakaguchi, Kentaro Inui: “Do Deep Neural Networks Capture Compositionality in Arithmetic Reasoning?”, 2023; [arXiv:2302.07866](http://arxiv.org/abs/2302.07866).
|
||||
* Ruoyao Wang, Peter Jansen, Marc-Alexandre Côté, Prithviraj Ammanabrolu: “Behavior Cloned Transformers are Neurosymbolic Reasoners”, 2022; [arXiv:2210.07382](http://arxiv.org/abs/2210.07382).
|
||||
* Martin Wessel, Tomáš Horych, Terry Ruas, Akiko Aizawa, Bela Gipp, Timo Spinde: “Introducing MBIB -- the first Media Bias Identification Benchmark Task and Dataset Collection”, 2023; [arXiv:2304.13148](http://arxiv.org/abs/2304.13148 ). DOI: [https://dx.doi.org/10.1145/3539618.3591882 10.1145/3539618.3591882].
|
||||
* Martin Wessel, Tomáš Horych, Terry Ruas, Akiko Aizawa, Bela Gipp, Timo Spinde: “Introducing MBIB -- the first Media Bias Identification Benchmark Task and Dataset Collection”, 2023; [arXiv:2304.13148](http://arxiv.org/abs/2304.13148). DOI: [https://dx.doi.org/10.1145/3539618.3591882 10.1145/3539618.3591882].
|
||||
* Hila Chefer, Yuval Alaluf, Yael Vinker, Lior Wolf, Daniel Cohen-Or: “Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models”, 2023; [arXiv:2301.13826](http://arxiv.org/abs/2301.13826).
|
||||
* Marcio Fonseca, Yftah Ziser, Shay B. Cohen: “Factorizing Content and Budget Decisions in Abstractive Summarization of Long Documents”, 2022; [arXiv:2205.12486](http://arxiv.org/abs/2205.12486).
|
||||
* Elad Richardson, Gal Metzer, Yuval Alaluf, Raja Giryes, Daniel Cohen-Or: “TEXTure: Text-Guided Texturing of 3D Shapes”, 2023; [arXiv:2302.01721](http://arxiv.org/abs/2302.01721).
|
||||
@ -172,4 +177,4 @@ Below contains a non-exhaustive list of papers utilizing 🤗 Accelerate.
|
||||
* Zhiruo Wang, Shuyan Zhou, Daniel Fried, Graham Neubig: “Execution-Based Evaluation for Open-Domain Code Generation”, 2022; [arXiv:2212.10481](http://arxiv.org/abs/2212.10481).
|
||||
* Minh-Long Luu, Zeyi Huang, Eric P. Xing, Yong Jae Lee, Haohan Wang: “Expeditious Saliency-guided Mix-up through Random Gradient Thresholding”, 2022; [arXiv:2212.04875](http://arxiv.org/abs/2212.04875).
|
||||
* Jun Hao Liew, Hanshu Yan, Daquan Zhou, Jiashi Feng: “MagicMix: Semantic Mixing with Diffusion Models”, 2022; [arXiv:2210.16056](http://arxiv.org/abs/2210.16056).
|
||||
* Yaqing Wang, Subhabrata Mukherjee, Xiaodong Liu, Jing Gao, Ahmed Hassan Awadallah, Jianfeng Gao: “LiST: Lite Prompted Self-training Makes Parameter-Efficient Few-shot Learners”, 2021; [arXiv:2110.06274](http://arxiv.org/abs/2110.06274).
|
||||
* Yaqing Wang, Subhabrata Mukherjee, Xiaodong Liu, Jing Gao, Ahmed Hassan Awadallah, Jianfeng Gao: “LiST: Lite Prompted Self-training Makes Parameter-Efficient Few-shot Learners”, 2021; [arXiv:2110.06274](http://arxiv.org/abs/2110.06274).
|
||||
|
||||
@ -28,6 +28,7 @@ pip install datasets evaluate transformers
|
||||
|
||||
The same script can be run in any of the following configurations:
|
||||
- single CPU or single GPU
|
||||
- multi CPUs
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
- (multi) TPUs
|
||||
- fp16 (mixed-precision) or fp32 (normal precision)
|
||||
@ -58,15 +59,27 @@ To run it in each of these various modes, use the following commands:
|
||||
* from any server with Accelerate launcher
|
||||
```bash
|
||||
accelerate launch --mixed_precision fp16 ./nlp_example.py
|
||||
- multi CPUs (requires Open MPI, Intel MPI, or MVAPICH)
|
||||
* With Accelerate config and launcher, execute the following from node 0:
|
||||
```bash
|
||||
accelerate config # Select to have accelerate launch mpirun
|
||||
accelerate launch ./nlp_example.py # This will run the script on each server
|
||||
```
|
||||
* With Intel MPI:
|
||||
```bash
|
||||
export CCL_WORKER_COUNT=1
|
||||
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
|
||||
mpirun -f hostfile -n 16 -ppn 4 python ./nlp_example.py
|
||||
```
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher
|
||||
```bash
|
||||
accelerate config # This will create a config file on your server
|
||||
accelerate launch ./nlp_example.py # This will run the script on your server
|
||||
```
|
||||
* With traditional PyTorch launcher (`torch.distributed.launch` can be used with older versions of PyTorch)
|
||||
* With traditional PyTorch launcher (`python -m torch.distributed.run` can be used instead of `torchrun`)
|
||||
```bash
|
||||
python -m torchrun --nproc_per_node 2 --use_env ./nlp_example.py
|
||||
torchrun --nproc_per_node 2 ./nlp_example.py
|
||||
```
|
||||
- multi GPUs, multi node (several machines, using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher, on each machine:
|
||||
@ -74,18 +87,15 @@ To run it in each of these various modes, use the following commands:
|
||||
accelerate config # This will create a config file on each server
|
||||
accelerate launch ./nlp_example.py # This will run the script on each server
|
||||
```
|
||||
* With PyTorch launcher only (`torch.distributed.launch` can be used in older versions of PyTorch)
|
||||
* With PyTorch launcher only (`python -m torch.distributed.run` can be used instead of `torchrun`). Run this command on each node:
|
||||
```bash
|
||||
python -m torchrun --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 0 \
|
||||
--master_addr master_node_ip_address \
|
||||
./nlp_example.py # On the first server
|
||||
python -m torchrun --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 1 \
|
||||
--master_addr master_node_ip_address \
|
||||
./nlp_example.py # On the second server
|
||||
torchrun \ # python -m torch.distributed.run
|
||||
--nproc_per_node 2 \
|
||||
--nnodes 2 \
|
||||
--rdzv_id 2299 \ # A unique job id
|
||||
--rdzv_backend c10d \
|
||||
--rdzv_endpoint master_node_ip_address:29500 \
|
||||
./nlp_example.py
|
||||
```
|
||||
- (multi) TPUs
|
||||
* With Accelerate config and launcher
|
||||
@ -103,6 +113,7 @@ The [cv_example.py](./cv_example.py) script is a simple example to fine-tune a R
|
||||
|
||||
The same script can be run in any of the following configurations:
|
||||
- single CPU or single GPU
|
||||
- multi CPUs
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
- (multi) TPUs
|
||||
- fp16 (mixed-precision) or fp32 (normal precision)
|
||||
@ -146,40 +157,49 @@ To run it in each of these various modes, use the following commands:
|
||||
* from any server with Accelerate launcher
|
||||
```bash
|
||||
accelerate launch --mixed_precision fp16 ./cv_example.py --data_dir path_to_data
|
||||
- multi CPUs (requires Open MPI, Intel MPI, or MVAPICH)
|
||||
* With Accelerate config and launcher, run the following from node 0:
|
||||
```bash
|
||||
accelerate config --config_file config.yaml # Select to have accelerate launch mpirun
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
```
|
||||
* With Intel MPI, execute mpirun from node 0:
|
||||
```bash
|
||||
export CCL_WORKER_COUNT=1
|
||||
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
|
||||
mpirun -f hostfile -n 16 -ppn 4 python ./cv_example.py --data_dir path_to_data
|
||||
```
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher
|
||||
```bash
|
||||
accelerate config # This will create a config file on your server
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on your server
|
||||
accelerate config --config_file config.yaml # This will create a config file `config.yaml` on your server
|
||||
accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on your server
|
||||
```
|
||||
* With traditional PyTorch launcher (`torch.distributed.launch` can be used with older versions of PyTorch)
|
||||
* With traditional PyTorch launcher (`python -m torch.distributed.run` can be used instead of `torchrun`)
|
||||
```bash
|
||||
python -m torchrun --nproc_per_node 2 --use_env ./cv_example.py --data_dir path_to_data
|
||||
torchrun --nproc_per_node 2 ./cv_example.py --data_dir path_to_data
|
||||
```
|
||||
- multi GPUs, multi node (several machines, using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher, on each machine:
|
||||
```bash
|
||||
accelerate config # This will create a config file on each server
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
accelerate config --config_file config.yaml # This will create a config file `config.yaml` on your server
|
||||
accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
```
|
||||
* With PyTorch launcher only (`torch.distributed.launch` can be used with older versions of PyTorch)
|
||||
* With PyTorch launcher only (`python -m torch.distributed.run` can be used instead of `torchrun`). Run this command on each node:
|
||||
```bash
|
||||
python -m torchrun --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 0 \
|
||||
--master_addr master_node_ip_address \
|
||||
./cv_example.py --data_dir path_to_data # On the first server
|
||||
python -m torchrun --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 1 \
|
||||
--master_addr master_node_ip_address \
|
||||
./cv_example.py --data_dir path_to_data # On the second server
|
||||
torchrun \ # python -m torch.distributed.run
|
||||
--nproc_per_node 2 \
|
||||
--nnodes 2 \
|
||||
--rdzv_id 2299 \ # A unique job id
|
||||
--rdzv_backend c10d \
|
||||
--rdzv_endpoint master_node_ip_address:29500 \
|
||||
./cv_example.py --data_dir path_to_data
|
||||
```
|
||||
- (multi) TPUs
|
||||
* With Accelerate config and launcher
|
||||
```bash
|
||||
accelerate config # This will create a config file on your TPU server
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
accelerate config --config_file config.yaml # This will create a config file `config.yaml` on your server
|
||||
accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
```
|
||||
* In PyTorch:
|
||||
Add an `xmp.spawn` line in your script as you usually do.
|
||||
@ -188,11 +208,40 @@ To run it in each of these various modes, use the following commands:
|
||||
|
||||
- [huggan project](https://github.com/huggingface/community-events/tree/main/huggan)
|
||||
|
||||
|
||||
### Using AWS SageMaker integration
|
||||
- [Examples showcasing AWS SageMaker integration of 🤗 Accelerate.](https://github.com/pacman100/accelerate-aws-sagemaker)
|
||||
|
||||
## Configuration zoo
|
||||
In [/config_yaml_templates](./config_yaml_templates/) we have a variety of *minimal* `config.yaml` templates and examples to help you learn
|
||||
how to create your own configuration files depending on the scenario.
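As a point of reference, a minimal multi-GPU template might look like the sketch below (the field values are illustrative; the templates in the folder are the authoritative versions):

```yaml
# Minimal sketch: 2 GPUs on a single machine with fp16 mixed precision
compute_environment: LOCAL_MACHINE
distributed_type: MULTI_GPU
num_machines: 1
num_processes: 2
mixed_precision: fp16
```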
|
||||
|
||||
## Simple Multi-GPU Hardware Launcher
|
||||
## SLURM Scripts
|
||||
In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) and [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we present two scripts for running the examples on a machine with [SLURM](https://slurm.schedmd.com/documentation.html) workload manager.
|
||||
|
||||
In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) the only parameter in the launcher that needs to be modified is `--num_processes`, which determines the number of GPUs we will use. In this case, using the environment variable `$SLURM_GPUS`, we indicate that we want to utilize all the GPUs available on the node we have requested.
|
||||
|
||||
In [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many GPUs we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip`, which will be the address of the master node, and the `--main_process_port`.
|
||||
|
||||
In [/slurm/submit_multicpu.sh](./slurm/submit_multicpu.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many CPU processes we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip`, which will be the address of the master node, and the `--main_process_port`. Setting `mpirun_hostfile` makes the job launch through MPIRun. A sketch of such a launch command is shown below.
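As a rough sketch (the example script path and the port are placeholders, and the SLURM variables are resolved by the scheduler and the environment script), the multi-node launch inside such a script boils down to something like:

```bash
# Illustrative only -- the actual flags live in the SLURM scripts themselves
accelerate launch \
    --num_machines $SLURM_NNODES \
    --num_processes $((SLURM_NNODES * GPUS_PER_NODE)) \
    --machine_rank $SLURM_NODEID \
    --main_process_ip $MASTER_ADDR \
    --main_process_port 29500 \
    ./nlp_example.py
```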
|
||||
|
||||
In both scripts, we run `activateEnvironment.sh` at the beginning. This script should contain the necessary instructions to initialize the environment for execution. Below, we show an example that loads the necessary libraries ([Environment modules](https://github.com/cea-hpc/modules)), activates the Python environment, and sets up various environment variables, most of them to run the scripts in offline mode in case we don't have an internet connection from the cluster.
|
||||
|
||||
```bash
|
||||
# activateEnvironment.sh
|
||||
module purge
|
||||
module load anaconda3/2020.02 cuda/10.2 cudnn/8.0.5 nccl/2.9.9 arrow/7.0.0 openmpi
|
||||
source activate /home/nct01/nct01328/pytorch_antoni_local
|
||||
|
||||
export HF_HOME=/gpfs/projects/nct01/nct01328/
|
||||
export HF_LOCAL_HOME=/gpfs/projects/nct01/nct01328/HF_LOCAL
|
||||
export HF_DATASETS_OFFLINE=1
|
||||
export TRANSFORMERS_OFFLINE=1
|
||||
export PYTHONPATH=/home/nct01/nct01328/transformers-in-supercomputers:$PYTHONPATH
|
||||
export GPUS_PER_NODE=4
|
||||
```
|
||||
|
||||
## Simple Multi-GPU Hardware Launcher (using an external platform)
|
||||
|
||||
[multigpu_remote_launcher.py](./multigpu_remote_launcher.py) is a minimal script that demonstrates launching accelerate
|
||||
on multiple remote GPUs, and with automatic hardware environment and dependency setup for reproducibility. You can
|
||||
|
||||
@ -88,4 +88,34 @@ These arguments should be added at the end of any method for starting the python
|
||||
accelerate launch ./local_sgd.py --local_sgd_steps 4
|
||||
```
|
||||
|
||||
### DDP Communication Hook (`ddp_comm_hook.py`)
|
||||
|
||||
- Shows how to use DDP Communication Hooks to control and optimize gradient communication across workers in a DistributedDataParallel setup.
|
||||
- Arguments available:
|
||||
- `ddp_comm_hook`, the type of DDP communication hook to use. Choose between `no`, `fp16`, `bf16`, `power_sgd`, and `batched_power_sgd`.
|
||||
|
||||
These arguments should be added at the end of any method for starting the python script (such as `accelerate launch`, `python -m torch.distributed.run`), such as:
|
||||
|
||||
```bash
|
||||
accelerate launch ./ddp_comm_hook.py --mixed_precision fp16 --ddp_comm_hook power_sgd
|
||||
```
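Under the hood, the script maps that flag onto a kwargs handler and passes it to `Accelerator`; stripped to its essentials (with a toy model standing in for the real one), the wiring looks like this:

```python
import torch

from accelerate import Accelerator
from accelerate.utils import DDPCommunicationHookType, DistributedDataParallelKwargs

# Turn the CLI choice (e.g. "power_sgd") into a hook type and hand it to the Accelerator
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType("power_sgd"))
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])

model = torch.nn.Linear(8, 2)  # toy model for illustration
model = accelerator.prepare(model)  # the hook is registered when the model is wrapped in DDP
```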
|
||||
|
||||
### Profiler (`profiler.py`)
|
||||
|
||||
- Shows how to use the profiling capabilities of `Accelerate` to profile PyTorch models during training.
|
||||
- Uses the `ProfileKwargs` handler to customize profiling options, including activities, scheduling, and additional profiling options.
|
||||
- Can generate and save profiling traces in JSON format for visualization in Chrome's tracing tool.
|
||||
|
||||
Arguments available:
|
||||
- `--record_shapes`: If passed, records shapes for profiling.
|
||||
- `--profile_memory`: If passed, profiles memory usage.
|
||||
- `--with_stack`: If passed, profiles stack traces.
|
||||
- `--with_flops`: If passed, profiles floating point operations (FLOPS).
|
||||
- `--output_trace_dir`: If specified, saves the profiling trace to the given dir in JSON format.
|
||||
- `--cpu`: If passed, trains on the CPU instead of GPU.
|
||||
|
||||
These arguments should be added at the end of any method for starting the Python script (such as `python`, `accelerate launch`, `torchrun`), such as:
|
||||
|
||||
```bash
|
||||
accelerate launch ./profiler.py --record_shapes --profile_memory --with_flops --output_trace_dir "profiler"
|
||||
```
|
||||
|
||||
@ -217,6 +217,7 @@ def training_function(config, args):
|
||||
# And call it at the end with no arguments
|
||||
# Note: You could also refactor this outside of your training loop function
|
||||
inner_training_loop()
|
||||
accelerator.end_training()
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -20,9 +19,10 @@ import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate import Accelerator, DataLoaderConfiguration, DistributedType
|
||||
from accelerate.utils import set_seed
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -86,7 +86,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -126,7 +126,8 @@ def training_function(config, args):
|
||||
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
|
||||
config["num_epochs"] = 2
|
||||
# Initialize accelerator
|
||||
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
|
||||
dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=args.use_stateful_dataloader)
|
||||
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, dataloader_config=dataloader_config)
|
||||
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
|
||||
lr = config["lr"]
|
||||
num_epochs = int(config["num_epochs"])
|
||||
@ -154,7 +155,7 @@ def training_function(config, args):
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
@ -218,8 +219,11 @@ def training_function(config, args):
|
||||
model.train()
|
||||
# New Code #
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
|
||||
# We need to skip steps until we reach the resumed step
|
||||
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
|
||||
# We need to skip steps until we reach the resumed step only if we are not using a stateful dataloader
|
||||
if not args.use_stateful_dataloader:
|
||||
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
|
||||
else:
|
||||
active_dataloader = train_dataloader
|
||||
overall_step += resume_step
|
||||
else:
|
||||
# After the first iteration though, we need to go back to the original dataloader
|
||||
@ -249,7 +253,6 @@ def training_function(config, args):
|
||||
if args.output_dir is not None:
|
||||
output_dir = os.path.join(args.output_dir, output_dir)
|
||||
accelerator.save_state(output_dir)
|
||||
|
||||
model.eval()
|
||||
for step, batch in enumerate(eval_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
|
||||
@ -262,7 +265,6 @@ def training_function(config, args):
|
||||
predictions=predictions,
|
||||
references=references,
|
||||
)
|
||||
|
||||
eval_metric = metric.compute()
|
||||
# Use accelerator.print to print only on the main process.
|
||||
accelerator.print(f"epoch {epoch}:", eval_metric)
|
||||
@ -277,6 +279,7 @@ def training_function(config, args):
|
||||
if args.output_dir is not None:
|
||||
output_dir = os.path.join(args.output_dir, output_dir)
|
||||
accelerator.save_state(output_dir)
|
||||
accelerator.end_training()
|
||||
|
||||
|
||||
def main():
|
||||
@ -309,6 +312,11 @@ def main():
|
||||
default=None,
|
||||
help="If the training should continue from a checkpoint folder.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--use_stateful_dataloader",
|
||||
action="store_true",
|
||||
help="If the dataloader should be a resumable stateful dataloader.",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
|
||||
training_function(config, args)
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -106,7 +105,7 @@ def get_fold_dataloaders(
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -157,7 +156,7 @@ def training_function(config, args):
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
@ -249,13 +248,14 @@ def training_function(config, args):
|
||||
# Use accelerator.print to print only on the main process.
|
||||
test_predictions.append(torch.cat(fold_predictions, dim=0))
|
||||
# We now need to release all our memory and get rid of the current model, optimizer, etc
|
||||
accelerator.free_memory()
|
||||
model, optimizer = accelerator.free_memory(model, optimizer)
|
||||
# New Code #
|
||||
# Finally we check the accuracy of our folded results:
|
||||
test_references = torch.cat(test_references, dim=0)
|
||||
preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
|
||||
test_metric = metric.compute(predictions=preds, references=test_references)
|
||||
accelerator.print("Average test metrics from all folds:", test_metric)
|
||||
accelerator.end_training()
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
232
examples/by_feature/ddp_comm_hook.py
Normal file
@ -0,0 +1,232 @@
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.utils import DDPCommunicationHookType, DistributedDataParallelKwargs
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate
|
||||
# and perform ddp communication hook
|
||||
#
|
||||
# This example trains a Bert base model on GLUE MRPC
|
||||
# in any of the following settings (with the same script):
|
||||
# - single CPU or single GPU
|
||||
# - multi GPUS (using PyTorch distributed mode)
|
||||
# - (multi) TPUs
|
||||
# - fp16 (mixed-precision) or fp32 (normal precision)
|
||||
#
|
||||
# To run it in each of these various modes, follow the instructions
|
||||
# in the readme for examples:
|
||||
# https://github.com/huggingface/accelerate/tree/main/examples
|
||||
#
|
||||
########################################################################
|
||||
|
||||
|
||||
MAX_GPU_BATCH_SIZE = 16
|
||||
EVAL_BATCH_SIZE = 32
|
||||
|
||||
|
||||
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
"""
|
||||
Creates a set of `DataLoader`s for the `glue` dataset,
|
||||
using "bert-base-cased" as the tokenizer.
|
||||
|
||||
Args:
|
||||
accelerator (`Accelerator`):
|
||||
An `Accelerator` object
|
||||
batch_size (`int`, *optional*):
|
||||
The batch size for the train and validation DataLoaders.
|
||||
"""
|
||||
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
|
||||
datasets = load_dataset("glue", "mrpc")
|
||||
|
||||
def tokenize_function(examples):
|
||||
# max_length=None => use the model max length (it's actually the default)
|
||||
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
|
||||
return outputs
|
||||
|
||||
# Apply the method we just defined to all the examples in all the splits of the dataset
|
||||
# starting with the main process first:
|
||||
with accelerator.main_process_first():
|
||||
tokenized_datasets = datasets.map(
|
||||
tokenize_function,
|
||||
batched=True,
|
||||
remove_columns=["idx", "sentence1", "sentence2"],
|
||||
)
|
||||
|
||||
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
|
||||
# transformers library
|
||||
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
|
||||
)
|
||||
eval_dataloader = DataLoader(
|
||||
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
|
||||
)
|
||||
|
||||
return train_dataloader, eval_dataloader
|
||||
|
||||
|
||||
# For testing only
|
||||
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
|
||||
from accelerate.test_utils.training import mocked_dataloaders
|
||||
|
||||
get_dataloaders = mocked_dataloaders # noqa: F811
|
||||
|
||||
|
||||
def training_function(config, args):
|
||||
# For testing only
|
||||
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
|
||||
config["num_epochs"] = 2
|
||||
# New Code #
|
||||
ddp_comm_hook_type = DDPCommunicationHookType(args.ddp_comm_hook)
|
||||
ddp_comm_wrapper = DDPCommunicationHookType(args.ddp_comm_wrapper)
|
||||
ddp_kwargs = DistributedDataParallelKwargs(comm_hook=ddp_comm_hook_type, comm_wrapper=ddp_comm_wrapper)
|
||||
# Initialize accelerator
|
||||
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, kwargs_handlers=[ddp_kwargs])
|
||||
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
|
||||
lr = config["lr"]
|
||||
num_epochs = int(config["num_epochs"])
|
||||
seed = int(config["seed"])
|
||||
batch_size = int(config["batch_size"])
|
||||
|
||||
metric = evaluate.load("glue", "mrpc")
|
||||
|
||||
set_seed(seed)
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
|
||||
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
|
||||
|
||||
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
|
||||
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
|
||||
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
|
||||
model = model.to(accelerator.device)
|
||||
|
||||
# Instantiate optimizer
|
||||
optimizer = AdamW(params=model.parameters(), lr=lr)
|
||||
|
||||
# Instantiate scheduler
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=100,
|
||||
num_training_steps=(len(train_dataloader) * num_epochs),
|
||||
)
|
||||
|
||||
# Prepare everything
|
||||
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
|
||||
# prepare method.
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
|
||||
# Now we train the model
|
||||
for epoch in range(num_epochs):
|
||||
model.train()
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
# We use the new `accumulate` context manager to perform gradient accumulation
|
||||
with accelerator.accumulate(model):
|
||||
output = model(**batch)
|
||||
loss = output.loss
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
model.eval()
|
||||
for step, batch in enumerate(eval_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch)
|
||||
predictions = outputs.logits.argmax(dim=-1)
|
||||
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
|
||||
metric.add_batch(
|
||||
predictions=predictions,
|
||||
references=references,
|
||||
)
|
||||
|
||||
eval_metric = metric.compute()
|
||||
# Use accelerator.print to print only on the main process.
|
||||
accelerator.print(f"epoch {epoch}:", eval_metric)
|
||||
accelerator.end_training()
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Simple example of training script.")
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
)
|
||||
# New Code #
|
||||
parser.add_argument(
|
||||
"--ddp_comm_hook",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16", "power_sgd", "batched_power_sgd"],
|
||||
help="DDP Communication hook to use. Choose between `no`, `fp16`, `bf16`, `power_sgd`, and `batched_power_sgd`.",
|
||||
)
|
||||
# New Code #
|
||||
parser.add_argument(
|
||||
"--ddp_comm_wrapper",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help="DDP Communication wrapper to use. Choose between `no`, `fp16`, and `bf16`.",
|
||||
)
|
||||
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
|
||||
args = parser.parse_args()
|
||||
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
|
||||
training_function(config, args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@ -1,5 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
# coding=utf-8
|
||||
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -35,7 +34,7 @@ import datasets
|
||||
import torch
|
||||
import transformers
|
||||
from datasets import load_dataset
|
||||
from huggingface_hub import Repository
|
||||
from huggingface_hub import HfApi
|
||||
from torch.utils.data import DataLoader
|
||||
from tqdm.auto import tqdm
|
||||
from transformers import (
|
||||
@ -48,7 +47,6 @@ from transformers import (
|
||||
default_data_collator,
|
||||
get_scheduler,
|
||||
)
|
||||
from transformers.utils import get_full_repo_name
|
||||
from transformers.utils.versions import require_version
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
@ -220,7 +218,7 @@ def parse_args():
|
||||
default="all",
|
||||
help=(
|
||||
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
|
||||
' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
|
||||
' `"wandb"`, `"comet_ml"`, and `"dvclive"`. Use `"all"` (default) to report to all integrations.'
|
||||
"Only applicable when `--with_tracking` is passed."
|
||||
),
|
||||
)
|
||||
@ -304,11 +302,13 @@ def main():
|
||||
# Handle the repository creation
|
||||
if accelerator.is_main_process:
|
||||
if args.push_to_hub:
|
||||
if args.hub_model_id is None:
|
||||
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
|
||||
else:
|
||||
repo_name = args.hub_model_id
|
||||
repo = Repository(args.output_dir, clone_from=repo_name)
|
||||
api = HfApi(token=args.hub_token)
|
||||
|
||||
# Create repo (repo_name from args or inferred)
|
||||
repo_name = args.hub_model_id
|
||||
if repo_name is None:
|
||||
repo_name = Path(args.output_dir).absolute().name
|
||||
repo_id = api.create_repo(repo_name, exist_ok=True).repo_id
|
||||
|
||||
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
|
||||
if "step_*" not in gitignore:
|
||||
@ -512,7 +512,7 @@ def main():
|
||||
optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate)
|
||||
|
||||
# On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
if accelerator.distributed_type == DistributedType.XLA:
|
||||
model.tie_weights()
|
||||
|
||||
# Scheduler and math around the number of training steps.
|
||||
@ -708,10 +708,15 @@ def main():
|
||||
if accelerator.is_main_process:
|
||||
tokenizer.save_pretrained(args.output_dir)
|
||||
if args.push_to_hub:
|
||||
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
|
||||
api.upload_folder(
|
||||
repo_id=repo_id,
|
||||
folder_path=args.output_dir,
|
||||
commit_message="End of training",
|
||||
)
|
||||
|
||||
with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
|
||||
json.dump({"perplexity": perplexity, "eval_loss": eval_loss.item()}, f)
|
||||
accelerator.end_training()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -81,7 +80,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -151,7 +150,7 @@ def training_function(config, args):
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
@ -223,6 +222,7 @@ def training_function(config, args):
|
||||
|
||||
# Use accelerator.print to print only on the main process.
|
||||
accelerator.print(f"epoch {epoch}:", eval_metric)
|
||||
accelerator.end_training()
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -209,13 +208,13 @@ def training_function(config, args):
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -247,16 +246,19 @@ def training_function(config, args):
|
||||
args.model_name_or_path, return_dict=True, low_cpu_mem_usage=True
|
||||
)
|
||||
|
||||
# New Code #
|
||||
# For FSDP feature, it is highly recommended and efficient to prepare the model before creating optimizer
|
||||
model = accelerator.prepare(model)
|
||||
accelerator.print(model)
|
||||
no_decay = ["bias", "LayerNorm.weight"]
|
||||
optimizer_grouped_parameters = [
|
||||
{
|
||||
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
|
||||
"weight_decay": 0.003,
|
||||
},
|
||||
{
|
||||
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
|
||||
"weight_decay": 0.0,
|
||||
},
|
||||
]
|
||||
|
||||
# Instantiate optimizer
|
||||
# New Code #
|
||||
# For FSDP feature, at present it doesn't support multiple parameter groups,
|
||||
# so we need to create a single parameter group for the whole model
|
||||
optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr, weight_decay=2e-4)
|
||||
optimizer = torch.optim.AdamW(params=optimizer_grouped_parameters, lr=lr, weight_decay=2e-4)
|
||||
|
||||
# Instantiate scheduler
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
@ -265,13 +267,8 @@ def training_function(config, args):
|
||||
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
|
||||
)
|
||||
|
||||
# New Code #
|
||||
# For FSDP feature, prepare everything except the model as we have already prepared the model
|
||||
# before creating the optimizer
|
||||
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
|
||||
# prepare method.
|
||||
optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
|
||||
overall_step = 0
|
||||
@ -336,13 +333,11 @@ def training_function(config, args):
|
||||
accelerator.save_state(output_dir)
|
||||
# New Code #
|
||||
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
|
||||
accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
|
||||
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
|
||||
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
|
||||
accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}")
|
||||
accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}")
|
||||
accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}")
|
||||
accelerator.print(
|
||||
"Total Peak Memory consumed during the train (max): {}".format(
|
||||
tracemalloc.peaked + b2mb(tracemalloc.begin)
|
||||
)
|
||||
f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
|
||||
)
|
||||
# Logging the peak memory usage of the GPU to the tracker
|
||||
if args.with_tracking:
|
||||
@ -389,11 +384,11 @@ def training_function(config, args):
|
||||
accelerator.save_state(output_dir)
|
||||
# New Code #
|
||||
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
|
||||
accelerator.print("Memory before entering the eval : {}".format(b2mb(tracemalloc.begin)))
|
||||
accelerator.print("Memory consumed at the end of the eval (end-begin): {}".format(tracemalloc.used))
|
||||
accelerator.print("Peak Memory consumed during the eval (max-begin): {}".format(tracemalloc.peaked))
|
||||
accelerator.print(f"Memory before entering the eval : {b2mb(tracemalloc.begin)}")
|
||||
accelerator.print(f"Memory consumed at the end of the eval (end-begin): {tracemalloc.used}")
|
||||
accelerator.print(f"Peak Memory consumed during the eval (max-begin): {tracemalloc.peaked}")
|
||||
accelerator.print(
|
||||
"Total Peak Memory consumed during the eval (max): {}".format(tracemalloc.peaked + b2mb(tracemalloc.begin))
|
||||
f"Total Peak Memory consumed during the eval (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
|
||||
)
|
||||
# Logging the peak memory usage of the GPU to the tracker
|
||||
if args.with_tracking:
|
||||
@ -404,8 +399,7 @@ def training_function(config, args):
|
||||
step=epoch,
|
||||
)
|
||||
|
||||
if args.with_tracking:
|
||||
accelerator.end_training()
|
||||
accelerator.end_training()
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -81,7 +80,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -126,7 +125,7 @@ def training_function(config, args):
|
||||
accelerator = Accelerator(
|
||||
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
|
||||
)
|
||||
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
|
||||
if accelerator.distributed_type == DistributedType.XLA and gradient_accumulation_steps > 1:
|
||||
raise NotImplementedError(
|
||||
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
|
||||
)
|
||||
@ -198,6 +197,7 @@ def training_function(config, args):
|
||||
eval_metric = metric.compute()
|
||||
# Use accelerator.print to print only on the main process.
|
||||
accelerator.print(f"epoch {epoch}:", eval_metric)
|
||||
accelerator.end_training()
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -84,7 +83,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
@ -130,8 +129,6 @@ def training_function(config, args):
|
||||
accelerator = Accelerator(
|
||||
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
|
||||
)
|
||||
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
|
||||
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
|
||||
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
|
||||
lr = config["lr"]
|
||||
num_epochs = int(config["num_epochs"])
|
||||
@ -205,6 +202,7 @@ def training_function(config, args):
|
||||
eval_metric = metric.compute()
|
||||
# Use accelerator.print to print only on the main process.
|
||||
accelerator.print(f"epoch {epoch}:", eval_metric)
|
||||
accelerator.end_training()
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -35,7 +34,7 @@ import datasets
|
||||
import torch
|
||||
import transformers
|
||||
from datasets import load_dataset
|
||||
from huggingface_hub import Repository
|
||||
from huggingface_hub import HfApi
|
||||
from torch.utils.data import DataLoader
|
||||
from tqdm.auto import tqdm
|
||||
from transformers import (
|
||||
@ -48,7 +47,7 @@ from transformers import (
|
||||
default_data_collator,
|
||||
get_scheduler,
|
||||
)
|
||||
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
|
||||
from transformers.utils import check_min_version, send_example_telemetry
|
||||
from transformers.utils.versions import require_version
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
@ -216,7 +215,7 @@ def parse_args():
|
||||
default="all",
|
||||
help=(
|
||||
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
|
||||
' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
|
||||
' `"wandb"`, `"comet_ml"`, and `"dvclive"`. Use `"all"` (default) to report to all integrations.'
|
||||
"Only applicable when `--with_tracking` is passed."
|
||||
),
|
||||
)
|
||||
@ -278,11 +277,13 @@ def main():
|
||||
# Handle the repository creation
|
||||
if accelerator.is_main_process:
|
||||
if args.push_to_hub:
|
||||
if args.hub_model_id is None:
|
||||
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
|
||||
else:
|
||||
repo_name = args.hub_model_id
|
||||
repo = Repository(args.output_dir, clone_from=repo_name)
|
||||
api = HfApi(token=args.hub_token)
|
||||
|
||||
# Create repo (repo_name from args or inferred)
|
||||
repo_name = args.hub_model_id
|
||||
if repo_name is None:
|
||||
repo_name = Path(args.output_dir).absolute().name
|
||||
repo_id = api.create_repo(repo_name, exist_ok=True).repo_id
|
||||
|
||||
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
|
||||
if "step_*" not in gitignore:
|
||||
@ -405,7 +406,7 @@ def main():
|
||||
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
|
||||
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
|
||||
)
|
||||
block_size = 1024
|
||||
block_size = 1024
|
||||
else:
|
||||
if args.block_size > tokenizer.model_max_length:
|
||||
logger.warning(
|
||||
@ -506,7 +507,7 @@ def main():
|
||||
)
|
||||
|
||||
# On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
if accelerator.distributed_type == DistributedType.XLA:
|
||||
model.tie_weights()
|
||||
|
||||
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
|
||||
@ -662,8 +663,11 @@ def main():
|
||||
)
|
||||
if accelerator.is_main_process:
|
||||
tokenizer.save_pretrained(args.output_dir)
|
||||
repo.push_to_hub(
|
||||
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
|
||||
api.upload_folder(
|
||||
repo_id=repo_id,
|
||||
folder_path=args.output_dir,
|
||||
commit_message=f"Training in progress epoch {epoch}",
|
||||
run_as_future=True,
|
||||
)
|
||||
|
||||
if args.checkpointing_steps == "epoch":
|
||||
@ -691,10 +695,15 @@ def main():
|
||||
if accelerator.is_main_process:
|
||||
tokenizer.save_pretrained(args.output_dir)
|
||||
if args.push_to_hub:
|
||||
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
|
||||
api.upload_folder(
|
||||
repo_id=repo_id,
|
||||
folder_path=args.output_dir,
|
||||
commit_message="End of training",
|
||||
)
|
||||
|
||||
with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
|
||||
json.dump({"perplexity": perplexity}, f)
|
||||
accelerator.end_training()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
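For reference, a minimal sketch of the `huggingface_hub.HfApi` flow that the hunks above migrate to from the deprecated `Repository` class (the output directory, repo name, and token below are placeholders):

import os
from huggingface_hub import HfApi

api = HfApi(token=os.environ.get("HF_TOKEN"))  # assumes a token is available in the environment
repo_id = api.create_repo("my-output-dir", exist_ok=True).repo_id  # hypothetical repo name
# Upload the whole folder; `run_as_future=True` makes the call non-blocking,
# mirroring the old `blocking=False` behavior of `Repository.push_to_hub`.
api.upload_folder(
    repo_id=repo_id,
    folder_path="./my-output-dir",  # hypothetical local path
    commit_message="Training in progress",
    run_as_future=True,
)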
@@ -86,7 +86,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):

     def collate_fn(examples):
         # On TPU it's best to pad everything to the same length or training will be very slow.
-        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
+        max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
         # When using mixed precision we want round multiples of 8/16
         if accelerator.mixed_precision == "fp8":
             pad_to_multiple_of = 16
@@ -210,6 +210,7 @@ def training_function(config, args):
     # And call it at the end with no arguments
     # Note: You could also refactor this outside of your training loop function
     inner_training_loop()
+    accelerator.end_training()


 def main():
@@ -1,4 +1,3 @@
-# coding=utf-8
 # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -88,7 +87,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):

     def collate_fn(examples):
         # On TPU it's best to pad everything to the same length or training will be very slow.
-        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
+        max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
         # When using mixed precision we want round multiples of 8/16
         if accelerator.mixed_precision == "fp8":
             pad_to_multiple_of = 16
@@ -139,7 +138,7 @@ def training_function(config, args):

     # If the batch size is too big we use gradient accumulation
     gradient_accumulation_steps = 1
-    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
+    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
         gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
         batch_size = MAX_GPU_BATCH_SIZE

@@ -215,6 +214,7 @@ def training_function(config, args):
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
+    accelerator.end_training()


 def main():
examples/by_feature/profiler.py (new file, 255 lines)
@@ -0,0 +1,255 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import ProfileKwargs


########################################################################
# This is a fully working simple example to use Accelerate
# and perform profiling
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset,
    using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    profile_kwargs = ProfileKwargs(
        record_shapes=args.record_shapes,
        profile_memory=args.profile_memory,
        with_flops=args.with_flops,
        output_trace_dir=args.output_trace_dir,
    )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, kwargs_handlers=[profile_kwargs])
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        with accelerator.profile() as prof:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # We use the new `accumulate` context manager to perform gradient accumulation
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
        # New Code #
        accelerator.print(
            prof.key_averages().table(
                sort_by="self_cpu_time_total" if args.cpu else "self_cuda_time_total", row_limit=-1
            )
        )

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
    accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--record_shapes",
        action="store_true",
        default=False,
        help="If passed, will record shapes for profiling.",
    )
    # New Code #
    parser.add_argument(
        "--profile_memory",
        action="store_true",
        default=False,
        help="If passed, will profile memory.",
    )
    # New Code #
    parser.add_argument(
        "--with_flops",
        action="store_true",
        default=False,
        help="If passed, will profile flops.",
    )
    # New Code #
    parser.add_argument(
        "--output_trace_dir",
        type=str,
        default=None,
        help="If passed, will save a json trace to the specified path.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
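A condensed sketch of the profiling pattern the new example introduces (the model, batch, and optimizer below are stand-ins for whatever objects you prepare; the profiler calls come from the file above):

import torch
from accelerate import Accelerator
from accelerate.utils import ProfileKwargs

profile_kwargs = ProfileKwargs(record_shapes=True, profile_memory=True, with_flops=True)
accelerator = Accelerator(kwargs_handlers=[profile_kwargs])

model = torch.nn.Linear(8, 2)  # placeholder model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

with accelerator.profile() as prof:
    batch = torch.randn(4, 8, device=accelerator.device)  # placeholder batch
    loss = model(batch).sum()
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()

# Print aggregated stats on the main process only.
accelerator.print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10))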
examples/by_feature/schedule_free.py (new file, 226 lines)
@@ -0,0 +1,226 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import is_schedulefree_available


if is_schedulefree_available():
    import schedulefree
else:
    raise ImportError(
        "This example requires the `schedulefree` library. Please install it with `pip install schedulefree`"
    )


########################################################################
# This is a fully working simple example to use Accelerate and Facebook's
# scheduler-free optimizer: https://github.com/facebookresearch/schedule_free/
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset,
    using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # For Torchxla, it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer with warmup steps
    optimizer = schedulefree.AdamWScheduleFree(
        model.parameters(),
        lr=lr,
        warmup_steps=100,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.

    model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        optimizer.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()

        model.eval()
        optimizer.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
    accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
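The key behavioral difference in the new schedule-free example is that the optimizer itself has to be switched between train and eval modes alongside the model. A minimal sketch of that pattern (the model and batch are placeholders; `schedulefree` must be installed):

import torch
import schedulefree  # requires `pip install schedulefree`
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(8, 2)  # placeholder model
optimizer = schedulefree.AdamWScheduleFree(model.parameters(), lr=1e-3, warmup_steps=100)
model, optimizer = accelerator.prepare(model, optimizer)

model.train()
optimizer.train()  # schedule-free optimizers track their own train/eval state
loss = model(torch.randn(4, 8, device=accelerator.device)).sum()
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()

model.eval()
optimizer.eval()  # switch to the averaged (evaluation) parameters before validating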
@@ -1,4 +1,3 @@
-# coding=utf-8
 # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -86,7 +85,7 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):

     def collate_fn(examples):
         # On TPU it's best to pad everything to the same length or training will be very slow.
-        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
+        max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
         # When using mixed precision we want round multiples of 8/16
         if accelerator.mixed_precision == "fp8":
             pad_to_multiple_of = 16
@@ -149,7 +148,7 @@ def training_function(config, args):

     # If the batch size is too big we use gradient accumulation
     gradient_accumulation_steps = 1
-    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
+    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
         gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
         batch_size = MAX_GPU_BATCH_SIZE

@@ -237,11 +236,7 @@ def training_function(config, args):
                step=epoch,
            )

-    # New Code #
-    # When a run is finished, you should call `accelerator.end_training()`
-    # to close all of the open trackers
-    if args.with_tracking:
-        accelerator.end_training()
+    accelerator.end_training()


 def main():
@@ -1,4 +1,3 @@
-# coding=utf-8
 # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -24,7 +23,7 @@ from torch.optim.lr_scheduler import OneCycleLR
 from torch.utils.data import DataLoader, Dataset
 from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor

-from accelerate import Accelerator
+from accelerate import Accelerator, DataLoaderConfiguration


 ########################################################################
@@ -73,12 +72,19 @@ class PetsDataset(Dataset):

 def training_function(config, args):
     # Initialize accelerator
+    dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=args.use_stateful_dataloader)
     if args.with_tracking:
         accelerator = Accelerator(
-            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
+            cpu=args.cpu,
+            mixed_precision=args.mixed_precision,
+            log_with="all",
+            project_dir=args.project_dir,
+            dataloader_config=dataloader_config,
         )
     else:
-        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
+        accelerator = Accelerator(
+            cpu=args.cpu, mixed_precision=args.mixed_precision, dataloader_config=dataloader_config
+        )

     # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
     lr = config["lr"]
@@ -263,8 +269,7 @@ def training_function(config, args):
            output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

-    if args.with_tracking:
-        accelerator.end_training()
+    accelerator.end_training()


 def main():
@@ -299,6 +304,11 @@ def main():
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
+    parser.add_argument(
+        "--use_stateful_dataloader",
+        action="store_true",
+        help="If the dataloader should be a resumable stateful dataloader.",
+    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
@@ -1,4 +1,3 @@
-# coding=utf-8
 # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,7 +21,7 @@ from torch.optim import AdamW
 from torch.utils.data import DataLoader
 from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

-from accelerate import Accelerator, DistributedType
+from accelerate import Accelerator, DataLoaderConfiguration, DistributedType


 ########################################################################
@@ -50,12 +49,19 @@ EVAL_BATCH_SIZE = 32

 def training_function(config, args):
     # Initialize accelerator
+    dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=args.use_stateful_dataloader)
     if args.with_tracking:
         accelerator = Accelerator(
-            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
+            cpu=args.cpu,
+            mixed_precision=args.mixed_precision,
+            dataloader_config=dataloader_config,
+            log_with="all",
+            project_dir=args.project_dir,
         )
     else:
-        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
+        accelerator = Accelerator(
+            cpu=args.cpu, mixed_precision=args.mixed_precision, dataloader_config=dataloader_config
+        )

     if hasattr(args.checkpointing_steps, "isdigit"):
         if args.checkpointing_steps == "epoch":
@@ -103,13 +109,13 @@ def training_function(config, args):

     # If the batch size is too big we use gradient accumulation
     gradient_accumulation_steps = 1
-    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
+    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
         gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
         batch_size = MAX_GPU_BATCH_SIZE

     def collate_fn(examples):
         # On TPU it's best to pad everything to the same length or training will be very slow.
-        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
+        max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
         # When using mixed precision we want round multiples of 8/16
         if accelerator.mixed_precision == "fp8":
             pad_to_multiple_of = 16
@@ -195,7 +201,10 @@ def training_function(config, args):
        total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
-            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
+            if not args.use_stateful_dataloader:
+                active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
+            else:
+                active_dataloader = train_dataloader
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
@@ -257,8 +266,7 @@ def training_function(config, args):
            output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

-    if args.with_tracking:
-        accelerator.end_training()
+    accelerator.end_training()


 def main():
@@ -285,6 +293,11 @@ def main():
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
+    parser.add_argument(
+        "--use_stateful_dataloader",
+        action="store_true",
+        help="If the dataloader should be a resumable stateful dataloader.",
+    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
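A minimal sketch of the stateful-dataloader option these two examples add (the dataset below is a placeholder, and `torchdata` is assumed to be installed; when `use_stateful_dataloader=True`, the prepared loader should expose `state_dict()`/`load_state_dict()` for resumption instead of relying on `accelerator.skip_first_batches`):

import torch
from torch.utils.data import DataLoader
from accelerate import Accelerator, DataLoaderConfiguration

dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=True)
accelerator = Accelerator(dataloader_config=dataloader_config)

dataset = torch.arange(100)  # placeholder dataset
dataloader = accelerator.prepare(DataLoader(dataset, batch_size=10))

for step, batch in enumerate(dataloader):
    if step == 3:
        break

# Capture the loader's position (e.g. alongside a checkpoint) and restore it later,
# assuming the stateful wrapper forwards these methods as described above.
state = dataloader.state_dict()
dataloader.load_state_dict(state)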
examples/config_yaml_templates/README.md (new file, 10 lines)
@@ -0,0 +1,10 @@
# Config Zoo

This folder contains a variety of minimal configurations for `Accelerate` achieving certain goals. You can use these
config YAMLs directly, or build off of them for your own YAMLs.

These are highly annotated versions, aiming to teach you what each section does.

Each config can be run via `accelerate launch --config_file {file} run_me.py`

`run_me.py` will then print out how the current environment is set up (the contents of the `AcceleratorState`)
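The README above refers to a `run_me.py` helper whose contents are not included in the hunks shown here; a minimal stand-in that prints the environment in the way described could look like this (hypothetical script, not the repo's actual file):

from accelerate import Accelerator


def main():
    accelerator = Accelerator()
    # Printing the state shows how the current environment is configured
    # (distributed type, number of processes, mixed precision, ...).
    accelerator.print(accelerator.state)


if __name__ == "__main__":
    main()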
examples/config_yaml_templates/deepspeed.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
# Similar to FSDP, we set the distributed type as DEEPSPEED
distributed_type: DEEPSPEED
# With DeepSpeed, we utilize a deepspeed config file for the entire configuration
deepspeed_config:
  # Can also be any of the config json's in accelerate/examples/deepspeed_config_templates
  deepspeed_config_file: ../deepspeed_config_templates/zero_stage1_config.json
  # If using ZeRO-3 and wanting to load big models in, this should be set to `true` so
  # `transformers` uses the right `init` function
  zero3_init_flag: false # true

# Finally we need to specify the number of GPUs to use
num_processes: 2
# Optionally we can set the mixed precision now instead of in the deepspeed config file,
# however this requires the `fp16` and `bf16` options to be set to `auto` in the deepspeed config file
# mixed_precision: "bf16"
Some files were not shown because too many files have changed in this diff.