Mirror of https://github.com/huggingface/accelerate.git (synced 2025-11-14 22:24:32 +08:00)

Compare commits: v0.13.1...feat/async (1340 commits)
| SHA1 | Author | Date | |
|---|---|---|---|
| 50042518db | |||
| 571ca0200d | |||
| 0cb1a33475 | |||
| dfdc219018 | |||
| 45959d7b96 | |||
| 8b493524c8 | |||
| 9ead94e556 | |||
| a0bc36e8ed | |||
| 8830e58a91 | |||
| 40ebb4bea3 | |||
| ec92b1af7a | |||
| 62ede1ed2a | |||
| 9f9c490c6b | |||
| 8b55e62b2c | |||
| 0e4419b347 | |||
| 3b67c21696 | |||
| 7b981788ca | |||
| c4460e33ef | |||
| 5dd3d0b690 | |||
| 5fe4460ccd | |||
| 979d81e4a9 | |||
| 7c25f696b8 | |||
| a7d6f28f99 | |||
| 23cf4ef8a3 | |||
| ff872f5f71 | |||
| 2941a6b0fb | |||
| c0a3aefea8 | |||
| 42fdda1c1f | |||
| e23b004b30 | |||
| 898cad39e8 | |||
| 24c8157bba | |||
| 6891c57072 | |||
| 24e48f3d20 | |||
| 6640ff415c | |||
| c173b4fdd6 | |||
| cb343c63d7 | |||
| 354b0b5da3 | |||
| 9359a0194f | |||
| 2f075c724c | |||
| 7ecc2d7f39 | |||
| 12f89bb754 | |||
| 348aabaaaf | |||
| 3b13453bbf | |||
| 0408ab12d7 | |||
| 55e518a762 | |||
| 7e11ac43f0 | |||
| e2cc537db8 | |||
| 847ae58c74 | |||
| 6e104f31de | |||
| 524e5f9828 | |||
| d6c986c3f2 | |||
| 1ac8643df7 | |||
| 07ce74868c | |||
| 175fe91589 | |||
| fe16ce8bce | |||
| 5987d79a53 | |||
| 31af8d4e8e | |||
| b7493a82b1 | |||
| a16d2bb3c1 | |||
| cac22ed980 | |||
| be826a6b7b | |||
| 5939640829 | |||
| 7f9c8cbe34 | |||
| 9888c7ed23 | |||
| 42a68c30dc | |||
| 6597dae780 | |||
| 8878d93745 | |||
| 2eaf5cdbbc | |||
| 23c1d8db89 | |||
| 0af621bbec | |||
| bee04f1b01 | |||
| 8a953f08c6 | |||
| 3518c03584 | |||
| 2f8fd72e51 | |||
| d2e6b0313d | |||
| b9fee48c85 | |||
| 3a82b056cf | |||
| 6b61a373a2 | |||
| 682691deac | |||
| 791055b484 | |||
| 16bf1d8901 | |||
| ab3c604e48 | |||
| 273799c85d | |||
| 43526c5c08 | |||
| 07f2392f40 | |||
| ee2f48c2c3 | |||
| 4f3abb73a7 | |||
| db536cbfeb | |||
| 4e9d0deba6 | |||
| 8cb3ace894 | |||
| b6d97cb856 | |||
| 33967d4733 | |||
| 5b1fcda371 | |||
| f55f0533b5 | |||
| 1ec99f0b58 | |||
| 417bc52965 | |||
| 97c93c4809 | |||
| cd37bbb629 | |||
| 7aa3b56c80 | |||
| 14f4306ca6 | |||
| e6e717589e | |||
| 1f6efcea0b | |||
| 9fa97f9600 | |||
| 764eee4a48 | |||
| 202e6c178a | |||
| 32874257f3 | |||
| 281314b479 | |||
| 3524a504c8 | |||
| f48d95c493 | |||
| f76208f5a8 | |||
| ae0499ea96 | |||
| ddc49f1e9a | |||
| 9b2d6eaf32 | |||
| 7b5774ac55 | |||
| 7013365791 | |||
| 8d8fd83672 | |||
| 3a941d4b4e | |||
| d02e51cc21 | |||
| c5caa11e85 | |||
| 39e2bebb12 | |||
| 0af45bf1e8 | |||
| 806ac848c9 | |||
| 23b092507a | |||
| 8fb073536a | |||
| 4f35cf713c | |||
| ada21cfbbd | |||
| b451956fd6 | |||
| 6a9a61520d | |||
| 423fbbfdea | |||
| 34c1779828 | |||
| 54496571fd | |||
| 4a3cbcb63c | |||
| 583b26db3c | |||
| 7812d979c3 | |||
| 67adb473a4 | |||
| ee4cab96ed | |||
| 73c2378c55 | |||
| b2f937faec | |||
| 3b89987710 | |||
| a43e4170fc | |||
| 334d6ab957 | |||
| 650b6659c0 | |||
| fb90996365 | |||
| 32b2e1606f | |||
| 8c0a29626d | |||
| 63168b151f | |||
| 3cf5e4c802 | |||
| 9642a1ac81 | |||
| 3169339f5b | |||
| 67a768be07 | |||
| 531643436e | |||
| 83e09a9331 | |||
| 9c4eeb9ba8 | |||
| a0edc8dcf2 | |||
| 11a3c0001d | |||
| 8b31a2fe2c | |||
| 3f636d6260 | |||
| 803b6648b4 | |||
| 17f9c19f48 | |||
| d7c741a6bc | |||
| 8ab01d32cf | |||
| 140acb356e | |||
| 8576112bc8 | |||
| 806f661cd3 | |||
| 9015a26f09 | |||
| 6de900e10a | |||
| ffb27138f7 | |||
| 4b6be89910 | |||
| a702364256 | |||
| a31bd767c1 | |||
| 71036329f7 | |||
| f648feba97 | |||
| 14fc61eeac | |||
| d9e6af8773 | |||
| b271eb1365 | |||
| 4677b8089f | |||
| e456796be8 | |||
| ac3749dc11 | |||
| 6e8eea2e73 | |||
| c7b3625592 | |||
| 90f81986b9 | |||
| fa26dc6156 | |||
| 6fcc8efd2e | |||
| 8039158d71 | |||
| e34db4d0d2 | |||
| 526925b48c | |||
| 24f8d0276c | |||
| 5cc99e6e02 | |||
| ce63623421 | |||
| f19b95700f | |||
| 81d8a0356c | |||
| f076495580 | |||
| 03153658f4 | |||
| 675e35bcd4 | |||
| 8f2d31c5b9 | |||
| 4c2c89ea90 | |||
| 28c171b05a | |||
| 65356780d4 | |||
| 78b8126bff | |||
| 7e324103c4 | |||
| 02d25612a5 | |||
| fbfa53bc5e | |||
| d09040dfc9 | |||
| 828aae4e32 | |||
| f0b030554c | |||
| 80973430ee | |||
| c67d47ae79 | |||
| 8c423cff79 | |||
| 95f34d6243 | |||
| ba90f85627 | |||
| b13aadcb67 | |||
| 58f14364d5 | |||
| 54370d4504 | |||
| d6d3e03cd4 | |||
| acfbf72a7f | |||
| 200c9eb783 | |||
| 7b2edc0bf2 | |||
| b92fb4774f | |||
| 3e62fbb09c | |||
| cb8b7c637a | |||
| aa16d69561 | |||
| f9a2e7902f | |||
| 51fd482d6e | |||
| 60461ff7c4 | |||
| f8c77f0522 | |||
| b626ef5f00 | |||
| dd68af886a | |||
| 11818e657b | |||
| 1f508a6df6 | |||
| 4a100eef43 | |||
| c6f34a060f | |||
| 29be478862 | |||
| e11d3ceff3 | |||
| 08101b9dde | |||
| 5f96369161 | |||
| 069743775e | |||
| 77f2b6235e | |||
| d7b1b368e9 | |||
| 8ad2b3b8e7 | |||
| e724c9a97f | |||
| cf169a1ae6 | |||
| 8ade23cc6a | |||
| c0552c9012 | |||
| bf4572b6ce | |||
| a4a44aca1f | |||
| b0e5fd353c | |||
| 8159c98d43 | |||
| 497eb3cf86 | |||
| 87732a4c32 | |||
| ffbca15979 | |||
| ba7ab93f5e | |||
| 85f35647db | |||
| 2f39575bbd | |||
| 1ace241db4 | |||
| 78e1bdd088 | |||
| 4dda5797bd | |||
| 1f4fbb77a2 | |||
| c809f8e45c | |||
| 39dc2b120f | |||
| 735dfa3018 | |||
| a84327e596 | |||
| 292954b547 | |||
| 0e61127b5a | |||
| 6f79b63b86 | |||
| 1d2ca747f1 | |||
| cba3f2d5e0 | |||
| f1f2b4d1a8 | |||
| fd9880da91 | |||
| 21c994c298 | |||
| 52581c3f01 | |||
| f4ee5a2dc7 | |||
| 55136b8dc4 | |||
| fb68cb9d0e | |||
| 506d732230 | |||
| ae9cb6e4db | |||
| 127818fc27 | |||
| bcc13c00b5 | |||
| d4d6b6e7f5 | |||
| 1077611552 | |||
| cd93e35e08 | |||
| e93b056687 | |||
| 5060574827 | |||
| 018a99e5f6 | |||
| 4305033f80 | |||
| 4617be3760 | |||
| 521eb5bee4 | |||
| 9f9951325c | |||
| e9e5a73fcc | |||
| 79a8426416 | |||
| 8a43837cc9 | |||
| a768b2b753 | |||
| 85b1a03552 | |||
| fc52fa969e | |||
| 3a670bd0da | |||
| b32d8bcb75 | |||
| d5b7b70e06 | |||
| 1ce2eb6385 | |||
| 3fd02e60dc | |||
| ed9a574564 | |||
| 7d3bbe721b | |||
| 4b4c036933 | |||
| e7e01812df | |||
| 5ad982ac51 | |||
| 9d67867ad9 | |||
| 52b3421d8f | |||
| f1ca8ac78f | |||
| ab89fc7e1d | |||
| b5235f21d8 | |||
| 8931e5e48c | |||
| a84859242d | |||
| 758d6243a7 | |||
| b07ad2adf2 | |||
| 1d09a20fc1 | |||
| 3fcc9461c4 | |||
| 939ce400cb | |||
| c2120927b0 | |||
| 654e1d9984 | |||
| 8c3aded21a | |||
| 2789933938 | |||
| 726140cad2 | |||
| 2d4f1dda7e | |||
| c0cf860dc6 | |||
| ad3f574a3b | |||
| 1a6af0bd6d | |||
| 52fae0960c | |||
| 7ffe7662ca | |||
| 5536a3a893 | |||
| 7ec8eab955 | |||
| 589fddd317 | |||
| 99c69aaf73 | |||
| 00785cd9fc | |||
| a452327e8e | |||
| 851cf34351 | |||
| cd5698bb32 | |||
| 90d5023901 | |||
| 3bde615607 | |||
| dc3b5ad82e | |||
| 12a5befdd6 | |||
| 79ca85c27d | |||
| 13d93c4f50 | |||
| d982751aec | |||
| 95edc68cb3 | |||
| 288accc0ec | |||
| 83b0610155 | |||
| 386f7d2825 | |||
| 308a8e9689 | |||
| f35cbd1f02 | |||
| a14260c9da | |||
| 32f368ec3f | |||
| 415eddf1be | |||
| 230857691a | |||
| a5a3e57125 | |||
| 0af1d8b8de | |||
| d16d7371a1 | |||
| 7a5c231b9e | |||
| 4f02bb764a | |||
| 709fd1e42b | |||
| f4f1260a0e | |||
| c6da9f8693 | |||
| 3ebbe573ad | |||
| 24bf5ec546 | |||
| e1247de01e | |||
| 12a007d559 | |||
| 5bdcd7e169 | |||
| 2471eacdd6 | |||
| 167cb5eb20 | |||
| 947f64ee62 | |||
| 8330b375d4 | |||
| 92404fbf5f | |||
| 3a02754915 | |||
| fec1170e35 | |||
| eac206f063 | |||
| 6882ff2bea | |||
| 57a4c7465e | |||
| 404510a5ec | |||
| 3086e26db9 | |||
| 5d5d07abfc | |||
| 5a0b7dc597 | |||
| c799c198e9 | |||
| 1f7a79b428 | |||
| 4cc3530b64 | |||
| 5d4a3beb01 | |||
| 0284f9a9f6 | |||
| 573d22d48f | |||
| 13ca7dccb6 | |||
| 3b5a00e048 | |||
| 3c4eaedd46 | |||
| c0faec766c | |||
| 91a2599f93 | |||
| 5f9235a731 | |||
| 7a36a75c7c | |||
| f62854a281 | |||
| a9869ea0dc | |||
| 6d59614603 | |||
| 2d74c0c077 | |||
| 40007b4e97 | |||
| 7141881b1f | |||
| f0049b2cfb | |||
| 83bad87559 | |||
| 24d8b63fc3 | |||
| 4a83ee5382 | |||
| 05d240af95 | |||
| bad2ce42ed | |||
| 30cb7ece76 | |||
| b7fa2fa956 | |||
| d5d378d64e | |||
| 065e74d11a | |||
| 86b6deaea1 | |||
| b24a0ef5db | |||
| e061edc6e7 | |||
| c3f422699a | |||
| 0553483638 | |||
| 415789d0e4 | |||
| ae472bac48 | |||
| 4f2c2ba45c | |||
| e26065a265 | |||
| 1cb6fdcf7b | |||
| 4ba436eccc | |||
| 91e8a3ced4 | |||
| 4ad4d28c49 | |||
| befd87f043 | |||
| abce3604f0 | |||
| 27a607ea90 | |||
| aa21174de9 | |||
| 6cf1cc0a39 | |||
| bb465a9cf0 | |||
| 67308ca6ef | |||
| 63772f6ac2 | |||
| 8798cf06ab | |||
| 47bb2dd53e | |||
| 724824abbe | |||
| afc2c99e6a | |||
| 0fb95a2d3b | |||
| 7ac153f404 | |||
| 0f1b91bb74 | |||
| d1eb44c856 | |||
| 11a363287a | |||
| 5cfe409443 | |||
| 5b3a7f3892 | |||
| 060361fca3 | |||
| 6ac27e2383 | |||
| ba5f49219f | |||
| 2c767338f2 | |||
| 234a85506d | |||
| 232ebd159a | |||
| 4d3d4bc88f | |||
| 2b1e7bd462 | |||
| c7e5e41b8c | |||
| 9557598c45 | |||
| 156331aecd | |||
| cd7df4117d | |||
| 6af157ea93 | |||
| 83317b3081 | |||
| e831bcb3b1 | |||
| 092c3af0c4 | |||
| 3e944c5583 | |||
| f67737363c | |||
| f7daaaa305 | |||
| 3dc131cd8d | |||
| ef0f62c12a | |||
| baafaf4a6e | |||
| abc86c0e35 | |||
| 4450cb3132 | |||
| fd0dcd1c45 | |||
| f478201c28 | |||
| c7046845e7 | |||
| 701e24c539 | |||
| 37da848e6c | |||
| c470a1336a | |||
| 581a390e2f | |||
| 2fc48c7eee | |||
| 1024231133 | |||
| 5ca095a34f | |||
| b77c65398c | |||
| a91691463b | |||
| 5056d327f8 | |||
| c0a37015e3 | |||
| e9b9c7d022 | |||
| 6c09584f73 | |||
| b8c8583953 | |||
| df485ae1e3 | |||
| 6386f70103 | |||
| 6d92198ef4 | |||
| 16488be9a4 | |||
| 685bd3a439 | |||
| 2e69948c1a | |||
| 7531e8c13e | |||
| 8e439de744 | |||
| d96a5aa730 | |||
| d7bcd85d4d | |||
| d927b8f3a2 | |||
| f579d9550d | |||
| bbecad4e8e | |||
| b82999a84b | |||
| 11568e562c | |||
| d9a1b8f975 | |||
| b634388ef1 | |||
| 4d415f2129 | |||
| 829171a9a4 | |||
| 5a232de2fa | |||
| 5f8048cd04 | |||
| 4378b560e8 | |||
| 8644e23b71 | |||
| b2fc3a3b0e | |||
| 290446d446 | |||
| 85a75d4c3d | |||
| f94f0ff912 | |||
| 1b2e634970 | |||
| dd62fc90ce | |||
| 10b418495e | |||
| c2f193a25c | |||
| 1812152392 | |||
| b8b353b7a7 | |||
| f2778d6502 | |||
| 2ad42e77c3 | |||
| e8aaee5d9b | |||
| 910c1b6a8f | |||
| 92d3240bb5 | |||
| 02a8a9a3a7 | |||
| ee163b66fb | |||
| 354db5b5f7 | |||
| 92b1ad01f3 | |||
| 60bfdaa934 | |||
| 16eb6d76bf | |||
| c8acfa700b | |||
| e70e3c87de | |||
| bc8dfe3caf | |||
| e3d324240f | |||
| 10882eeddd | |||
| 145a98fc12 | |||
| 64ae9ea3fe | |||
| 8aa72b9748 | |||
| 97d115a266 | |||
| 63cfd9efdc | |||
| 6cf8221a09 | |||
| 7a2feecad4 | |||
| ee004674b9 | |||
| 65544d8fe9 | |||
| 5fce525f90 | |||
| ca37b0e471 | |||
| 82a1258ffc | |||
| 21b225e8d5 | |||
| 25ee6ab3b7 | |||
| 2d3e822d11 | |||
| 811dc1e464 | |||
| c59c6c9bff | |||
| 422bd23f3f | |||
| c0b16b684f | |||
| 78b15561a1 | |||
| 8f9673f509 | |||
| 9c071103f0 | |||
| 1127e670ca | |||
| fa83efc33e | |||
| 4aa71049c3 | |||
| c0b441f6be | |||
| 34fdddd7df | |||
| 3fb9a3a231 | |||
| 065d88729b | |||
| 67e698cf4d | |||
| 46ac6c9bba | |||
| 9b24f56e42 | |||
| f20445d4ac | |||
| 97d2168e59 | |||
| 79016eb163 | |||
| 164193fa7e | |||
| 482a9f9fa4 | |||
| d7de8d1794 | |||
| b443be70fb | |||
| 613ad7089a | |||
| 13e79ccfab | |||
| aba3b8c72f | |||
| 70cdf5fe52 | |||
| b38590a28a | |||
| 5318bc7733 | |||
| ef68b4655c | |||
| ecebfa19c9 | |||
| 5a39359fb2 | |||
| b3d2111708 | |||
| f75c6245ba | |||
| 9c1d5bac15 | |||
| b0b867da85 | |||
| 433d693b70 | |||
| c3aec59b12 | |||
| 9467a62744 | |||
| 86228e321d | |||
| 06b138d845 | |||
| 0867c09318 | |||
| 0e1ee4b92d | |||
| d8a64cb79d | |||
| b703efdcc3 | |||
| 68f54720dc | |||
| 46f1391b79 | |||
| cd7ff5e137 | |||
| f4b411f84b | |||
| 7ba64e632c | |||
| 8b770a7dab | |||
| 3d8b998fbb | |||
| 03365a3d17 | |||
| 7aafa25673 | |||
| f88661b5d9 | |||
| 581fabba48 | |||
| e909eb34e2 | |||
| 7644a02e6b | |||
| 162a82164e | |||
| 0d6a5fa8ee | |||
| 53845d2596 | |||
| 5ec00da2be | |||
| 649e65b542 | |||
| 14d7c3fca6 | |||
| c7d11d7e40 | |||
| ec4f01a099 | |||
| f5c01eeb63 | |||
| 20ff458d80 | |||
| 6719cb6db3 | |||
| 31fd2b1ad6 | |||
| fce61a99ec | |||
| 6ec92cf06b | |||
| 2a4037322f | |||
| f823404f69 | |||
| ef2fe912c5 | |||
| e3e9b87592 | |||
| 456afd92ce | |||
| 0d2280dadc | |||
| 55d4a496dd | |||
| 2a8829d9a5 | |||
| 3969731ce8 | |||
| 411aa58a77 | |||
| 4420ec641d | |||
| 2241725ad6 | |||
| 5cac878984 | |||
| 5d31423308 | |||
| 2721387b98 | |||
| 2cfa88bdf1 | |||
| 102caf4fab | |||
| 07df5d268f | |||
| 68b3dbf666 | |||
| 403c0714d1 | |||
| 848ed800fa | |||
| ad957ce556 | |||
| 3db088f5d6 | |||
| d1abd59114 | |||
| ceb7c699bc | |||
| c5baa055c0 | |||
| 349be97ccb | |||
| b60061dfd2 | |||
| b565a6c58a | |||
| a03c361ffb | |||
| b0528392c8 | |||
| 060678415a | |||
| 6b2d968897 | |||
| ad3a5bc920 | |||
| eafcea07f6 | |||
| eff30e2130 | |||
| 694f2e2c12 | |||
| 9964f90fd7 | |||
| f86876d56d | |||
| 0a37e2042e | |||
| 54d670be41 | |||
| 339854a9a4 | |||
| 5296419df4 | |||
| 6a4857fec2 | |||
| 9569150174 | |||
| 8f871f41f1 | |||
| 47e6c36155 | |||
| 47c144570c | |||
| 6a54d0781b | |||
| 0482548363 | |||
| 0e48b2358d | |||
| 3499cf25aa | |||
| 68d63ee15f | |||
| 151637920d | |||
| 0ba3e9bb50 | |||
| b04d36c75f | |||
| 5fc1b230d3 | |||
| 244122c736 | |||
| d25efa71ce | |||
| 1aeb1e8997 | |||
| 0e51680994 | |||
| 7d430cf8de | |||
| b8ca803f98 | |||
| 1243191ecb | |||
| 2b25b8b3c5 | |||
| ca300c0a04 | |||
| 427ef8bd00 | |||
| 35b0206353 | |||
| fbe00d7897 | |||
| 62af737219 | |||
| cd51581248 | |||
| a5a7c039a0 | |||
| cf745c936d | |||
| 99877f56d6 | |||
| 0f2686c8d3 | |||
| a912b2ee09 | |||
| e9fd72a613 | |||
| 8dedb140ef | |||
| b55855a3d4 | |||
| 2b53a9089c | |||
| 39d255b3d0 | |||
| 99dff1a167 | |||
| a0a16e118a | |||
| 15458c5737 | |||
| fc0a43c3c1 | |||
| 8256a9c2d4 | |||
| 6727ac4394 | |||
| 9674b40580 | |||
| 0b0d9215a9 | |||
| e638b1e21a | |||
| 76de60dbdc | |||
| 217e1a248c | |||
| 5e0eb0d750 | |||
| 183c9dd3ce | |||
| 4f100318f4 | |||
| fa6f43033c | |||
| 820fc4ca7a | |||
| bd72a5f1a8 | |||
| 55088a2cf5 | |||
| c2d8e245e9 | |||
| d8e1285409 | |||
| 5b3f3b99d6 | |||
| 2935057606 | |||
| bb6759d634 | |||
| 55747318a0 | |||
| 217faafe08 | |||
| 5440387529 | |||
| e1fab05ce7 | |||
| c3ec7ff5a9 | |||
| d8535921ad | |||
| eb8c535c17 | |||
| b7686ccb44 | |||
| f3229872bc | |||
| 7843286f2e | |||
| 11e2e99cfc | |||
| 07e745f1c4 | |||
| c7c99a30ea | |||
| 8f45a2eae8 | |||
| 9fd64b7ea9 | |||
| 5be16ad90b | |||
| dab62832de | |||
| caa9f9bcbb | |||
| 943efedb88 | |||
| 50acb0c2ec | |||
| e6d96e5f70 | |||
| 1dfb6e9304 | |||
| 4bef6bc511 | |||
| 73640d0463 | |||
| 7a1159143e | |||
| cbb0b82fa2 | |||
| 5ae6111180 | |||
| 230a5f541b | |||
| 956114ac92 | |||
| 76ee7f211d | |||
| 420743af22 | |||
| 206ab491ed | |||
| 936d2f4f5c | |||
| da98d601b5 | |||
| 658492fb41 | |||
| 80da9cfb09 | |||
| 03deec2a01 | |||
| 629d02c844 | |||
| a87c95da9e | |||
| bbcdbbaffc | |||
| ce53708e0e | |||
| 53209ce6d8 | |||
| bd083ae1bf | |||
| e5452a618d | |||
| 40a73e0ae0 | |||
| 937e08ce75 | |||
| 5d558f21e2 | |||
| d9b5ce60b3 | |||
| 61a87ab946 | |||
| 5dec654aae | |||
| b2a950205e | |||
| ca7b853abc | |||
| 6832aa51a6 | |||
| 4a1d5b1fb6 | |||
| 82369c8314 | |||
| cdb001ca5f | |||
| c72e22419b | |||
| c872c3086f | |||
| cec5ae8e4d | |||
| cd570b2e2a | |||
| 727d624322 | |||
| afed2f75f8 | |||
| 739b135f83 | |||
| 4a9dd1cd82 | |||
| feab09908d | |||
| e0baaa8df0 | |||
| 1b998f1695 | |||
| 7befe580c2 | |||
| cd3d3a37f9 | |||
| 81fffe51fd | |||
| 0b5ac0253e | |||
| a16b843a1b | |||
| bc86a9379f | |||
| 87a096f95e | |||
| 44adf1e14f | |||
| ce870e1ce1 | |||
| 1ace672d3e | |||
| e2ae254008 | |||
| 0fa291e707 | |||
| ba6f11ec3e | |||
| 430ee9df6b | |||
| 409a9df0a4 | |||
| acad5bae5c | |||
| 81b19c4094 | |||
| 3e97a9172b | |||
| 812719644d | |||
| 16e5113f8a | |||
| 3122a6164d | |||
| c8682ae74c | |||
| 0768905f77 | |||
| d087be0156 | |||
| 41caaa56e1 | |||
| 21d127334e | |||
| 3cf7dee576 | |||
| 64c586f5eb | |||
| 0e714f5ba4 | |||
| 92f23e123d | |||
| f67e11afd7 | |||
| 6458058559 | |||
| 4d13e4e474 | |||
| 058a3546ea | |||
| 98ecab2083 | |||
| b30a349078 | |||
| 7cb19ae613 | |||
| 39897a0662 | |||
| aa71bb815a | |||
| f43a08a9c5 | |||
| b42c65b729 | |||
| 7bad726935 | |||
| 29ff7c3911 | |||
| 30eff605df | |||
| fc95663e03 | |||
| 49cb83a423 | |||
| d2b159ea1a | |||
| 40056c69d1 | |||
| 505b5be044 | |||
| a6333f2e7c | |||
| 0dec477985 | |||
| a24189db35 | |||
| a9aee447ee | |||
| d5894ab499 | |||
| 6f14928e28 | |||
| 777334a803 | |||
| c3d82d24e2 | |||
| 6e70e79e4e | |||
| b3fc3c9067 | |||
| a9d79163e5 | |||
| 0b36ca6e64 | |||
| f3b7f9cf25 | |||
| b909bfacb9 | |||
| a2d8f540c3 | |||
| e8ed10ae62 | |||
| a6291e43b0 | |||
| 2a289f6108 | |||
| cafc7f785f | |||
| 39889c7304 | |||
| 12d5a2d0da | |||
| 243288627d | |||
| efc1fa8376 | |||
| 18e3012489 | |||
| daa1952f47 | |||
| 653ba110d3 | |||
| f518b0ab03 | |||
| 3a05e0cf70 | |||
| 299f3ef8ab | |||
| 925a13eb04 | |||
| 4170f395d1 | |||
| bb47344c77 | |||
| 243cd82409 | |||
| 51f5e829a8 | |||
| 5b9c5881b6 | |||
| 0209606364 | |||
| 5909c1a514 | |||
| e7150b0b15 | |||
| e8c64f598b | |||
| a14081ccc5 | |||
| d895809613 | |||
| 02015eb25c | |||
| 19bcd43e14 | |||
| 59f2fff3cf | |||
| c33adecc9f | |||
| 518c206a2a | |||
| 65b5c2cfad | |||
| 7954a28a71 | |||
| 3bdb35abfa | |||
| d58aac2e1e | |||
| a4c2654f50 | |||
| 27d29087b2 | |||
| c7698834fc | |||
| 64d7b58c44 | |||
| e3aae2ac65 | |||
| d0a7991b65 | |||
| 180ef7c415 | |||
| 95bffdec43 | |||
| c74c28c6d1 | |||
| e0f5e03009 | |||
| dfbfbdfea8 | |||
| 24ae624d96 | |||
| 40f822a1e3 | |||
| a0bfe2140c | |||
| c6443f8bd4 | |||
| 3cd02e9340 | |||
| 17ec2ede11 | |||
| e30938700a | |||
| b864946606 | |||
| bc234c040c | |||
| 662a7dd905 | |||
| d3db2d4fe5 | |||
| 96f926a25e | |||
| a9d43cda80 | |||
| effccbdc84 | |||
| d141b4ce79 | |||
| bc49d0f9b3 | |||
| 5ea7c81277 | |||
| efe4481a28 | |||
| df215cc243 | |||
| 5791d949ff | |||
| b76409ba05 | |||
| a25c4eacae | |||
| d8437ae096 | |||
| 2fa22f3342 | |||
| a2ecb58132 | |||
| 73cc944067 | |||
| b16916f447 | |||
| 36f8e48747 | |||
| 790cb8b461 | |||
| 7b4d12623a | |||
| 956c6baf71 | |||
| 485e8c8cb4 | |||
| aaf38c2f35 | |||
| f433457244 | |||
| 535b52cef2 | |||
| e60a424398 | |||
| 32f85ce524 | |||
| 0983a9b9b4 | |||
| e5d0df44f0 | |||
| 50eabe5b1d | |||
| f2d1047059 | |||
| 3e68f1da63 | |||
| f8b0696076 | |||
| 51a2ca5d88 | |||
| 51de46e368 | |||
| e2b0224ec4 | |||
| db11bd5035 | |||
| 543c59af22 | |||
| 81765e6e00 | |||
| a4ebc14fab | |||
| 058f6f70f5 | |||
| 665d5180fc | |||
| d1ea9ab40c | |||
| 632dce67ab | |||
| e41864ce9d | |||
| 979991aa78 | |||
| 7fc1e438d1 | |||
| 040f178569 | |||
| 87c81315a1 | |||
| f1e84decc9 | |||
| eafddf02e3 | |||
| f0029d6f60 | |||
| 3147de9010 | |||
| d448ebaf90 | |||
| 65dd4f2039 | |||
| 7ee2c79da9 | |||
| bbe2e30901 | |||
| 0ab72613a7 | |||
| 6f14e619b2 | |||
| 90e9703d99 | |||
| 5f21cde3c7 | |||
| 76ccfae682 | |||
| 62357f218f | |||
| be1b76e97a | |||
| 3f2b5da094 | |||
| 3f1cb09e7b | |||
| 7a39d928f5 | |||
| 961fe728d9 | |||
| ef0c4bf277 | |||
| de855b3247 | |||
| b9628f13c2 | |||
| 16ca01feea | |||
| 4cbbde8945 | |||
| eba6eb79dc | |||
| 109f3272f5 | |||
| 85901cdcf9 | |||
| 5e74d932b9 | |||
| 090c65cd9d | |||
| b7d5d9072a | |||
| d4262021d5 | |||
| 8ae56dc51d | |||
| c9fbb71e37 | |||
| 4d583ad6a1 | |||
| 70d999ee4a | |||
| 3913fa4dd0 | |||
| f9b2e6769b | |||
| d3f8c52f4c | |||
| af12e7b023 | |||
| 68376babd8 | |||
| 7d24bdefb5 | |||
| bb296348e1 | |||
| 0226f75025 | |||
| 419c9ce22a | |||
| 2249fbde0d | |||
| e0ffea5bc3 | |||
| 9a86a49f72 | |||
| 70920895e8 | |||
| bf3cd30a66 | |||
| bfa74e51d2 | |||
| e6699e6aba | |||
| 0871e93a74 | |||
| 86720fdb11 | |||
| 1deab71e3c | |||
| 5d1cee3d81 | |||
| 5904f56c45 | |||
| 99d790dc34 | |||
| 1760d2dc8c | |||
| b93bfac16d | |||
| 981c6fb8d6 | |||
| 6413f25ba9 | |||
| 39e20d3e55 | |||
| 3a381bfa48 | |||
| bc82d18821 | |||
| 330d60b817 | |||
| 612ecef7b8 | |||
| 9493d7276b | |||
| 40c6e0ca41 | |||
| a28491bc24 | |||
| 435079aafb | |||
| dcde1e93d0 | |||
| ab379793d4 | |||
| b50e75f85d | |||
| f95067bfbf | |||
| d07fd959cc | |||
| 873b39b85b | |||
| da39665055 | |||
| d95d68ec46 | |||
| fafadc5323 | |||
| 145fca5a09 | |||
| 9fe690706d | |||
| 6e81938282 | |||
| e965d590cd | |||
| 6dfcf5b8ef | |||
| e4ea4ed4de | |||
| fa8e1cff91 | |||
| 60856787ac | |||
| 995563fec9 | |||
| 2d62bd1570 | |||
| f8169eaded | |||
| 75ab711993 | |||
| f489a86573 | |||
| 2708c1ae31 | |||
| e30034ed07 | |||
| 78bf8bcb21 | |||
| 57f2cf5fa7 | |||
| e06e7b35e7 | |||
| 5651521833 | |||
| ba0ee8a54d | |||
| c2a162932a | |||
| c29c3c5e70 | |||
| 945085edb3 | |||
| 70388fa44e | |||
| 2fee0c15fd | |||
| c05ed13fc9 | |||
| 5e6351502a | |||
| ee0c587182 | |||
| 43e7229a1a | |||
| 8b96515ed2 | |||
| 9d9ea62785 | |||
| 2106e87d58 | |||
| 40980e8fe8 | |||
| f2f810c536 | |||
| 0a9403f308 | |||
| 75a693c9b4 | |||
| 55691b14c2 | |||
| b757b62325 | |||
| 15dbf9722b | |||
| 419ecf38af | |||
| 3cb9d5fd9c | |||
| f1298b143e | |||
| 07ad358f2d | |||
| 211707857d | |||
| e57d5d0eae | |||
| 92d072043e | |||
| 3d1a0f7e98 | |||
| 8b3e30887a | |||
| 3e304c4a1a | |||
| 1c102f23cc | |||
| 4c0d5a46ba | |||
| d0c17d707f | |||
| b41d8d8228 | |||
| 3a6db664c7 | |||
| 166520feea | |||
| 663f5120c2 | |||
| 23ac55fcab | |||
| 93951ce516 | |||
| ae86a00be0 | |||
| 532da3e342 | |||
| a826e4441d | |||
| 1fe27e7c95 | |||
| c1a6c209df | |||
| 8ebd6ab2ee | |||
| ea9b85477d | |||
| 420ff21c3b | |||
| b1b3312749 | |||
| 6e4e870203 | |||
| a3065e1842 | |||
| 4eaf36e1c4 | |||
| e7bb060c0e | |||
| a15d307426 | |||
| 7e7f3445aa | |||
| 10c674633d | |||
| 82c2665cd6 | |||
| 2930cac698 | |||
| 901ab69a16 | |||
| 780e4aa32a | |||
| e4620984f8 | |||
| 017a98c0e9 | |||
| d1aa558119 | |||
| 41479fe483 | |||
| eac5d13c7b | |||
| b228136cae | |||
| 90deb748c6 | |||
| d942708745 | |||
| 3783180844 | |||
| ea836f3057 | |||
| a4c9476204 | |||
| 3ca8c9a997 | |||
| 2f83b1afef | |||
| b0591c665c | |||
| d9871c0f87 | |||
| abc2beb423 | |||
| 8749b4ece4 | |||
| 4a3eaee6be | |||
| 3533e2b0b1 | |||
| 3e0ceac79f | |||
| 03b617b674 | |||
| 840bb1aeda | |||
| 1bfde6b963 | |||
| 3482495bb5 | |||
| 947b2a88a9 | |||
| cac1ed41eb | |||
| 9dc5b349ea | |||
| 0aae1e93f4 | |||
| 78151f87a4 | |||
| 853823d0ae | |||
| 77ae51a050 | |||
| ad9cf788b1 | |||
| 5f9cea4ce9 | |||
| 96ffd349f3 | |||
| d88bbbd0e2 | |||
| 075b5d615d | |||
| 9b5877d1b6 | |||
| 586941d107 | |||
| e1b84bf503 | |||
| b2ea1c7b4f | |||
| bdd93cd933 | |||
| 639c1da8df | |||
| fdb1402c7d | |||
| 0b3f219881 | |||
| ade4f1db92 | |||
| 907a86d145 | |||
| f054799e7f | |||
| d4f5fd694e | |||
| 38fd30e764 | |||
| 03754c1e02 | |||
| ea36b7dceb | |||
| bc9153e465 | |||
| 89b7e36bf6 | |||
| b34db0b987 | |||
| 9875714610 | |||
| 4b47f190a9 | |||
| 17bc8a1103 | |||
| 279475307a | |||
| 9c2e704791 | |||
| 4e1816d7ec | |||
| 5a2cb3b5e3 | |||
| 04103090cc | |||
| ca615f879f | |||
| 2694a6c63a | |||
| b4388b45dc | |||
| 69e4c3c54d | |||
| 68d809256c | |||
| bd091a605b | |||
| cb993d7d8c | |||
| 028b5816c8 | |||
| 8951195a15 | |||
| 60460ae1af | |||
| 978dfc38ea | |||
| 5002e56704 | |||
| 71e81bab00 | |||
| 76c41f0df7 | |||
| 2b981c0942 | |||
| a60640d4fa | |||
| 4be70838e7 | |||
| e89131c92d | |||
| 4e5cc0c6b9 | |||
| 587eea9bb5 | |||
| 57cbcab45b | |||
| c0caa068ba | |||
| b51b78ffb7 | |||
| 67dbae52be | |||
| d0df263b09 | |||
| a5026706a7 | |||
| 20e4973903 | |||
| 1d9bcdd39d | |||
| ba856524f6 | |||
| 332326c833 | |||
| e6d5776ad8 | |||
| fe709a2490 | |||
| ac970148cd | |||
| f0f348921d | |||
| b37680bd66 | |||
| 5286d843c8 | |||
| 22bf677ceb | |||
| bd82bec78e | |||
| 3825e478b2 | |||
| 6c3f6792e9 | |||
| 5858ac62b4 | |||
| 5b0a03d1fb | |||
| c3ea690d48 | |||
| ae8c4875dc | |||
| 55a528487d | |||
| bd1d5fad2f | |||
| b22f088ff6 | |||
| f3f2f9e4b5 | |||
| 7e4136164e | |||
| 5dd631e2cd | |||
| 0a16f37ba1 | |||
| aaa2637a5e | |||
| 7573a8cd55 | |||
| 126550126d | |||
| 733755c94c | |||
| 741d23301f | |||
| 9b7ef9679f | |||
| 30a6a3435f | |||
| f7427c86ee | |||
| d0bf459c7f | |||
| bf8fe0347b | |||
| e60f3cab7a | |||
| 07e2e712ca | |||
| 63f09f63b8 | |||
| 50b8d8e8a8 | |||
| 0ec1f24c17 | |||
| 3c5c0f9c99 | |||
| 53b8ed1e8e | |||
| 49bbf2390d | |||
| aa533277f6 | |||
| ca6505a6a8 | |||
| bb6ee0b7bc | |||
| 7889ba6b6d | |||
| f002ce2ae9 | |||
| 7fd0635d46 | |||
| 235fdf1096 | |||
| 351f89758a | |||
| 7f5e94d33b | |||
| 74a8ed9e48 | |||
| 6bd28790c2 | |||
| 2359af1870 | |||
| e6b61da7ca | |||
| 344bfe2713 | |||
| e9d15e5973 | |||
| 5315290b55 | |||
| f4eee1cf86 | |||
| b12f503f6d | |||
| 58be9901b6 | |||
| 13ef1c83f9 | |||
| 62e5cfcbbd | |||
| 762ce7cc80 | |||
| 4a447d85be | |||
| e4e5611e5d | |||
| 79b712559a | |||
| eaf7899850 | |||
| d2e804f69d | |||
| 2df1a9328a | |||
| 8bf40e5870 | |||
| b0165a0f77 | |||
| 8a96b0bfb8 | |||
| 0efabe485e | |||
| 75c7d935fd | |||
| bea1e75182 | |||
| dd8f2054d8 | |||
| 71660af123 | |||
| 5f4ba04628 | |||
| 39e4a5a0f3 | |||
| 0d0f2cd5a7 | |||
| e8e3709765 | |||
| 074d8d5a5a | |||
| b17fb69dd6 | |||
| ccdc2252f7 | |||
| f9317f253c | |||
| 08f64896a0 | |||
| 74642aac95 | |||
| ceffd47cdd | |||
| 4ed46648e7 | |||
| 56308da519 | |||
| 4855405041 | |||
| cea6aaa116 | |||
| 91f8fb018b | |||
| 05d58c835f | |||
| 874c4967d9 | |||
| dc9966df93 | |||
| e2cd36b6cc | |||
| 6a0082de30 | |||
| 102cf00ded | |||
| 359bd1bc5f | |||
| 0de1644126 | |||
| b816e258a9 | |||
| c4c444a158 | |||
| f3129d1130 | |||
| 8c928057c6 | |||
| 8c0505d760 | |||
| 16d548c358 | |||
| 415b73853a | |||
| a5525406fc | |||
| 37b2aa0173 | |||
| 4df576efe8 | |||
| 87a7e0783f | |||
| 5c8f181ab0 | |||
| 6f7fa4f48e | |||
| 15a854e2cd | |||
| 63d0653647 | |||
| 21b7f15c96 | |||
| 49cd8d37e6 | |||
| 1eafa55b80 | |||
| 9114fb09d5 | |||
| 5e8ab12c3d | |||
| a63511107b | |||
| a7334df955 | |||
| 4a7268df9c | |||
| 148f6dcaaa | |||
| 693d46826e | |||
| dfba92adcd | |||
| 4dc5049927 | |||
| e3ebf176b8 | |||
| 2697bebeb4 | |||
| 1f25825211 | |||
| b04776159e | |||
| 9179e6bf85 | |||
| ba88a710eb | |||
| 66edfe103a | |||
| ec183666b6 |
.devcontainer/devcontainer.json  (new file, 29 lines)
@@ -0,0 +1,29 @@
+// File only needed for VSCode users to have proper Docker based interpreters
+{
+    "name": "accelerate_dev_environment",
+    "build": {
+        // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment
+        "dockerfile": "../docker/accelerate-cpu/Dockerfile"
+        // "dockerfile": "../docker/accelerate-gpu/Dockerfile"
+    },
+    "runArgs": [
+        // ACTION NEEDED: uncomment the next line if your local machine has GPUs available
+        // "--gpus", "all",
+        // Enable the docker container to access system resources
+        "--ipc", "host"
+    ],
+    "remoteEnv": {
+        "PYTHONPATH": "${containerEnv:PATH}:${containerWorkspaceFolder}"
+    },
+    "customizations": {
+        "vscode": {
+            "extensions": [
+                // Ensure we have IntelliSense in VSCode when running inside container
+                "ms-python.python"
+            ]
+        }
+    },
+    "workspaceFolder": "/workspaces/accelerate",
+    // Need git for VSCode to color code modifications. Only runs when building environment.
+    "onCreateCommand": "apt-get update && apt-get install -y git && pip install -e '.[dev]'"
+}
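For contributors not using VS Code, the same CPU development image referenced by this devcontainer can be built and entered by hand. A minimal sketch, assuming the Dockerfile path shown above; the local tag name and mount invocation are illustrative, not part of the diff:

# Build the CPU development image from the repository root (tag name is illustrative).
docker build -f docker/accelerate-cpu/Dockerfile -t accelerate-dev-cpu .

# Start a shell in the container, mirroring the devcontainer's --ipc host setting and workspace path.
docker run -it --ipc host -v "$(pwd):/workspaces/accelerate" -w /workspaces/accelerate accelerate-dev-cpu bash

# Inside the container, install the library in editable mode, as onCreateCommand does above.
pip install -e '.[dev]'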
.github/ISSUE_TEMPLATE/bug-report.yml  (7 changed lines)
@@ -1,6 +1,12 @@
 name: "\U0001F41B Bug Report"
 description: Submit a bug report to help us improve Accelerate
 body:
+- type: markdown
+  attributes:
+    value: |
+      Thanks for taking the time to submit a bug report! 🐛
+      If this is not a bug related to the Accelerate library directly, but instead a general question about your code or the library specifically please use the [forums](https://discuss.huggingface.co/c/accelerate/18).
+
 - type: textarea
   id: system-info
   attributes:
@@ -55,4 +61,3 @@ body:
   attributes:
     label: Expected behavior
     description: "A clear and concise description of what you would expect to happen."
-    render: Shell
.github/PULL_REQUEST_TEMPLATE.md  (new file, 47 lines)
@@ -0,0 +1,47 @@
+# What does this PR do?
+
+<!--
+Congratulations! You've made it this far! You're not quite done yet though.
+
+Once merged, your PR is going to appear in the release notes with the title you set, so make sure it's a great title that fully reflects the extent of your awesome contribution.
+
+Then, please replace this with a description of the change and which issue is fixed (if applicable). Please also include relevant motivation and context. List any dependencies (if any) that are required for this change.
+
+Once you're done, someone will review your PR shortly (see the section "Who can review?" below to tag some potential reviewers). They may suggest changes to make the code even better. If no one reviewed your PR after a week has passed, don't hesitate to post a new comment @-mentioning the same persons---sometimes notifications get lost.
+-->
+
+<!-- Remove if not applicable -->
+
+Fixes # (issue)
+
+
+## Before submitting
+- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
+- [ ] Did you read the [contributor guideline](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md#submitting-a-pull-request-pr),
+      Pull Request section?
+- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link
+      to it if that's the case.
+- [ ] Did you make sure to update the documentation with your changes? Here are the
+      [documentation guidelines](https://github.com/huggingface/accelerate/tree/main/docs), and
+      [here are tips on formatting docstrings](https://github.com/huggingface/accelerate/tree/main/docs#writing-documentation---specification).
+- [ ] Did you write any new necessary tests?
+
+
+## Who can review?
+
+Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
+members/contributors who may be interested in your PR.
+
+<!-- Your PR will be replied to more quickly if you can figure out the right person to tag with @
+
+If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
+
+- Big modeling: @SunMarc
+- Fully-Sharded Data Parallism: @SunMarc @zach-huggingface
+- DeepSpeed: @SunMarc @zach-huggingface
+- Command Line Interface: @SunMarc @zach-huggingface
+- Documentation: @SunMarc @zach-huggingface
+- Core parts of the library: @BenjaminBossan @SunMarc @zach-huggingface
+- Maintained examples: @SunMarc or @zach-huggingface
+
+-->
(file name missing from this capture; the hunk below updates the release Docker image build workflow)
@@ -15,50 +15,90 @@ jobs:
     outputs:
       version: ${{ steps.step1.outputs.version }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - id: step1
-        run: echo "::set-output name=version::$(python setup.py --version)"
+        run: echo "version=$(python setup.py --version)" >> $GITHUB_OUTPUT

   version-cpu:
     name: "Latest Accelerate CPU [version]"
-    runs-on: ubuntu-latest
+    runs-on:
+      group: aws-general-8-plus
     needs: get-version
     steps:
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
-      - name: Check out code
-        uses: actions/checkout@v2
       - name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}

       - name: Build and Push CPU
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v4
         with:
-          context: ./docker/accelerate-cpu
+          file: docker/accelerate-cpu/Dockerfile
           push: true
-          tags: huggingface/accelerate-cpu:${{needs.get-version.outputs.version}}
+          tags: huggingface/accelerate:cpu-release-${{ needs.get-version.outputs.version }}

   version-cuda:
     name: "Latest Accelerate GPU [version]"
-    runs-on: ubuntu-latest
+    runs-on:
+      group: aws-g6-4xlarge-plus
     needs: get-version
     steps:
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
-      - name: Check out code
-        uses: actions/checkout@v2
       - name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}

       - name: Build and Push GPU
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v4
         with:
-          context: ./docker/accelerate-gpu
+          file: docker/accelerate-gpu/Dockerfile
           push: true
-          tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}
+          tags: huggingface/accelerate:gpu-release-${{needs.get-version.outputs.version}}
+
+  version-cuda-deepspeed:
+    name: "Latest Accelerate GPU DeepSpeed [version]"
+    runs-on:
+      group: aws-g6-4xlarge-plus
+    needs: get-version
+    steps:
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to DockerHub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_PASSWORD }}
+
+      - name: Build and Push GPU
+        uses: docker/build-push-action@v4
+        with:
+          file: docker/accelerate-gpu-deepspeed/Dockerfile
+          push: true
+          tags: huggingface/accelerate:gpu-deepspeed-release-${{needs.get-version.outputs.version}}
+
+  version-cuda-fp8-transformerengine:
+    name: "Latest Accelerate GPU FP8 TransformerEngine [version]"
+    runs-on:
+      group: aws-g6-4xlarge-plus
+    needs: get-version
+    steps:
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to DockerHub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_PASSWORD }}
+
+      - name: Build and Push GPU
+        uses: docker/build-push-action@v4
+        with:
+          file: docker/accelerate-gpu/Dockerfile
+          push: true
+          tags: huggingface/accelerate:gpu-fp8-transformerengine-release-${{needs.get-version.outputs.version}}
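The get-version step above reflects GitHub's deprecation of the ::set-output workflow command in favor of writing to the $GITHUB_OUTPUT file, a migration applied throughout these workflows. A minimal sketch of the two forms as they behave in a bash step:

# Deprecated form: prints a workflow command that the runner parses from stdout.
echo "::set-output name=version::$(python setup.py --version)"

# Current form: appends key=value to the step's output file; later steps read it as steps.<id>.outputs.version.
echo "version=$(python setup.py --version)" >> "$GITHUB_OUTPUT"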
.github/workflows/build_and_run_tests.yml  (13 changed lines)
@@ -16,20 +16,20 @@ jobs:
     outputs:
       changed: ${{ steps.was_changed.outputs.changed }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          fetch-depth: "2"

      - name: Get changed files
        id: changed-files
-        uses: tj-actions/changed-files@v22.2
+        uses: tj-actions/changed-files@3f54ebb830831fc121d3263c1857cfbdc310cdb9 #v42

      - name: Was setup changed
        id: was_changed
        run: |
          for file in ${{ steps.changed-files.outputs.all_changed_files }}; do
            if [ `basename "${file}"` == "setup.py" ]; then
-              echo ::set-output name=changed::"1"
+              echo "changed=1" >> $GITHUB_OUTPUT
            fi
          done

@@ -42,4 +42,9 @@ jobs:
   run-merge-tests:
     needs: build-docker-containers
     if: always()
     uses: ./.github/workflows/run_merge_tests.yml
+
+  run-integration-tests:
+    needs: build-docker-containers
+    if: always()
+    uses: ./.github/workflows/self_hosted_integration_tests.yml
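The "Was setup changed" step keys the Docker rebuilds off whether setup.py was touched. Outside CI the same check can be approximated against the previous commit with plain git; a rough local sketch, not the workflow's tj-actions logic:

# Flag whether setup.py is among the files touched by the most recent commit (local approximation).
changed=0
for file in $(git diff --name-only HEAD~1 HEAD); do
  if [ "$(basename "$file")" = "setup.py" ]; then
    changed=1
  fi
done
echo "setup.py changed: $changed"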
.github/workflows/build_docker_images.yml  (98 changed lines)
@@ -13,42 +13,104 @@ concurrency:
 jobs:
   latest-cpu:
     name: "Latest Accelerate CPU [dev]"
-    runs-on: ubuntu-latest
+    runs-on:
+      group: aws-general-8-plus
     steps:
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
-      - name: Check out code
-        uses: actions/checkout@v2
       - name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}
+      - name: Get current date
+        id: date
+        run: |
+          echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
       - name: Build and Push CPU
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v4
         with:
-          context: ./docker/accelerate-cpu
+          file: docker/accelerate-cpu/Dockerfile
           push: true
-          tags: huggingface/accelerate-cpu
+          tags: |
+            huggingface/accelerate:cpu-nightly
+            huggingface/accelerate:cpu-nightly-${{ env.date }}

   latest-cuda:
     name: "Latest Accelerate GPU [dev]"
-    runs-on: ubuntu-latest
+    runs-on:
+      group: aws-g6-4xlarge-plus
     steps:
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
-      - name: Check out code
-        uses: actions/checkout@v2
       - name: Login to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}
+      - name: Get current date
+        id: date
+        run: |
+          echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
       - name: Build and Push GPU
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v4
         with:
-          context: ./docker/accelerate-gpu
+          file: docker/accelerate-gpu/Dockerfile
           push: true
-          tags: huggingface/accelerate-gpu
+          tags: |
+            huggingface/accelerate:gpu-nightly
+            huggingface/accelerate:gpu-nightly-${{ env.date }}
+
+  latest-cuda-deepspeed:
+    name: "Latest Accelerate GPU DeepSpeed [dev]"
+    runs-on:
+      group: aws-g6-4xlarge-plus
+    steps:
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to DockerHub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_PASSWORD }}
+      - name: Get current date
+        id: date
+        run: |
+          echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
+      - name: Build and Push GPU
+        uses: docker/build-push-action@v4
+        with:
+          file: docker/accelerate-gpu-deepspeed/Dockerfile
+          push: true
+          tags: |
+            huggingface/accelerate:gpu-deepspeed-nightly
+            huggingface/accelerate:gpu-deepspeed-nightly-${{ env.date }}
+
+  latest-cuda-fp8-transformerengine:
+    name: "Latest Accelerate GPU FP8 TransformerEngine [dev]"
+    runs-on:
+      group: aws-g6-4xlarge-plus
+    steps:
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to DockerHub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_PASSWORD }}
+      - name: Get current date
+        id: date
+        run: |
+          echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
+          # Get the previous month
+          echo "base_year=$(date -d 'last month' '+%y')" >> $GITHUB_ENV
+          echo "base_month=$(date -d 'last month' '+%m')" >> $GITHUB_ENV
+      - name: Build and Push GPU
+        uses: docker/build-push-action@v4
+        with:
+          file: benchmarks/fp8/transformer_engine/Dockerfile
+          push: true
+          tags: huggingface/accelerate:gpu-fp8-transformerengine-nightly-${{ env.date }}
+          build-args: |
+            BASE_YEAR=${{ env.base_year }}
+            BASE_MONTH=${{ env.base_month }}
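The nightly jobs above derive their image tags from dates written to $GITHUB_ENV. A small sketch of the same date commands used in the diff, runnable locally to preview the tag suffixes and the BASE_YEAR/BASE_MONTH build arguments:

# Date suffix used for the *-nightly-YYYY-MM-DD image tags.
date '+%Y-%m-%d'

# Previous-month values passed to the TransformerEngine Dockerfile as BASE_YEAR / BASE_MONTH.
date -d 'last month' '+%y'
date -d 'last month' '+%m'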
.github/workflows/build_documentation.yml  (3 changed lines)
@@ -13,5 +13,6 @@ jobs:
     with:
       commit_sha: ${{ github.sha }}
       package: accelerate
+      custom_container: huggingface/transformers-doc-builder
     secrets:
-      token: ${{ secrets.HUGGINGFACE_PUSH }}
+      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
.github/workflows/build_pr_documentation.yml  (1 changed line)
@@ -14,3 +14,4 @@ jobs:
       commit_sha: ${{ github.event.pull_request.head.sha }}
       pr_number: ${{ github.event.number }}
       package: accelerate
+      custom_container: huggingface/transformers-doc-builder
.github/workflows/delete_doc_comment.yml  (deleted, 13 lines)
@@ -1,13 +0,0 @@
-name: Delete dev documentation
-
-on:
-  pull_request:
-    types: [ closed ]
-
-
-jobs:
-  delete:
-    uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main
-    with:
-      pr_number: ${{ github.event.number }}
-      package: accelerate
.github/workflows/fp8_runner.yml  (new file, 37 lines)
@@ -0,0 +1,37 @@
+name: Test FP8 Runner
+
+on:
+  workflow_dispatch:
+
+env:
+  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+jobs:
+  set-prev-day:
+    runs-on: ubuntu-latest
+    outputs:
+      prev-day: ${{ steps.set-prev-day.outputs.prev-day }}
+    steps:
+      - name: Set PREV_DAY
+        id: set-prev-day
+        run: |
+          PREV_DAY=$(date -d "yesterday" '+%Y-%m-%d')
+          echo "prev-day=$PREV_DAY" >> $GITHUB_OUTPUT
+  run-fp8-tests:
+    needs: set-prev-day
+    runs-on:
+      group: aws-g6e-12xlarge
+    container:
+      image: huggingface/accelerate:gpu-fp8-transformerengine-nightly-${{ needs.set-prev-day.outputs.prev-day }}
+      options: --gpus all --shm-size "16gb"
+    steps:
+      - uses: actions/checkout@v3
+      - name: Install the library
+        run: |
+          pip install -e .[test_prod,test_fp8]
+      - name: Show installed libraries
+        run: |
+          pip freeze
+      - name: Run TE FP8 tests
+        run: |
+          python -m pytest -s -v ./tests/test_fp8.py
+
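Outside of this workflow_dispatch job, the TE FP8 suite can be exercised against one of the dated nightly images built by build_docker_images.yml. A rough sketch from a local accelerate checkout; the date in the tag and the /workspace mount path are illustrative placeholders:

# Pull a dated FP8 TransformerEngine nightly image (replace the date with a real tag).
docker pull huggingface/accelerate:gpu-fp8-transformerengine-nightly-2025-01-01

# Mirror the workflow's install and pytest steps inside the container.
docker run --gpus all --shm-size 16gb -v "$(pwd):/workspace" -w /workspace \
  huggingface/accelerate:gpu-fp8-transformerengine-nightly-2025-01-01 \
  bash -c "pip install -e .[test_prod,test_fp8] && python -m pytest -s -v ./tests/test_fp8.py"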
.github/workflows/gaudi3_scheduled.yml  (new file, 87 lines)
@@ -0,0 +1,87 @@
+name: Gaudi3 tests (scheduled)
+
+on:
+  workflow_dispatch:
+  schedule: # every day at 6 AM UTC
+    - cron: "0 6 * * *"
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  run-gaudi3-tests:
+    runs-on:
+      group: itac-bm-emr-gaudi3-dell-2gaudi
+
+    container:
+      image: docker://vault.habana.ai/gaudi-docker/1.21.1/ubuntu22.04/habanalabs/pytorch-installer-2.6.0:latest
+      options: --runtime=habana --shm-size=64G --cap-add=sys_nice --env HABANA_VISIBLE_DEVICES
+      env:
+        OMPI_MCA_btl_vader_single_copy_mechanism: none
+        PT_ENABLE_INT64_SUPPORT: 1
+        PT_HPU_LAZY_MODE: 0
+        RUN_SLOW: 1
+
+    steps:
+      - name: HL-SMI (1)
+        run: |
+          hl-smi
+          echo "HABANA_VISIBLE_DEVICES=${HABANA_VISIBLE_DEVICES}"
+          echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}"
+
+      - name: Extract HPU visible modules
+        id: add-modules
+        run: |
+          export HABANA_VISIBLE_MODULES=$(hl-smi -Q module_id -f csv,noheader | tr '\n' ',' | sed 's/,$//')
+          echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}" >> $GITHUB_ENV
+
+      - name: HL-SMI (2)
+        run: |
+          hl-smi
+          echo "HABANA_VISIBLE_DEVICES=${HABANA_VISIBLE_DEVICES}"
+          echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}"
+
+      - name: Checkout to Accelerate
+        uses: actions/checkout@v4
+
+      - name: Install Accelerate with Transformers & DeepSpeed
+        run: |
+          pip install -e .[testing] \
+            git+https://github.com/HabanaAI/DeepSpeed.git@1.20.0 \
+            git+https://github.com/huggingface/transformers.git
+
+      - name: Run CLI tests
+        if: ${{ !cancelled() && (success() || failure()) }}
+        run: |
+          make test_cli
+
+      - name: Run Core tests
+        if: ${{ !cancelled() && (success() || failure()) }}
+        run: |
+          make test_core
+
+      - name: Run Big Modeling tests
+        if: ${{ !cancelled() && (success() || failure()) }}
+        run: |
+          make test_big_modeling
+
+      - name: Run DeepSpeed integration tests
+        if: ${{ !cancelled() && (success() || failure()) }}
+        run: |
+          make test_deepspeed
+
+      - name: Run FSDP integration tests
+        if: ${{ !cancelled() && (success() || failure()) }}
+        run: |
+          make test_fsdp
+
+      - name: Run TP integration tests
+        if: ${{ !cancelled() && (success() || failure()) }}
+        run: |
+          make test_tp
+
+      - name: Run Examples tests
+        if: ${{ !cancelled() && (success() || failure()) }}
+        run: |
+          make test_examples
.github/workflows/integration_tests.yml  (new file, 58 lines)
@@ -0,0 +1,58 @@
+# CI for specifically ensuring integrations work fine (`transformers` mainly)
+# Useful tips:
+# - New integrations to test should have its own job, and follow a strategy method where we check both
+#   the pypi and github versions.
+# - When checking the latest release of the integration, use
+#   git checkout $(git describe --tags `git rev-list --tags --max-count=1`) to get the latest release.
+
+name: Integration Tests
+
+on:
+  pull_request:
+    paths:
+      - "src/**"
+      - "tests/**"
+      - ".github/**"
+      - "examples/**"
+      - "setup.py"
+    types: [opened, synchronize, reopened]
+
+env:
+  HF_HOME: ~/hf_cache
+
+jobs:
+  run-trainer-tests:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v4
+      - name: Set up python 3.9
+        uses: actions/setup-python@v5
+        with:
+          python-version: 3.9
+          cache: 'pip'
+          cache-dependency-path: 'setup.py'
+
+      - name: Install Accelerate from source
+        run: |
+          pip install --upgrade pip
+          pip install -e .
+
+      - name: Clone and install transformers
+        run: |
+          cd ..
+          git clone https://github.com/huggingface/transformers
+          cd transformers
+          pip install .[torch,testing]
+
+      - name: Show installed libraries
+        run: |
+          pip freeze
+
+      - name: Run Trainer tests
+        env:
+          WANDB_DISABLED: true
+        run: |
+          cd ../transformers
+          pytest -sv tests/trainer
+
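The job above can be mirrored locally to check an accelerate change against transformers' Trainer suite. A condensed sketch of the same steps, assuming the commands are run from a local accelerate checkout with a sibling directory available for the transformers clone:

# Install the local accelerate checkout, then transformers from source in a sibling directory.
pip install --upgrade pip
pip install -e .
git clone https://github.com/huggingface/transformers ../transformers
cd ../transformers
pip install .[torch,testing]

# Run the Trainer suite with Weights & Biases disabled, as the workflow does.
WANDB_DISABLED=true pytest -sv tests/trainer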
187
.github/workflows/nightly.yml
vendored
187
.github/workflows/nightly.yml
vendored
@ -8,81 +8,226 @@ on:
|
|||||||
env:
|
env:
|
||||||
RUN_SLOW: "yes"
|
RUN_SLOW: "yes"
|
||||||
IS_GITHUB_CI: "1"
|
IS_GITHUB_CI: "1"
|
||||||
|
SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}
|
||||||
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
run_all_tests_single_gpu:
|
run_core_tests_single_gpu:
|
||||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
runs-on:
|
||||||
|
group: aws-g6-4xlarge-plus
|
||||||
env:
|
env:
|
||||||
CUDA_VISIBLE_DEVICES: "0"
|
CUDA_VISIBLE_DEVICES: "0"
|
||||||
|
TEST_TYPE: "single_gpu"
|
||||||
container:
|
container:
|
||||||
image: huggingface/accelerate-gpu:latest
|
image: huggingface/accelerate:gpu-nightly
|
||||||
options: --gpus all --shm-size "16gb"
|
options: --gpus all --shm-size "16gb"
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
working-directory: accelerate/
|
|
||||||
shell: bash
|
shell: bash
|
||||||
steps:
|
steps:
|
||||||
- name: Update clone & pip install
|
- name: Update clone & pip install
|
||||||
run: |
|
run: |
|
||||||
source activate accelerate
|
source activate accelerate
|
||||||
git config --global --add safe.directory '*'
|
git clone https://github.com/huggingface/accelerate;
|
||||||
git fetch && git checkout ${{ github.sha }}
|
cd accelerate;
|
||||||
|
git checkout ${{ github.sha }};
|
||||||
pip install -e . --no-deps
|
pip install -e . --no-deps
|
||||||
pip install pytest-reportlog
|
pip install pytest-reportlog tabulate
|
||||||
|
|
||||||
|
- name: Show installed libraries
|
||||||
|
run: |
|
||||||
|
source activate accelerate;
|
||||||
|
pip freeze
|
||||||
|
|
||||||
- name: Run test on GPUs
|
- name: Run test on GPUs
|
||||||
|
working-directory: accelerate
|
||||||
run: |
|
run: |
|
||||||
source activate accelerate
|
source activate accelerate
|
||||||
make test
|
make test
|
||||||
|
|
||||||
- name: Run examples on GPUs
|
- name: Run examples on GPUs
|
||||||
|
working-directory: accelerate
|
||||||
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
source activate accelerate
|
source activate accelerate
|
||||||
pip uninstall comet_ml -y
|
pip uninstall comet_ml -y
|
||||||
make test_examples
|
make test_examples
|
||||||
|
|
||||||
- name: Generate Report
|
- name: Generate Report
|
||||||
|
working-directory: accelerate
|
||||||
if: always()
|
if: always()
|
||||||
run: |
|
run: |
|
||||||
|
pip install slack_sdk tabulate
|
||||||
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

-  run_all_tests_multi_gpu:
-    runs-on: [self-hosted, docker-gpu, multi-gpu]
+  run_deepspeed_tests_single_gpu:
+    runs-on:
+      group: aws-g6-4xlarge-plus
    env:
-      CUDA_VISIBLE_DEVICES: "0,1"
+      CUDA_VISIBLE_DEVICES: "0"
+      TEST_TYPE: "single_gpu_deepspeed"
    container:
-      image: huggingface/accelerate-gpu:latest
+      image: huggingface/accelerate:gpu-deepspeed-nightly
      options: --gpus all --shm-size "16gb"
    defaults:
      run:
-        working-directory: accelerate/
        shell: bash
    steps:
-      - name: Update clone
+      - name: Update clone & pip install
        run: |
          source activate accelerate
-          git config --global --add safe.directory '*'
-          git fetch && git checkout ${{ github.sha }}
+          git clone https://github.com/huggingface/accelerate;
+          cd accelerate;
+          git checkout ${{ github.sha }};
          pip install -e . --no-deps
-          pip install pytest-reportlog
+          pip install pytest-reportlog tabulate

-      - name: Run core and big modeling tests on GPUs
+      - name: Show installed libraries
+        run: |
+          source activate accelerate;
+          pip freeze
+
+      - name: Run test on GPUs
+        working-directory: accelerate
        run: |
          source activate accelerate
-          make test_big_modeling
-          make test_core
+          make test_deepspeed

      - name: Run Integration tests on GPUs
+        working-directory: accelerate
+        if: always()
        run: |
          source activate accelerate
          make test_integrations

      - name: Run examples on GPUs
+        working-directory: accelerate
+        if: always()
        run: |
          source activate accelerate
          pip uninstall comet_ml -y
          make test_examples

      - name: Generate Report
+        working-directory: accelerate
        if: always()
        run: |
-          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
+          pip install slack_sdk tabulate
+          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
+
+  run_core_tests_multi_gpu:
+    runs-on:
+      group: aws-g6-12xlarge-plus
+    env:
+      CUDA_VISIBLE_DEVICES: "0,1"
+      TEST_TYPE: "multi_gpu"
+    container:
+      image: huggingface/accelerate:gpu-nightly
+      options: --gpus all --shm-size "16gb"
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - name: Update clone
+        run: |
+          source activate accelerate
+          git clone https://github.com/huggingface/accelerate;
+          cd accelerate;
+          git checkout ${{ github.sha }};
+          pip install -e . --no-deps
+          pip install pytest-reportlog tabulate
+
+      - name: Show installed libraries
+        run: |
+          source activate accelerate;
+          pip freeze
+
+      - name: Run core and big modeling tests on GPUs
+        working-directory: accelerate
+        run: |
+          source activate accelerate
+          make test_core
+          make test_big_modeling
+          make test_cli
+
+      - name: Run Integration tests on GPUs
+        working-directory: accelerate
+        if: always()
+        run: |
+          source activate accelerate
+          make test_integrations
+
+      - name: Run examples on GPUs
+        working-directory: accelerate
+        if: always()
+        run: |
+          source activate accelerate
+          pip uninstall comet_ml -y
+          make test_examples
+
+      - name: Generate Report
+        working-directory: accelerate
+        if: always()
+        run: |
+          pip install slack_sdk tabulate
+          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
+
+  run_deepspeed_tests_multi_gpu:
+    runs-on:
+      group: aws-g6-12xlarge-plus
+    env:
+      CUDA_VISIBLE_DEVICES: "0,1"
+      TEST_TYPE: "multi_gpu_deepspeed"
+    container:
+      image: huggingface/accelerate:gpu-deepspeed-nightly
+      options: --gpus all --shm-size "16gb"
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - name: Update clone
+        run: |
+          source activate accelerate
+          git clone https://github.com/huggingface/accelerate;
+          cd accelerate;
+          git checkout ${{ github.sha }};
+          pip install -e . --no-deps
+          pip install pytest-reportlog tabulate
+
+      - name: Show installed libraries
+        run: |
+          source activate accelerate;
+          pip freeze
+
+      - name: Run DeepSpeed tests
+        working-directory: accelerate
+        run: |
+          source activate accelerate
+          make test_deepspeed
+
+      - name: Run Integration tests on GPUs
+        working-directory: accelerate
+        if: always()
+        run: |
+          source activate accelerate
+          make test_integrations
+
+      - name: Run examples on GPUs
+        working-directory: accelerate
+        if: always()
+        run: |
+          source activate accelerate
+          pip uninstall comet_ml -y
+          make test_examples
+
+      - name: Generate Report
+        working-directory: accelerate
+        if: always()
+        run: |
+          pip install slack_sdk tabulate
+          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
+
+  run-integration-tests:
+    if: always()
+    uses: ./.github/workflows/self_hosted_integration_tests.yml
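As a rough local approximation of what the single-GPU DeepSpeed job above executes, the sketch below reproduces its test step by hand. It assumes you already have a clone of the repository and an environment with the test dependencies installed; the container image and conda environment used in CI are not required.

```bash
# Minimal sketch of the "run_deepspeed_tests_single_gpu" job, run by hand.
cd accelerate
pip install -e . --no-deps
pip install pytest-reportlog tabulate

# Restrict the run to one GPU, mirroring the job's CUDA_VISIBLE_DEVICES setting.
CUDA_VISIBLE_DEVICES=0 make test_deepspeed

# In CI the report step redirects this output into $GITHUB_STEP_SUMMARY.
python utils/log_reports.py
```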
.github/workflows/pr_style_bot.yml (new file, 19 lines)

# To run this bot, comment "@bot /style" on a PR
name: Style Bot

on:
  issue_comment:
    types: [created]

permissions:
  contents: write
  pull-requests: write

jobs:
  style:
    uses: huggingface/huggingface_hub/.github/workflows/style-bot-action.yml@main
    with:
      python_quality_dependencies: "[quality]"
      style_command_type: "default"
    secrets:
      bot_token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/quality.yml (17 changed lines)

@@ -6,12 +6,19 @@ jobs:
  quality:
    runs-on: ubuntu-latest
    steps:
-    - uses: actions/checkout@v2
-    - name: Set up Python 3.7
-      uses: actions/setup-python@v3
+    - uses: actions/checkout@v4
+    - name: Set up Python 3.9
+      uses: actions/setup-python@v5
      with:
-        python-version: 3.7
+        python-version: 3.9
+        cache: 'pip'
+        cache-dependency-path: 'setup.py'
    - name: Install Python dependencies
      run: pip install -e .[quality]
    - name: Run Quality check
      run: make quality
+    - name: Check if failure
+      if: ${{ failure() }}
+      run: |
+        echo "Quality check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and rerun 'make style; make quality;'" >> $GITHUB_STEP_SUMMARY
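The failure message added in the step above points at the local equivalent of this check. A minimal sketch of running it yourself, assuming a clone of the repository:

```bash
# Install the linting/quality dependencies and run the same checks as the workflow.
pip install -e .[quality]
make style    # apply automatic formatting fixes
make quality  # verify nothing is left to fix manually
```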
.github/workflows/run_merge_tests.yml (161 changed lines)

@@ -9,71 +9,180 @@ env:
  IS_GITHUB_CI: "1"

jobs:
-  run_all_tests_single_gpu:
-    runs-on: [self-hosted, docker-gpu, multi-gpu]
+  run_core_tests_single_gpu:
+    runs-on:
+      group: aws-g6-4xlarge-plus
    env:
      CUDA_VISIBLE_DEVICES: "0"
    container:
-      image: huggingface/accelerate-gpu:latest
+      image: huggingface/accelerate:gpu-nightly
      options: --gpus all --shm-size "16gb"
    defaults:
      run:
-        working-directory: accelerate/
        shell: bash
    steps:
-      - name: Update clone & pip install
+      - name: Install accelerate
        run: |
-          source activate accelerate
-          git config --global --add safe.directory '*'
-          git fetch && git checkout ${{ github.sha }}
-          pip install -e .[testing,test_trackers]
-          pip install pytest-reportlog
+          source activate accelerate;
+          git clone https://github.com/huggingface/accelerate;
+          cd accelerate;
+          git checkout ${{ github.sha }};
+          pip install -e .[testing,test_trackers] -U;
+          pip install pytest-reportlog tabulate ;
+
+      - name: Show installed libraries
+        run: |
+          source activate accelerate;
+          pip freeze
+
+      - name: Run CLI tests (use make cli)
+        working-directory: accelerate
+        run: |
+          source activate accelerate;
+          make test_cli
+
      - name: Run test on GPUs
+        working-directory: accelerate
+        if: always()
        run: |
-          source activate accelerate
+          source activate accelerate;
          make test
      - name: Run examples on GPUs
+        working-directory: accelerate
+        if: always()
        run: |
-          source activate accelerate
-          pip uninstall comet_ml -y
+          source activate accelerate;
+          pip uninstall comet_ml -y;
          make test_examples

      - name: Generate Report
+        working-directory: accelerate
        if: always()
        run: |
+          pip install tabulate;
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

-  run_all_tests_multi_gpu:
-    runs-on: [self-hosted, docker-gpu, multi-gpu]
+  run_deepspeed_tests_single_gpu:
+    runs-on:
+      group: aws-g6-4xlarge-plus
+    env:
+      CUDA_VISIBLE_DEVICES: "0"
    container:
-      image: huggingface/accelerate-gpu:latest
+      image: huggingface/accelerate:gpu-deepspeed-nightly
+      options: --gpus all --shm-size "16gb"
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - name: Install accelerate
+        run: |
+          source activate accelerate;
+          git clone https://github.com/huggingface/accelerate;
+          cd accelerate;
+          git checkout ${{ github.sha }};
+          pip install -e .[testing,test_trackers] -U;
+          pip install pytest-reportlog tabulate ;
+
+      - name: Show installed libraries
+        run: |
+          source activate accelerate;
+          pip freeze
+
+      - name: Run test on GPUs
+        working-directory: accelerate
+        if: always()
+        run: |
+          source activate accelerate;
+          make test_deepspeed
+
+      - name: Generate Report
+        working-directory: accelerate
+        if: always()
+        run: |
+          pip install tabulate;
+          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
+
+  run_core_tests_multi_gpu:
+    runs-on:
+      group: aws-g6-12xlarge-plus
+    env:
+      CUDA_VISIBLE_DEVICES: 0,1
+    container:
+      image: huggingface/accelerate:gpu-nightly
      options: --gpus all --shm-size "16gb"
    defaults:
      run:
-        working-directory: accelerate/
        shell: bash
    steps:
      - name: Update clone
        run: |
-          source activate accelerate
-          git config --global --add safe.directory '*'
-          git fetch && git checkout ${{ github.sha }}
-          pip install -e .[testing,test_trackers]
-          pip install pytest-reportlog
+          source activate accelerate;
+          git clone https://github.com/huggingface/accelerate;
+          cd accelerate;
+          git checkout ${{ github.sha }};
+          pip install -e .[testing,test_trackers] -U;
+          pip install pytest-reportlog tabulate
+
+      - name: Show installed libraries
+        run: |
+          source activate accelerate;
+          pip freeze
+
      - name: Run test on GPUs
+        working-directory: accelerate
        run: |
-          source activate accelerate
+          source activate accelerate;
          make test

      - name: Run examples on GPUs
+        working-directory: accelerate
+        if: always()
        run: |
-          source activate accelerate
-          pip uninstall comet_ml -y
+          source activate accelerate;
+          pip uninstall comet_ml -y;
          make test_examples

      - name: Generate Report
+        working-directory: accelerate
        if: always()
        run: |
-          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
+          source activate accelerate;
+          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
+
+  run_deepspeed_tests_multi_gpu:
+    runs-on:
+      group: aws-g6-12xlarge-plus
+    container:
+      image: huggingface/accelerate:gpu-deepspeed-nightly
+      options: --gpus all --shm-size "16gb"
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - name: Install accelerate
+        run: |
+          source activate accelerate;
+          git clone https://github.com/huggingface/accelerate;
+          cd accelerate;
+          git checkout ${{ github.sha }};
+          pip install -e .[testing,test_trackers] -U;
+          pip install pytest-reportlog tabulate ;
+
+      - name: Show installed libraries
+        run: |
+          source activate accelerate;
+          pip freeze
+
+      - name: Run test on GPUs
+        working-directory: accelerate
+        if: always()
+        run: |
+          source activate accelerate;
+          make test_deepspeed
+
+      - name: Generate Report
+        working-directory: accelerate
+        if: always()
+        run: |
+          pip install tabulate;
+          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
127
.github/workflows/self_hosted_integration_tests.yml
vendored
Normal file
127
.github/workflows/self_hosted_integration_tests.yml
vendored
Normal file
@ -0,0 +1,127 @@
|
|||||||
|
# CI for specifically ensuring integrations work fine (`transformers` mainly) on GPUs
|
||||||
|
# Useful tips:
|
||||||
|
# - `working-directory` should be set to the root of the repo, which is cloned on the actual CI runner.
|
||||||
|
# It follows the directory structure of `actions-runner/_work/{repo_name}/{repo_name}/{cloned_repo} on
|
||||||
|
# prem, but in Actions setting `working-directory` looks just in the `{repo_name}` level.
|
||||||
|
# - New integrations to test should have its own job, and follow a strategy method where we check both
|
||||||
|
# the pypi and github versions.
|
||||||
|
# - Workflow call lets this be called from `build_and_run_tests.yml`
|
||||||
|
# - When using a docker container, it's recommended to set `--shm-size`, we use 16gb.
|
||||||
|
name: Integration Tests (push to "main")
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_call:
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
env:
|
||||||
|
HF_HOME: ~/hf_cache
|
||||||
|
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
shell: bash
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
run-trainer-tests:
|
||||||
|
container:
|
||||||
|
image: huggingface/accelerate:gpu-deepspeed-nightly
|
||||||
|
options: --gpus all --shm-size "16gb"
|
||||||
|
runs-on:
|
||||||
|
group: aws-g6-12xlarge-plus
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
cuda_visible_devices: [
|
||||||
|
"0",
|
||||||
|
"0,1"
|
||||||
|
]
|
||||||
|
steps:
|
||||||
|
- name: Install transformers
|
||||||
|
run: |
|
||||||
|
source activate accelerate;
|
||||||
|
git clone https://github.com/huggingface/transformers --depth 1;
|
||||||
|
cd transformers;
|
||||||
|
pip install .[torch,deepspeed-testing];
|
||||||
|
cd ..;
|
||||||
|
|
||||||
|
- name: Install accelerate
|
||||||
|
run: |
|
||||||
|
source activate accelerate;
|
||||||
|
git clone https://github.com/huggingface/accelerate;
|
||||||
|
cd accelerate;
|
||||||
|
git checkout ${{ github.sha }} ;
|
||||||
|
pip install -e .[testing];
|
||||||
|
pip uninstall comet_ml wandb dvclive -y
|
||||||
|
cd ..;
|
||||||
|
|
||||||
|
- name: Show installed libraries
|
||||||
|
run: |
|
||||||
|
source activate accelerate;
|
||||||
|
pip freeze
|
||||||
|
|
||||||
|
- name: Run trainer tests
|
||||||
|
working-directory: transformers/
|
||||||
|
env:
|
||||||
|
CUDA_VISIBLE_DEVICES: ${{ matrix.cuda_visible_devices }}
|
||||||
|
WANDB_DISABLED: true
|
||||||
|
run: |
|
||||||
|
source activate accelerate;
|
||||||
|
pytest -sv tests/trainer
|
||||||
|
|
||||||
|
- name: Run deepspeed tests
|
||||||
|
working-directory: transformers/
|
||||||
|
env:
|
||||||
|
CUDA_VISIBLE_DEVICES: ${{ matrix.cuda_visible_devices }}
|
||||||
|
WANDB_DISABLED: true
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
source activate accelerate;
|
||||||
|
pytest -sv tests/deepspeed
|
||||||
|
|
||||||
|
- name: Run transformers examples tests
|
||||||
|
working-directory: transformers/
|
||||||
|
env:
|
||||||
|
CUDA_VISIBLE_DEVICES: ${{ matrix.cuda_visible_devices }}
|
||||||
|
WANDB_DISABLED: true
|
||||||
|
run: |
|
||||||
|
source activate accelerate
|
||||||
|
pip install -r examples/pytorch/_tests_requirements.txt
|
||||||
|
pytest -sv examples/pytorch/test_accelerate_examples.py examples/pytorch/test_pytorch_examples.py
|
||||||
|
|
||||||
|
run-skorch-tests:
|
||||||
|
container:
|
||||||
|
image: huggingface/accelerate:gpu-nightly
|
||||||
|
options: --gpus all --shm-size "16gb"
|
||||||
|
runs-on:
|
||||||
|
group: aws-g6-12xlarge-plus
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
steps:
|
||||||
|
- name: Install accelerate
|
||||||
|
run:
|
||||||
|
source activate accelerate;
|
||||||
|
git clone https://github.com/huggingface/accelerate;
|
||||||
|
cd accelerate;
|
||||||
|
git checkout ${{ github.sha }};
|
||||||
|
pip install -e .[testing];
|
||||||
|
cd ..
|
||||||
|
|
||||||
|
- name: Install skorch
|
||||||
|
run: |
|
||||||
|
source activate accelerate
|
||||||
|
git clone https://github.com/skorch-dev/skorch;
|
||||||
|
cd skorch;
|
||||||
|
git config --global --add safe.directory '*'
|
||||||
|
git checkout master && git pull
|
||||||
|
pip install .[test]
|
||||||
|
pip install flaky
|
||||||
|
|
||||||
|
- name: Show installed libraries
|
||||||
|
run: |
|
||||||
|
source activate accelerate;
|
||||||
|
pip freeze
|
||||||
|
|
||||||
|
- name: Run skorch tests
|
||||||
|
working-directory: skorch/
|
||||||
|
run: |
|
||||||
|
source activate accelerate;
|
||||||
|
pytest -sv -k TestAccelerate
|
||||||
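For reference, the commands below approximate one cell of the `cuda_visible_devices` matrix in the trainer job above when run by hand; the paths and environment handling are assumptions based on the workflow, not a supported entry point.

```bash
# Roughly what the "run-trainer-tests" job does for the "0,1" matrix entry.
git clone https://github.com/huggingface/transformers --depth 1
cd transformers
pip install .[torch,deepspeed-testing]

# Run the transformers trainer and deepspeed suites against the installed accelerate.
export WANDB_DISABLED=true
CUDA_VISIBLE_DEVICES=0,1 pytest -sv tests/trainer
CUDA_VISIBLE_DEVICES=0,1 pytest -sv tests/deepspeed
```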
.github/workflows/stale.yml (13 changed lines)

@@ -10,19 +10,24 @@ jobs:
    name: Close Stale Issues
    if: github.repository == 'huggingface/accelerate'
    runs-on: ubuntu-latest
+    permissions:
+      issues: write
+      pull-requests: write
    env:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4

      - name: Setup Python
-        uses: actions/setup-python@v1
+        uses: actions/setup-python@v5
        with:
-          python-version: 3.7
+          python-version: 3.9
+          cache: 'pip'
+          cache-dependency-path: 'setup.py'

      - name: Install requirements
        run: |
          pip install PyGithub
      - name: Close stale issues
        run: |
          python utils/stale.py
.github/workflows/test.yml (34 changed lines)

@@ -23,11 +23,12 @@ jobs:
      matrix:
        pytorch-version: [
          latest,
-          minimum
+          minimum,
        ]
        test-kind: [
          test_prod,
          test_core,
+          test_cli,
          test_big_modeling,
          test_deepspeed,
          test_fsdp,
@@ -37,34 +38,33 @@ jobs:
          test_rest
        ]
    steps:
-    - uses: actions/checkout@v3
-    - name: Set up python 3.7
-      uses: actions/setup-python@v3
+    - uses: actions/checkout@v4
+    - name: Set up python 3.9
+      uses: actions/setup-python@v5
      with:
-        python-version: 3.7
-    - name: Activate python cache
-      uses: actions/cache@v3
-      with:
-        path: |
-          ${{ env.pythonLocation }}
-          ${{ env.HF_HOME }}
-        key: ${{ env.pythonLocation }}-${{ matrix.test-kind }}-${{ hashFiles('setup.py') }}
+        python-version: 3.9
+        cache: 'pip'
+        cache-dependency-path: 'setup.py'

    - name: Install the library
      run: |
-        pip install --upgrade pip
        if [[ ${{ matrix.test-kind }} = test_prod ]]; then pip install -e .[test_prod]; fi
        if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi
        if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi
-        if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torch==1.6.0; fi
-        pip install pytest-reportlog
+        if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torchvision==0.18.1 torch==2.3.1; fi
+        pip install pytest-reportlog tabulate setuptools importlib_metadata
+
+    - name: Show installed libraries
+      run: |
+        pip freeze

    - name: Run Tests
+      env:
+        PYTORCH_VERSION: ${{ matrix.pytorch-version }}
      run: |
        make ${{ matrix.test-kind }}

    - name: Generate Report
      if: always()
      run: |
        python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
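To make the matrix concrete, here is what the install and test steps above resolve to for the `minimum` / `test_core` combination, using only the versions and commands already shown in the workflow:

```bash
# Equivalent of "Install the library" for pytorch-version=minimum, test-kind=test_core.
pip install -e .[testing,test_trackers]
pip install torchvision==0.18.1 torch==2.3.1
pip install pytest-reportlog tabulate setuptools importlib_metadata

# The test step then simply dispatches to the matching Make target.
PYTORCH_VERSION=minimum make test_core
```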
.github/workflows/test_imports.yml (new file, 55 lines)

name: Run Import Tests

on:
  pull_request:
    paths:
      - "src/**"
      - "tests/**"
      - ".github/**"
      - "examples/**"
      - "setup.py"
    types: [opened, synchronize, reopened]

env:
  HF_HOME: ~/hf_cache
  TESTING_MOCKED_DATALOADERS: "1"
  IS_GITHUB_CI: "1"

jobs:
  run-tests:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        pytorch-version: [
          latest,
          minimum,
        ]
    steps:
      - uses: actions/checkout@v4
      - name: Set up python 3.9
        uses: actions/setup-python@v5
        with:
          python-version: 3.9
          cache: 'pip'
          cache-dependency-path: 'setup.py'

      - name: Install the library
        run: |
          pip install -e .
          pip install pytest-reportlog tabulate setuptools git+https://github.com/muellerzr/import-timer

      - name: Show installed libraries
        run: |
          pip freeze

      - name: Run Import Tests
        env:
          PYTORCH_VERSION: ${{ matrix.pytorch-version }}
        run: |
          pytest -sv tests/test_imports.py

      - name: Generate Report
        if: always()
        run: |
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
.github/workflows/trufflehog.yml
vendored
Normal file
15
.github/workflows/trufflehog.yml
vendored
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
on:
|
||||||
|
push:
|
||||||
|
|
||||||
|
name: Secret Leaks
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
trufflehog:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
- name: Secret Scanning
|
||||||
|
uses: trufflesecurity/trufflehog@main
|
||||||
.github/workflows/upload_pr_documentation.yml (new file, 16 lines)

name: Upload PR Documentation

on:
  workflow_run:
    workflows: ["Build PR Documentation"]
    types:
      - completed

jobs:
  build:
    uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
    with:
      package_name: accelerate
    secrets:
      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
      comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
.gitignore (5 changed lines)

@@ -138,4 +138,7 @@ dmypy.json
.DS_Store

# More test things
wandb
+
+# ruff
+.ruff_cache
.pre-commit-config.yaml (new file, 13 lines)

repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.2.1
    hooks:
      - id: ruff
        args:
          - --fix
      - id: ruff-format
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: check-merge-conflict
      - id: check-yaml
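A short usage sketch for this config, assuming `pre-commit` is installed in your environment:

```bash
pip install pre-commit
pre-commit install            # register the hooks defined in .pre-commit-config.yaml
pre-commit run --all-files    # run ruff, ruff-format and the basic checks across the repo
```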
@@ -123,12 +123,18 @@ Follow these steps to start contributing:
4. Set up a development environment by running the following command in a conda or a virtual environment you've created for working on this library:

   ```bash
-   $ pip install -e ".[quality]"
+   $ pip install -e ".[dev]"
   ```
+
+   This will install all testing and linting/code quality dependencies for the library (see `quality`, `test_dev`,
+   `test_prod` targets in [`setup.py`](./setup.py)).

   (If accelerate was already installed in the virtual environment, remove
   it with `pip uninstall accelerate` before reinstalling it in editable
-   mode with the `-e` flag.)
+   mode with the `-e` flag).
+
+   Alternatively, if you are using [Visual Studio Code](https://code.visualstudio.com/Download), the fastest way to get set up is by using
+   the provided Dev Container. Documentation on how to get started with dev containers is available [here](https://code.visualstudio.com/docs/remote/containers).

5. Develop the features on your branch.

@@ -149,7 +155,7 @@ Follow these steps to start contributing:
   $ make test
   ```

-   `accelerate` relies on `black` and `isort` to format its source code
+   `accelerate` relies on `ruff` to format its source code
   consistently. After you make changes, apply automatic style corrections and code verifications
   that can't be automated in one go with:

@@ -162,13 +168,21 @@ Follow these steps to start contributing:
   $ make style
   ```

-   `accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality
+   `accelerate` also uses a few custom scripts to check for coding mistakes. Quality
   control runs in CI, however you can also run the same checks with:

   ```bash
   $ make quality
   ```
+
+   You can also set up [`pre-commit`](https://pre-commit.com/) to run these checks
+   automatically as Git commit hooks.
+
+   ```bash
+   $ pip install pre-commit
+   $ pre-commit install
+   ```

   Once you're happy with your changes, add changed files using `git add` and
   make a commit with `git commit` to record your changes locally:

@@ -232,4 +246,4 @@ $ python -m pytest -sv ./tests
In fact, that's how `make test` is implemented (sans the `pip install` line)!

You can specify a smaller set of tests in order to test only the feature
you're working on.
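For example (the module name and keyword below are only illustrative), a narrower run could look like:

```bash
# Run a single test module, or filter the whole suite by keyword.
python -m pytest -sv ./tests/test_utils.py
python -m pytest -sv ./tests -k "deepspeed"
```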
Makefile (87 changed lines)

@@ -1,6 +1,6 @@
-.PHONY: quality style test docs
+.PHONY: quality style test docs utils

-check_dirs := tests src examples benchmarks
+check_dirs := .

# Check that source code meets quality standards

@@ -8,57 +8,94 @@ extra_quality_checks:
    python utils/check_copies.py
    python utils/check_dummies.py
    python utils/check_repo.py
-    python utils/style_doc.py src/accelerate docs/source --max_len 119
+    doc-builder style src/accelerate docs/source --max_len 119

# this target runs checks on all files
quality:
-    black --check $(check_dirs)
-    isort --check-only $(check_dirs)
-    flake8 $(check_dirs)
-    python utils/style_doc.py src/accelerate docs/source --max_len 119 --check_only
+    ruff check $(check_dirs)
+    ruff format --check $(check_dirs)
+    doc-builder style src/accelerate docs/source --max_len 119 --check_only

# Format source code automatically and check is there are any problems left that need manual fixing
style:
-    black $(check_dirs)
-    isort $(check_dirs)
-    python utils/style_doc.py src/accelerate docs/source --max_len 119
+    ruff check $(check_dirs) --fix
+    ruff format $(check_dirs)
+    doc-builder style src/accelerate docs/source --max_len 119

# Run tests for the library
-test:
-    python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log 'all.log',)
+test_core:
+    python -m pytest -s -v ./tests/ \
+    --ignore=./tests/test_big_modeling.py \
+    --ignore=./tests/test_modeling_utils.py \
+    --ignore=./tests/test_examples.py \
+    --ignore=./tests/test_cli.py \
+    --ignore=./tests/deepspeed \
+    --ignore=./tests/fsdp \
+    --ignore=./tests/tp \
+    $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_core.log",)
+
+test_cli:
+    python -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_cli.log",)

test_big_modeling:
-    python -m pytest -s -v ./tests/test_big_modeling.py $(if $(IS_GITHUB_CI),--report-log 'big_modeling.log',)
+    python -m pytest -s -v ./tests/test_big_modeling.py ./tests/test_modeling_utils.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_big_modeling.log",)

-test_core:
-    python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \
-    --ignore=./tests/fsdp $(if $(IS_GITHUB_CI),--report-log 'core.log',)
-
test_deepspeed:
-    python -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log 'deepspeed.log',)
+    python -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_deepspeed.log",)

test_fsdp:
-    python -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log 'fsdp.log',)
+    python -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_fsdp.log",)
+
+test_tp:
+    python -m pytest -s -v ./tests/tp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_tp.log",)
+
+# Since the new version of pytest will *change* how things are collected, we need `deepspeed` to
+# run after test_core and test_cli
+test:
+    $(MAKE) test_core
+    $(MAKE) test_cli
+    $(MAKE) test_big_modeling
+    $(MAKE) test_deepspeed
+    $(MAKE) test_fsdp
+    $(MAKE) test_tp

test_examples:
-    python -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log 'examples.log',)
+    python -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_examples.log",)

# Broken down example tests for the CI runners
test_integrations:
-    python -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log 'integrations.log',)
+    python -m pytest -s -v ./tests/fsdp ./tests/tp ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_integrations.log",)

test_example_differences:
-    python -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log 'example_diff.log',)
+    python -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_example_diff.log",)

test_checkpoint_epoch:
-    python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "by_epoch" $(if $(IS_GITHUB_CI),--report-log 'checkpoint_epoch.log',)
+    python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_checkpoint_epoch.log",)

test_checkpoint_step:
-    python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "by_step" $(if $(IS_GITHUB_CI),--report-log 'checkpoint_step.log',)
+    python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "by_step" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_checkpoint_step.log",)

# Same as test but used to install only the base dependencies
test_prod:
    $(MAKE) test_core

test_rest:
-    python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "not by_step and not by_epoch" $(if $(IS_GITHUB_CI),--report-log 'rest.log',)
+    python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "not by_step and not by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_rest.log",)
+
+# For developers to prepare a release
+prepare_release:
+    rm -rf dist build
+    python setup.py bdist_wheel sdist
+
+# Make sure this is ran in a fresh venv of some form
+install_test_release:
+    pip uninstall accelerate -y
+    pip install -i https://testpypi.python.org/pypi --extra-index-url https://pypi.org/simple accelerate$(if $(version),==$(version),)
+
+# Run as `make target=testpypi upload_release`
+upload_release:
+    @if [ "$(target)" != "testpypi" ] && [ "$(target)" != "pypi" ]; then \
+        echo "Error: target must be either 'testpypi' or 'pypi'"; \
+        exit 1; \
+    fi
+    twine upload dist/* -r $(target)
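The `--report-log` plumbing in these targets only activates when `IS_GITHUB_CI` is set. A sketch of invoking a target the way the CI jobs do (the environment variable values here are just examples):

```bash
# Produce a per-version pytest report log, as the CI jobs do.
pip install pytest-reportlog tabulate
IS_GITHUB_CI=1 PYTORCH_VERSION=2.3.1 make test_core
# This leaves a "2.3.1_core.log" report that utils/log_reports.py can summarize.
python utils/log_reports.py
```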
README.md (85 changed lines)

@@ -16,28 +16,18 @@ limitations under the License.
<p align="center">
    <br>
-    <img src="docs/source/imgs/accelerate_logo.png" width="400"/>
+    <img src="https://raw.githubusercontent.com/huggingface/accelerate/main/docs/source/imgs/accelerate_logo.png" width="400"/>
    <br>
<p>

<p align="center">
-    <!-- Uncomment when CircleCI is setup
-    <a href="https://circleci.com/gh/huggingface/accelerate">
-        <img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master">
-    </a>
+    <!-- Uncomment when CircleCI is set up
+    <a href="https://circleci.com/gh/huggingface/accelerate"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master"></a>
    -->
-    <a href="https://github.com/huggingface/accelerate/blob/main/LICENSE">
-        <img alt="License" src="https://img.shields.io/github/license/huggingface/accelerate.svg?color=blue">
-    </a>
-    <a href="https://huggingface.co/docs/accelerate/index.html">
-        <img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/accelerate/index.html.svg?down_color=red&down_message=offline&up_message=online">
-    </a>
-    <a href="https://github.com/huggingface/accelerate/releases">
-        <img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/accelerate.svg">
-    </a>
-    <a href="https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md">
-        <img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
-    </a>
+    <a href="https://github.com/huggingface/accelerate/blob/main/LICENSE"><img alt="License" src="https://img.shields.io/github/license/huggingface/accelerate.svg?color=blue"></a>
+    <a href="https://huggingface.co/docs/accelerate/index.html"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/accelerate/index.html.svg?down_color=red&down_message=offline&up_message=online"></a>
+    <a href="https://github.com/huggingface/accelerate/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/accelerate.svg"></a>
+    <a href="https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
</p>

<h3 align="center">

@@ -91,7 +81,7 @@ Here is an example:
  optimizer.step()
```

-As you can see in this example, by adding 5-lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp16).
+As you can see in this example, by adding 5-lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp8, fp16, bf16).

In particular, the same code can then be run without modification on your local machine for debugging or your training environment.

@@ -132,11 +122,11 @@ In particular, the same code can then be run without modification on your local
  optimizer.step()
```

-Want to learn more? Check out the [documentation](https://huggingface.co/docs/accelerate) or have look at our [examples](https://github.com/huggingface/accelerate/tree/main/examples).
+Want to learn more? Check out the [documentation](https://huggingface.co/docs/accelerate) or have a look at our [examples](https://github.com/huggingface/accelerate/tree/main/examples).

## Launching script

-🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.launch` or to write a specific launcher for TPU training!
+🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.run` or to write a specific launcher for TPU training!
On your machine(s) just run:

```bash

@@ -155,28 +145,48 @@ For instance, here is how you would run the GLUE example on the MRPC task (from
accelerate launch examples/nlp_example.py
```

-This CLI tool is **optional**, and you can still use `python my_script.py` or `python -m torch.distributed.launch my_script.py` at your convenance.
+This CLI tool is **optional**, and you can still use `python my_script.py` or `python -m torchrun my_script.py` at your convenience.
+
+You can also directly pass in the arguments you would to `torchrun` as arguments to `accelerate launch` if you wish to not run` accelerate config`.
+
+For example, here is how to launch on two GPUs:
+
+```bash
+accelerate launch --multi_gpu --num_processes 2 examples/nlp_example.py
+```
+
+To learn more, check the CLI documentation available [here](https://huggingface.co/docs/accelerate/package_reference/cli).
+
+Or view the configuration zoo [here](https://github.com/huggingface/accelerate/blob/main/examples/config_yaml_templates/)

## Launching multi-CPU run using MPI

🤗 Here is another way to launch multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well.
Once you have MPI setup on your cluster, just run:
+```bash
+accelerate config
+```
+Answer the questions that are asked, selecting to run using multi-CPU, and answer "yes" when asked if you want accelerate to launch mpirun.
+Then, use `accelerate launch` with your script like:
+```bash
+accelerate launch examples/nlp_example.py
+```
+Alternatively, you can use mpirun directly, without using the CLI like:
```bash
mpirun -np 2 python examples/nlp_example.py
```

## Launching training using DeepSpeed

-🤗 Accelerate supports training on single/multiple GPUs using DeepSpeed. To use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you desire to tweak your DeepSpeed related args from your python script, we provide you the `DeepSpeedPlugin`.
+🤗 Accelerate supports training on single/multiple GPUs using DeepSpeed. To use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you desire to tweak your DeepSpeed related args from your Python script, we provide you the `DeepSpeedPlugin`.

```python
-from accelerator import Accelerator, DeepSpeedPlugin
+from accelerate import Accelerator, DeepSpeedPlugin

-# deepspeed needs to know your gradient accumulation steps before hand, so don't forget to pass it
+# deepspeed needs to know your gradient accumulation steps beforehand, so don't forget to pass it
# Remember you still need to do gradient accumulation by yourself, just like you would have done without deepspeed
deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)
-accelerator = Accelerator(fp16=True, deepspeed_plugin=deepspeed_plugin)
+accelerator = Accelerator(mixed_precision='fp16', deepspeed_plugin=deepspeed_plugin)

# How to save your 🤗 Transformer?
accelerator.wait_for_everyone()

@@ -200,7 +210,7 @@ An example can be found in [this notebook](https://github.com/huggingface/notebo
## Why should I use 🤗 Accelerate?

-You should use 🤗 Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library, In fact the whole API of 🤗 Accelerate is in one class, the `Accelerator` object.
+You should use 🤗 Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library. In fact, the whole API of 🤗 Accelerate is in one class, the `Accelerator` object.

## Why shouldn't I use 🤗 Accelerate?

@@ -208,18 +218,25 @@ You shouldn't use 🤗 Accelerate if you don't want to write a training loop you
## Frameworks using 🤗 Accelerate

-If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around your training loop, some frameworks that are built on top of 🤗 Accelerate are listed below:
+If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around its capabilities, some frameworks and libraries that are built on top of 🤗 Accelerate are listed below:

+* [Amphion](https://github.com/open-mmlab/Amphion) is a toolkit for Audio, Music, and Speech Generation. Its purpose is to support reproducible research and help junior researchers and engineers get started in the field of audio, music, and speech generation research and development.
* [Animus](https://github.com/Scitator/animus) is a minimalistic framework to run machine learning experiments. Animus highlights common "breakpoints" in ML experiments and provides a unified interface for them within [IExperiment](https://github.com/Scitator/animus/blob/main/animus/core.py#L76).
-* [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model train, and inference logic.
+* [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model training, and inference logic.
* [fastai](https://github.com/fastai/fastai#installing) is a PyTorch framework for Deep Learning that simplifies training fast and accurate neural nets using modern best practices. fastai provides a [Learner](https://docs.fast.ai/learner.html#Learner) to handle the training, fine-tuning, and inference of deep learning algorithms.
+* [Finetuner](https://github.com/jina-ai/finetuner) is a service that enables models to create higher-quality embeddings for semantic search, visual similarity search, cross-modal text<->image search, recommendation systems, clustering, duplication detection, anomaly detection, or other uses.
+* [InvokeAI](https://github.com/invoke-ai/InvokeAI) is a creative engine for Stable Diffusion models, offering industry-leading WebUI, terminal usage support, and serves as the foundation for many commercial products.
* [Kornia](https://kornia.readthedocs.io/en/latest/get-started/introduction.html) is a differentiable library that allows classical computer vision to be integrated into deep learning models. Kornia provides a [Trainer](https://kornia.readthedocs.io/en/latest/x.html#kornia.x.Trainer) with the specific purpose to train and fine-tune the supported deep learning algorithms within the library.
-* [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centred around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency; enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves!
+* [Open Assistant](https://projects.laion.ai/Open-Assistant/) is a chat-based assistant that understands tasks, can interact with their party systems, and retrieve information dynamically to do so.
+* [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centered around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency; enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves!
+* [Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) is an open-source browser-based easy-to-use interface based on the Gradio library for Stable Diffusion.
+* [torchkeras](https://github.com/lyhue1991/torchkeras) is a simple tool for training pytorch model just in a keras style, a dynamic and beautiful plot is provided in notebook to monitor your loss or metric.
+* [transformers](https://github.com/huggingface/transformers) as a tool for helping train state-of-the-art machine learning models in PyTorch, Tensorflow, and JAX. (Accelerate is the backend for the PyTorch side).

## Installation

-This repository is tested on Python 3.6+ and PyTorch 1.4.0+
+This repository is tested on Python 3.8+ and PyTorch 1.10.0+

You should install 🤗 Accelerate in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).

@@ -240,9 +257,11 @@ pip install accelerate
- multi-GPU on one node (machine)
- multi-GPU on several nodes (machines)
- TPU
-- FP16 with native AMP (apex on the roadmap)
+- FP16/BFloat16 mixed precision
+- FP8 mixed precision with [Transformer Engine](https://github.com/NVIDIA/TransformerEngine) or [MS-AMP](https://github.com/Azure/MS-AMP/)
- DeepSpeed support (Experimental)
- PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental)
+- Megatron-LM support (Experimental)

## Citing 🤗 Accelerate

@@ -251,7 +270,7 @@ If you use 🤗 Accelerate in your publication, please cite it by using the foll
```bibtex
@Misc{accelerate,
  title = {Accelerate: Training and inference at scale made simple, efficient and adaptable.},
-  author = {Sylvain Gugger, Lysandre Debut, Thomas Wolf, Philipp Schmid, Zachary Mueller, Sourab Mangrulkar},
+  author = {Sylvain Gugger and Lysandre Debut and Thomas Wolf and Philipp Schmid and Zachary Mueller and Sourab Mangrulkar and Marc Sun and Benjamin Bossan},
  howpublished = {\url{https://github.com/huggingface/accelerate}},
  year = {2022}
}
```
@ -1,46 +1,5 @@

# Benchmarks

The folders below contain suites to test various functionalities in Accelerate.

See their relevant README.md's for more information.
46  benchmarks/big_model_inference/README.md  (new file)
@ -0,0 +1,46 @@

# Big model inference benchmarks

Running inference with Accelerate on big models.

## Setup

These benchmarks use the `transformers` library:

```bash
pip install transformers
```

To reproduce or test a new setup, run

```py
python big_model_inference.py model_name
```

This script supports `gpt-j-6b`, `gpt-neox`, `opt` (30B version) and `T0pp` out of the box, but you can specify any valid checkpoint for `model_name`.

To force a different `torch_dtype` than the one in the config: `--torch_dtype xxx`.

If you get an error linked to disk offload, you need to add the option `--disk-offload`.

## Results

On a setup with two Titan RTXs (24GB of GPU RAM each) and 32GB of CPU RAM, we get the following benchmarks (T0pp does not run in float16, which is why it's not included).

| Model | Model load time | Generation time | dtype | GPU 0 use | GPU 1 use | CPU use | Disk offload |
|:-----:|:---------------:|:---------------:|:-----:|:---------:|:---------:|:-------:|:------------:|
| GPT-J-6B | 8.7s | 0.05s per token | float16 | 11.7GB | 0GB | 0GB | no |
| GPT-J-6B | 12.4s | 0.06s per token | float32 | 21.9GB | 1.5GB | 0GB | no |
| GPT-Neo-X-20B | 30.9s | 0.08s per token | float16 | 21.5GB | 18GB | 0GB | no |
| GPT-Neo-X-20B | 78.2s | 10.72s per token | float32 | 20.3GB | 22.7GB | 24.4GB | yes |
| T0pp (11B) | 29.4s | 0.05s per token | float32 | 21.1GB | 21.3GB | 0GB | no |
| OPT-30B | 34.5s | 2.37s per token | float16 | 20.7GB | 22.3GB | 14.1GB | no |
| OPT-30B | 112.3s | 33.9s per token | float32 | 20.2GB | 21.2GB | 23.5GB | yes |

Note on the results:
- using two GPUs instead of one does not slow down generation
- using CPU offload slows down a bit (see OPT-30B)
- using disk offload slows down a lot (prefetching still needs to be implemented)

You will also note that Accelerate does not use any more GPU and CPU RAM than necessary:
- peak GPU memory is exactly the size of the model put on a given GPU
- peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded to CPU, whichever is bigger.
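For context, the loading path these benchmarks exercise is Accelerate's big model inference through `device_map="auto"` in `transformers`. The snippet below is an editor-added, minimal sketch (not part of the benchmark script itself); the checkpoint names are the GPT-J ones used in `DEFAULT_MODELS` below, and any valid causal-LM checkpoint would work in their place.

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Sharded GPT-J checkpoint used by the benchmark; any valid checkpoint works here.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
model = AutoModelForCausalLM.from_pretrained(
    "sgugger/sharded-gpt-j-6B",
    device_map="auto",          # Accelerate splits the weights across GPUs, CPU and (optionally) disk
    torch_dtype=torch.float16,  # matches the float16 rows in the table above
)

inputs = tokenizer("Hello, my name is", return_tensors="pt").to(0)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```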
@ -16,12 +16,12 @@ import argparse

import time

import torch
import transformers
from measures_util import end_measure, log_measures, start_measure
from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer

from accelerate.utils import compute_module_sizes


DEFAULT_MODELS = {
    "gpt-j-6b": {"is_causal": True, "model": "sgugger/sharded-gpt-j-6B", "tokenizer": "EleutherAI/gpt-j-6B"},
@ -1,10 +1,28 @@

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import threading
import time

import psutil
import torch

from accelerate.test_utils.testing import get_backend


torch_device_type, _, _ = get_backend()
torch_accelerator_module = getattr(torch, torch_device_type, torch.cuda)


class PeakCPUMemory:
@ -42,16 +60,16 @@ def start_measure():

    measures = {"time": time.time()}

    gc.collect()
    torch_accelerator_module.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch_accelerator_module.device_count()):
        measures[str(i)] = torch_accelerator_module.memory_allocated(i)
    torch_accelerator_module.reset_peak_memory_stats()

    return measures

@ -61,16 +79,16 @@ def end_measure(start_measures):

    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch_accelerator_module.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch_accelerator_module.device_count()):
        measures[str(i)] = (torch_accelerator_module.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch_accelerator_module.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures

@ -78,9 +96,9 @@ def end_measure(start_measures):

def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch_accelerator_module.device_count()):
        print(f"- {torch_device_type} {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- {torch_device_type} {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
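A minimal usage sketch of the measurement helpers above (editor-added; it assumes `measures_util.py` is importable and that there is some workload to profile between the two calls):

```py
from measures_util import end_measure, log_measures, start_measure

start_measures = start_measure()
# ... run the step to profile, e.g. model loading or a generation loop ...
measures = end_measure(start_measures)
log_measures(measures, "Model loading")
```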
12  benchmarks/fp8/ms_amp/Dockerfile  (new file)
@ -0,0 +1,12 @@

FROM ghcr.io/azure/msamp

RUN pip install transformers evaluate datasets
RUN git clone https://github.com/huggingface/accelerate

RUN cd accelerate && \
    pip install -e . && \
    cd benchmarks/fp8

CMD ["bash"]
123  benchmarks/fp8/ms_amp/ddp.py  (new file)
@ -0,0 +1,123 @@

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script tests to ensure that `accelerate` performs at the same level as raw `MS-AMP`.

This particular script verifies this for DDP training.
"""

import evaluate
import msamp
import torch
from fp8_utils import evaluate_model, get_training_utilities
from torch.nn.parallel import DistributedDataParallel as DDP

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, get_grad_scaler, set_seed


MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")


def train_baseline(opt_level="O2"):
    set_seed(42)
    scaler = get_grad_scaler()
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
    accelerator = Accelerator()
    device = accelerator.device

    model, optimizer = msamp.initialize(model, optimizer, opt_level=opt_level)

    model.to(device)

    # Convert the model to DDP
    device_ids, output_device = [accelerator.local_process_index], accelerator.local_process_index
    model = DDP(model, device_ids=device_ids, output_device=output_device)

    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    for i, batch in enumerate(train_dataloader):
        with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
            outputs = model(**batch)
            loss = outputs.loss
        scaler.scale(loss).backward()
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


def train_integration(opt_level="O2"):
    kwargs_handlers = [FP8RecipeKwargs(backend="msamp", opt_level=opt_level)]
    AcceleratorState()._reset_state(True)
    accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
        MODEL_NAME, accelerator=accelerator
    )

    model, optimizer = accelerator.prepare(model, optimizer)
    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()
    for i, batch in enumerate(train_dataloader):
        with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
            outputs = model(**batch)
            loss = outputs.loss
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


if __name__ == "__main__":
    for opt_level in ["O1", "O2"]:
        baseline_not_trained, baseline_trained = train_baseline(opt_level)
        accelerator_not_trained, accelerator_trained = train_integration(opt_level)
        assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
            f"Accuracy not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
        )
        assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
            f"F1 not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
        )
        assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
            f"Accuracy not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
        )
        assert baseline_trained["f1"] == accelerator_trained["f1"], (
            f"F1 not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained['f1']} == {accelerator_trained['f1']}"
        )
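For readers skimming the comparison, the Accelerate-side setup that `train_integration` exercises condenses to a few lines. This is an editor-added sketch that simply mirrors the script above, not an independent recipe; the `opt_level` value is just the default used there.

```py
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

# Select the MS-AMP backend for FP8 mixed precision, as done in train_integration above.
accelerator = Accelerator(
    mixed_precision="fp8",
    kwargs_handlers=[FP8RecipeKwargs(backend="msamp", opt_level="O2")],
)
# model, optimizer = accelerator.prepare(model, optimizer)
```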
161  benchmarks/fp8/ms_amp/distrib_deepspeed.py  (new file)
@ -0,0 +1,161 @@

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script tests to ensure that `accelerate` performs at the same level as raw `MS-AMP`.

This particular script verifies this for DeepSpeed training.

NOTE: MS-AMP does *not* support ZeRO-3.
"""

# import msamp.deepspeed as msamp_deepspeed
import evaluate
import torch
from fp8_utils import evaluate_model, get_training_utilities
from msamp import deepspeed as msamp_deepspeed

from accelerate import Accelerator, DeepSpeedPlugin
from accelerate.state import AcceleratorState
from accelerate.utils import set_seed


MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")


def train_baseline(zero_stage: int = 1, opt_level: str = "O1"):
    set_seed(42)
    accelerator = Accelerator()
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
        MODEL_NAME, accelerator=accelerator
    )

    import numpy as np

    config = {
        "train_batch_size": 32,
        "train_micro_batch_size_per_gpu": 16,
        "gradient_accumulation_steps": 1,
        "zero_optimization": {
            "stage": zero_stage,
            "offload_optimizer": {"device": "none", "nvme_path": None},
            "offload_param": {"device": "none", "nvme_path": None},
        },
        "gradient_clipping": 1.0,
        "steps_per_print": np.inf,
        "bf16": {"enabled": True},
        "fp16": {"enabled": False},
        "zero_allow_untested_optimizer": True,
        "msamp": {
            "enabled": True,
            "opt_level": opt_level,
        },
    }
    (
        model,
        optimizer,
        _,
        _,
    ) = msamp_deepspeed.initialize(
        model=model,
        optimizer=optimizer,
        config_params=config,
    )

    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    for _ in range(2):
        for batch in train_dataloader:
            outputs = model(**batch)
            loss = outputs.loss
            model.backward(loss)
            model.step()
            for _ in range(accelerator.num_processes):
                lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.destroy()
    torch.cuda.empty_cache()
    AcceleratorState()._reset_state(True)
    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


def train_integration(zero_stage: int = 1, opt_level: str = "O1"):
    set_seed(42)
    deepspeed_plugin = DeepSpeedPlugin(
        zero_stage=zero_stage,
        enable_msamp=True,
        msamp_opt_level=opt_level,
    )
    accelerator = Accelerator(mixed_precision="fp8", deepspeed_plugin=deepspeed_plugin)
    accelerator.state.deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = 16

    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
        MODEL_NAME, accelerator=accelerator
    )

    model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()
    for _ in range(2):
        for batch in train_dataloader:
            outputs = model(**batch)
            loss = outputs.loss
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.destroy()
    torch.cuda.empty_cache()
    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    AcceleratorState()._reset_state(True)
    return base_model_results, trained_model_results


if __name__ == "__main__":
    for zero_stage in [1, 2]:
        for opt_level in ["O1", "O2", "O3"]:
            baseline_not_trained, baseline_trained = train_baseline(zero_stage, opt_level)
            accelerator_not_trained, accelerator_trained = train_integration(zero_stage, opt_level)
            assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
                f"ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
            )
            assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
                f"ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
            )
            assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
                f"ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
            )
            assert baseline_trained["f1"] == accelerator_trained["f1"], (
                f"ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
            )

    torch.distributed.destroy_process_group()
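For reference, the integration path above enables MS-AMP through the DeepSpeed plugin rather than a kwargs handler. A condensed, editor-added sketch mirroring `train_integration` (the specific stage and `opt_level` values are only examples):

```py
from accelerate import Accelerator, DeepSpeedPlugin

# MS-AMP only supports ZeRO stages 1 and 2 (see the NOTE in the docstring above).
deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, enable_msamp=True, msamp_opt_level="O2")
accelerator = Accelerator(mixed_precision="fp8", deepspeed_plugin=deepspeed_plugin)
```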
118  benchmarks/fp8/ms_amp/fp8_utils.py  (new file)
@ -0,0 +1,118 @@

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch


def get_dataloaders(model_name: str, batch_size: int = 16):
    from datasets import load_dataset
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["idx", "sentence1", "sentence2"],
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        return tokenizer.pad(
            examples,
            padding="longest",
            pad_to_multiple_of=16,  # Specific for FP8
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=16,
        drop_last=True,
    )

    return train_dataloader, eval_dataloader


def get_training_utilities(model_name: str, batch_size: int = 16, accelerator=None):
    """
    Returns a tuple of:
    - Model
    - Optimizer
    - Train dataloader (prepared)
    - Eval dataloader (prepared)
    - LR Scheduler
    Suitable for training on the MRPC dataset
    """
    from torch.optim import AdamW
    from transformers import AutoModelForSequenceClassification, get_linear_schedule_with_warmup

    from accelerate import Accelerator

    if accelerator is None:
        accelerator = Accelerator()
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    train_dataloader, eval_dataloader = get_dataloaders(model_name, batch_size)
    optimizer = AdamW(model.parameters(), lr=0.0001)
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=len(train_dataloader) * 2,
    )
    train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader)
    return model, optimizer, train_dataloader, eval_dataloader, lr_scheduler


def get_named_parameters(model):
    """
    Same thing as `Accelerator.get_named_parameters` Returns a list of the named parameters of the model (extracted
    from parallel)
    """
    from accelerate.utils import extract_model_from_parallel

    model = extract_model_from_parallel(model)
    return {n: p for n, p in model.named_parameters()}


def evaluate_model(model, dataloader, metric, accelerator=None):
    "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
    model.eval()
    for step, batch in enumerate(dataloader):
        with torch.no_grad():
            # W/ MS-AMP, we need to cast while evaluating
            with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        if accelerator is not None and accelerator.num_processes > 1:
            predictions, references = accelerator.gather_for_metrics((predictions, references))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()
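An editor-added sketch of how these helpers are consumed by the scripts in this folder (single process; it assumes a CUDA GPU and the `evaluate`/`datasets` dependencies installed by the Dockerfile above):

```py
import evaluate
from fp8_utils import evaluate_model, get_training_utilities

metric = evaluate.load("glue", "mrpc")
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities("bert-base-cased")
baseline = evaluate_model(model.to("cuda"), eval_dataloader, metric)
print(baseline)  # {'accuracy': ..., 'f1': ...}
```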
118  benchmarks/fp8/ms_amp/non_distributed.py  (new file)
@ -0,0 +1,118 @@

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script tests to ensure that `accelerate` performs at the same level as raw `MS-AMP`.

This particular script verifies this for single GPU training.
"""

import evaluate
import msamp
import torch
from fp8_utils import evaluate_model, get_training_utilities

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, get_grad_scaler, set_seed


MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")


def train_baseline(opt_level="O2"):
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)

    model, optimizer = msamp.initialize(model, optimizer, opt_level=opt_level)
    model.to("cuda")

    base_model_results = evaluate_model(model, eval_dataloader, METRIC)
    model.train()
    scaler = get_grad_scaler()

    for batch in train_dataloader:
        batch = batch.to("cuda")
        with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
            outputs = model(**batch)
            loss = outputs.loss
        loss = scaler.scale(loss)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


def train_integration(opt_level="O2"):
    kwargs_handlers = [FP8RecipeKwargs(backend="msamp", opt_level=opt_level)]
    AcceleratorState()._reset_state(True)
    accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
        MODEL_NAME, accelerator=accelerator
    )

    model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
    base_model_results = evaluate_model(model, eval_dataloader, METRIC)
    model.train()

    for batch in train_dataloader:
        outputs = model(**batch)
        loss = outputs.loss
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


if __name__ == "__main__":
    for opt_level in ["O1", "O2"]:
        baseline_not_trained, baseline_trained = train_baseline(opt_level)
        accelerator_not_trained, accelerator_trained = train_integration(opt_level)

        assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
            f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
        )
        assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
            f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
        )
        assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
            f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
        )
        assert baseline_trained["f1"] == accelerator_trained["f1"], (
            f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
        )
12  benchmarks/fp8/torchao/Dockerfile  (new file)
@ -0,0 +1,12 @@

FROM nvcr.io/nvidia/pytorch:24.07-py3

RUN pip install transformers evaluate datasets
RUN git clone https://github.com/huggingface/accelerate.git

RUN cd accelerate && \
    pip install -e . && \
    cd benchmarks/fp8

RUN /bin/bash
32  benchmarks/fp8/torchao/README.md  (new file)
@ -0,0 +1,32 @@

# FP8 Benchmarks

Comparing and running [torchao](https://github.com/pytorch/ao/tree/main/torchao/float8) FP8 with accelerate

## Overview

This repo provides scripts which compare native `torchao` model training against `accelerate`'s own integration. Each modeling type is segmented out via a script, supporting the following:

* Single GPU training (`non_distributed.py`)
* Multi-GPU training via DistributedDataParallel (`ddp.py`)
* Fully Sharded Data Parallelism (`fsdp.py`)
* DeepSpeed ZeRO 1-3 (`distrib_deepspeed.py`)

To run them, it's recommended to use a Docker image (see the attached `Dockerfile`) and not install `torchao` manually.

## Running:

There are official Docker images located at `huggingface/accelerate:gpu-fp8-torchao-nightly` which can be used.

You can run all scripts using the core `accelerate launch` command without any `accelerate config` being needed.

For single GPU, run it via `python`:

```bash
python non_distributed.py
```

For the rest, run it via `accelerate launch`:

```bash
accelerate launch ddp.py # or distrib_deepspeed.py, fsdp.py
```
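On the Accelerate side, the scripts listed above all enable `torchao` FP8 the same way; the editor-added sketch below simply mirrors the `train_integration` functions that follow and is not an additional configuration step.

```py
from accelerate import Accelerator
from accelerate.utils import AORecipeKwargs

# AORecipeKwargs selects the torchao float8 backend for FP8 mixed precision.
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()])
# model, optimizer = accelerator.prepare(model, optimizer)
```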
158  benchmarks/fp8/torchao/ddp.py  (new file)
@ -0,0 +1,158 @@

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script tests to ensure that `accelerate` performs at the same level as raw `torchao`.

This particular script verifies this for DDP training.
"""

from functools import partial

import evaluate
import torch
from fp8_utils import get_training_utilities
from torch.nn.parallel import DistributedDataParallel as DDP
from torchao.float8 import convert_to_float8_training

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import AORecipeKwargs, set_seed


MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")


def evaluate_model(model, dataloader, metric, accelerator=None):
    "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
    model.eval()
    for step, batch in enumerate(dataloader):
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        if accelerator is not None and accelerator.num_processes > 1:
            predictions, references = accelerator.gather_for_metrics((predictions, references))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()


def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None):
    if isinstance(module, torch.nn.Linear):
        if module.in_features % 16 != 0 or module.out_features % 16 != 0:
            return False
    # For stability reasons, we skip the first and last linear layers
    # Otherwise can lead to the model not training or converging properly
    if fqn in (first_layer_name, last_layer_name):
        return False
    return True


def train_baseline():
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
    first_linear = None
    last_linear = None
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear):
            if first_linear is None:
                first_linear = name
            last_linear = name
    func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear)
    accelerator = Accelerator()
    device = accelerator.device
    model.to(device)

    convert_to_float8_training(model, module_filter_fn=func)

    # Convert the model to DDP
    device_ids, output_device = [accelerator.local_process_index], accelerator.local_process_index
    model = DDP(model, device_ids=device_ids, output_device=output_device)

    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    for batch in train_dataloader:
        with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
            batch = batch.to(device)
            outputs = model(**batch)
            loss = outputs.loss
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


def train_integration():
    AcceleratorState()._reset_state(True)
    accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()])
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
        MODEL_NAME, accelerator=accelerator
    )

    model, optimizer = accelerator.prepare(model, optimizer)
    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    for batch in train_dataloader:
        outputs = model(**batch)
        loss = outputs.loss
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


if __name__ == "__main__":
    baseline_not_trained, baseline_trained = train_baseline()
    accelerator_not_trained, accelerator_trained = train_integration()

    assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
    )
    assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
    )
    assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
    )
    assert baseline_trained["f1"] == accelerator_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
    )

    torch.distributed.destroy_process_group()
213  benchmarks/fp8/torchao/distrib_deepspeed.py  (new file)
@ -0,0 +1,213 @@

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script tests to ensure that `accelerate` performs at the same level as raw `torchao`.

This particular script verifies this for deepspeed training.
"""

from functools import partial
from unittest.mock import patch

import deepspeed
import evaluate
import torch
from fp8_utils import evaluate_model, get_training_utilities
from torchao.float8 import convert_to_float8_training
from transformers.integrations import HfDeepSpeedConfig

from accelerate import Accelerator, DeepSpeedPlugin
from accelerate.state import AcceleratorState
from accelerate.utils import AORecipeKwargs, set_seed


MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")


def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None):
    if isinstance(module, torch.nn.Linear):
        if module.in_features % 16 != 0 or module.out_features % 16 != 0:
            return False
    # For stability reasons, we skip the first and last linear layers
    # Otherwise can lead to the model not training or converging properly
    if fqn in (first_layer_name, last_layer_name):
        return False
    return True


def train_baseline(zero_stage: int = 1):
    set_seed(42)
    # This forces transformers to think Zero-3 Init should be used
    with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock:
        mock.return_value = zero_stage == 3

        config = HfDeepSpeedConfig(
            {
                "train_micro_batch_size_per_gpu": 16,
                "gradient_accumulation_steps": 1,
                "zero_optimization": {"stage": zero_stage},
            }
        )
        plugin = DeepSpeedPlugin(hf_ds_config=config)
        accelerator = Accelerator(deepspeed_plugin=plugin)
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
            MODEL_NAME, accelerator=accelerator
        )
    first_linear = None
    last_linear = None
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear):
            if first_linear is None:
                first_linear = name
            last_linear = name
    func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear)

    convert_to_float8_training(model, module_filter_fn=func)

    import numpy as np

    config = {
        "train_batch_size": 32,
        "train_micro_batch_size_per_gpu": 16,
        "gradient_accumulation_steps": 1,
        "zero_optimization": {
            "stage": zero_stage,
            "offload_optimizer": {"device": "none", "nvme_path": None},
            "offload_param": {"device": "none", "nvme_path": None},
            "stage3_gather_16bit_weights_on_model_save": False,
        },
        "gradient_clipping": 1.0,
        "steps_per_print": np.inf,
        "bf16": {"enabled": True},
        "fp16": {"enabled": False},
        "zero_allow_untested_optimizer": True,
    }

    (
        model,
        optimizer,
        _,
        lr_scheduler,
    ) = deepspeed.initialize(
        model=model,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        config_params=config,
    )

    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    model_outputs = []
    data = []

    for batch in train_dataloader:
        outputs = model(**batch)
        data.append(batch.to("cpu"))
        model_outputs.append(outputs.logits.to("cpu"))
        loss = outputs.loss
        model.backward(loss)
        model.step()
        for _ in range(accelerator.num_processes):
            lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.destroy()
    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    del config
    return base_model_results, trained_model_results, model_outputs, data


def train_integration(zero_stage: int = 1):
    set_seed(42)
    AcceleratorState()._reset_state(True)
    config = HfDeepSpeedConfig(
        {
            "train_micro_batch_size_per_gpu": 16,
            "gradient_accumulation_steps": 1,
            "zero_optimization": {"stage": zero_stage},
        }
    )
    deepspeed_plugin = DeepSpeedPlugin(
        hf_ds_config=config,
    )
    # This forces transformers to think Zero-3 Init should be used
    with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock:
        mock.return_value = zero_stage == 3
        accelerator = Accelerator(
            mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()], deepspeed_plugin=deepspeed_plugin
        )

        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
            MODEL_NAME, accelerator=accelerator
        )

    model, optimizer, lr_scheduler, train_dataloader, eval_dataloader = accelerator.prepare(
        model, optimizer, lr_scheduler, train_dataloader, eval_dataloader
    )
    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()
    model_outputs = []
    data = []
    for batch in train_dataloader:
        outputs = model(**batch)
        data.append(batch.to("cpu"))
        model_outputs.append(outputs.logits.to("cpu"))
        loss = outputs.loss
        accelerator.backward(loss)
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.destroy()
    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    del config
    return base_model_results, trained_model_results, model_outputs, data


if __name__ == "__main__":
    for zero_stage in [1, 2, 3]:
        baseline_not_trained, baseline_trained, baseline_outputs, baseline_data = train_baseline(zero_stage)
        accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration(
            zero_stage
        )
        assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
            f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
        )
        assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
            f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
        )
        assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
            f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
        )
        assert baseline_trained["f1"] == accelerator_trained["f1"], (
            f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
        )
        AcceleratorState()._reset_state(True)
    torch.distributed.destroy_process_group()
116  benchmarks/fp8/torchao/fp8_utils.py  (new file)
@ -0,0 +1,116 @@

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch


def get_dataloaders(model_name: str, batch_size: int = 16):
    from datasets import load_dataset
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["idx", "sentence1", "sentence2"],
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        return tokenizer.pad(
            examples,
            padding="longest",
            pad_to_multiple_of=16,  # Specific for FP8
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=16,
        drop_last=True,
    )

    return train_dataloader, eval_dataloader


def get_training_utilities(model_name: str, batch_size: int = 16, accelerator=None, prepare=True):
    """
    Returns a tuple of:
    - Model
    - Optimizer
    - Train dataloader (prepared)
    - Eval dataloader (prepared)
    - LR Scheduler
    Suitable for training on the MRPC dataset
    """
    from torch.optim import AdamW
    from transformers import AutoModelForSequenceClassification, get_linear_schedule_with_warmup

    from accelerate import Accelerator

    if accelerator is None:
        accelerator = Accelerator()
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    train_dataloader, eval_dataloader = get_dataloaders(model_name, batch_size)
    optimizer = AdamW(model.parameters(), lr=0.0001)
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=len(train_dataloader) * 2,
    )
    train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader)
    return model, optimizer, train_dataloader, eval_dataloader, lr_scheduler


def get_named_parameters(model):
    """
    Same thing as `Accelerator.get_named_parameters` Returns a list of the named parameters of the model (extracted
    from parallel)
    """
    from accelerate.utils import extract_model_from_parallel

    model = extract_model_from_parallel(model)
    return {n: p for n, p in model.named_parameters()}


def evaluate_model(model, dataloader, metric, accelerator=None):
    "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
    model.eval()
    for step, batch in enumerate(dataloader):
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        if accelerator is not None and accelerator.num_processes > 1:
            predictions, references = accelerator.gather_for_metrics((predictions, references))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()
benchmarks/fp8/torchao/fsdp.py (new file, 173 lines)

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script tests to ensure that `accelerate` performs at the same level as raw `torchao`.

This particular script verifies this for FSDP training.
"""

from functools import partial

import evaluate
import torch
from fp8_utils import get_training_utilities
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import MixedPrecision
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from torchao.float8 import convert_to_float8_training
from transformers.models.bert import BertLayer

from accelerate import Accelerator
from accelerate import FullyShardedDataParallelPlugin as FSDPPlugin
from accelerate.state import AcceleratorState
from accelerate.utils import AORecipeKwargs, set_seed


MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")

FSDP_WRAP_POLICY = partial(transformer_auto_wrap_policy, transformer_layer_cls={BertLayer})


def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None):
    if isinstance(module, torch.nn.Linear):
        if module.in_features % 16 != 0 or module.out_features % 16 != 0:
            return False
    # For stability reasons, we skip the first and last linear layers
    # Otherwise can lead to the model not training or converging properly
    if fqn in (first_layer_name, last_layer_name):
        return False
    return True


def evaluate_model(model, dataloader, metric, accelerator=None):
    "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
    model.eval()
    for step, batch in enumerate(dataloader):
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        if accelerator is not None and accelerator.num_processes > 1:
            predictions, references = accelerator.gather_for_metrics((predictions, references))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()


def train_baseline():
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
    first_linear = None
    last_linear = None
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear):
            if first_linear is None:
                first_linear = name
            last_linear = name
    func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear)
    accelerator = Accelerator()
    device = accelerator.device
    model.to(device)

    convert_to_float8_training(model, module_filter_fn=func)

    # Convert the model to FSDP
    model = FSDP(
        model,
        use_orig_params=True,
        mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
        auto_wrap_policy=FSDP_WRAP_POLICY,
    )

    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    for batch in train_dataloader:
        with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
            batch = batch.to(device)
            outputs = model(**batch)
            loss = outputs.loss
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


def train_integration():
    AcceleratorState()._reset_state(True)
    fsdp_plugin = FSDPPlugin(
        auto_wrap_policy=FSDP_WRAP_POLICY,
        use_orig_params=True,
        mixed_precision_policy=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
    )
    accelerator = Accelerator(mixed_precision="fp8", fsdp_plugin=fsdp_plugin, kwargs_handlers=[AORecipeKwargs()])
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
        MODEL_NAME, accelerator=accelerator
    )

    model, optimizer = accelerator.prepare(model, optimizer)
    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    for batch in train_dataloader:
        outputs = model(**batch)
        loss = outputs.loss
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


if __name__ == "__main__":
    baseline_not_trained, baseline_trained = train_baseline()
    accelerator_not_trained, accelerator_trained = train_integration()

    assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
    )
    assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
    )
    assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
    )
    assert baseline_trained["f1"] == accelerator_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
    )

    torch.distributed.destroy_process_group()
benchmarks/fp8/torchao/non_distributed.py (new file, 145 lines)

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script tests to ensure that `accelerate` performs at the same level as raw `torchao`.

This particular script verifies this for single GPU training.
"""

from functools import partial

import evaluate
import torch
from fp8_utils import get_training_utilities
from torchao.float8 import convert_to_float8_training

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import AORecipeKwargs, set_seed


MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")


def evaluate_model(model, dataloader, metric, accelerator=None):
    "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
    model.eval()
    for step, batch in enumerate(dataloader):
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        if accelerator is not None and accelerator.num_processes > 1:
            predictions, references = accelerator.gather_for_metrics((predictions, references))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()


def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None):
    if isinstance(module, torch.nn.Linear):
        if module.in_features % 16 != 0 or module.out_features % 16 != 0:
            return False
    # For stability reasons, we skip the first and last linear layers
    # Otherwise can lead to the model not training or converging properly
    if fqn in (first_layer_name, last_layer_name):
        return False
    return True


def train_baseline():
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
    first_linear = None
    last_linear = None
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear):
            if first_linear is None:
                first_linear = name
            last_linear = name

    func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear)
    model.to("cuda")
    convert_to_float8_training(model, module_filter_fn=func)
    base_model_results = evaluate_model(model, eval_dataloader, METRIC)
    model.train()

    for batch in train_dataloader:
        with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
            outputs = model(**batch)
            loss = outputs.loss
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


def train_integration():
    set_seed(42)
    accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()])
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
        MODEL_NAME, accelerator=accelerator
    )
    model = accelerator.prepare(model)
    base_model_results = evaluate_model(model, eval_dataloader, METRIC)
    model.train()

    for batch in train_dataloader:
        outputs = model(**batch)
        loss = outputs.loss
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


if __name__ == "__main__":
    baseline_not_trained, baseline_trained = train_baseline()
    AcceleratorState._reset_state(True)
    accelerator_not_trained, accelerator_trained = train_integration()
    assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
    )
    assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
    )
    assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
    )
    assert baseline_trained["f1"] == accelerator_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
    )
benchmarks/fp8/transformer_engine/Dockerfile (new file, 15 lines)

ARG BASE_YEAR=25
ARG BASE_MONTH=03

FROM nvcr.io/nvidia/pytorch:${BASE_YEAR}.${BASE_MONTH}-py3

RUN pip install transformers evaluate datasets
RUN git clone https://github.com/huggingface/accelerate.git

RUN cd accelerate && \
    pip install -e .[deepspeed] && \
    cd benchmarks/fp8

RUN /bin/bash
benchmarks/fp8/transformer_engine/README.md (new file, 32 lines)

# FP8 Benchmarks

Comparing and running [TransformerEngine](https://github.com/NVIDIA/TransformerEngine) FP8 with accelerate

## Overview

This repo provides scripts which compare native TransformerEngine model training against `accelerate`'s own integration. Each modeling type is segmented out via a script, supporting the following:

* Single GPU training (`non_distributed.py`)
* Multi-GPU training via DistributedDataParallelism (`ddp.py`)
* Fully Sharded Data Parallelism (`fsdp.py`)
* DeepSpeed ZeRO 1-3 (`distrib_deepspeed.py`)

To run them, it's recommended to use a docker image (see the attached `Dockerfile`) and not install `TransformerEngine` manually.

## Running:

There are official Docker images located at `huggingface/accelerate:gpu-fp8-transformerengine-nightly` which can be used.

You can run all scripts using the core `accelerate launch` command without any `accelerate config` being needed.

For single GPU, run it via `python`:

```bash
python non_distributed.py
```

For the rest, run it via `accelerate launch`:

```bash
accelerate launch ddp.py # or distrib_deepspeed.py, fsdp.py
```
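All of these scripts share the same core `accelerate`-side setup: an `Accelerator` created with `mixed_precision="fp8"` and a TransformerEngine recipe handler. A trimmed-down sketch of that shared setup is shown below (the full scripts that follow also build the raw TransformerEngine baselines for comparison):

```python
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs, set_seed

# DelayedScaling recipe options forwarded to TransformerEngine, matching the benchmark scripts.
FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}

set_seed(42)
accelerator = Accelerator(
    mixed_precision="fp8",
    kwargs_handlers=[FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)],
)
# The model, optimizer, dataloaders, and scheduler come from fp8_utils.get_training_utilities(...)
# and are then passed through accelerator.prepare(...), as in the scripts below.
```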
benchmarks/fp8/transformer_engine/ddp.py (new file, 144 lines)

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script tests to ensure that `accelerate` performs at the same level as raw `TransformerEngine`.

This particular script verifies this for DDP training.
"""

import evaluate
import torch
import transformer_engine.common.recipe as te_recipe
import transformer_engine.pytorch as te
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
from torch.nn.parallel import DistributedDataParallel as DDP
from transformer_engine.common.recipe import DelayedScaling

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, set_seed
from accelerate.utils.transformer_engine import convert_model


MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")


def train_baseline():
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
    accelerator = Accelerator()
    device = accelerator.device
    model.to(device)

    # Convert the model to TE
    old_named_params = get_named_parameters(model)

    with torch.no_grad():
        convert_model(model)

    FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
    fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)

    new_named_params = get_named_parameters(model)

    # Convert the model to DDP
    device_ids, output_device = [accelerator.local_process_index], accelerator.local_process_index
    model = DDP(model, device_ids=device_ids, output_device=output_device)

    mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
    for param_group in optimizer.param_groups:
        param_group["params"] = [mapping[p] for p in param_group["params"]]

    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    for _ in range(2):
        for batch in train_dataloader:
            with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
                with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                    batch = batch.to(device)
                    outputs = model(**batch)
                    loss = outputs.loss
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


def train_integration():
    FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
    kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
    AcceleratorState()._reset_state(True)
    accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
        MODEL_NAME, accelerator=accelerator
    )

    model, optimizer = accelerator.prepare(model, optimizer)
    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    for _ in range(2):
        for batch in train_dataloader:
            outputs = model(**batch)
            loss = outputs.loss
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


if __name__ == "__main__":
    baseline_not_trained, baseline_trained = train_baseline()
    accelerator_not_trained, accelerator_trained = train_integration()

    assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
    )
    assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
    )
    assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
    )
    assert baseline_trained["f1"] == accelerator_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
    )

    torch.distributed.destroy_process_group()
benchmarks/fp8/transformer_engine/distrib_deepspeed.py (new file, 191 lines)

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script tests to ensure that `accelerate` performs at the same level as raw `TransformerEngine`.

This particular script verifies this for DeepSpeed training.
"""

from unittest.mock import patch

import deepspeed
import evaluate
import torch
import transformer_engine.common.recipe as te_recipe
import transformer_engine.pytorch as te
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
from transformer_engine.common.recipe import DelayedScaling

from accelerate import Accelerator, DeepSpeedPlugin
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, set_seed
from accelerate.utils.transformer_engine import convert_model


MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")


def train_baseline(zero_stage: int = 1):
    # This forces transformers to think Zero-3 Init should be used
    with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock:
        mock.return_value = zero_stage == 3
        set_seed(42)

        accelerator = Accelerator()
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
            MODEL_NAME, accelerator=accelerator
        )

    # Convert the model to TE
    old_named_params = get_named_parameters(model)

    with torch.no_grad():
        convert_model(model)
    new_named_params = get_named_parameters(model)

    mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
    for param_group in optimizer.param_groups:
        param_group["params"] = [mapping[p] for p in param_group["params"]]

    FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
    fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)

    import numpy as np

    config = {
        "train_batch_size": 16,
        "train_micro_batch_size_per_gpu": 16,
        "gradient_accumulation_steps": 1,
        "zero_optimization": {
            "stage": zero_stage,
            "offload_optimizer": {"device": "none", "nvme_path": None},
            "offload_param": {"device": "none", "nvme_path": None},
            "stage3_gather_16bit_weights_on_model_save": False,
        },
        "gradient_clipping": 1.0,
        "steps_per_print": np.inf,
        "bf16": {"enabled": True},
        "fp16": {"enabled": False},
        "zero_allow_untested_optimizer": True,
    }

    (
        model,
        optimizer,
        _,
        _,
    ) = deepspeed.initialize(
        model=model,
        optimizer=optimizer,
        config_params=config,
    )

    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    model_outputs = []
    data = []

    for _ in range(2):
        for batch in train_dataloader:
            with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
                outputs = model(**batch)
            data.append(batch.to("cpu"))
            model_outputs.append(outputs.logits.to("cpu"))
            loss = outputs.loss
            model.backward(loss)
            model.step()
            for _ in range(accelerator.num_processes):
                lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.destroy()
    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results, model_outputs, data


def train_integration(zero_stage: int = 1):
    set_seed(42)
    FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
    kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
    AcceleratorState()._reset_state(True)
    deepspeed_plugin = DeepSpeedPlugin(
        zero_stage=zero_stage,
        zero3_init_flag=zero_stage == 3,
    )
    accelerator = Accelerator(
        mixed_precision="fp8", kwargs_handlers=kwargs_handlers, deepspeed_plugin=deepspeed_plugin
    )
    accelerator.state.deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = 16

    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
        MODEL_NAME, accelerator=accelerator
    )

    model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()
    model_outputs = []
    data = []
    for _ in range(2):
        for batch in train_dataloader:
            outputs = model(**batch)
            data.append(batch.to("cpu"))
            model_outputs.append(outputs.logits.to("cpu"))
            loss = outputs.loss
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.destroy()
    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results, model_outputs, data


if __name__ == "__main__":
    for zero_stage in [1, 2, 3]:
        baseline_not_trained, baseline_trained, baseline_outputs, baseline_data = train_baseline(zero_stage)
        accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration(
            zero_stage
        )
        assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
            f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
        )
        assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
            f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
        )
        assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
            f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
        )
        assert baseline_trained["f1"] == accelerator_trained["f1"], (
            f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
        )

    torch.distributed.destroy_process_group()
benchmarks/fp8/transformer_engine/fp8_utils.py (new file, 116 lines)

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch


def get_dataloaders(model_name: str, batch_size: int = 16):
    from datasets import load_dataset
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["idx", "sentence1", "sentence2"],
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        return tokenizer.pad(
            examples,
            padding="longest",
            pad_to_multiple_of=16,  # Specific for FP8
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=16,
        drop_last=True,
    )

    return train_dataloader, eval_dataloader


def get_training_utilities(model_name: str, batch_size: int = 16, accelerator=None):
    """
    Returns a tuple of:
        - Model
        - Optimizer
        - Train dataloader (prepared)
        - Eval dataloader (prepared)
        - LR Scheduler
    Suitable for training on the MRPC dataset
    """
    from torch.optim import AdamW
    from transformers import AutoModelForSequenceClassification, get_linear_schedule_with_warmup

    from accelerate import Accelerator

    if accelerator is None:
        accelerator = Accelerator()
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    train_dataloader, eval_dataloader = get_dataloaders(model_name, batch_size)
    optimizer = AdamW(model.parameters(), lr=0.0001)
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=len(train_dataloader) * 2,
    )
    train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader)
    return model, optimizer, train_dataloader, eval_dataloader, lr_scheduler


def get_named_parameters(model):
    """
    Same thing as `Accelerator.get_named_parameters` Returns a list of the named parameters of the model (extracted
    from parallel)
    """
    from accelerate.utils import extract_model_from_parallel

    model = extract_model_from_parallel(model)
    return {n: p for n, p in model.named_parameters()}


def evaluate_model(model, dataloader, metric, accelerator=None):
    "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on"
    model.eval()
    for step, batch in enumerate(dataloader):
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        if accelerator is not None and accelerator.num_processes > 1:
            predictions, references = accelerator.gather_for_metrics((predictions, references))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()
benchmarks/fp8/transformer_engine/fsdp.py (new file, 161 lines)

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script tests to ensure that `accelerate` performs at the same level as raw `TransformerEngine`.

This particular script verifies this for FSDP training.
"""

from functools import partial

import evaluate
import torch
import transformer_engine.common.recipe as te_recipe
import transformer_engine.pytorch as te
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import MixedPrecision
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from transformer_engine.common.recipe import DelayedScaling
from transformers.models.bert import BertLayer

from accelerate import Accelerator
from accelerate import FullyShardedDataParallelPlugin as FSDPPlugin
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, set_seed
from accelerate.utils.transformer_engine import convert_model


MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")

FSDP_WRAP_POLICY = partial(transformer_auto_wrap_policy, transformer_layer_cls={BertLayer})


def train_baseline():
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
    accelerator = Accelerator()
    device = accelerator.device
    model.to(device)

    # Convert the model to TE
    old_named_params = get_named_parameters(model)

    with torch.no_grad():
        convert_model(model)

    FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
    fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)

    new_named_params = get_named_parameters(model)

    # Convert the model to FSDP
    model = FSDP(
        model,
        use_orig_params=True,
        mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
        auto_wrap_policy=FSDP_WRAP_POLICY,
    )

    mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
    for param_group in optimizer.param_groups:
        param_group["params"] = [mapping[p] for p in param_group["params"]]

    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    for _ in range(2):
        for batch in train_dataloader:
            with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
                with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                    batch = batch.to(device)
                    outputs = model(**batch)
                    loss = outputs.loss
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


def train_integration():
    FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
    kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
    AcceleratorState()._reset_state(True)
    fsdp_plugin = FSDPPlugin(
        auto_wrap_policy=FSDP_WRAP_POLICY,
        use_orig_params=True,
        mixed_precision_policy=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
    )
    accelerator = Accelerator(mixed_precision="fp8", fsdp_plugin=fsdp_plugin, kwargs_handlers=kwargs_handlers)
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
        MODEL_NAME, accelerator=accelerator
    )

    model, optimizer = accelerator.prepare(model, optimizer)
    base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
    model.train()

    for _ in range(2):
        for batch in train_dataloader:
            outputs = model(**batch)
            loss = outputs.loss
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


if __name__ == "__main__":
    baseline_not_trained, baseline_trained = train_baseline()
    accelerator_not_trained, accelerator_trained = train_integration()

    assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
    )
    assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
    )
    assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
    )
    assert baseline_trained["f1"] == accelerator_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
    )

    torch.distributed.destroy_process_group()
benchmarks/fp8/transformer_engine/non_distributed.py (new file, 132 lines)

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script tests to ensure that `accelerate` performs at the same level as raw `TransformerEngine`.

This particular script verifies this for single GPU training.
"""

import evaluate
import torch
import transformer_engine.common.recipe as te_recipe
import transformer_engine.pytorch as te
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
from transformer_engine.common.recipe import DelayedScaling

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, set_seed
from accelerate.utils.transformer_engine import convert_model


MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")


def train_baseline():
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)

    # Convert the model to TE
    old_named_params = get_named_parameters(model)

    with torch.no_grad():
        convert_model(model)

    new_named_params = get_named_parameters(model)
    mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
    for param_group in optimizer.param_groups:
        param_group["params"] = [mapping[p] for p in param_group["params"]]

    FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
    fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)

    model.to("cuda")
    base_model_results = evaluate_model(model, eval_dataloader, METRIC)
    model.train()

    for batch in train_dataloader:
        with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
            with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                batch = batch.to("cuda")
                outputs = model(**batch)
                loss = outputs.loss
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


def train_integration():
    FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
    kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
    AcceleratorState()._reset_state(True)
    accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)
    set_seed(42)
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
        MODEL_NAME, accelerator=accelerator
    )

    model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
    base_model_results = evaluate_model(model, eval_dataloader, METRIC)
    model.train()

    for batch in train_dataloader:
        outputs = model(**batch)
        loss = outputs.loss
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        lr_scheduler.step()

    trained_model_results = evaluate_model(model, eval_dataloader, METRIC)

    assert trained_model_results["accuracy"] > base_model_results["accuracy"], (
        f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}"
    )
    assert trained_model_results["f1"] > base_model_results["f1"], (
        f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}"
    )

    return base_model_results, trained_model_results


if __name__ == "__main__":
    baseline_not_trained, baseline_trained = train_baseline()
    accelerator_not_trained, accelerator_trained = train_integration()

    assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}"
    )
    assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}"
    )
    assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], (
        f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}"
    )
    assert baseline_trained["f1"] == accelerator_trained["f1"], (
        f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}"
    )
74
benchmarks/fsdp2/README.md
Normal file
74
benchmarks/fsdp2/README.md
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
# FSDP2 Benchmarks
|
||||||
|
|
||||||
|
This benchmark showcases `FSDP2` in 🤗 `accelerate` and compares it to `torch` baseline.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This benchmark consists of two parts:
|
||||||
|
- `main.py` is the main script that runs the benchmark
|
||||||
|
- `visualize.py` is the script that visualizes the results (if `--output_dir` was specified for the previous command)
|
||||||
|
|
||||||
|
## Motivation
|
||||||
|
|
||||||
|
We want to showcase that 🤗 `accelerate`'s integration of `FSDP2` is on par with raw PyTorch, and highlight a "broken" part of PyTorch where creating an optimizer before applying `FSDP2` **doesn't result in a working training loop** (more on this later).
|
||||||
|
This script showcases **matching memory usage and convergence between `accelerate` and `torch`'s baseline.**
|
||||||
|
To deal with this breaking change (and maintain backward compatibility with FSDP1 in terms of API), `accelerate` had to come up with a workaround, since `accelerate` assumes that the user will nearly always create the model, optimizer, scheduler, etc. beforehand and bring them themselves. Without a workaround, creating the optimizer beforehand led to a stark increase in memory and a model that doesn't train at all.
|
||||||
|
To work around this, we replace the parameters inside the optimizer with the newly created FSDP2 sharded ones. More about this can be found in this [blog post (TBD)](TODO)
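
In code, the workaround looks roughly like the sketch below. It is a condensed version of what `prepare_torch` in `utils.py` does, reusing the helpers defined there (`get_named_parameters`, `replace_optimizer_params`, `swap_back_optimizer_params`); the stand-in model and the single `fully_shard` call are simplifications, and a `torchrun`-style distributed setup is assumed.

```python
import torch
from torch.distributed.fsdp import fully_shard
from torch.optim import AdamW

from utils import get_named_parameters, replace_optimizer_params, swap_back_optimizer_params

model = torch.nn.Linear(16, 16)  # stand-in for the real model built by `get_model` in utils.py

# The optimizer is created *before* sharding, which is exactly the case that breaks vanilla FSDP2
optimizer = AdamW(model.parameters(), lr=3e-5)

# Remember `name -> data_ptr` of the original (unsharded) parameters
old_named_parameters = get_named_parameters(model, drop_refs=True)

# Replace the optimizer's params with placeholders so `fully_shard` triggers a fresh allocation
replace_optimizer_params(optimizer)

fully_shard(model)

# Point the optimizer at the newly created sharded parameters
swap_back_optimizer_params(model, optimizer, old_named_parameters)
```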
|
||||||
|
> [!WARNING]
|
||||||
|
> This script is intended to fit on 2x 24GB GPUs, though on so few GPUs it's not possible to see the memory difference (discrepancies in grad allocation result in lower memory usage in the non-fixed case), only the difference in convergence. Below are attached results from 8x H100 GPUs where the difference is visible.
|
||||||
|
> TLDR: more GPUs = bigger memory difference between fixed and non-fixed cases.
|
||||||
|
|
||||||
|
## Results
|
||||||
|
|
||||||
|
Here are the results from running the benchmark on 8x H100 GPUs:
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="imgs/allocated_memory.png" width="80%" alt="Allocated Memory Usage">
|
||||||
|
</p>
|
||||||
|
<p align="center">
|
||||||
|
<img src="imgs/reserved_memory.png" width="80%" alt="Reserved Memory Usage">
|
||||||
|
</p>
|
||||||
|
|
||||||
|
As you can see, the memory usage of `accelerate` and `torch_optimizer_after_fsdp` (the **intended** way) is very similar, while `torch_optimizer_before_fsdp_not_fixed` uses significantly more memory. Our fix in `torch_optimizer_before_fsdp_fixed` brings the memory usage back in line with the **intended** approach.
|
||||||
|
|
||||||
|
> [!WARNING]
|
||||||
|
> Timing discrepancies are due to the benchmarks being run in a single script.
|
||||||
|
|
||||||
|
|
||||||
|
## Running
|
||||||
|
|
||||||
|
To run the benchmark, you can either use `accelerate launch` or `torchrun`:
|
||||||
|
```bash
|
||||||
|
accelerate launch main.py
|
||||||
|
```
|
||||||
|
```bash
|
||||||
|
# For two GPUs
|
||||||
|
torchrun --nproc_per_node 2 main.py
|
||||||
|
```
|
||||||
|
|
||||||
|
The script supports multiple configurable options; you can learn about them by running:
|
||||||
|
```bash
|
||||||
|
python3 main.py --help
|
||||||
|
```
|
||||||
|
|
||||||
|
This script will run 4 different benchmarks:
|
||||||
|
- `torch_optimizer_after_fsdp`: `torch` baseline where the optimizer is created after applying `FSDP2`; this is the **intended** way to do it
|
||||||
|
- `torch_optimizer_before_fsdp_not_fixed`: `torch` baseline where optimizer is created before applying `FSDP2` without fixing the optimizer parameters
|
||||||
|
- `torch_optimizer_before_fsdp_fixed`: `torch` baseline where optimizer is created before applying `FSDP2` with our fix to the optimizer
|
||||||
|
- `accelerate`: `accelerate`'s own integration of `FSDP2` where optimizer is created before applying `FSDP2`, but we apply our fix to the optimizer
|
||||||
|
|
||||||
|
Memory results are saved in the folder specified by the `--output_dir` argument.
|
||||||
|
Optionally, you can specify `--save_memory_snapshot` to also save a torch memory snapshot, which can then be viewed using [`torch memory viz`](https://pytorch.org/memory_viz).
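
If you just want to inspect the raw numbers without plotting, each run's JSON can be loaded directly. A minimal sketch (the file name and keys come from `MemoryTracker` in `measure_utils.py`; `<output_dir>` is whatever you passed to `--output_dir`):

```python
import json

# Load the memory trace written by `MemoryTracker` for the `accelerate` run
with open("<output_dir>/accelerate_memory_usage.json") as f:
    data = json.load(f)

# Values are recorded in MB at ~10 ms intervals
print(f"Peak allocated memory: {max(data['allocated_memory']):.2f} MB")
print(f"Peak reserved memory: {max(data['reserved_memory']):.2f} MB")
```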
|
||||||
|
|
||||||
|
## Visualizing results
|
||||||
|
|
||||||
|
To visualize the results, you can run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python3 visualize.py --dir <path_to_output_dir>
|
||||||
|
```
|
||||||
|
|
||||||
|
This will create two plots showing allocated and reserved memory usage across all the benchmarks discussed above.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
benchmarks/fsdp2/imgs/allocated_memory.png (new binary file, 124 KiB; not shown)
benchmarks/fsdp2/imgs/reserved_memory.png (new binary file, 56 KiB; not shown)
benchmarks/fsdp2/main.py (new file, 122 lines)
@@ -0,0 +1,122 @@
|
|||||||
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import functools
|
||||||
|
from typing import Callable
|
||||||
|
|
||||||
|
import torch
|
||||||
|
|
||||||
|
from accelerate import Accelerator
|
||||||
|
from utils import parse_args, prepare_accelerate, prepare_torch
|
||||||
|
|
||||||
|
|
||||||
|
MODEL_NAME = "Qwen/Qwen2.5-1.5B-Instruct"
|
||||||
|
LEARNING_RATE = 3e-5
|
||||||
|
|
||||||
|
CONFIG = {
|
||||||
|
"model_name": MODEL_NAME,
|
||||||
|
"learning_rate": LEARNING_RATE,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def train(
|
||||||
|
model: torch.nn.Module,
|
||||||
|
optimizer: torch.optim.Optimizer,
|
||||||
|
train_dataloader: torch.utils.data.DataLoader,
|
||||||
|
accelerator: Accelerator,
|
||||||
|
) -> torch.Tensor:
|
||||||
|
losses = []
|
||||||
|
for batch in train_dataloader:
|
||||||
|
optimizer.zero_grad()
|
||||||
|
outputs = model(**batch, use_cache=False)
|
||||||
|
|
||||||
|
loss = outputs.loss
|
||||||
|
losses.append(loss.item())
|
||||||
|
accelerator.backward(loss)
|
||||||
|
optimizer.step()
|
||||||
|
|
||||||
|
return torch.tensor(losses)
|
||||||
|
|
||||||
|
|
||||||
|
def evaluate(args, config: dict, init_fn: Callable, run_name: str) -> torch.Tensor:
|
||||||
|
model, optimizer, dataloader, accelerator, memory_tracker = init_fn(args, config)
|
||||||
|
|
||||||
|
loss = train(model, optimizer, dataloader, accelerator)
|
||||||
|
|
||||||
|
memory_tracker.stop()
|
||||||
|
msg = f"""Results for {run_name} (rank 0):
|
||||||
|
Loss: {loss[-1].item()}
|
||||||
|
Peak Allocated Memory: {float(memory_tracker.peak_allocated_memory):.2f} MB
|
||||||
|
Peak Reserved Memory: {float(memory_tracker.peak_reserved_memory):.2f} MB
|
||||||
|
{"-" * 34}"""
|
||||||
|
accelerator.print(msg)
|
||||||
|
return loss
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
args = parse_args()
|
||||||
|
evaluations = [
|
||||||
|
functools.partial(
|
||||||
|
evaluate,
|
||||||
|
init_fn=functools.partial(prepare_torch, post_shard_optimizer=False, apply_optimizer_fix=True),
|
||||||
|
run_name="Optimizer Before FSDP (w/ fix)",
|
||||||
|
),
|
||||||
|
functools.partial(
|
||||||
|
evaluate,
|
||||||
|
init_fn=functools.partial(prepare_torch, post_shard_optimizer=False, apply_optimizer_fix=False),
|
||||||
|
run_name="Optimizer Before FSDP (w/o fix)",
|
||||||
|
),
|
||||||
|
functools.partial(
|
||||||
|
evaluate,
|
||||||
|
init_fn=functools.partial(prepare_torch, post_shard_optimizer=True),
|
||||||
|
run_name="Optimizer After FSDP",
|
||||||
|
),
|
||||||
|
functools.partial(evaluate, init_fn=prepare_accelerate, run_name="Accelerate"),
|
||||||
|
]
|
||||||
|
labels = [
|
||||||
|
"Optimizer Before FSDP (w/ fix)",
|
||||||
|
"Optimizer Before FSDP (w/o fix)",
|
||||||
|
"Optimizer After FSDP",
|
||||||
|
"Accelerate",
|
||||||
|
]
|
||||||
|
|
||||||
|
results = {}
|
||||||
|
torch.use_deterministic_algorithms(True)
|
||||||
|
|
||||||
|
for evaluation, label in zip(evaluations, labels):
|
||||||
|
results[label] = evaluation(args, CONFIG)
|
||||||
|
|
||||||
|
torch.testing.assert_close(
|
||||||
|
results["Optimizer After FSDP"],
|
||||||
|
results["Optimizer Before FSDP (w/ fix)"],
|
||||||
|
msg="Optimizer After FSDP and Optimizer Before FSDP (w/ fix) should be the same",
|
||||||
|
)
|
||||||
|
|
||||||
|
torch.testing.assert_close(
|
||||||
|
results["Optimizer After FSDP"],
|
||||||
|
results["Accelerate"],
|
||||||
|
msg="Optimizer After FSDP and Accelerate should be the same",
|
||||||
|
)
|
||||||
|
|
||||||
|
torch.testing.assert_close(
|
||||||
|
results["Accelerate"],
|
||||||
|
results["Optimizer Before FSDP (w/ fix)"],
|
||||||
|
msg="Accelerate and Optimizer Before FSDP (w/ fix) should be the same",
|
||||||
|
)
|
||||||
|
|
||||||
|
torch.distributed.destroy_process_group()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
benchmarks/fsdp2/measure_utils.py (new file, 130 lines)
@@ -0,0 +1,130 @@
|
|||||||
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import gc
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
|
||||||
|
import psutil
|
||||||
|
import torch
|
||||||
|
|
||||||
|
from accelerate import PartialState
|
||||||
|
|
||||||
|
|
||||||
|
class MemoryTracker:
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
device: torch.device,
|
||||||
|
output_directory: str,
|
||||||
|
run_name: str,
|
||||||
|
save_memory_snapshot: bool,
|
||||||
|
log_interval: float = 0.01,
|
||||||
|
):
|
||||||
|
"""Class for tracking gpu and cpu memory usage of the process.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
device (`torch.device`):
|
||||||
|
PyTorch device to monitor.
|
||||||
|
output_directory (`str`):
|
||||||
|
Directory to save the memory usage data to, will be created if it doesn't exist.
|
||||||
|
run_name (`str`):
|
||||||
|
Name of the run, will be used to name the output files.
|
||||||
|
save_memory_snapshot (`bool`):
|
||||||
|
Whether to also save `torch.cuda.memory._dump_snapshot` to the output directory.
|
||||||
|
log_interval (`float`, *optional*):
|
||||||
|
Interval in seconds between memory measurements. Defaults to 0.01.
|
||||||
|
"""
|
||||||
|
self.log_interval = log_interval
|
||||||
|
self.save_memory_snapshot = save_memory_snapshot
|
||||||
|
self.output_directory = output_directory
|
||||||
|
self.run_name = run_name
|
||||||
|
|
||||||
|
self.timestamps = []
|
||||||
|
self.allocated_memory = []
|
||||||
|
self.reserved_memory = []
|
||||||
|
self.virtual_memory = []
|
||||||
|
|
||||||
|
self.start_time = None
|
||||||
|
self.running = False
|
||||||
|
|
||||||
|
self._thread = None
|
||||||
|
self._state = PartialState()
|
||||||
|
self._process = psutil.Process()
|
||||||
|
self._device = device
|
||||||
|
self.torch_accelerator_module = getattr(torch, device.type, torch.cuda)
|
||||||
|
|
||||||
|
def _monitor(self):
|
||||||
|
self.start_time = time.time()
|
||||||
|
|
||||||
|
while self.running:
|
||||||
|
allocated = self.torch_accelerator_module.memory_allocated(self._device) / (1024 * 1024)
|
||||||
|
reserved = self.torch_accelerator_module.memory_reserved(self._device) / (1024 * 1024)
|
||||||
|
virtual_memory = self._process.memory_info().rss / (1024 * 1024)
|
||||||
|
|
||||||
|
self.allocated_memory.append(allocated)
|
||||||
|
self.reserved_memory.append(reserved)
|
||||||
|
self.virtual_memory.append(virtual_memory)
|
||||||
|
self.timestamps.append(time.time() - self.start_time)
|
||||||
|
|
||||||
|
time.sleep(self.log_interval)
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
gc.collect()
|
||||||
|
self.torch_accelerator_module.empty_cache()
|
||||||
|
|
||||||
|
if self.output_directory:
|
||||||
|
os.makedirs(self.output_directory, exist_ok=True)
|
||||||
|
|
||||||
|
if self.save_memory_snapshot:
|
||||||
|
self.torch_accelerator_module.memory._record_memory_history()
|
||||||
|
|
||||||
|
self.running = True
|
||||||
|
self._thread = threading.Thread(target=self._monitor)
|
||||||
|
self._thread.daemon = True
|
||||||
|
self._thread.start()
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
self.running = False
|
||||||
|
if self._thread:
|
||||||
|
self._thread.join()
|
||||||
|
|
||||||
|
if self.save_memory_snapshot and self._state.is_main_process and self.output_directory:
|
||||||
|
output_file = os.path.join(self.output_directory, f"{self.run_name}_memory_snapshot.pkl")
|
||||||
|
self.torch_accelerator_module.memory._dump_snapshot(output_file)
|
||||||
|
|
||||||
|
if self._state.is_main_process and self.output_directory:
|
||||||
|
path = os.path.join(self.output_directory, f"{self.run_name}_memory_usage.json")
|
||||||
|
with open(path, "w") as f:
|
||||||
|
json.dump(
|
||||||
|
{
|
||||||
|
"timestamps": self.timestamps,
|
||||||
|
"allocated_memory": self.allocated_memory,
|
||||||
|
"reserved_memory": self.reserved_memory,
|
||||||
|
"virtual_memory": self.virtual_memory,
|
||||||
|
},
|
||||||
|
f,
|
||||||
|
)
|
||||||
|
if self.save_memory_snapshot:
|
||||||
|
self.torch_accelerator_module.memory._record_memory_history(False)
|
||||||
|
self.torch_accelerator_module.empty_cache()
|
||||||
|
|
||||||
|
@property
|
||||||
|
def peak_allocated_memory(self):
|
||||||
|
return max(self.allocated_memory)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def peak_reserved_memory(self):
|
||||||
|
return max(self.reserved_memory)
|
||||||
benchmarks/fsdp2/utils.py (new file, 290 lines)
@@ -0,0 +1,290 @@
|
|||||||
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
from types import MethodType
|
||||||
|
from typing import Union
|
||||||
|
|
||||||
|
import torch
|
||||||
|
from datasets import load_dataset
|
||||||
|
from measure_utils import MemoryTracker
|
||||||
|
from torch.distributed.fsdp import MixedPrecisionPolicy, fully_shard
|
||||||
|
from torch.optim import AdamW
|
||||||
|
from torch.utils.data import DataLoader
|
||||||
|
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, DataCollatorForLanguageModeling
|
||||||
|
from transformers.models.qwen2.modeling_qwen2 import Qwen2DecoderLayer
|
||||||
|
|
||||||
|
from accelerate import Accelerator, FullyShardedDataParallelPlugin
|
||||||
|
from accelerate.state import AcceleratorState, is_initialized
|
||||||
|
from accelerate.utils import convert_outputs_to_fp32, set_seed
|
||||||
|
|
||||||
|
|
||||||
|
SEED = 421
|
||||||
|
|
||||||
|
|
||||||
|
def get_named_parameters(model: torch.nn.Module, drop_refs: bool = False) -> dict[str, Union[torch.Tensor, int]]:
|
||||||
|
"""
|
||||||
|
This function returns a dictionary mapping the parameter names to their data pointers or
|
||||||
|
the original parameters if `drop_refs` is `False`.
|
||||||
|
It is used to get the original parameter names before `fully_shard` is applied.
|
||||||
|
|
||||||
|
We only return the data pointers, so we drop the references to the original parameters
|
||||||
|
and `fully_shard` will then trigger a new allocation for the sharded ones.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
model (`torch.nn.Module`): Model instance to get the named parameters from
|
||||||
|
drop_refs (`bool`, *optional*, defaults to `False`): Whether to drop the references to the original parameters
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
`dict[str, Union[torch.Tensor, int]]`: Dictionary mapping the parameter names to their data pointers or the original parameters if `drop_refs` is `False`
|
||||||
|
"""
|
||||||
|
named_parameters = {}
|
||||||
|
for n, p in model.named_parameters():
|
||||||
|
# We only preserve the data pointers to have the unique 1:1 mapping between the original and the sharded parameters
|
||||||
|
named_parameters[n] = p.data_ptr() if drop_refs else p
|
||||||
|
return named_parameters
|
||||||
|
|
||||||
|
|
||||||
|
def replace_optimizer_params(optimizer: torch.optim.Optimizer):
|
||||||
|
"""
|
||||||
|
This function is called before using `fully_shard` on the model. It replaces the parameters of the optimizer with
|
||||||
|
empty tensors, so `fully_shard` can trigger a new allocation for the sharded ones. After this, we swap the parameters
|
||||||
|
`data_ptr` to the original one, so we can reuse that later to map the sharded parameters to the original ones.
|
||||||
|
This function modifies the optimizer in-place.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
optimizer (torch.optim.Optimizer): Optimizer instance which contains the original model parameters
|
||||||
|
"""
|
||||||
|
|
||||||
|
for param_group in optimizer.param_groups:
|
||||||
|
for i, p in enumerate(param_group["params"]):
|
||||||
|
# We drop a reference to the original param here, so that _move_states_to_device triggers a reallocation
|
||||||
|
# This is required or else the `fully_shard` -> `_move_states_to_device` uses the original memory address
|
||||||
|
# for the sharded parameters, and we get a weird/undefined behavior.
|
||||||
|
param_group["params"][i] = torch.empty_like(p)
|
||||||
|
|
||||||
|
# We save the original data_ptr, so we can swap back the parameters later
|
||||||
|
param_group["params"][i].data_ptr = p.data_ptr()
|
||||||
|
|
||||||
|
|
||||||
|
def swap_back_optimizer_params(
|
||||||
|
model: torch.nn.Module, optimizer: torch.optim.Optimizer, old_named_parameter_pointers: dict[str, int]
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
This function is the counterpart of `replace_optimizer_params`. It is called after `fully_shard` being applied to
|
||||||
|
the model. It swaps the parameters of the optimizer to their sharded counterparts.
|
||||||
|
It is done using the `data_ptr` mapping prepared in `replace_optimizer_params` and `get_named_parameters`.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
model (`torch.nn.Module`): Model instance to get the new named parameters from
|
||||||
|
optimizer (`torch.optim.Optimizer`): Optimizer instance to swap the parameters of
|
||||||
|
old_named_parameter_pointers (`dict[str, int]`): Dictionary mapping the original parameter names to their `data_ptr`s, as returned by `get_named_parameters` with `drop_refs=True`
|
||||||
|
"""
|
||||||
|
# We get the new named parameters after `fully_shard` being applied
|
||||||
|
# We don't drop the references as we need the sharded parameters now
|
||||||
|
new_named_parameters = get_named_parameters(model, drop_refs=False)
|
||||||
|
|
||||||
|
# We create a mapping from the original data_ptr to the new sharded param corresponding to it
|
||||||
|
mapping = {p: new_named_parameters[n] for n, p in old_named_parameter_pointers.items()}
|
||||||
|
|
||||||
|
for param_group in optimizer.param_groups:
|
||||||
|
# We swap the parameters of the optimizer to the new sharded ones
|
||||||
|
param_group["params"] = [mapping[p.data_ptr] for p in param_group["params"]]
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args():
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser.add_argument(
|
||||||
|
"--output_dir",
|
||||||
|
type=str,
|
||||||
|
help="Directory to save the benchmarking results.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--save_memory_snapshot",
|
||||||
|
action="store_true",
|
||||||
|
default=False,
|
||||||
|
help="If True, `torch.cuda.memory._dump_snapshot` will be used to additionaly save the memory trace.",
|
||||||
|
)
|
||||||
|
######################
|
||||||
|
# Training arguments #
|
||||||
|
######################
|
||||||
|
parser.add_argument(
|
||||||
|
"--batch_size",
|
||||||
|
type=int,
|
||||||
|
default=2,
|
||||||
|
help="Batch size for the training loop.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--block_size",
|
||||||
|
type=int,
|
||||||
|
default=128,
|
||||||
|
help="The maximum sequence length to use with the model.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--dataset_fraction",
|
||||||
|
type=float,
|
||||||
|
default=1.0,
|
||||||
|
help="Fraction of the dataset to use.",
|
||||||
|
)
|
||||||
|
return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
def prepare_dataloader(tokenizer, args, accelerator: Accelerator) -> DataLoader:
|
||||||
|
dataset = load_dataset("tiny_shakespeare", split="train", trust_remote_code=True)
|
||||||
|
|
||||||
|
def tokenize_function(example):
|
||||||
|
return tokenizer(
|
||||||
|
example["text"],
|
||||||
|
)
|
||||||
|
|
||||||
|
dataset = dataset.map(
|
||||||
|
tokenize_function,
|
||||||
|
batched=True,
|
||||||
|
remove_columns=["text"],
|
||||||
|
)
|
||||||
|
|
||||||
|
block_size = min(tokenizer.model_max_length, args.block_size)
|
||||||
|
|
||||||
|
def group_texts(examples):
|
||||||
|
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
|
||||||
|
total_length = len(concatenated_examples[list(examples.keys())[0]])
|
||||||
|
|
||||||
|
total_length = (total_length // block_size) * block_size
|
||||||
|
|
||||||
|
result = {
|
||||||
|
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
|
||||||
|
for k, t in concatenated_examples.items()
|
||||||
|
}
|
||||||
|
|
||||||
|
result["labels"] = result["input_ids"].copy()
|
||||||
|
return result
|
||||||
|
|
||||||
|
dataset = dataset.map(group_texts, batched=True)
|
||||||
|
dataset = dataset.select(range(int(len(dataset) * args.dataset_fraction)))
|
||||||
|
|
||||||
|
def collate_fn(examples):
|
||||||
|
return DataCollatorForLanguageModeling(
|
||||||
|
tokenizer=tokenizer,
|
||||||
|
mlm=False,
|
||||||
|
)(examples)
|
||||||
|
|
||||||
|
dataloader = DataLoader(
|
||||||
|
dataset,
|
||||||
|
batch_size=args.batch_size,
|
||||||
|
collate_fn=collate_fn,
|
||||||
|
)
|
||||||
|
dataloader = accelerator.prepare(dataloader)
|
||||||
|
return dataloader
|
||||||
|
|
||||||
|
|
||||||
|
def get_model(model_name: str):
|
||||||
|
# We require the model to be loaded in fp32; otherwise benchmarks don't match, as accelerate upcasts parameters to fp32
|
||||||
|
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True, torch_dtype=torch.float32)
|
||||||
|
model = AutoModelForCausalLM.from_config(config)
|
||||||
|
return model
|
||||||
|
|
||||||
|
|
||||||
|
def get_tokenizer(model_name: str):
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
|
||||||
|
tokenizer.pad_token = tokenizer.eos_token
|
||||||
|
return tokenizer
|
||||||
|
|
||||||
|
|
||||||
|
def prepare_torch(
|
||||||
|
args, config: dict, post_shard_optimizer: bool = False, apply_optimizer_fix: bool = False
|
||||||
|
) -> tuple[torch.nn.Module, torch.optim.Optimizer, torch.utils.data.DataLoader, Accelerator]:
|
||||||
|
mp_policy = MixedPrecisionPolicy(
|
||||||
|
param_dtype=torch.bfloat16,
|
||||||
|
reduce_dtype=torch.bfloat16,
|
||||||
|
output_dtype=torch.bfloat16,
|
||||||
|
)
|
||||||
|
|
||||||
|
accelerator = Accelerator(mixed_precision="bf16")
|
||||||
|
set_seed(SEED)
|
||||||
|
is_fixed = "fixed" if apply_optimizer_fix else "not_fixed"
|
||||||
|
is_post_shard = "optimizer_after_fsdp" if post_shard_optimizer else "optimizer_before_fsdp"
|
||||||
|
run_name = f"torch_{is_post_shard}" if post_shard_optimizer else f"torch_{is_post_shard}_{is_fixed}"
|
||||||
|
|
||||||
|
tokenizer = get_tokenizer(config["model_name"])
|
||||||
|
train_dataloader = prepare_dataloader(tokenizer, args, accelerator)
|
||||||
|
|
||||||
|
memory_tracker = MemoryTracker(accelerator.device, args.output_dir, run_name, args.save_memory_snapshot)
|
||||||
|
memory_tracker.start()
|
||||||
|
|
||||||
|
model = get_model(config["model_name"])
|
||||||
|
optimizer = None
|
||||||
|
|
||||||
|
if not post_shard_optimizer:
|
||||||
|
optimizer = AdamW(model.parameters(), lr=config["learning_rate"])
|
||||||
|
|
||||||
|
if apply_optimizer_fix:
|
||||||
|
# We drop the references to the original parameters, so that `fully_shard` can trigger a new allocation
|
||||||
|
# Then we get the `module_name: data_ptr` mapping, so we can swap back the parameters later
|
||||||
|
old_named_parameters = get_named_parameters(model, drop_refs=True)
|
||||||
|
|
||||||
|
# We replace the parameters of the optimizer with empty tensors, so that `fully_shard` can trigger a new allocation
|
||||||
|
# We also change the `data_ptr` of the parameters to the original ones, so we can swap back the parameters later
|
||||||
|
replace_optimizer_params(optimizer)
|
||||||
|
|
||||||
|
for module in model.modules():
|
||||||
|
if isinstance(module, Qwen2DecoderLayer):
|
||||||
|
fully_shard(module, mp_policy=mp_policy)
|
||||||
|
fully_shard(model, mp_policy=mp_policy)
|
||||||
|
|
||||||
|
# We do this to imitate how accelerate forces outputs to be in fp32 via `convert_outputs_to_fp32`
|
||||||
|
autocast_context = torch.autocast(device_type=accelerator.state.device.type, dtype=torch.bfloat16)
|
||||||
|
model_forward_func = model.forward.__func__
|
||||||
|
new_forward = autocast_context(model_forward_func)
|
||||||
|
model.forward = MethodType(new_forward, model)
|
||||||
|
model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model)
|
||||||
|
|
||||||
|
if post_shard_optimizer:
|
||||||
|
optimizer = AdamW(model.parameters(), lr=config["learning_rate"])
|
||||||
|
|
||||||
|
if not post_shard_optimizer and apply_optimizer_fix:
|
||||||
|
# We swap back the parameters of the optimizer to the original ones
|
||||||
|
swap_back_optimizer_params(model, optimizer, old_named_parameters)
|
||||||
|
|
||||||
|
return model, optimizer, train_dataloader, accelerator, memory_tracker
|
||||||
|
|
||||||
|
|
||||||
|
def prepare_accelerate(
|
||||||
|
args, config: dict
|
||||||
|
) -> tuple[torch.nn.Module, torch.optim.Optimizer, torch.utils.data.DataLoader, Accelerator]:
|
||||||
|
if is_initialized():
|
||||||
|
AcceleratorState()._reset_state(True)
|
||||||
|
|
||||||
|
fsdp_plugin = FullyShardedDataParallelPlugin(
|
||||||
|
fsdp_version=2,
|
||||||
|
auto_wrap_policy="transformer_based_wrap",
|
||||||
|
transformer_cls_names_to_wrap=["Qwen2DecoderLayer"],
|
||||||
|
)
|
||||||
|
accelerator = Accelerator(
|
||||||
|
fsdp_plugin=fsdp_plugin,
|
||||||
|
mixed_precision="bf16",
|
||||||
|
)
|
||||||
|
set_seed(SEED)
|
||||||
|
|
||||||
|
tokenizer = get_tokenizer(config["model_name"])
|
||||||
|
train_dataloader = prepare_dataloader(tokenizer, args, accelerator)
|
||||||
|
|
||||||
|
memory_tracker = MemoryTracker(accelerator.device, args.output_dir, "accelerate", args.save_memory_snapshot)
|
||||||
|
memory_tracker.start()
|
||||||
|
|
||||||
|
model = get_model(config["model_name"])
|
||||||
|
optimizer = AdamW(model.parameters(), lr=config["learning_rate"])
|
||||||
|
|
||||||
|
model, optimizer = accelerator.prepare(model, optimizer)
|
||||||
|
|
||||||
|
return model, optimizer, train_dataloader, accelerator, memory_tracker
|
||||||
benchmarks/fsdp2/visualize.py (new file, 114 lines)
@@ -0,0 +1,114 @@
|
|||||||
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args():
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser.add_argument("--dir", type=str, help="Directory containing the memory usage data")
|
||||||
|
parser.add_argument(
|
||||||
|
"--memory_threshold",
|
||||||
|
type=int,
|
||||||
|
default=0,
|
||||||
|
help="Memory threshold to filter data that is below this value (only filters 1st `--filter_partition` of the points which should roughtly correspond to the model loading)",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--filter_partition",
|
||||||
|
type=float,
|
||||||
|
default=1 / 3,
|
||||||
|
help="Partition to drop data from that are below the memory threshold",
|
||||||
|
)
|
||||||
|
return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
def filter_data(data, memory_threshold, filter_partition, key):
|
||||||
|
timestamps = data["timestamps"]
|
||||||
|
memory = data[key]
|
||||||
|
|
||||||
|
mid_point = int(len(timestamps) * filter_partition)
|
||||||
|
filtered_times = []
|
||||||
|
filtered_memory = []
|
||||||
|
for i, (t, m) in enumerate(zip(timestamps, memory)):
|
||||||
|
if i < mid_point and m < memory_threshold:
|
||||||
|
continue
|
||||||
|
filtered_times.append(t)
|
||||||
|
filtered_memory.append(m)
|
||||||
|
return filtered_times, filtered_memory
|
||||||
|
|
||||||
|
|
||||||
|
def compare_memory_usage(data, labels, memory_threshold, filter_partition):
|
||||||
|
plt.style.use("seaborn-v0_8")
|
||||||
|
colors = ["#2ecc71", "#e74c3c", "#3498db", "#f1c40f"]
|
||||||
|
|
||||||
|
fig1, ax1 = plt.subplots(figsize=(15, 5))
|
||||||
|
for data_item, label, color in zip(data, labels, colors):
|
||||||
|
timestamps, allocated = filter_data(data_item, memory_threshold, filter_partition, "allocated_memory")
|
||||||
|
ax1.plot(timestamps, allocated, label=label, color=color, linewidth=2)
|
||||||
|
|
||||||
|
ax1.set_xlabel("Time (s)", fontsize=12)
|
||||||
|
ax1.set_ylabel("Allocated Memory (GB)", fontsize=12)
|
||||||
|
ax1.set_title("Allocated Memory Usage Over Time", fontsize=14, pad=15)
|
||||||
|
ax1.grid(True, linestyle="--", alpha=0.7)
|
||||||
|
ax1.legend(frameon=True, fancybox=True, shadow=True, fontsize=10)
|
||||||
|
ax1.spines["top"].set_visible(False)
|
||||||
|
ax1.spines["right"].set_visible(False)
|
||||||
|
plt.tight_layout()
|
||||||
|
|
||||||
|
fig2, ax2 = plt.subplots(figsize=(15, 5))
|
||||||
|
for data_item, label, color in zip(data, labels, colors):
|
||||||
|
timestamps, reserved = filter_data(data_item, memory_threshold, filter_partition, "reserved_memory")
|
||||||
|
ax2.plot(timestamps, reserved, label=label, color=color, linewidth=2)
|
||||||
|
|
||||||
|
ax2.set_xlabel("Time (s)", fontsize=12)
|
||||||
|
ax2.set_ylabel("Reserved Memory (GB)", fontsize=12)
|
||||||
|
ax2.set_title("Reserved Memory Usage Over Time", fontsize=14, pad=15)
|
||||||
|
ax2.grid(True, linestyle="--", alpha=0.7)
|
||||||
|
ax2.legend(frameon=True, fancybox=True, shadow=True, fontsize=10)
|
||||||
|
ax2.spines["top"].set_visible(False)
|
||||||
|
ax2.spines["right"].set_visible(False)
|
||||||
|
plt.tight_layout()
|
||||||
|
|
||||||
|
return fig1, fig2
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
args = parse_args()
|
||||||
|
DIR = args.dir
|
||||||
|
with open(f"{DIR}/torch_optimizer_before_fsdp_not_fixed_memory_usage.json") as f:
|
||||||
|
optimizer_before_fsdp_not_fixed = json.load(f)
|
||||||
|
|
||||||
|
with open(f"{DIR}/torch_optimizer_after_fsdp_memory_usage.json") as f:
|
||||||
|
optimizer_after_fsdp = json.load(f)
|
||||||
|
|
||||||
|
with open(f"{DIR}/torch_optimizer_before_fsdp_fixed_memory_usage.json") as f:
|
||||||
|
optimizer_before_fsdp_fixed = json.load(f)
|
||||||
|
|
||||||
|
with open(f"{DIR}/accelerate_memory_usage.json") as f:
|
||||||
|
accelerate = json.load(f)
|
||||||
|
|
||||||
|
data = [optimizer_before_fsdp_not_fixed, optimizer_before_fsdp_fixed, optimizer_after_fsdp, accelerate]
|
||||||
|
labels = [
|
||||||
|
"Optimizer Before FSDP (w/o fix)",
|
||||||
|
"Optimizer Before FSDP (w/ fix)",
|
||||||
|
"Optimizer After FSDP",
|
||||||
|
"Accelerate",
|
||||||
|
]
|
||||||
|
|
||||||
|
fig1, fig2 = compare_memory_usage(data, labels, args.memory_threshold, args.filter_partition)
|
||||||
|
fig1.savefig(f"{DIR}/allocated_memory.png")
|
||||||
|
fig2.savefig(f"{DIR}/reserved_memory.png")
|
||||||
benchmarks/torch.compile/README.md (new file, 111 lines)
@@ -0,0 +1,111 @@
|
|||||||
|
# Regional Compilation Benchmark
|
||||||
|
|
||||||
|
This benchmark compares different compilation strategies using PyTorch's `torch.compile` and Accelerate's `compile_regions` utility, which is based on the recipe in [PyTorch documentation](https://pytorch.org/tutorials/recipes/regional_compilation.html).
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The benchmark evaluates three approaches:
|
||||||
|
|
||||||
|
- **Baseline**: No compilation, standard PyTorch eager execution.
|
||||||
|
- **Full compilation**: Using PyTorch's `torch.compile()` on the entire model.
|
||||||
|
- **Regional compilation**: Using `accelerate.utils.compile_regions()` which targets specific blocks of the model to optimize compilation time.
|
||||||
|
|
||||||
|
Each approach is tested with different batch sizes (1 and 4) and a sequence length of 128 on various LLaMA-based models ranging from 1B to 13B parameters. We purposefully run the forward pass outside of the `torch.no_grad()` context to simulate performance in a training environment, where gradients are needed.
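
As a quick illustration of the two compiled variants being compared, a minimal sketch is shown below (the full benchmark lives in `regional_compilation.py`):

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

from accelerate.utils import compile_regions

config = AutoConfig.from_pretrained("NousResearch/Llama-3.2-1B")
model = AutoModelForCausalLM.from_config(config).eval()

# Full compilation: the whole model is handed to the compiler
full_compilation_model = torch.compile(model)

# Regional compilation: repeated blocks (e.g. decoder layers) are compiled individually
regional_compilation_model = compile_regions(model)
```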
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
To run this benchmark:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python regional_compilation.py
|
||||||
|
```
|
||||||
|
|
||||||
|
The script will automatically download the model configurations, create models, and benchmark both compilation and inference times across different scenarios.
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
- Suitable GPU memory for the models being tested.
|
||||||
|
- PyTorch with CUDA support.
|
||||||
|
- Transformers library.
|
||||||
|
- Accelerate library.
|
||||||
|
|
||||||
|
## Results
|
||||||
|
|
||||||
|
The benchmark results are summarized in the following figures:
|
||||||
|
|
||||||
|
- Compilation time is how long it takes to run the first forward pass.
|
||||||
|
- Speedup factor is the ratio of non-compiled baseline inference time to the fully/regionally compiled inference time.
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="imgs/compilation_time.png" width="80%" alt="Compilation Time">
|
||||||
|
</p>
|
||||||
|
<p align="center">
|
||||||
|
<img src="imgs/speedup_factor.png" width="80%" alt="Speedup Factor">
|
||||||
|
</p>
|
||||||
|
|
||||||
|
Full results are available in the tables below:
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
[-------------------------------------------------- NousResearch/Llama-3.2-1B ---------------------------------------------------]
|
||||||
|
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
|
||||||
|
1 threads: -----------------------------------------------------------------------------------------------------------------------
|
||||||
|
Baseline | 18.3 | 18.4 | |
|
||||||
|
Full compilation | 6.3 | 10.0 | 10696.4 | 10248.0
|
||||||
|
Regional compilation | 9.7 | 10.0 | 1952.7 | 2903.9
|
||||||
|
|
||||||
|
Times are in milliseconds (ms).
|
||||||
|
|
||||||
|
[---------------------------------------------- NousResearch/Hermes-3-Llama-3.2-3B ----------------------------------------------]
|
||||||
|
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
|
||||||
|
1 threads: -----------------------------------------------------------------------------------------------------------------------
|
||||||
|
Baseline | 33.4 | 33.6 | |
|
||||||
|
Full compilation | 11.2 | 23.9 | 17857.5 | 17736.5
|
||||||
|
Regional compilation | 17.3 | 23.7 | 2993.2 | 2478.8
|
||||||
|
|
||||||
|
Times are in milliseconds (ms).
|
||||||
|
|
||||||
|
[---------------------------------------------- NousResearch/Hermes-3-Llama-3.1-8B ----------------------------------------------]
|
||||||
|
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
|
||||||
|
1 threads: -----------------------------------------------------------------------------------------------------------------------
|
||||||
|
Baseline | 40.3 | 59.5 | |
|
||||||
|
Full compilation | 18.9 | 54.4 | 20437.8 | 20152.3
|
||||||
|
Regional compilation | 19.7 | 54.0 | 2903.1 | 2438.0
|
||||||
|
|
||||||
|
Times are in milliseconds (ms).
|
||||||
|
|
||||||
|
[--------------------------------------------- NousResearch/Nous-Hermes-Llama2-13b ----------------------------------------------]
|
||||||
|
| Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128)
|
||||||
|
1 threads: -----------------------------------------------------------------------------------------------------------------------
|
||||||
|
Baseline | 45.5 | 100.4 | |
|
||||||
|
Full compilation | 29.4 | 89.7 | 23099.4 | 22885.9
|
||||||
|
Regional compilation | 29.4 | 87.5 | 2945.5 | 2526.2
|
||||||
|
|
||||||
|
Times are in milliseconds (ms).
|
||||||
|
```
|
||||||
|
|
||||||
|
## Results Summary
|
||||||
|
|
||||||
|
### Compilation Time
|
||||||
|
|
||||||
|
Regional compilation provides significantly faster compilation times compared to full model compilation:
|
||||||
|
|
||||||
|
- **Full compilation**: Takes ~10-23 seconds depending on model size.
|
||||||
|
- **Regional compilation**: Takes only ~2-3 seconds across all model sizes.
|
||||||
|
- **Speed improvement**: Regional compilation is **5-9x faster** to compile.
|
||||||
|
|
||||||
|
### Inference Time
|
||||||
|
|
||||||
|
Regional compilation delivers inference performance close to full compilation:
|
||||||
|
|
||||||
|
- For batch size 1:
|
||||||
|
- For smaller models (1B-3B): Full compilation has a slight edge over regional compilation.
|
||||||
|
- For larger models (8B-13B): Regional compilation performs similarly to full compilation.
|
||||||
|
- For batch size 4: Regional compilation performs similarly to full compilation across all models.
|
||||||
|
|
||||||
|
## Key Takeaways
|
||||||
|
|
||||||
|
1. **Comparable Performance**: Regional compilation delivers performance speedups similar to full compilation, especially for larger models.
|
||||||
|
2. **Faster Compilation**: Regional compilation significantly reduces the time taken to compile models, making it a more efficient choice for deployment.
|
||||||
|
3. **Batch Size Impact**: At batch size 4, full compilation and regional compilation perform nearly identically.
|
||||||
|
4. **Model Size Impact**: Even with a small batch size, full compilation and regional compilation perform similarly for larger models (8B-13B).
|
||||||
|
5. **Practical Application**: For real-world applications, regional compilation is a practical choice for optimizing training cold start times, especially when working with large models.
|
||||||
benchmarks/torch.compile/imgs/compilation_time.png (new binary file, 242 KiB; not shown)
benchmarks/torch.compile/imgs/speedup_factor.png (new binary file, 218 KiB; not shown)
benchmarks/torch.compile/regional_compilation.py (new file, 77 lines)
@@ -0,0 +1,77 @@
|
|||||||
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import torch
|
||||||
|
from torch.utils.benchmark import Compare, Timer
|
||||||
|
from transformers import AutoConfig, AutoModelForCausalLM
|
||||||
|
|
||||||
|
from accelerate.test_utils.testing import get_backend
|
||||||
|
from accelerate.utils import compile_regions
|
||||||
|
|
||||||
|
|
||||||
|
torch.set_float32_matmul_precision("high")
|
||||||
|
|
||||||
|
COMPILE_ITERS = 2
|
||||||
|
INFERENCE_ITERS = 100
|
||||||
|
|
||||||
|
BASELINE = "Baseline"
|
||||||
|
COMPILE_TIME = "Compile time"
|
||||||
|
INFRENCE_TIME = "Inference time"
|
||||||
|
FULL_COMPILATION = "Full compilation"
|
||||||
|
REGIONAL_COMPILATION = "Regional compilation"
|
||||||
|
|
||||||
|
INFRENCE_STMT = "model(input_ids, use_cache=False)"
|
||||||
|
COMPILE_STMT = f"torch._dynamo.reset(); torch._inductor.utils.clear_inductor_caches(); {INFRENCE_STMT}"
|
||||||
|
|
||||||
|
torch_device_type, _, _ = get_backend()
|
||||||
|
|
||||||
|
results = []
|
||||||
|
for model_id in [
|
||||||
|
# non-gated llama models
|
||||||
|
"NousResearch/Llama-3.2-1B",
|
||||||
|
"NousResearch/Hermes-3-Llama-3.2-3B",
|
||||||
|
"NousResearch/Hermes-3-Llama-3.1-8B",
|
||||||
|
"NousResearch/Nous-Hermes-Llama2-13b",
|
||||||
|
]:
|
||||||
|
with torch.device(torch_device_type):
|
||||||
|
config = AutoConfig.from_pretrained(model_id)
|
||||||
|
model = AutoModelForCausalLM.from_config(config).to(dtype=torch.float16).eval()
|
||||||
|
|
||||||
|
full_compilation_model = torch.compile(model)
|
||||||
|
regional_compilation_model = compile_regions(model)
|
||||||
|
|
||||||
|
for model, sub_label, description, stmt, iters in [
|
||||||
|
(model, BASELINE, INFRENCE_TIME, INFRENCE_STMT, INFERENCE_ITERS),
|
||||||
|
(full_compilation_model, FULL_COMPILATION, COMPILE_TIME, COMPILE_STMT, COMPILE_ITERS),
|
||||||
|
(full_compilation_model, FULL_COMPILATION, INFRENCE_TIME, INFRENCE_STMT, INFERENCE_ITERS),
|
||||||
|
(regional_compilation_model, REGIONAL_COMPILATION, COMPILE_TIME, COMPILE_STMT, COMPILE_ITERS),
|
||||||
|
(regional_compilation_model, REGIONAL_COMPILATION, INFRENCE_TIME, INFRENCE_STMT, INFERENCE_ITERS),
|
||||||
|
]:
|
||||||
|
for batch_size, sequence_length in [(1, 128), (4, 128)]:
|
||||||
|
input_ids = torch.randint(
|
||||||
|
0, 1000, size=(batch_size, sequence_length), dtype=torch.int64, device=torch_device_type
|
||||||
|
)
|
||||||
|
results.append(
|
||||||
|
Timer(
|
||||||
|
label=model_id,
|
||||||
|
sub_label=sub_label,
|
||||||
|
description=f"{description} ({batch_size}x{sequence_length})",
|
||||||
|
globals={"model": model, "input_ids": input_ids},
|
||||||
|
stmt=stmt,
|
||||||
|
).timeit(number=iters)
|
||||||
|
)
|
||||||
|
|
||||||
|
compare = Compare(results)
|
||||||
|
compare.colorize()
|
||||||
|
compare.print()
|
||||||
docker/README.md (new file, 74 lines)
@@ -0,0 +1,74 @@
|
|||||||
|
<!---
|
||||||
|
Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
-->
|
||||||
|
|
||||||
|
# Official Hugging Face Accelerate Docker Images
|
||||||
|
|
||||||
|
Accelerate publishes a variety of docker images as part of our CI that users can also use. These are stable images that Accelerate can run off of, which come with a variety of different setup configurations, all of which are officially hosted on [Docker Hub](https://hub.docker.com/r/huggingface/accelerate).
|
||||||
|
|
||||||
|
A breakdown of each is given below
|
||||||
|
|
||||||
|
## Naming Conventions
|
||||||
|
|
||||||
|
Accelerate docker images follow a tagging convention of:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
huggingface/accelerate:{accelerator}-{nightly,release}
|
||||||
|
```
|
||||||
|
|
||||||
|
`accelerator` in this instance is one of many applicable pre-configured backends:
|
||||||
|
* `gpu`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes`. Runs on Python 3.9.
|
||||||
|
* `cpu`: Comes compiled off of `python:3.9-slim` and is designed for non-CUDA based workloads.
|
||||||
|
* More to come soon
|
||||||
|
* `gpu-deepspeed`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes` as well as the latest `deepspeed` version. Runs on Python 3.10.
|
||||||
|
* `gpu-fp8-transformerengine`: Comes compiled off of `nvcr.io/nvidia/pytorch` and is specifically for running the `benchmarks/fp8` scripts on devices which support FP8 operations using the `TransformerEngine` library (RTX 4090, H100, etc)
|
||||||
|
|
||||||
|
## Nightlies vs Releases
|
||||||
|
|
||||||
|
With each release, a new build is pushed with a version number included in the name. For a GPU-supported image of version 0.28.0, for instance, it would look like the following:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
huggingface/accelerate:gpu-release-0.28.0
|
||||||
|
```
|
||||||
|
|
||||||
|
Nightlies contain two different image tags. There is a general `nightly` tag which is built each night, and a `nightly-YYYY-MM-DD` which corresponds to a build from a particular date.
|
||||||
|
|
||||||
|
For instance, here is an example nightly CPU image from 3/14/2024
|
||||||
|
|
||||||
|
```bash
|
||||||
|
huggingface/accelerate:cpu-nightly-2024-03-14
|
||||||
|
```
|
||||||
|
|
||||||
|
## Running the images
|
||||||
|
|
||||||
|
Each image comes compiled with `conda`, and an `accelerate` environment contains all of the installed dependencies.
|
||||||
|
|
||||||
|
To pull down the latest nightly run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker pull huggingface/accelerate:gpu-nightly
|
||||||
|
```
|
||||||
|
|
||||||
|
To then run it in interactive mode with GPU-memory available, run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker container run --gpus all -it huggingface/accelerate:gpu-nightly
|
||||||
|
```
|
||||||
|
|
||||||
|
## DEPRECATED IMAGES
|
||||||
|
|
||||||
|
CPU and GPU docker images were hosted at `huggingface/accelerate-gpu` and `huggingface/accelerate-cpu`. These builds are now outdated and will not receive updates.
|
||||||
|
|
||||||
|
The builds at the corresponding `huggingface/accelerate:{gpu,cpu}` tags contain the same `Dockerfile`, so switching is as simple as changing the docker image to the desired one from above. We will keep these images up for posterity, but they will not receive updates going forward.
|
||||||
@@ -1,7 +1,7 @@
 # Builds CPU-only Docker image of PyTorch
 # Uses multi-staged approach to reduce size
 # Stage 1
-FROM python:3.7-slim as compile-image
+FROM python:3.9-slim as compile-image

 ARG DEBIAN_FRONTEND=noninteractive

@@ -25,7 +25,7 @@ RUN python3 -m pip install --no-cache-dir \
     --extra-index-url https://download.pytorch.org/whl/cpu

 # Stage 2
-FROM python:3.7-slim AS build-image
+FROM python:3.9-slim AS build-image
 COPY --from=compile-image /opt/venv /opt/venv
 RUN useradd -ms /bin/bash user
 USER user
|
|||||||
docker/accelerate-gpu-deepspeed/Dockerfile (new file, 46 lines)
@@ -0,0 +1,46 @@
|
|||||||
|
# Builds GPU docker image of PyTorch specifically
|
||||||
|
# Uses multi-staged approach to reduce size
|
||||||
|
# Stage 1
|
||||||
|
# Use base conda image to reduce time
|
||||||
|
FROM continuumio/miniconda3:latest AS compile-image
|
||||||
|
# Specify py version
|
||||||
|
# Note: DeepSpeed beyond v0.12.6 requires py 3.10
|
||||||
|
ENV PYTHON_VERSION=3.10
|
||||||
|
# Install apt libs
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y curl git wget && \
|
||||||
|
apt-get clean && \
|
||||||
|
rm -rf /var/lib/apt/lists*
|
||||||
|
|
||||||
|
# Create our conda env
|
||||||
|
RUN conda create --name accelerate python=${PYTHON_VERSION} ipython jupyter pip
|
||||||
|
# We don't install pytorch here yet since CUDA isn't available
|
||||||
|
# instead we use the direct torch wheel
|
||||||
|
ENV PATH /opt/conda/envs/accelerate/bin:$PATH
|
||||||
|
# Activate our bash shell
|
||||||
|
RUN chsh -s /bin/bash
|
||||||
|
SHELL ["/bin/bash", "-c"]
|
||||||
|
# Activate the conda env, install mpi4py, and install torch + accelerate
|
||||||
|
RUN source activate accelerate && conda install -c conda-forge mpi4py
|
||||||
|
RUN source activate accelerate && \
|
||||||
|
python3 -m pip install --no-cache-dir \
|
||||||
|
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers,deepspeed] \
|
||||||
|
--extra-index-url https://download.pytorch.org/whl/cu126
|
||||||
|
|
||||||
|
RUN python3 -m pip install --no-cache-dir bitsandbytes
|
||||||
|
|
||||||
|
# Stage 2
|
||||||
|
FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu22.04 AS build-image
|
||||||
|
COPY --from=compile-image /opt/conda /opt/conda
|
||||||
|
ENV PATH /opt/conda/bin:$PATH
|
||||||
|
|
||||||
|
# Install apt libs
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y curl git wget && \
|
||||||
|
apt-get clean && \
|
||||||
|
rm -rf /var/lib/apt/lists*
|
||||||
|
|
||||||
|
RUN echo "source activate accelerate" >> ~/.profile
|
||||||
|
|
||||||
|
# Activate the virtualenv
|
||||||
|
CMD ["/bin/bash"]
|
||||||
@@ -1,10 +1,10 @@
-# Builds GPU docker image of PyTorch
+# Builds GPU docker image of PyTorch specifically
 # Uses multi-staged approach to reduce size
 # Stage 1
 # Use base conda image to reduce time
 FROM continuumio/miniconda3:latest AS compile-image
 # Specify py version
-ENV PYTHON_VERSION=3.7.3
+ENV PYTHON_VERSION=3.9
 # Install apt libs
 RUN apt-get update && \
     apt-get install -y curl git wget && \
@@ -19,14 +19,17 @@ ENV PATH /opt/conda/envs/accelerate/bin:$PATH
 # Activate our bash shell
 RUN chsh -s /bin/bash
 SHELL ["/bin/bash", "-c"]
-# Activate the conda env and install torch + accelerate
+# Activate the conda env, install mpi4py, and install torch + accelerate
+RUN source activate accelerate && conda install -c conda-forge mpi4py
 RUN source activate accelerate && \
     python3 -m pip install --no-cache-dir \
     git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \
-    --extra-index-url https://download.pytorch.org/whl/cu113
+    --extra-index-url https://download.pytorch.org/whl/cu126
+
+RUN python3 -m pip install --no-cache-dir bitsandbytes

 # Stage 2
-FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04 AS build-image
+FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu22.04 AS build-image
 COPY --from=compile-image /opt/conda /opt/conda
 ENV PATH /opt/conda/bin:$PATH
|
||||||
|
|
||||||
|
|||||||
docs/README.md (new file, 267 lines)
@@ -0,0 +1,267 @@
|
|||||||
|
<!---
|
||||||
|
Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
-->
|
||||||
|
|
||||||
|
# Generating the documentation
|
||||||
|
|
||||||
|
To generate the documentation, you first have to build it. Several packages are necessary to build the docs;
|
||||||
|
you can install them with the following command, at the root of the code repository:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install -e ".[docs]"
|
||||||
|
```
|
||||||
|
|
||||||
|
Then you need to install our special tool that builds the documentation:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install git+https://github.com/huggingface/doc-builder
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
**NOTE**
|
||||||
|
|
||||||
|
You only need to generate the documentation to inspect it locally (if you're planning changes and want to
|
||||||
|
check how they look before committing for instance). You don't have to commit the built documentation.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Building the documentation
|
||||||
|
|
||||||
|
Once you have set up the `doc-builder` and additional packages, you can generate the documentation by
|
||||||
|
typing the following command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
doc-builder build accelerate docs/source/ --build_dir ~/tmp/test-build
|
||||||
|
```
|
||||||
|
|
||||||
|
You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
|
||||||
|
the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
|
||||||
|
Markdown editor.
|
||||||
|
|
||||||
|
## Previewing the documentation
|
||||||
|
|
||||||
|
To preview the docs, first install the `watchdog` module with:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install watchdog
|
||||||
|
```
|
||||||
|
|
||||||
|
Then run the following command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
doc-builder preview {package_name} {path_to_docs}
|
||||||
|
```
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
doc-builder preview accelerate docs/source/
|
||||||
|
```
|
||||||
|
|
||||||
|
The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives.
|
||||||
|
|
||||||
|
---
|
||||||
|
**NOTE**
|
||||||
|
|
||||||
|
The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Adding a new element to the navigation bar
|
||||||
|
|
||||||
|
Accepted files are Markdown (.md).
|
||||||
|
|
||||||
|
Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
|
||||||
|
the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/accelerate/blob/main/docs/source/_toctree.yml) file.
|
||||||
|
|
||||||
|
## Renaming section headers and moving sections
|
||||||
|
|
||||||
|
It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media, and it would make for a much better user experience if users reading those months later could still easily navigate to the originally intended information.
|
||||||
|
|
||||||
|
Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
|
||||||
|
|
||||||
|
So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:
|
||||||
|
|
||||||
|
```
|
||||||
|
Sections that were moved:
|
||||||
|
|
||||||
|
[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
|
||||||
|
```
|
||||||
|
and of course, if you moved it to another file, then:
|
||||||
|
|
||||||
|
```
|
||||||
|
Sections that were moved:
|
||||||
|
|
||||||
|
[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
|
||||||
|
```
|
||||||
|
|
||||||
|
Use the relative style to link to the new file so that the versioned docs continue to work.
|
||||||
|
|
||||||
|
|
||||||
|
## Writing Documentation - Specification
|
||||||
|
|
||||||
|
The `huggingface/accelerate` documentation follows the
|
||||||
|
[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings,
|
||||||
|
although we can write them directly in Markdown.
|
||||||
|
|
||||||
|
### Adding a new tutorial
|
||||||
|
|
||||||
|
Adding a new tutorial or section is done in two steps:
|
||||||
|
|
||||||
|
- Add a new file under `./source`. This file should be in Markdown (.md) format.
|
||||||
|
- Link that file in `./source/_toctree.yml` on the correct toc-tree.
|
||||||
|
|
||||||
|
Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so
|
||||||
|
depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or
|
||||||
|
four.
|
||||||
|
|
||||||
|
### Writing source documentation
|
||||||
|
|
||||||
|
Values that should be put in `code` should be surrounded by backticks: \`like so\`. Note that argument names
|
||||||
|
and objects like True, None, or any strings should usually be put in `code`.
|
||||||
|
|
||||||
|
When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool
|
||||||
|
adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or
|
||||||
|
function to be in the main package.
|
||||||
|
|
||||||
|
If you want to create a link to some internal class or function, you need to
|
||||||
|
provide its path. For instance: \[\`utils.gather\`\]. This will be converted into a link with
|
||||||
|
`utils.gather` in the description. To get rid of the path and only keep the name of the object you are
|
||||||
|
linking to in the description, add a ~: \[\`~utils.gather\`\] will generate a link with `gather` in the description.
|
||||||
|
|
||||||
|
The same works for methods, so you can either use \[\`XXXClass.method\`\] or \[\`~XXXClass.method\`\].
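For instance, a hypothetical docstring (the helper itself is made up, purely to show the link syntax in context) might look like:

```python
def my_helper(tensor):
    """
    Gathers `tensor` from every process by calling [`~Accelerator.gather`] under the hood;
    the functional equivalent is documented at [`utils.gather`].
    """
    ...
```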
|
||||||
|
|
||||||
|
#### Defining arguments in a method
|
||||||
|
|
||||||
|
Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and
|
||||||
|
an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its
|
||||||
|
description:
|
||||||
|
|
||||||
|
```
|
||||||
|
Args:
|
||||||
|
n_layers (`int`): The number of layers of the model.
|
||||||
|
```
|
||||||
|
|
||||||
|
If the description is too long to fit in one line (more than 119 characters in total), another indentation is necessary
|
||||||
|
before writing the description after the argument.
|
||||||
|
|
||||||
|
Finally, to maintain uniformity if any *one* description is too long to fit on one line, the
|
||||||
|
rest of the parameters should follow suit and have an indentation before their description.
|
||||||
|
|
||||||
|
Here's an example showcasing everything so far:
|
||||||
|
|
||||||
|
```
|
||||||
|
Args:
|
||||||
|
gradient_accumulation_steps (`int`, *optional*, defaults to 1):
|
||||||
|
The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with `Accelerator.accumulate`.
|
||||||
|
cpu (`bool`, *optional*):
|
||||||
|
Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force the execution on one process only.
|
||||||
|
```
|
||||||
|
|
||||||
|
For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the
|
||||||
|
following signature:
|
||||||
|
|
||||||
|
```
|
||||||
|
def my_function(x: str = None, a: float = 1):
|
||||||
|
```
|
||||||
|
|
||||||
|
then its documentation should look like this:
|
||||||
|
|
||||||
|
```
|
||||||
|
Args:
|
||||||
|
x (`str`, *optional*):
|
||||||
|
This argument controls ... and has a description longer than 119 chars.
|
||||||
|
a (`float`, *optional*, defaults to 1):
|
||||||
|
This argument is used to ... and has a description longer than 119 chars.
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even
|
||||||
|
if the first line describing your argument type and its default gets long, you can't break it on several lines. You can
|
||||||
|
however write as many lines as you want in the indented description (see the example above with `gradient_accumulation_steps`).
|
||||||
|
|
||||||
|
#### Writing a multi-line code block
|
||||||
|
|
||||||
|
Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:
|
||||||
|
|
||||||
|
|
||||||
|
````
|
||||||
|
```python
|
||||||
|
# first line of code
|
||||||
|
# second line
|
||||||
|
# etc
|
||||||
|
```
|
||||||
|
````
|
||||||
|
|
||||||
|
#### Writing a return block
|
||||||
|
|
||||||
|
The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
|
||||||
|
The first line should be the type of the return, followed by a line return. No need to indent further for the elements
|
||||||
|
building the return.
|
||||||
|
|
||||||
|
Here's an example of a single value return:
|
||||||
|
|
||||||
|
```
|
||||||
|
Returns:
|
||||||
|
`List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
|
||||||
|
```
|
||||||
|
|
||||||
|
Here's an example of a tuple return, comprising several objects:
|
||||||
|
|
||||||
|
```
|
||||||
|
Returns:
|
||||||
|
`tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
|
||||||
|
- **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
|
||||||
|
Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
|
||||||
|
- **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
|
||||||
|
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
||||||
|
```
|
||||||
|
|
||||||
|
## Styling the docstring
|
||||||
|
|
||||||
|
We have an automatic script running with the `make style` command that will make sure that:
|
||||||
|
- the docstrings fully take advantage of the line width
|
||||||
|
- all code examples are formatted using black, like the code of the Transformers library
|
||||||
|
|
||||||
|
This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's
|
||||||
|
recommended to commit your changes before running `make style`, so you can revert the changes done by that script
|
||||||
|
easily.
|
||||||
|
|
||||||
|
## Writing documentation examples
|
||||||
|
|
||||||
|
The syntax for Example docstrings can look as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```python
|
||||||
|
>>> import time
|
||||||
|
>>> from accelerate import Accelerator
|
||||||
|
>>> accelerator = Accelerator()
|
||||||
|
>>> if accelerator.is_main_process:
|
||||||
|
... time.sleep(2)
|
||||||
|
... else:
|
||||||
|
... print("I'm waiting for the main process to finish its sleep...")
|
||||||
|
>>> accelerator.wait_for_everyone()
|
||||||
|
>>> # Should print on every process at the same time
|
||||||
|
>>> print("Everyone is here")
|
||||||
|
```
|
||||||
|
```
|
||||||
|
|
||||||
|
The docstring should give a minimal, clear example of how the respective function
|
||||||
|
is to be used in inference and also include the expected (ideally sensible)
|
||||||
|
output.
|
||||||
|
Often, readers will try out the example before even going through the function
|
||||||
|
or class definitions. Therefore, it is of utmost importance that the example
|
||||||
|
works as expected.
|
||||||
@ -10,65 +10,124 @@
|
|||||||
- local: basic_tutorials/overview
|
- local: basic_tutorials/overview
|
||||||
title: Overview
|
title: Overview
|
||||||
- local: basic_tutorials/migration
|
- local: basic_tutorials/migration
|
||||||
title: Migrating to 🤗 Accelerate
|
title: Add Accelerate to your code
|
||||||
|
- local: basic_tutorials/execution
|
||||||
|
title: Execution process
|
||||||
|
- local: basic_tutorials/tpu
|
||||||
|
title: TPU training
|
||||||
- local: basic_tutorials/launch
|
- local: basic_tutorials/launch
|
||||||
title: Launching distributed code
|
title: Launching Accelerate scripts
|
||||||
- local: basic_tutorials/notebook
|
- local: basic_tutorials/notebook
|
||||||
title: Launching distributed training from Jupyter Notebooks
|
title: Launching distributed training from Jupyter Notebooks
|
||||||
title: Tutorials
|
title: Tutorials
|
||||||
- sections:
|
- sections:
|
||||||
- local: usage_guides/gradient_accumulation
|
- isExpanded: true
|
||||||
title: Performing gradient accumulation
|
sections:
|
||||||
- local: usage_guides/fsdp
|
- local: usage_guides/explore
|
||||||
title: Fully Sharded Data Parallelism
|
title: Start Here!
|
||||||
- local: usage_guides/checkpoint
|
- local: usage_guides/model_size_estimator
|
||||||
title: Saving and loading training states
|
title: Model memory estimator
|
||||||
- local: usage_guides/deepspeed
|
- local: usage_guides/quantization
|
||||||
title: How to use DeepSpeed
|
title: Model quantization
|
||||||
- local: usage_guides/tracking
|
- local: usage_guides/tracking
|
||||||
title: Using experiment trackers
|
title: Experiment trackers
|
||||||
- local: usage_guides/big_modeling
|
- local: usage_guides/profiler
|
||||||
title: How to use large models with small resources
|
title: Profiler
|
||||||
- local: usage_guides/memory
|
- local: usage_guides/checkpoint
|
||||||
title: How to avoid CUDA Out-of-Memory
|
title: Checkpointing
|
||||||
- local: usage_guides/sagemaker
|
- local: basic_tutorials/troubleshooting
|
||||||
title: Using 🤗 Accelerate on SageMaker
|
title: Troubleshoot
|
||||||
- local: usage_guides/mps
|
- local: usage_guides/training_zoo
|
||||||
title: How to use Apple Silicon M1 GPUs
|
title: Example Zoo
|
||||||
- local: usage_guides/training_zoo
|
title: Accelerate
|
||||||
title: 🤗 Accelerate Example Zoo
|
- isExpanded: true
|
||||||
title: How-To Guides
|
sections:
|
||||||
|
- local: usage_guides/gradient_accumulation
|
||||||
|
title: Gradient accumulation
|
||||||
|
- local: usage_guides/local_sgd
|
||||||
|
title: Local SGD
|
||||||
|
- local: usage_guides/low_precision_training
|
||||||
|
title: Low precision (FP8) training
|
||||||
|
- local: usage_guides/deepspeed
|
||||||
|
title: DeepSpeed
|
||||||
|
- local: usage_guides/deepspeed_multiple_model
|
||||||
|
title: Using multiple models with DeepSpeed
|
||||||
|
- local: usage_guides/ddp_comm_hook
|
||||||
|
title: DDP Communication Hooks
|
||||||
|
- local: usage_guides/fsdp
|
||||||
|
title: Fully Sharded Data Parallel
|
||||||
|
- local: usage_guides/megatron_lm
|
||||||
|
title: Megatron-LM
|
||||||
|
- local: usage_guides/sagemaker
|
||||||
|
title: Amazon SageMaker
|
||||||
|
- local: usage_guides/mps
|
||||||
|
title: Apple M1 GPUs
|
||||||
|
- local: usage_guides/intel_cpu
|
||||||
|
title: Intel CPU
|
||||||
|
- local: usage_guides/gaudi
|
||||||
|
title: Intel Gaudi
|
||||||
|
- local: usage_guides/compilation
|
||||||
|
title: Compilation
|
||||||
|
title: Training
|
||||||
|
- isExpanded: true
|
||||||
|
sections:
|
||||||
|
- local: usage_guides/big_modeling
|
||||||
|
title: Big Model Inference
|
||||||
|
- local: usage_guides/distributed_inference
|
||||||
|
title: Distributed inference
|
||||||
|
title: Inference
|
||||||
|
title: How to guides
|
||||||
- sections:
|
- sections:
|
||||||
|
- local: concept_guides/internal_mechanism
|
||||||
|
title: Accelerate's internal mechanism
|
||||||
|
- local: concept_guides/big_model_inference
|
||||||
|
title: Loading big models into memory
|
||||||
- local: concept_guides/performance
|
- local: concept_guides/performance
|
||||||
title: Comparing performance across distributed setups
|
title: Comparing performance across distributed setups
|
||||||
- local: concept_guides/gradient_synchronization
|
|
||||||
title: Gradient synchronization
|
|
||||||
- local: concept_guides/deferring_execution
|
- local: concept_guides/deferring_execution
|
||||||
title: Executing and deferring jobs
|
title: Executing and deferring jobs
|
||||||
|
- local: concept_guides/gradient_synchronization
|
||||||
|
title: Gradient synchronization
|
||||||
|
- local: concept_guides/fsdp_and_deepspeed
|
||||||
|
title: FSDP vs DeepSpeed
|
||||||
|
- local: concept_guides/fsdp1_vs_fsdp2
|
||||||
|
title: FSDP1 vs FSDP2
|
||||||
|
- local: concept_guides/context_parallelism
|
||||||
|
title: Context parallelism
|
||||||
|
- local: concept_guides/low_precision_training
|
||||||
|
title: Low precision training methods
|
||||||
- local: concept_guides/training_tpu
|
- local: concept_guides/training_tpu
|
||||||
title: TPU best practices
|
title: Training on TPUs
|
||||||
title: Concepts and fundamentals
|
title: Concepts and fundamentals
|
||||||
- sections:
|
- sections:
|
||||||
- local: package_reference/accelerator
|
- local: package_reference/accelerator
|
||||||
title: Main Accelerator class
|
title: Accelerator
|
||||||
- local: package_reference/state
|
- local: package_reference/state
|
||||||
title: Stateful configuration classes
|
title: Stateful classes
|
||||||
- local: package_reference/cli
|
- local: package_reference/cli
|
||||||
title: The Command Line
|
title: The Command Line
|
||||||
- local: package_reference/torch_wrappers
|
- local: package_reference/torch_wrappers
|
||||||
title: Torch wrapper classes
|
title: DataLoaders, Optimizers, Schedulers
|
||||||
- local: package_reference/tracking
|
- local: package_reference/tracking
|
||||||
title: Experiment trackers
|
title: Experiment trackers
|
||||||
- local: package_reference/launchers
|
- local: package_reference/launchers
|
||||||
title: Distributed launchers
|
title: Launchers
|
||||||
- local: package_reference/deepspeed
|
- local: package_reference/deepspeed
|
||||||
title: DeepSpeed utilities
|
title: DeepSpeed utilities
|
||||||
- local: package_reference/logging
|
- local: package_reference/logging
|
||||||
title: Logging
|
title: Logging
|
||||||
- local: package_reference/big_modeling
|
- local: package_reference/big_modeling
|
||||||
title: Working with large models
|
title: Working with large models
|
||||||
|
- local: package_reference/inference
|
||||||
|
title: Pipeline parallelism
|
||||||
- local: package_reference/kwargs
|
- local: package_reference/kwargs
|
||||||
title: Kwargs handlers
|
title: Kwargs handlers
|
||||||
|
- local: package_reference/fp8
|
||||||
|
title: FP8
|
||||||
- local: package_reference/utilities
|
- local: package_reference/utilities
|
||||||
title: Utility functions and classes
|
title: Utility functions and classes
|
||||||
title: "Reference"
|
- local: package_reference/megatron_lm
|
||||||
|
title: Megatron-LM utilities
|
||||||
|
- local: package_reference/fsdp
|
||||||
|
title: Fully Sharded Data Parallel utilities
|
||||||
|
title: "Reference"
|
||||||
|
docs/source/basic_tutorials/execution.md (new file)
@@ -0,0 +1,128 @@
|
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License.
|
||||||
|
|
||||||
|
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||||
|
rendered properly in your Markdown viewer.
|
||||||
|
-->
|
||||||
|
|
||||||
|
# Execution process
|
||||||
|
|
||||||
|
When working with distributed training systems, it is important to manage how and when processes are executed across GPUs. Some processes finish faster than others, and some shouldn't begin if others haven't finished yet. Accelerate provides tools for orchestrating when processes are executed to ensure everything remains synchronized across all devices.
|
||||||
|
|
||||||
|
This tutorial will teach you how to execute a process on only one machine and how to delay execution until all processes have reached a certain point.
|
||||||
|
|
||||||
|
## Execute on one process
|
||||||
|
|
||||||
|
Certain code only needs to be run once on a given machine, such as printing a log statement or only displaying one progress bar on the local main process.
|
||||||
|
|
||||||
|
<hfoptions id="local-execution">
|
||||||
|
<hfoption id="statements">
|
||||||
|
|
||||||
|
You should use `accelerator.is_local_main_process` to indicate code that should only be executed once.
|
||||||
|
|
||||||
|
```py
|
||||||
|
from tqdm.auto import tqdm
|
||||||
|
|
||||||
|
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
|
||||||
|
```
|
||||||
|
|
||||||
|
You could also wrap a statement with `accelerator.is_local_main_process`.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> For standalone `print` statements that aren't wrapped in `accelerator.is_local_main_process`, replace `print` with Accelerate's [`~Accelerator.print`] method to only print once per process.
|
||||||
|
|
||||||
|
```py
|
||||||
|
if accelerator.is_local_main_process:
|
||||||
|
print("Accelerate is the best")
|
||||||
|
```
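As a minimal sketch, the [`~Accelerator.print`] alternative mentioned in the tip above needs no guard at all:

```py
# prints once per machine (local main process) instead of once per process
accelerator.print("Accelerate is the best")
```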
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="function">
|
||||||
|
|
||||||
|
For a function that should only be executed once, use [`~Accelerator.on_local_main_process`].
|
||||||
|
|
||||||
|
```py
|
||||||
|
@accelerator.on_local_main_process
|
||||||
|
def do_my_thing():
|
||||||
|
"Something done once per server"
|
||||||
|
do_thing_once_per_server()
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
|
You could also direct Accelerate to execute code once across *all processes* regardless of the number of machines. This is useful if you're uploading a final model to the Hub.
|
||||||
|
|
||||||
|
<hfoptions id="main-execution">
|
||||||
|
<hfoption id="statement">
|
||||||
|
|
||||||
|
You should use `accelerator.is_main_process` to indicate code that should only be executed once across all processes.
|
||||||
|
|
||||||
|
```py
|
||||||
|
if accelerator.is_main_process:
|
||||||
|
repo.push_to_hub()
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="function">
|
||||||
|
|
||||||
|
For a function that should only be executed once across all processes, use [`~Accelerator.on_main_process`].
|
||||||
|
|
||||||
|
```py
|
||||||
|
@accelerator.on_main_process
|
||||||
|
def do_my_thing():
|
||||||
|
"Something done once per server"
|
||||||
|
do_thing_once()
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
|
## Execute on a specific process
|
||||||
|
|
||||||
|
Accelerate can also help you execute functions that should only be executed on a specific process or a local process index.
|
||||||
|
|
||||||
|
<hfoptions id="specific-execution">
|
||||||
|
<hfoption id="specific process">
|
||||||
|
|
||||||
|
Use the [`~Accelerator.on_process`] method and specify the process index to execute a function on.
|
||||||
|
|
||||||
|
```py
|
||||||
|
@accelerator.on_process(process_index=0)
|
||||||
|
def do_my_thing():
|
||||||
|
"Something done on process index 0"
|
||||||
|
do_thing_on_index_zero()
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="local process">
|
||||||
|
|
||||||
|
Use the [`~Accelerator.on_local_process`] method and specify the local process index to execute a function on.
|
||||||
|
|
||||||
|
```py
|
||||||
|
@accelerator.on_local_process(local_process_index=0)
|
||||||
|
def do_my_thing():
|
||||||
|
"Something done on process index 0 on each server"
|
||||||
|
do_thing_on_index_zero_on_each_server()
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
|
## Defer execution
|
||||||
|
|
||||||
|
When you run your script on several GPUs at the same time, some code may be executed faster than others. You might need to wait for all processes to reach a certain point before executing the next set of instructions. For instance, you shouldn’t save a model before making sure every process is done with training.
|
||||||
|
|
||||||
|
To do this, add [`~Accelerator.wait_for_everyone`] in your code. This blocks all processes that have finished first from continuing until all remaining processes have reached the same point (this has no effect if you're running on a single GPU or CPU).
|
||||||
|
|
||||||
|
```py
|
||||||
|
accelerator.wait_for_everyone()
|
||||||
|
```
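For example, a minimal sketch of deferring a model save until every process has finished (the checkpoint filename is just an illustration):

```py
accelerator.wait_for_everyone()
# every process has reached this point, so it is now safe to save
unwrapped_model = accelerator.unwrap_model(model)
accelerator.save(unwrapped_model.state_dict(), "checkpoint.pt")
```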
|
||||||
@ -8,33 +8,34 @@ http://www.apache.org/licenses/LICENSE-2.0
|
|||||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
specific language governing permissions and limitations under the License.
|
specific language governing permissions and limitations under the License.
|
||||||
|
|
||||||
|
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||||
|
rendered properly in your Markdown viewer.
|
||||||
-->
|
-->
|
||||||
|
|
||||||
# Installation and Configuration
|
# Installation
|
||||||
|
|
||||||
Before you start, you will need to setup your environment, install the appropriate packages, and configure 🤗 Accelerate. 🤗 Accelerate is tested on **Python 3.7+**.
|
Before you start, you will need to setup your environment, install the appropriate packages, and configure Accelerate. Accelerate is tested on **Python 3.8+**.
|
||||||
|
|
||||||
## Installing 🤗 Accelerate
|
Accelerate is available on pypi and conda, as well as on GitHub. Details to install from each are below:
|
||||||
|
|
||||||
🤗 Accelerate is available on pypi and conda, as well as on GitHub. Details to install from each are below:
|
## pip
|
||||||
|
|
||||||
### pip
|
To install Accelerate from pypi, perform:
|
||||||
|
|
||||||
To install 🤗 Accelerate from pypi, perform:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install accelerate
|
pip install accelerate
|
||||||
```
|
```
|
||||||
|
|
||||||
### conda
|
## conda
|
||||||
|
|
||||||
🤗 Accelerate can also be installed with conda with:
|
Accelerate can also be installed with conda with:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
conda install -c conda-forge accelerate
|
conda install -c conda-forge accelerate
|
||||||
```
|
```
|
||||||
|
|
||||||
### Source
|
## Source
|
||||||
|
|
||||||
New features are added every day that haven't been released yet. To try them out yourself, install
|
New features are added every day that haven't been released yet. To try them out yourself, install
|
||||||
from the GitHub repository:
|
from the GitHub repository:
|
||||||
@ -53,9 +54,9 @@ cd accelerate
|
|||||||
pip install -e .
|
pip install -e .
|
||||||
```
|
```
|
||||||
|
|
||||||
## Configuring 🤗 Accelerate
|
## Configuration
|
||||||
|
|
||||||
After installing, you need to configure 🤗 Accelerate for how the current system is setup for training.
|
After installing, you need to configure Accelerate for how the current system is set up for training.
|
||||||
To do so run the following and answer the questions prompted to you:
|
To do so run the following and answer the questions prompted to you:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@ -67,7 +68,8 @@ To write a barebones configuration that doesn't include options such as DeepSpee
|
|||||||
```bash
|
```bash
|
||||||
python -c "from accelerate.utils import write_basic_config; write_basic_config(mixed_precision='fp16')"
|
python -c "from accelerate.utils import write_basic_config; write_basic_config(mixed_precision='fp16')"
|
||||||
```
|
```
|
||||||
🤗 Accelerate will automatically utilize the maximum number of GPUs available and set the mixed precision mode.
|
|
||||||
|
Accelerate will automatically utilize the maximum number of GPUs available and set the mixed precision mode.
|
||||||
|
|
||||||
To check that your configuration looks fine, run:
|
To check that your configuration looks fine, run:
|
||||||
|
|
||||||
@ -77,23 +79,36 @@ accelerate env
|
|||||||
|
|
||||||
An example output is shown below, which describes two GPUs on a single machine with no mixed precision being used:
|
An example output is shown below, which describes two GPUs on a single machine with no mixed precision being used:
|
||||||
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
- `Accelerate` version: 0.11.0.dev0
|
- `Accelerate` version: 1.2.0.dev0
|
||||||
- Platform: Linux-5.10.0-15-cloud-amd64-x86_64-with-debian-11.3
|
- Platform: Linux-6.8.0-47-generic-x86_64-with-glibc2.35
|
||||||
- Python version: 3.7.12
|
- `accelerate` bash location: /home/zach/miniconda3/envs/accelerate/bin/accelerate
|
||||||
- Numpy version: 1.19.5
|
- Python version: 3.10.13
|
||||||
- PyTorch version (GPU?): 1.12.0+cu102 (True)
|
- Numpy version: 1.26.4
|
||||||
|
- PyTorch version (GPU?): 2.5.1+cu124 (True)
|
||||||
|
- PyTorch XPU available: False
|
||||||
|
- PyTorch NPU available: False
|
||||||
|
- PyTorch MLU available: False
|
||||||
|
- PyTorch MUSA available: False
|
||||||
|
- System RAM: 187.91 GB
|
||||||
|
- GPU type: NVIDIA GeForce RTX 4090
|
||||||
- `Accelerate` default config:
|
- `Accelerate` default config:
|
||||||
- compute_environment: LOCAL_MACHINE
|
- compute_environment: LOCAL_MACHINE
|
||||||
- distributed_type: MULTI_GPU
|
- distributed_type: MULTI_GPU
|
||||||
- mixed_precision: no
|
- mixed_precision: no
|
||||||
- use_cpu: False
|
- use_cpu: False
|
||||||
|
- debug: False
|
||||||
- num_processes: 2
|
- num_processes: 2
|
||||||
- machine_rank: 0
|
- machine_rank: 0
|
||||||
- num_machines: 1
|
- num_machines: 1
|
||||||
- main_process_ip: None
|
- gpu_ids: all
|
||||||
- main_process_port: None
|
- rdzv_backend: static
|
||||||
|
- same_network: True
|
||||||
- main_training_function: main
|
- main_training_function: main
|
||||||
- deepspeed_config: {}
|
- enable_cpu_affinity: False
|
||||||
- fsdp_config: {}
|
- downcast_bf16: no
|
||||||
```
|
- tpu_use_cluster: False
|
||||||
|
- tpu_use_sudo: False
|
||||||
|
- tpu_env: []
|
||||||
|
```
|
||||||
@ -8,11 +8,14 @@ http://www.apache.org/licenses/LICENSE-2.0
|
|||||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
specific language governing permissions and limitations under the License.
|
specific language governing permissions and limitations under the License.
|
||||||
|
|
||||||
|
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||||
|
rendered properly in your Markdown viewer.
|
||||||
-->
|
-->
|
||||||
|
|
||||||
# Launching your 🤗 Accelerate scripts
|
# Launching Accelerate scripts
|
||||||
|
|
||||||
In the previous tutorial, you were introduced to how to modify your current training script to use 🤗 Accelerate.
|
In the previous tutorial, you were introduced to how to modify your current training script to use Accelerate.
|
||||||
The final version of that code is shown below:
|
The final version of that code is shown below:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@ -36,7 +39,7 @@ for batch in training_dataloader:
|
|||||||
|
|
||||||
But how do you run this code and have it utilize the special hardware available to it?
|
But how do you run this code and have it utilize the special hardware available to it?
|
||||||
|
|
||||||
First you should rewrite the above code into a function, and make it callable as a script. For example:
|
First, you should rewrite the above code into a function, and make it callable as a script. For example:
|
||||||
|
|
||||||
```diff
|
```diff
|
||||||
from accelerate import Accelerator
|
from accelerate import Accelerator
|
||||||
@ -61,20 +64,20 @@ First you should rewrite the above code into a function, and make it callable as
|
|||||||
+ main()
|
+ main()
|
||||||
```
|
```
|
||||||
|
|
||||||
Next you need to launch it with `accelerate launch`.
|
Next, you need to launch it with `accelerate launch`.
|
||||||
|
|
||||||
<Tip warning={true}>
|
<Tip warning={true}>
|
||||||
|
|
||||||
It's recommended you run `accelerate config` before using `accelerate launch` to configure your environment to your liking.
|
It's recommended you run `accelerate config` before using `accelerate launch` to configure your environment to your liking.
|
||||||
Otherwise 🤗 Accelerate will use very basic defaults depending on your system setup.
|
Otherwise Accelerate will use very basic defaults depending on your system setup.
|
||||||
|
|
||||||
</Tip>
|
</Tip>
|
||||||
|
|
||||||
|
|
||||||
## Using accelerate launch
|
## Using accelerate launch
|
||||||
|
|
||||||
🤗 Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.
|
Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.
|
||||||
This command wraps around all of the different commands needed to launch your script on various platforms, without you having to remember what each of them are.
|
This command wraps around all of the different commands needed to launch your script on various platforms, without you having to remember what each of them is.
|
||||||
|
|
||||||
<Tip>
|
<Tip>
|
||||||
|
|
||||||
@ -88,23 +91,32 @@ You can launch your script quickly by using:
|
|||||||
accelerate launch {script_name.py} --arg1 --arg2 ...
|
accelerate launch {script_name.py} --arg1 --arg2 ...
|
||||||
```
|
```
|
||||||
|
|
||||||
Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!
|
Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterward like normal!
|
||||||
|
|
||||||
Since this runs the various torch spawn methods, all of the expected environment variables can be modified here as well.
|
Since this runs the various torch spawn methods, all of the expected environment variables can be modified here as well.
|
||||||
For example, here is how to use `accelerate launch` with a single GPU:
|
For example, here is how to use `accelerate launch` with a single GPU:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
# for cuda device:
|
||||||
CUDA_VISIBLE_DEVICES="0" accelerate launch {script_name.py} --arg1 --arg2 ...
|
CUDA_VISIBLE_DEVICES="0" accelerate launch {script_name.py} --arg1 --arg2 ...
|
||||||
|
# for xpu device:
|
||||||
|
ZE_AFFINITY_MASK="0" accelerate launch {script_name.py} --arg1 --arg2 ...
|
||||||
```
|
```
|
||||||
|
|
||||||
You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.
|
You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.
|
||||||
In this case, 🤗 Accelerate will make some hyperparameter decisions for you, e.g., if GPUs are available, it will use all of them by default without the mixed precision.
|
In this case, Accelerate will make some hyperparameter decisions for you, e.g., if GPUs are available, it will use all of them by default without the mixed precision.
|
||||||
Here is how you would use all GPUs and train with mixed precision disabled:
|
Here is how you would use all GPUs and train with mixed precision disabled:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
accelerate launch --multi_gpu {script_name.py} {--arg1} {--arg2} ...
|
accelerate launch --multi_gpu {script_name.py} {--arg1} {--arg2} ...
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Or by specifying a number of GPUs to use:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
accelerate launch --num_processes=2 {script_name.py} {--arg1} {--arg2} ...
|
||||||
|
```
|
||||||
|
|
||||||
To get more specific you should pass in the needed parameters yourself. For instance, here is how you
|
To get more specific you should pass in the needed parameters yourself. For instance, here is how you
|
||||||
would also launch that same script on two GPUs using mixed precision while avoiding all of the warnings:
|
would also launch that same script on two GPUs using mixed precision while avoiding all of the warnings:
|
||||||
|
|
||||||
@ -120,16 +132,40 @@ accelerate launch -h
|
|||||||
|
|
||||||
<Tip>
|
<Tip>
|
||||||
|
|
||||||
Even if you are not using 🤗 Accelerate in your code, you can still use the launcher for starting your scripts!
|
Even if you are not using Accelerate in your code, you can still use the launcher for starting your scripts!
|
||||||
|
|
||||||
</Tip>
|
</Tip>
|
||||||
|
|
||||||
For a visualization of this difference, that earlier `accelerate launch` on multi-gpu would look something like so with `torchrun`:
|
For a visualization of this difference, that earlier `accelerate launch` on multi-gpu would look something like so with `torchrun`:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
MIXED_PRECISION="fp16" torchrun --nproc_per_node=2 --num_machines=1 {script_name.py} {--arg1} {--arg2} ...
|
MIXED_PRECISION="fp16" torchrun --nproc_per_node=2 --nnodes=1 {script_name.py} {--arg1} {--arg2} ...
|
||||||
```
|
```
|
||||||
|
|
||||||
|
You can also launch your script by running the launch CLI as a Python module itself, which lets you pass in other Python-specific
|
||||||
|
launching behaviors. To do so, use `accelerate.commands.launch` instead of `accelerate launch`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -m accelerate.commands.launch --num_processes=2 {script_name.py} {--arg1} {--arg2}
|
||||||
|
```
|
||||||
|
|
||||||
|
If you want to execute the script with any other Python flags, you can pass them in as well, similar to `-m`. For example, to enable unbuffered stdout and stderr:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -u -m accelerate.commands.launch --num_processes=2 {script_name.py} {--arg1} {--arg2}
|
||||||
|
```
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
|
||||||
|
You can run your code on CPU as well! This is helpful for debugging and testing purposes on toy models and datasets.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
accelerate launch --cpu {script_name.py} {--arg1} {--arg2}
|
||||||
|
```
|
||||||
|
|
||||||
|
</Tip>
|
||||||
|
|
||||||
## Why you should always use `accelerate config`
|
## Why you should always use `accelerate config`
|
||||||
|
|
||||||
Why is it useful to the point you should **always** run `accelerate config`?
|
Why is it useful to the point you should **always** run `accelerate config`?
|
||||||
@ -145,7 +181,7 @@ accelerate launch {script_name.py} {--arg1} {--arg2} ...
|
|||||||
## Custom Configurations
|
## Custom Configurations
|
||||||
|
|
||||||
As briefly mentioned earlier, `accelerate launch` should be mostly used through combining set configurations
|
As briefly mentioned earlier, `accelerate launch` should be mostly used through combining set configurations
|
||||||
made with the `accelerate config` command. These configs are saved to a `default_config.yaml` file in your cache folder for 🤗 Accelerate.
|
made with the `accelerate config` command. These configs are saved to a `default_config.yaml` file in your cache folder for Accelerate.
|
||||||
This cache folder is located at (with decreasing order of priority):
|
This cache folder is located at (with decreasing order of priority):
|
||||||
|
|
||||||
- The content of your environment variable `HF_HOME` suffixed with `accelerate`.
|
- The content of your environment variable `HF_HOME` suffixed with `accelerate`.
|
||||||
@ -175,4 +211,25 @@ use_cpu: false
|
|||||||
Launching a script from the location of that custom yaml file looks like the following:
|
Launching a script from the location of that custom yaml file looks like the following:
|
||||||
```bash
|
```bash
|
||||||
accelerate launch --config_file {path/to/config/my_config_file.yaml} {script_name.py} {--arg1} {--arg2} ...
|
accelerate launch --config_file {path/to/config/my_config_file.yaml} {script_name.py} {--arg1} {--arg2} ...
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Multi-node training
|
||||||
|
Multi-node training with Accelerate is similar to [multi-node training with torchrun](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). The simplest way to launch a multi-node training run is to do the following:
|
||||||
|
|
||||||
|
- Copy your codebase and data to all nodes (or place them on a shared filesystem).
|
||||||
|
- Set up your Python packages on all nodes.
|
||||||
|
- Run `accelerate config` on the main node first. After specifying the number of nodes, you will be asked for the rank of each node (0 for the main/master node), along with the IP address and port of the main process; these are required for the worker nodes to communicate with it. Afterwards, copy or send this config file to each of your nodes, changing `machine_rank` to 1, 2, 3, etc. so you don't have to rerun the command on every machine (or launch directly with `torchrun` following its own directions).
|
||||||
|
|
||||||
|
Once you have done this, you can start your multi-node training run by running `accelerate launch` (or `torchrun`) on all nodes.
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
It is required that the command be run on all nodes for everything to start, not just from the main node. You can use something like SLURM or another process executor to wrap this requirement and launch everything from a single command.
|
||||||
|
</Tip>
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
|
||||||
|
It is recommended to use the intranet IP of your main node over the public IP for better latency. This is the `192.168.x.x` or the `172.x.x.x` address you see when you run `hostname -I` on the main node.
|
||||||
|
|
||||||
|
</Tip>
|
||||||
|
|
||||||
|
To get a better idea about multi-node training, check out our example for [multi-node training with FSDP](https://huggingface.co/blog/ram-efficient-pytorch-fsdp).
|
||||||
docs/source/basic_tutorials/migration.md (new file)
@@ -0,0 +1,224 @@
|
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License.
|
||||||
|
|
||||||
|
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||||
|
rendered properly in your Markdown viewer.
|
||||||
|
-->
|
||||||
|
|
||||||
|
# Add Accelerate to your code
|
||||||
|
|
||||||
|
Each distributed training framework has their own way of doing things which can require writing a lot of custom code to adapt it to your PyTorch training code and training environment. Accelerate offers a friendly way to interface with these distributed training frameworks without having to learn the specific details of each one. Accelerate takes care of those details for you, so you can focus on the training code and scale it to any distributed training environment.
|
||||||
|
|
||||||
|
In this tutorial, you'll learn how to adapt your existing PyTorch code with Accelerate and get you on your way toward training on distributed systems with ease! You'll start with a basic PyTorch training loop (it assumes all the training objects like `model` and `optimizer` have been setup already) and progressively integrate Accelerate into it.
|
||||||
|
|
||||||
|
```python
|
||||||
|
device = "cuda"
|
||||||
|
model.to(device)
|
||||||
|
|
||||||
|
for batch in training_dataloader:
|
||||||
|
optimizer.zero_grad()
|
||||||
|
inputs, targets = batch
|
||||||
|
inputs = inputs.to(device)
|
||||||
|
targets = targets.to(device)
|
||||||
|
outputs = model(inputs)
|
||||||
|
loss = loss_function(outputs, targets)
|
||||||
|
loss.backward()
|
||||||
|
optimizer.step()
|
||||||
|
scheduler.step()
|
||||||
|
```
|
||||||
|
|
||||||
|
## Accelerator
|
||||||
|
|
||||||
|
The [`Accelerator`] is the main class for adapting your code to work with Accelerate. It knows about the distributed setup you're using such as the number of different processes and your hardware type. This class also provides access to many of the necessary methods for enabling your PyTorch code to work in any distributed training environment and for managing and executing processes across devices.
|
||||||
|
|
||||||
|
That's why you should always start by importing and creating an [`Accelerator`] instance in your script.
|
||||||
|
|
||||||
|
```python
|
||||||
|
from accelerate import Accelerator
|
||||||
|
|
||||||
|
accelerator = Accelerator()
|
||||||
|
```
|
||||||
|
|
||||||
|
The [`Accelerator`] also knows which device to move your PyTorch objects to, so it is recommended to let Accelerate handle this for you.
|
||||||
|
|
||||||
|
```diff
|
||||||
|
- device = "cuda"
|
||||||
|
+ device = accelerator.device
|
||||||
|
model.to(device)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Prepare PyTorch objects
|
||||||
|
|
||||||
|
Next, you need to prepare your PyTorch objects (model, optimizer, scheduler, etc.) for distributed training. The [`~Accelerator.prepare`] method takes care of placing your model in the appropriate container (like single GPU or multi-GPU) for your training setup, adapting the optimizer and scheduler to use Accelerate's [`~optimizer.AcceleratedOptimizer`] and [`~scheduler.AcceleratedScheduler`], and creating a new dataloader that can be sharded across processes.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> Accelerate only prepares objects that inherit from their respective PyTorch classes such as `torch.optim.Optimizer`.
|
||||||
|
|
||||||
|
The PyTorch objects are returned in the same order they're sent.
|
||||||
|
|
||||||
|
```py
|
||||||
|
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
|
||||||
|
model, optimizer, training_dataloader, scheduler
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Training loop
|
||||||
|
|
||||||
|
Finally, remove the `to(device)` calls to the inputs and targets in the training loop because Accelerate's DataLoader classes automatically places them on the right device. You should also replace the usual `backward()` pass with Accelerate's [`~Accelerator.backward`] method which scales the gradients for you and uses the appropriate `backward()` method depending on your distributed setup (for example, DeepSpeed or Megatron).
|
||||||
|
|
||||||
|
```diff
|
||||||
|
- inputs = inputs.to(device)
|
||||||
|
- targets = targets.to(device)
|
||||||
|
outputs = model(inputs)
|
||||||
|
loss = loss_function(outputs, targets)
|
||||||
|
- loss.backward()
|
||||||
|
+ accelerator.backward(loss)
|
||||||
|
```
|
||||||
|
|
||||||
|
Put everything together and your new Accelerate training loop should now look like this!
|
||||||
|
|
||||||
|
```python
|
||||||
|
from accelerate import Accelerator
|
||||||
|
accelerator = Accelerator()
|
||||||
|
|
||||||
|
device = accelerator.device
|
||||||
|
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
|
||||||
|
model, optimizer, training_dataloader, scheduler
|
||||||
|
)
|
||||||
|
|
||||||
|
for batch in training_dataloader:
|
||||||
|
optimizer.zero_grad()
|
||||||
|
inputs, targets = batch
|
||||||
|
outputs = model(inputs)
|
||||||
|
loss = loss_function(outputs, targets)
|
||||||
|
accelerator.backward(loss)
|
||||||
|
optimizer.step()
|
||||||
|
scheduler.step()
|
||||||
|
```
|
||||||
|
|
||||||
|
## Training features
|
||||||
|
|
||||||
|
Accelerate offers additional features - like gradient accumulation, gradient clipping, mixed precision training and more - you can add to your script to improve your training run. Let's explore these three features.
|
||||||
|
|
||||||
|
### Gradient accumulation
|
||||||
|
|
||||||
|
Gradient accumulation enables you to train on larger batch sizes by accumulating the gradients over multiple batches before updating the weights. This can be useful for getting around memory limitations. To enable this feature in Accelerate, specify the `gradient_accumulation_steps` parameter in the [`Accelerator`] class and add the [`~Accelerator.accumulate`] context manager to your script.
|
||||||
|
|
||||||
|
```diff
|
||||||
|
+ accelerator = Accelerator(gradient_accumulation_steps=2)
|
||||||
|
model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader)
|
||||||
|
|
||||||
|
for input, label in training_dataloader:
|
||||||
|
+ with accelerator.accumulate(model):
|
||||||
|
predictions = model(input)
|
||||||
|
loss = loss_function(predictions, label)
|
||||||
|
accelerator.backward(loss)
|
||||||
|
optimizer.step()
|
||||||
|
scheduler.step()
|
||||||
|
optimizer.zero_grad()
|
||||||
|
```
|
||||||
|
|
||||||
|
### Gradient clipping
|
||||||
|
|
||||||
|
Gradient clipping is a technique to prevent "exploding gradients", and Accelerate offers:
|
||||||
|
|
||||||
|
* [`~Accelerator.clip_grad_value_`] to clip gradients to a minimum and maximum value
|
||||||
|
* [`~Accelerator.clip_grad_norm_`] for normalizing gradients to a certain value
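As a rough sketch of where clipping slots into the training loop above (the `max_norm=1.0` threshold is only an illustrative value):

```py
for input, label in training_dataloader:
    predictions = model(input)
    loss = loss_function(predictions, label)
    accelerator.backward(loss)
    # clip only when gradients are actually being synced, e.g. at the end of an accumulation cycle
    if accelerator.sync_gradients:
        accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()
```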
|
||||||
|
|
||||||
|
### Mixed precision
|
||||||
|
|
||||||
|
Mixed precision accelerates training by using a lower precision data type like fp16 (half-precision) to calculate the gradients. For the best performance with Accelerate, the loss should be computed inside your model (like in Transformers models) because computations outside of the model are computed in full precision.
|
||||||
|
|
||||||
|
Set the mixed precision type to use in the [`Accelerator`], and then use the [`~Accelerator.autocast`] context manager to automatically cast the values to the specified data type.
|
||||||
|
|
||||||
|
> [!WARNING]
|
||||||
|
> Accelerate enables automatic mixed precision, so [`~Accelerator.autocast`] is only needed if there are other mixed precision operations besides those performed on loss by [`~Accelerator.backward`] which already handles the scaling.
|
||||||
|
|
||||||
|
```diff
|
||||||
|
+ accelerator = Accelerator(mixed_precision="fp16")
|
||||||
|
+ with accelerator.autocast():
|
||||||
|
loss = complex_loss_function(outputs, target)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Save and load
|
||||||
|
|
||||||
|
Accelerate can also save and load a *model* once training is complete or you can also save the model and optimizer *state* which could be useful for resuming training.
|
||||||
|
|
||||||
|
### Model
|
||||||
|
|
||||||
|
Once all processes are complete, unwrap the model with the [`~Accelerator.unwrap_model`] method before saving it because the [`~Accelerator.prepare`] method wrapped your model into the proper interface for distributed training. If you don't unwrap the model, saving the model state dictionary also saves any potential extra layers from the larger model and you won't be able to load the weights back into your base model.
|
||||||
|
|
||||||
|
You should use the [`~Accelerator.save_model`] method to unwrap and save the model state dictionary. This method can also save a model into sharded checkpoints or into the [safetensors](https://hf.co/docs/safetensors/index) format.
|
||||||
|
|
||||||
|
<hfoptions id="save">
|
||||||
|
<hfoption id="single checkpoint">
|
||||||
|
|
||||||
|
```py
|
||||||
|
accelerator.wait_for_everyone()
|
||||||
|
accelerator.save_model(model, save_directory)
|
||||||
|
```
|
||||||
|
|
||||||
|
<Tip>
|
||||||
|
|
||||||
|
For models from the [Transformers](https://hf.co/docs/transformers/index) library, save the model with the [`~transformers.PreTrainedModel.save_pretrained`] method so that it can be reloaded with the [`~transformers.PreTrainedModel.from_pretrained`] method.
|
||||||
|
|
||||||
|
```py
|
||||||
|
from transformers import AutoModel
|
||||||
|
|
||||||
|
unwrapped_model = accelerator.unwrap_model(model)
|
||||||
|
unwrapped_model.save_pretrained(
|
||||||
|
"path/to/my_model_directory",
|
||||||
|
is_main_process=accelerator.is_main_process,
|
||||||
|
save_function=accelerator.save,
|
||||||
|
)
|
||||||
|
|
||||||
|
model = AutoModel.from_pretrained("path/to/my_model_directory")
|
||||||
|
```
|
||||||
|
|
||||||
|
</Tip>
|
||||||
|
|
||||||
|
To load your weights, use the [`~Accelerator.unwrap_model`] method to unwrap the model first before loading the weights. All model parameters are references to tensors, so this loads your weights inside `model`.
|
||||||
|
|
||||||
|
```py
|
||||||
|
unwrapped_model = accelerator.unwrap_model(model)
|
||||||
|
path_to_checkpoint = os.path.join(save_directory,"pytorch_model.bin")
|
||||||
|
unwrapped_model.load_state_dict(torch.load(path_to_checkpoint))
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="sharded checkpoint">
|
||||||
|
|
||||||
|
Set `safe_serialization=True` to save the model in the safetensor format.
|
||||||
|
|
||||||
|
```py
|
||||||
|
accelerator.wait_for_everyone()
|
||||||
|
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
|
||||||
|
```
|
||||||
|
|
||||||
|
To load a sharded checkpoint or a safetensor formatted checkpoint, use the [`~accelerate.load_checkpoint_in_model`] method. This method allows you to load a checkpoint onto a specific device.
|
||||||
|
|
||||||
|
```py
|
||||||
|
load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"":device})
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
|
### State
|
||||||
|
|
||||||
|
During training, you may want to save the current state of the model, optimizer, random generators, and potentially learning rate schedulers so they can be restored in the *same script*. You should add the [`~Accelerator.save_state`] and [`~Accelerator.load_state`] methods to your script to save and load states.
|
||||||
|
|
||||||
|
To further customize where and how states are saved through [`~Accelerator.save_state`], use the [`~utils.ProjectConfiguration`] class. For example, if `automatic_checkpoint_naming` is enabled, each saved checkpoint is stored at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.
|
||||||
|
|
||||||
|
Any other stateful items to be stored should be registered with the [`~Accelerator.register_for_checkpointing`] method so they can be saved and loaded. Every object passed to this method to be stored must have a `load_state_dict` and `state_dict` function.
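A minimal sketch tying these pieces together (the project directory, checkpoint path, and `my_stateful_object` are illustrative placeholders):

```py
from accelerate import Accelerator

accelerator = Accelerator(project_dir="my_project")

# anything extra that exposes `state_dict`/`load_state_dict` can be tracked as well
accelerator.register_for_checkpointing(my_stateful_object)

# saves the prepared model/optimizer states, RNG states, and every registered object ...
accelerator.save_state("my_project/checkpoints/step_100")

# ... and restores them later in the same script
accelerator.load_state("my_project/checkpoints/step_100")
```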
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, you can additionally pass `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`]. This extends Accelerate's DataLoader classes with a `load_state_dict` and `state_dict` function, and makes it so `Accelerator.save_state` and `Accelerator.load_state` also track how far into the training dataset it has read when persisting the model.
|
||||||
@@ -1,123 +0,0 @@
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Migrating your code to 🤗 Accelerate

This tutorial details how to easily convert existing PyTorch code to use 🤗 Accelerate!
You'll see that by just changing a few lines of code, 🤗 Accelerate can perform its magic and get you on
your way toward running your code on distributed systems with ease!

## The base training loop

To begin, write out a very basic PyTorch training loop.

<Tip>

We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand.

</Tip>

```python
device = "cuda"
model.to(device)

for batch in training_dataloader:
    optimizer.zero_grad()
    inputs, targets = batch
    inputs = inputs.to(device)
    targets = targets.to(device)
    outputs = model(inputs)
    loss = loss_function(outputs, targets)
    loss.backward()
    optimizer.step()
    scheduler.step()
```

## Add in 🤗 Accelerate

To start using 🤗 Accelerate, first import and create an [`Accelerator`] instance:

```python
from accelerate import Accelerator

accelerator = Accelerator()
```

[`Accelerator`] is the main force behind utilizing all the possible options for distributed training!

### Setting the right device

The [`Accelerator`] class knows the right device to move any PyTorch object to at any time, so you should
change the definition of `device` to come from [`Accelerator`]:

```diff
- device = 'cuda'
+ device = accelerator.device
  model.to(device)
```

### Preparing your objects

Next you need to pass all of the important objects related to training into [`~Accelerator.prepare`]. 🤗 Accelerate will
make sure everything is set up in the current environment for you to start training:

```python
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
    model, optimizer, training_dataloader, scheduler
)
```

These objects are returned in the same order they were sent in. By default when using `device_placement=True`, all of the objects that can be sent to the right device will be.
If you need to work with data that isn't passed to [`~Accelerator.prepare`] but should be on the active device, pass in the `device` you made earlier.
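
As a small illustration, assuming `torch` is imported, the tensor below is invented for this sketch:

```python
# Illustrative only: data created outside of `prepare` must be moved to the active device yourself
class_weights = torch.tensor([1.0, 2.0, 0.5]).to(device)
```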
<Tip warning={true}>

Accelerate will only prepare objects that inherit from their respective PyTorch classes (such as `torch.optim.Optimizer`).

</Tip>

### Modifying the training loop

Finally, three lines of code need to be changed in the training loop. 🤗 Accelerate's DataLoader classes will automatically handle the device placement by default,
and [`~Accelerator.backward`] should be used for performing the backward pass:

```diff
-   inputs = inputs.to(device)
-   targets = targets.to(device)
    outputs = model(inputs)
    loss = loss_function(outputs, targets)
-   loss.backward()
+   accelerator.backward(loss)
```

With that, your training loop is now ready to use 🤗 Accelerate!

## The finished code

Below is the final version of the converted code:

```python
from accelerate import Accelerator

accelerator = Accelerator()

model, optimizer, training_dataloader, scheduler = accelerator.prepare(
    model, optimizer, training_dataloader, scheduler
)

for batch in training_dataloader:
    optimizer.zero_grad()
    inputs, targets = batch
    outputs = model(inputs)
    loss = loss_function(outputs, targets)
    accelerator.backward(loss)
    optimizer.step()
    scheduler.step()
```
@@ -8,9 +8,12 @@ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Launching distributed training from Jupyter Notebooks

This tutorial teaches you how to fine-tune a computer vision model with 🤗 Accelerate from a Jupyter Notebook on a distributed system.
You will also learn how to set up a few requirements for ensuring your environment is configured properly, your data has been prepared properly, and finally how to launch training.

@@ -23,19 +26,19 @@ You will also learn how to setup a few requirements needed for ensuring your env

## Configuring the Environment

Before any training can be performed, an Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts:

```bash
accelerate config
```

However, if general defaults are fine and you are *not* running on a TPU, Accelerate has a utility to quickly write your GPU configuration into a config file via [`utils.write_basic_config`].

The following code will restart Jupyter after writing the configuration, as CUDA code was called to perform this.

<Tip warning={true}>

CUDA can't be initialized more than once on a multi-GPU system. It's fine to debug in the notebook and have calls to CUDA, but in order to finally train, a full cleanup and restart will need to be performed.

</Tip>
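
The cell being referred to looks like the sketch below (matching the `os._exit(00)` restart shown in the surrounding diff context):

```python
import os

from accelerate.utils import write_basic_config

write_basic_config()  # Write a basic config file based on the detected GPUs
os._exit(00)  # Restart the notebook so training starts from a clean CUDA state
```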
@@ -49,7 +52,7 @@ os._exit(00) # Restart the notebook

## Preparing the Dataset and Model

Next you should prepare your dataset. As mentioned earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU.

If you do, it is recommended to put that specific code into a function and call that from within the notebook launcher interface, which will be shown later.

@@ -153,7 +156,7 @@ def get_dataloaders(batch_size: int = 64):

    random_perm = np.random.permutation(len(fnames))
    cut = int(0.8 * len(fnames))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training a simple RandomResizedCrop will be used
    train_tfm = Compose([RandomResizedCrop((224, 224), scale=(0.5, 1.0)), ToTensor()])

@@ -183,7 +186,7 @@ Here is a basic training loop for the animal classification problem:

<Tip>

The code has been split up to allow for explanations on each section. A full version that can be copied and pasted will be available at the end.

</Tip>

@@ -324,7 +327,7 @@ def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64):

    # Build dataloaders
    train_dataloader, eval_dataloader = get_dataloaders(batch_size)

    # Instantiate the model (you build the model here so that the seed also controls new weight initializations)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # Freeze the base model

@@ -337,11 +340,11 @@ def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64):

    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None]
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None]

    # To make these constants available on the active device, set it to the accelerator device
    mean = mean.to(accelerator.device)
    std = std.to(accelerator.device)

    # Instantiate the optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25)

    # Instantiate the learning rate scheduler
@@ -398,6 +401,26 @@ args = ("fp16", 42, 64)

```python
notebook_launcher(training_loop, args, num_processes=2)
```

In the case of running on multiple nodes, you need to set up a Jupyter session at each node and run the launching cell at the same time.

For an environment containing 2 nodes (computers) with 8 GPUs each and the main computer with an IP address of "172.31.43.8", it would look like so:

```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=0, num_nodes=2, num_processes=8)
```

And in the second Jupyter session on the other machine:

<Tip>

Notice how the `node_rank` has changed.

</Tip>

```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=1, num_nodes=2, num_processes=8)
```

In the case of running on the TPU, it would look like so:

@@ -407,6 +430,17 @@ args = (model, "fp16", 42, 64)

```python
notebook_launcher(training_loop, args, num_processes=8)
```

To launch the training process with elasticity, enabling fault tolerance, you can use the `elastic_launch` feature provided by PyTorch. This requires setting additional parameters such as `rdzv_backend` and `max_restarts`. Here is an example of how to use `notebook_launcher` with elastic capabilities:

```python
notebook_launcher(
    training_loop,
    args,
    num_processes=2,
    max_restarts=3
)
```

As it's running, it will print the progress as well as state how many devices you ran on. This tutorial was run with two GPUs:

@@ -420,10 +454,23 @@ epoch 4: 94.71

And that's it!

Please note that [`notebook_launcher`] ignores the Accelerate config file; to launch based on the config, use:

```bash
accelerate launch
```

## Debugging

A common issue when running the `notebook_launcher` is receiving a "CUDA has already been initialized" issue. This usually stems
from an import or prior code in the notebook that makes a call to the PyTorch `torch.cuda` sublibrary. To help narrow down what went wrong,
you can launch the `notebook_launcher` with `ACCELERATE_DEBUG_MODE=yes` in your environment, and an additional check
will be made when spawning that a regular process can be created and utilize CUDA without issue. (Your CUDA code can still be run afterwards.)
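
For instance, a sketch of setting this from inside the notebook before launching (reusing `training_loop` and `args` from the cells above):

```python
import os

os.environ["ACCELERATE_DEBUG_MODE"] = "yes"  # enable the extra spawn-time CUDA check
notebook_launcher(training_loop, args, num_processes=2)
```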
## Conclusion

This notebook showed how to perform distributed training from inside of a Jupyter Notebook. Some key notes to remember:

- Make sure to save any code that uses CUDA (or CUDA imports) for the function passed to [`notebook_launcher`]
- Set the `num_processes` to be the number of devices used for training (such as the number of GPUs, CPUs, TPUs, etc.)
- If using the TPU, declare your model outside the training loop function

@@ -8,14 +8,17 @@ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Overview

Welcome to the Accelerate tutorials! These introductory guides will help catch you up to speed on working with Accelerate.
You'll learn how to modify your code to have it work with the API seamlessly, how to launch your script properly,
and more!

These tutorials assume some basic knowledge of Python and familiarity with the PyTorch framework.

If you have any questions about Accelerate, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/accelerate/18).

docs/source/basic_tutorials/tpu.md (new file, 38 lines)
@@ -0,0 +1,38 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# TPU training

A [TPU (Tensor Processing Unit)](https://cloud.google.com/tpu/docs/intro-to-tpu) is a type of hardware specifically designed for training models efficiently. Accelerate supports TPU training, but there are a few things you should be aware of, namely graph compilation. This tutorial briefly discusses compilation, and for more details, take a look at the [Training on TPUs with Accelerate](../concept_guides/training_tpu) guide.

## Compilation

A TPU creates a graph of all the operations in the training step, such as the forward pass, backward pass, and optimizer step. This is why the first training step always takes a while: building and compiling this graph takes time. But once compilation is complete, it is cached and all subsequent steps are much faster.

The key is to avoid compiling your code again, or else training is super slow. This means all your operations must be exactly the same:

* all tensors in your batches must have the same length (for example, no dynamic padding for NLP tasks; see the sketch below)
* your code must be static (for example, no layers with for loops that have different lengths depending on the input, such as an LSTM)
## Weight tying

A common language model design is to tie the weights of the embedding and softmax layers. However, moving the model to a TPU (either yourself or passing it to the [`~Accelerator.prepare`] method) breaks the weight tying and you'll need to retie the weights.

To add special behavior (like weight tying) in your script for TPUs, check whether [`~Accelerator.distributed_type`] is `DistributedType.TPU` first. Then you can use the [`~transformers.PreTrainedModel.tie_weights`] method to retie the weights.

```py
if accelerator.distributed_type == DistributedType.TPU:
    model.tie_weights()
```

docs/source/basic_tutorials/troubleshooting.md (new file, 211 lines)
@@ -0,0 +1,211 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Troubleshoot

This guide provides solutions to some issues you might encounter when using Accelerate. Not all errors are covered because Accelerate is an active library that is continuously evolving and there are many different use cases and distributed training setups. If the solutions described here don't help with your specific error, please take a look at the [Ask for help](#ask-for-help) section to learn where and how to get help.

## Logging

Logging can help you identify where an error is coming from. In a distributed setup with multiple processes, logging can be a challenge, but Accelerate provides the [`~accelerate.logging`] utility to ensure logs are synchronized.

To troubleshoot an issue, use [`~accelerate.logging`] instead of the standard Python [`logging`](https://docs.python.org/3/library/logging.html#module-logging) module. Set the verbosity level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`) with the `log_level` parameter, and then you can either:

1. Export the `log_level` as the `ACCELERATE_LOG_LEVEL` environment variable.
2. Pass the `log_level` directly to `get_logger`.

For example, to set `log_level="INFO"`:

```py
from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="INFO")
```

By default, the log is called on main processes only. To call it on all processes, pass `main_process_only=False`.
If a log should be called on all processes and in order, also pass `in_order=True`.

```py
from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="DEBUG")

# log all processes
logger.debug("thing_to_log", main_process_only=False)

# log all processes in order
logger.debug("thing_to_log", main_process_only=False, in_order=True)
```

## Hanging code and timeout errors

There can be many reasons why your code is hanging. Let's take a look at how to solve some of the most common issues that can cause your code to hang.

### Mismatched tensor shapes

Mismatched tensor shapes is a common issue that can cause your code to hang for a significant amount of time on a distributed setup.

When running scripts in a distributed setup, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are necessary to grab tensors across devices to collectively perform operations on them. These (and other) functions rely on `torch.distributed` to perform a `gather` operation, which requires tensors to have the **exact same shape** across all processes. When the tensor shapes don't match, your code hangs and you'll eventually hit a timeout exception.

You can use Accelerate's operational debug mode to immediately catch this issue. We recommend enabling this mode during the `accelerate config` setup, but you can also enable it from the CLI, as an environment variable, or by manually editing the `config.yaml` file.

<hfoptions id="mismatch">
<hfoption id="CLI">

```bash
accelerate launch --debug {my_script.py} --arg1 --arg2
```

</hfoption>
<hfoption id="environment variable">

If enabling debug mode as an environment variable, you don't need to call `accelerate launch`.

```bash
ACCELERATE_DEBUG_MODE="1" torchrun {my_script.py} --arg1 --arg2
```

</hfoption>
<hfoption id="config.yaml">

Add `debug: true` to your `config.yaml` file.

```yaml
compute_environment: LOCAL_MACHINE
debug: true
```

</hfoption>
</hfoptions>

Once you enable debug mode, you should get a traceback that points to the tensor shape mismatch issue.

```py
Traceback (most recent call last):
  File "/home/zach_mueller_huggingface_co/test.py", line 18, in <module>
    main()
  File "/home/zach_mueller_huggingface_co/test.py", line 15, in main
    broadcast_tensor = broadcast(tensor)
  File "/home/zach_mueller_huggingface_co/accelerate/src/accelerate/utils/operations.py", line 303, in wrapper
accelerate.utils.operations.DistributedOperationException:

Cannot apply desired operation due to shape mismatches. All shapes across devices must be valid.

Operation: `accelerate.utils.operations.broadcast`
Input shapes:
  - Process 0: [1, 5]
  - Process 1: [1, 2, 5]
```
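
For reference, a minimal sketch of the kind of script that produces the traceback above; the tensor contents are illustrative, only the mismatched shapes matter.

```py
import torch

from accelerate import Accelerator
from accelerate.utils import broadcast

def main():
    accelerator = Accelerator()
    if accelerator.process_index == 0:
        tensor = torch.ones(1, 5, device=accelerator.device)
    else:
        tensor = torch.ones(1, 2, 5, device=accelerator.device)
    # Hangs (or raises DistributedOperationException in debug mode) because shapes differ per process
    broadcast_tensor = broadcast(tensor)
    print(broadcast_tensor)

if __name__ == "__main__":
    main()
```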
### Early stopping

For early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss), it may not be synchronized across all processes. As a result, a break can happen on process 0 but not on process 1, which will cause your code to hang indefinitely until a timeout occurs.

If you have early stopping conditionals, use the `set_trigger` and `check_trigger` methods to make sure all the processes
are ended correctly.

```py
# Assume `should_do_breakpoint` is a custom defined function that returns a conditional,
# and that conditional might be true only on process 1
if should_do_breakpoint(loss):
    accelerator.set_trigger()

# Later in the training script when we need to check for the breakpoint
if accelerator.check_trigger():
    break
```

### Low kernel versions on Linux

On Linux with kernel version < 5.5, hanging processes have been reported. To avoid this problem, upgrade your system to a later kernel version.

### MPI

If your distributed CPU training job using MPI is hanging, ensure that you have
[passwordless SSH](https://www.open-mpi.org/faq/?category=rsh#ssh-keys) set up (using keys) between the nodes. This means
that for all nodes in your hostfile, you should be able to SSH from one node to another without being prompted for a password.

Next, try to run the `mpirun` command as a sanity check. For example, the command below should print out the
hostnames for each of the nodes.

```bash
mpirun -f hostfile -n {number of nodes} -ppn 1 hostname
```

## Out-of-Memory

One of the most frustrating errors when it comes to running training scripts is hitting "Out-of-Memory" on devices like CUDA, XPU or CPU. The entire script needs to be restarted and any progress is lost.

To address this problem, Accelerate provides the [`find_executable_batch_size`] utility that is heavily based on [toma](https://github.com/BlackHC/toma).
This utility retries code that fails due to OOM (out-of-memory) conditions and automatically lowers batch sizes. For each OOM condition, the algorithm decreases the batch size by half and retries the code until it succeeds.

To use [`find_executable_batch_size`], restructure your training function to include an inner function with `find_executable_batch_size` and build your dataloaders inside it. At a minimum, this only takes 4 new lines of code.

<Tip warning={true}>

The inner function **must** take batch size as the first parameter, but we do not pass one to it when called. The wrapper will handle this for you. Any object (models, optimizers) that consumes device memory and is passed to the [`Accelerator`] also **must** be declared inside the inner function.

</Tip>

```diff
def training_function(args):
    accelerator = Accelerator()

+   @find_executable_batch_size(starting_batch_size=args.batch_size)
+   def inner_training_loop(batch_size):
+       nonlocal accelerator # Ensure they can be used in our context
+       accelerator.free_memory() # Free all lingering references
        model = get_model()
        model.to(accelerator.device)
        optimizer = get_optimizer()
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        lr_scheduler = get_scheduler(
            optimizer,
            num_training_steps=len(train_dataloader)*num_epochs
        )
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        train(model, optimizer, train_dataloader, lr_scheduler)
        validate(model, eval_dataloader)
+   inner_training_loop()
```

## Non-reproducible results between device setups

If you changed the device setup and observe different model performance, it is likely you didn't update your script when moving from one setup to another. Even if you're using the same script with the same batch size, the results will still be different on a TPU, multi-GPU, and single GPU.

For example, if you were training on a single GPU with a batch size of 16 and you move to a dual GPU setup, you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**.
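
In other words, the relevant arithmetic is simply the following sketch (the numbers mirror the example above):

```py
per_device_batch_size = 8   # what you pass to the dataloader on each GPU
num_processes = 2           # dual-GPU setup
effective_batch_size = per_device_batch_size * num_processes
print(effective_batch_size)  # 16, the same as the original single-GPU run
```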
To reproduce the results across setups, use the same seed, adjust the batch size accordingly, and consider scaling the learning rate.

For more details and a quick reference for batch sizes, check out the [Comparing performance between different device setups](../concept_guides/performance) guide.

## Performance issues on different GPUs

If your multi-GPU setup consists of different GPUs, you may encounter some performance issues:

- There may be an imbalance in GPU memory between the GPUs. In this case, the GPU with the smaller memory will limit the batch size or the size of the model that can be loaded onto the GPUs.
- If you are using GPUs with different performance profiles, the performance will be driven by the slowest GPU you are using because the other GPUs will have to wait for it to complete its workload.

Vastly different GPUs within the same setup can lead to performance bottlenecks.

## Ask for help

If none of the solutions and advice here helped resolve your issue, you can always reach out to the community and the Accelerate team for help.

- Ask for help on the Hugging Face forums by posting your question in the [Accelerate category](https://discuss.huggingface.co/c/accelerate/18). Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved!

- Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you.

- Create an Issue on the Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you think you've found a bug related to the library. Include context regarding the bug and details about your distributed setup to help us better figure out what's wrong and how we can fix it.
@@ -8,11 +8,14 @@ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Loading big models into memory

When loading a pre-trained model in PyTorch, the usual workflow looks like this:

```py
import torch
```

@@ -27,11 +30,11 @@ In plain English, those steps are:
2. Load the model weights (in a dictionary usually called a state dict) from the disk
3. Load those weights inside the model

While this works very well for regularly sized models, this workflow has some clear limitations when we deal with a huge model: in step 1, we load a full version of the model in RAM, and spend some time randomly initializing the weights (which will be discarded in step 3). In step 2, we load another full version of the model in RAM, with the pre-trained weights. If you're loading a model with 6 billion parameters, this means you will need 24GB of RAM for each copy of the model, so 48GB in total (half of it to load the model in FP16).

<Tip warning={true}>

This API is quite new and still in its experimental stage. While we strive to provide a stable API, it's possible some small parts of the public API will change in the future.

</Tip>

@@ -43,7 +46,7 @@ While this works very well for regularly sized models, this workflow has some cl

### Instantiating an empty model

The first tool Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM so that step 1 can be done on models of any size. Here is how it works:

```py
from accelerate import init_empty_weights
```

@@ -59,7 +62,7 @@ with init_empty_weights():

```py
    model = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
```

initializes an empty model with a bit more than 100B parameters. Behind the scenes, this relies on the meta device introduced in PyTorch 1.9. During the initialization under the context manager, each time a parameter is created, it is instantly moved to that device.

<Tip warning={true}>

@@ -69,9 +72,9 @@ initializes an empty model with a bit more than 100B parameters. Behind the scen

### Sharded checkpoints

It's possible your model is so big that even a single copy won't fit in RAM. That doesn't mean it can't be loaded: if you have one or several GPUs, that is more memory available to store your model. In this case, it's better if your checkpoint is split into several smaller files that we call checkpoint shards.

Accelerate will handle sharded checkpoints as long as you use the following format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in the JSON format that contains a dictionary mapping parameter names to the file containing their weights. You can easily shard your model with [`~Accelerator.save_model`]. For instance, we could have a folder containing:

```bash
first_state_dict.bin
```
@@ -94,48 +97,69 @@ and `first_state_dict.bin` containing the weights for `"linear1.weight"` and `"l

### Loading weights

The second tool Accelerate introduces is a function [`load_checkpoint_and_dispatch`] that allows you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard.

If you want to use big model inference with Transformers models, check out this [documentation](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading).

Here is how we can use this to load the [GPT2-1.5B](https://huggingface.co/marcsun13/gpt2-xl-linear-sharded) model.

Let's download the sharded version of this model.

```bash
pip install huggingface_hub
```

```py
from huggingface_hub import snapshot_download
checkpoint = "marcsun13/gpt2-xl-linear-sharded"
weights_location = snapshot_download(repo_id=checkpoint)
```

In order to initialize the model, we will use the library minGPT.

```bash
git clone https://github.com/karpathy/minGPT.git
pip install minGPT/
```

```py
from accelerate import init_empty_weights
from mingpt.model import GPT

model_config = GPT.get_default_config()
model_config.model_type = 'gpt2-xl'
model_config.vocab_size = 50257
model_config.block_size = 1024

with init_empty_weights():
    model = GPT(model_config)
```

Then, load the checkpoint we just downloaded with:

```py
from accelerate import load_checkpoint_and_dispatch

model = load_checkpoint_and_dispatch(
    model, checkpoint=weights_location, device_map="auto", no_split_module_classes=['Block']
)
```

By passing `device_map="auto"`, we tell Accelerate to determine automatically where to put each layer of the model depending on the available resources:
- first, we use the maximum space available on the GPU(s)
- if we still need space, we store the remaining weights on the CPU
- if there is not enough RAM, we store the remaining weights on the hard drive as memory-mapped tensors

#### `no_split_module_classes`

This parameter will indicate that some of the modules with the name `"Block"` should not be split across different devices. You should set here all blocks that
include a residual connection of some kind.

#### The `device_map`

You can see the `device_map` that Accelerate picked by accessing the `hf_device_map` attribute of your model:

```py
model.hf_device_map
```
@@ -143,43 +167,34 @@ model.hf_device_map

```python out
{'transformer.wte': 0,
 'transformer.wpe': 0,
 'transformer.drop': 0,
 'transformer.h.0': 0,
 ...
 'transformer.h.21': 0,
 'transformer.h.22': 1,
 'transformer.h.23': 1,
 'transformer.h.24': 1,
 ...
 'transformer.h.47': 1,
 'transformer.ln_f': 1,
 'lm_head': 1}
```

It's fully possible to create your own device map for the layers to use as well, specifying the GPU device to use (a number), `"cpu"`, or `"disk"` and pass this in:

```python
device_map = {
    "transformer.wte": "cpu",
    "transformer.wpe": 0,
    "transformer.drop": "cpu",
    "transformer.h.0": "disk"
}

model = load_checkpoint_and_dispatch(
    model, checkpoint=weights_location, device_map=device_map
)
```

### Run the model
@@ -187,31 +202,30 @@ model = load_checkpoint_and_dispatch(model, "sharded-gpt-j-6B", device_map=my_de

Now that we have done this, our model lies across several devices, and maybe the hard drive. But it can still be used as a regular PyTorch model:

```py
from mingpt.bpe import BPETokenizer

tokenizer = BPETokenizer()
inputs = tokenizer("Hello, my name is").to(0)

outputs = model.generate(inputs, max_new_tokens=10, do_sample=False)[0]
tokenizer.decode(outputs.cpu().squeeze())
```

Behind the scenes, Accelerate added hooks to the model, so that:
- at each layer, the inputs are put on the right device (so even if your model is spread across several GPUs, it works)
- for the weights offloaded on the CPU, they are put on a GPU just before the forward pass and cleaned up just after
- for the weights offloaded on the hard drive, they are loaded in RAM then put on a GPU just before the forward pass and cleaned up just after

This way, your model can run for inference even if it doesn't fit on one of the GPUs or the CPU RAM!

<Tip warning={true}>

This only supports the inference of your model, not training. Most of the computation happens behind `torch.no_grad()` context managers to avoid spending GPU memory on intermediate activations.

</Tip>

### Designing a device map

You can let Accelerate handle the device map computation by setting `device_map` to one of the supported options (`"auto"`, `"balanced"`, `"balanced_low_0"`, `"sequential"`) or create one yourself if you want more control over where each layer should go.

<Tip>

@@ -221,7 +235,7 @@ You can let 🤗 Accelerate handle the device map computation by setting `device

All the options will produce the same result when you don't have enough GPU memory to accommodate the whole model (which is to fit everything that can on the GPU, then offload weights on the CPU or even on the disk if there is not enough RAM).

When you have more GPU memory available than the model size, here is the difference between each option (a short usage sketch follows this note):
- `"auto"` and `"balanced"` evenly split the model on all available GPUs, making it possible for you to use a batch size greater than 1.
- `"balanced_low_0"` evenly splits the model on all GPUs except the first one, and only puts on GPU 0 what does not fit on the others. This option is great when you need to use GPU 0 for some processing of the outputs, like when using the `generate` function for Transformers models.
- `"sequential"` will fit what it can on GPU 0, then move on to GPU 1 and so forth (so it won't use the last GPUs if it doesn't need to).
@@ -232,9 +246,9 @@ When you have more GPU memory available than the model size, here the difference

</Tip>
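
For example, a sketch of picking one of these strategies when dispatching the minGPT model from above (reusing `model` and `weights_location`; the choice of `"balanced_low_0"` is just for illustration):

```py
model = load_checkpoint_and_dispatch(
    model, checkpoint=weights_location, device_map="balanced_low_0", no_split_module_classes=['Block']
)
```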
First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `"cpu"` key for the maximum RAM you want used for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `"10GiB"` or `"10GB"`.
|
First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `"cpu"` key for the maximum RAM you want to use for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `"10GiB"` or `"10GB"`.
|
||||||
|
|
||||||
Here is an example where we don't want to use more than 10GiB on each of two GPUs and no more than 30GiB of CPU RAM for the model weights:
|
Here is an example where we don't want to use more than 10GiB on each of the two GPUs and no more than 30GiB of CPU RAM for the model weights:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from accelerate import infer_auto_device_map
|
from accelerate import infer_auto_device_map
|
||||||
@ -246,18 +260,18 @@ device_map = infer_auto_device_map(my_model, max_memory={0: "10GiB", 1: "10GiB",
|
|||||||
|
|
||||||
When a first allocation happens in PyTorch, it loads CUDA kernels which take about 1-2GB of memory depending on the GPU. Therefore you always have less usable memory than the actual size of the GPU. To see how much memory is actually used do `torch.ones(1).cuda()` and look at the memory usage.
|
When a first allocation happens in PyTorch, it loads CUDA kernels which take about 1-2GB of memory depending on the GPU. Therefore you always have less usable memory than the actual size of the GPU. To see how much memory is actually used do `torch.ones(1).cuda()` and look at the memory usage.
|
||||||
|
|
||||||
Therefore when you create memory maps with `max_memory` make sure to adjust the avaialble memory accordingly to avoid out-of-memory errors.
|
Therefore when you create memory maps with `max_memory` make sure to adjust the available memory accordingly to avoid out-of-memory errors.
|
||||||
|
|
||||||
</Tip>
|
</Tip>
|
||||||
|
|
||||||
Additionally, if you do some additional operations with your outputs without placing them back on the CPU (for instance inside the `generate` method of Transformers) and if you placed your inputs on a GPU, that GPU will consume more memory than the others (Accelerate always places the output back on the device of the input). Therefore if you would like to optimize the maximum batch size and you have many GPUs, give the first GPU less memory. For example, with BLOOM-176B on an 8x80GB A100 setup, the close-to-ideal map is:
```python
max_memory = {0: "30GIB", 1: "46GIB", 2: "46GIB", 3: "46GIB", 4: "46GIB", 5: "46GIB", 6: "46GIB", 7: "46GIB"}
```

As you can see, we gave the remaining 7 GPUs ~50% more memory than GPU 0.
If you opt to fully design the `device_map` yourself, it should be a dictionary with keys being module names of your model and values being a valid device identifier (for instance an integer for the GPUs), `"cpu"` for CPU offload, or `"disk"` for disk offload. The keys need to cover the whole model; you can then define your device map as you wish: for instance, if your model has two blocks (let's say `block1` and `block2`) which each contain three linear layers (let's say `linear1`, `linear2` and `linear3`), a valid device map can be:
```python
device_map = {"block1": 0, "block2": 1}
```

</Tip>
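To tie the pieces above together, here is a minimal sketch of using a computed (or hand-written) device map to load and dispatch a checkpoint; `MyModel` and the checkpoint path are placeholders to adapt to your own code, not part of the library:

```python
from accelerate import infer_auto_device_map, init_empty_weights, load_checkpoint_and_dispatch

# Instantiate the model skeleton without allocating real weights.
with init_empty_weights():
    my_model = MyModel(...)  # placeholder: your model class

# Either compute the device map automatically...
device_map = infer_auto_device_map(my_model, max_memory={0: "10GiB", 1: "10GiB", "cpu": "30GiB"})
# ...or write it by hand, e.g. device_map = {"block1": 0, "block2": 1}

# Load the weights and dispatch each module to its assigned device.
my_model = load_checkpoint_and_dispatch(my_model, checkpoint="path/to/checkpoint", device_map=device_map)
```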
## CPU offload only

If you want to offload your model to the CPU, you can use [`cpu_offload`]. As a result, all parameters of the model will be offloaded and only one copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that state dict and put on the execution device as they are needed, then offloaded again.

```python
from accelerate import cpu_offload

cpu_offload(model, execution_device)
```
You can also use [`cpu_offload_with_hook`]. This function offloads a model on the CPU and puts it back on the execution device when executed. The difference with [`cpu_offload`] is that the model stays on the execution device after the forward pass and is only offloaded again when the `offload` method of the returned `hook` is called. Furthermore, [`cpu_offload_with_hook`] is faster but saves less memory. It is useful for pipelines running a model in a loop:

```python
model_1, hook_1 = cpu_offload_with_hook(model_1, execution_device)
model_2, hook_2 = cpu_offload_with_hook(model_2, execution_device, prev_module_hook=hook_1)
model_3, hook_3 = cpu_offload_with_hook(model_3, execution_device, prev_module_hook=hook_2)

hid_1 = model_1(input)
for i in range(50):
    # model_1 is offloaded to the CPU at the first iteration, model_2 stays on the GPU for this whole loop.
    hid_2 = model_2(hid_1)
    # model_2 is offloaded to the CPU just before this forward.
    hid_3 = model_3(hid_2)
    # For model_3, you need to manually call the hook offload method.
    hook_3.offload()
```
## Disk offload only

To perform disk offload, you can use [`disk_offload`]. As a result, all parameters of the model will be offloaded as memory-mapped arrays in a given folder. During the forward pass, parameters will be accessed from that folder and put on the passed execution device as they are needed, then offloaded again.

```python
from accelerate import disk_offload

disk_offload(model, offload_dir, execution_device)
```
## Limits and further development

We are aware of the current limitations in the API:

- While this could theoretically work on just one CPU with potential disk offload, you need at least one GPU to run this API. This will be fixed in further development.
- [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) tries to maximize the GPU and CPU RAM it sees available when you execute it. While PyTorch is very good at managing GPU RAM efficiently (and giving it back when not needed), the same is not entirely true of Python and CPU RAM. Therefore, an automatically computed device map might be too intense on the CPU. Move a few modules to the disk device if you get crashes due to a lack of RAM.
- [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) attributes devices sequentially (to avoid moving things back and forth), so if your first layer is bigger than the size of the GPU you have, it will end up with everything on the CPU/Disk.
- [`load_checkpoint_and_dispatch`] and [`load_checkpoint_in_model`] do not perform any check on the correctness of your state dict compared to your model at the moment (this will be fixed in a future version), so you may get some weird errors if trying to load a checkpoint with mismatched or missing keys.
- The model parallelism used when your model is split across several GPUs is naive and not optimized, meaning that only one GPU works at a given time while the others sit idle.
- When weights are offloaded to the CPU/hard drive, there is no pre-fetching (yet, we will work on this for future versions), which means the weights are put on the GPU when they are needed and not before.
- Hard-drive offloading might be very slow if the hardware you run on does not have fast communication between disk and CPU (like NVMes).
docs/source/concept_guides/context_parallelism.md (new file, 204 lines)
# Context Parallel in 🤗`accelerate`

This guide covers the basics of using context parallelism in 🤗`accelerate`. For the more curious readers, we also cover some technicalities in the later sections.

## Why context parallelism?

With the advent of large language models, and recently reasoning models, sequence lengths have been growing rapidly. This, combined with the quadratic memory complexity of attention, has led to a need for more efficient ways to train models with long sequences.

With a sequence length of 128k, the memory requirement of the attention matrix is `128k * 128k * 2 bytes * num_heads = ~32 GB * num_heads` for `bf16` precision, given a vanilla attention implementation. Granted, with the usage of `flash attention` or `SDPA`, which do not materialize these attention weights, this decreases drastically, but the growth in memory requirements is still considerable.
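As a quick sanity check of the numbers above (pure arithmetic, per attention head and per batch element):

```python
seq_len = 128 * 1024      # 128k tokens
bytes_per_element = 2     # bf16
attn_matrix_bytes = seq_len * seq_len * bytes_per_element
print(f"{attn_matrix_bytes / 2**30:.0f} GiB per head")  # ~32 GiB, to be multiplied by num_heads
```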
Context parallelism allows us to shard the inputs to the attention computation along the sequence dimension and compute the attention in parallel on multiple GPUs. With this, we can train models with long sequences, scaling potentially to 1M+ sequence length.
## How to use context parallelism?

```diff
  from accelerate.utils import ParallelismConfig, TorchContextParallelConfig

+ cp_config = TorchContextParallelConfig(
+     cp_comm_strategy="alltoall",  # no need to use cp_config at all, if you want to use the default "allgather"
+ )

+ parallelism_config = ParallelismConfig(
+     cp_size=8,
+     cp_handler=cp_config,  # or just cp_size=8, if you want to use the default "allgather"
+ )

  accelerator = Accelerator(
      ...,
      parallelism_config=parallelism_config,
  )
```
As with any other feature in 🤗`accelerate`, you can also enable context parallelism by passing the corresponding flags to `accelerate launch`.
In this case, it's no different:

```bash
accelerate launch --parallelism-config-cp-size 8 --parallelism-config-cp-comm-strategy [allgather|alltoall] ...
```
> [!Tip]
> You can also set the `cp_size` and `cp_comm_strategy` in the `accelerate config` command, which will save them in your `accelerate` configuration file, so you don't have to pass them every time you launch your script.

> [!Tip]
> Context parallelism is compatible with other parallelism strategies, such as data parallelism, tensor parallelism and FSDP2.
> You can simply combine them by setting your parallelism sizes to the desired values, e.g. `--parallelism-config-dp-size 8 --parallelism-config-tp-size 2 --parallelism-config-cp-size 8`. Or you can use the `ParallelismConfig` class to set them programmatically, as sketched below.

> [!Warning]
> Context parallelism is tightly coupled with `FSDP2`, which you can learn more about in the [FSDP2 introduction](fsdp1_vs_fsdp2.md). This means context parallelism only works if you use `FullyShardedDataParallelPlugin` or `--use-fsdp` with the FSDP version set to 2 in your program. If `FSDP2` is not used, an error will be raised.

> [!Warning]
> Context parallelism works only with [SDPA](https://docs.pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) and only with no mask or a causal mask. We can't properly detect this for you, so it's your responsibility to ensure that you are using `SDPA` with no mask or a causal mask. If you use any other attention implementation, it will raise an error.
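For reference, combining the sizes programmatically (as mentioned in the tip above) looks roughly like this. The keyword names mirror the CLI flags and may differ slightly between versions, so treat this as a sketch rather than a definitive API reference:

```python
from accelerate import Accelerator
from accelerate.utils import ParallelismConfig

# Hypothetical 8-GPU layout: 2-way data sharding * 2-way tensor parallel * 2-way context parallel.
parallelism_config = ParallelismConfig(dp_shard_size=2, tp_size=2, cp_size=2)
accelerator = Accelerator(parallelism_config=parallelism_config)
```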
After enabling context parallelism with the methods mentioned above, you can then apply it to your training loop. We provide a thin wrapper around [`torch.distributed.tensor.experimental.context_parallel`](https://docs.pytorch.org/docs/stable/distributed.tensor.html#torch.distributed.tensor.experimental.context_parallel) that you can use in your training loop and that abstracts some of the complexity of using it (more on this later). To minimize the changes you have to make in your training loop, we provide a context manager that is a no-op if context parallelism is not enabled and applies context parallelism if it is. This way, you can use it in your training loop without changing any code based on your parallelism configuration.
You can use it as follows:

```python
for batch in dataloader:
    with accelerator.maybe_context_parallel(
        buffers=[batch["input_ids"], batch["attention_mask"]],
        buffer_seq_dims=[1, 1],
        no_restore_buffers={batch["input_ids"], batch["labels"]},
    ):
        outputs = model(**batch)
        ...
```
> [!Warning]
> This context manager has to be recreated with each training step, as shown in the example above. It's crucial to do so.

This can potentially scale your context size to 1M+ sequence length. Below, we showcase the speed and memory usage of context parallelism for up to 256k context size. We can see that when we double the context size and the number of GPUs, we achieve consistent memory usage, potentially enabling endless context length scaling.
<p align="center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/cp_perf.png" alt="context parallelism memory usage" />
    <br>
    <em>Figure 1: Memory usage and speed of context parallelism for up to 256k context size.</em>
</p>
> [!Tip]
> These examples were created with a script you can find [in the examples folder](https://github.com/huggingface/accelerate/blob/main/examples/fsdp2/nd_parallel.py). To run the example on 8 H100 GPUs (128k sequence length), you can use the following command:
> ```bash
> accelerate launch --use-fsdp --fsdp-activation-checkpointing=TRUE examples/fsdp2/nd_parallel.py --cp-size=8 --sequence-length=128000
> ```
## Accelerate's interface

The context manager takes a few arguments that are used to configure context parallelism:

- `buffers`: a list of tensors that are to be sharded across the sequence dimension. These tensors are usually the input ids, labels and attention mask.
- `buffer_seq_dims`: a list of integers that specify the sequence dimension of each buffer, in the order of the `buffers` list. If you pass `buffers=[input_ids, shift_labels]` with both having shape `[batch_size, sequence_length]`, you would pass `buffer_seq_dims=[1, 1]`, as the sequence dimension is the second dimension of the tensors. This is required for correct computation of the model outputs.
- `no_restore_buffers`: the implementation of context parallelism modifies the buffers in-place, converting them to `torch.distributed.tensor.DTensor`s. After the context manager exits, a communication kernel would need to be launched to restore the buffers to their original state (usually an all-gather). This takes some time, so it is recommended to pass the same tensors as in the `buffers` argument, to avoid unnecessary communication, unless you are sure that you need to use the buffers after the context manager exits.
> [!Warning]
> Context parallelism is not compatible with `labels` that are a copy of `input_ids`, which models from 🤗 transformers can shift to enable causal language modeling themselves.
> Imagine this case:
> labels = [l1, l2, l3, l4, ... li]
> If we apply context parallelism, each rank would end up with a part of the labels, such as:
> labels_rank_0 = [l1, l2], labels_rank_1 = [l3, l4], ...
> After the transformers modelling code shifts the labels, each rank would end up with:
> labels_rank_0 = [l2, PAD], labels_rank_1 = [l4, PAD], ...
> where `PAD` is a padding token. This would result in incorrect loss computation, as the labels are not aligned with the inputs anymore.
> Because of this, you need to manually shift the labels before passing them to the model, as sketched below.
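A minimal sketch of doing this shift yourself is shown here; it assumes a `batch` with `input_ids` of shape `[batch_size, sequence_length]` and a model whose output exposes `logits`. The `shift_labels` helper and `IGNORE_INDEX` constant are illustrative, not part of the Accelerate API:

```python
import torch
import torch.nn.functional as F

IGNORE_INDEX = -100  # illustrative padding/ignore value

def shift_labels(input_ids: torch.Tensor) -> torch.Tensor:
    # Next-token prediction: the label at position t is the token at position t + 1.
    labels = input_ids.new_full(input_ids.shape, IGNORE_INDEX)
    labels[:, :-1] = input_ids[:, 1:]
    return labels

# Shift once, before sharding, so every rank holds labels aligned with its input shard.
batch["shift_labels"] = shift_labels(batch["input_ids"])

with accelerator.maybe_context_parallel(
    buffers=[batch["input_ids"], batch["shift_labels"]],
    buffer_seq_dims=[1, 1],
    no_restore_buffers={batch["input_ids"], batch["shift_labels"]},
):
    logits = model(input_ids=batch["input_ids"]).logits
    # Compute the loss manually so the model never shifts the (already sharded) labels itself.
    loss = F.cross_entropy(
        logits.flatten(0, 1), batch["shift_labels"].flatten(), ignore_index=IGNORE_INDEX
    )
```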
## Configurable options

Apart from `cp_size`, Accelerate provides only a single option to configure context parallelism:

- `cp_comm_strategy`: the rotation method to use for the shards. We strongly recommend keeping this as `"allgather"`, as it's very likely to outperform `"alltoall"` in most cases.

The context parallel size is rather self-explanatory: it's the number of ranks across which the inputs are sharded.
The context parallel shard rotation defines how the shards of the inputs are rotated across ranks. We'll cover the two options in more detail in the next section.

You can see an end-to-end example in the [ND parallel example](https://github.com/huggingface/accelerate/blob/main/examples/fsdp2/nd_parallel.py) file, where you can train an 8B model with up to 128k context length on a single 8xH100 node. Using multi-node training, you can scale this to 1M+ sequence length on multiple GPUs. You can also seamlessly combine it with other parallelism strategies to fit your needs.
## Technical details

> [!Tip]
> This section is fairly technical, so if you don't need to learn the internals of context parallelism, you can skip it and start building 🚀

We're going to be using the word `shard` extensively in the following sections, so let's define it first. If we call a tensor `sharded` across the `D`-th dimension, across `N` ranks, we mean that this tensor is split into `N` parts, where each part of the tensor has shape `[..., D//N, ...]`.
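For example, with `N = 4` ranks and the sequence dimension as dimension 1, the definition plays out like this (a plain PyTorch illustration of the notation, not the actual implementation):

```python
import torch

x = torch.randn(2, 8, 16)          # [batch, seq, hidden]
shards = torch.chunk(x, 4, dim=1)  # split into N = 4 parts along the sequence dimension
print(shards[0].shape)             # torch.Size([2, 2, 16]) -> the seq dimension becomes 8 // 4
```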
## So how does it work?
Context parallelism works by sharding the `Q`, `K` and `V` matrices across the sequence dimension. Each rank has its assigned shard of `Q`, let's call it `Q_i`. This matrix stays only on this rank during the whole computation. Similarly, each rank has its own shard of `K` and `V`, let's call them `K_i` and `V_i`. Then, each rank calculates attention with its own shards `Q_i`, `K_i` and `V_i`, let's call it `attn_i`. During this computation, a communication kernel is launched to gather the `Ks` and `Vs` from all other ranks. Which communication primitive is used depends on the `context_parallel_shard_rotation` option.
This way, each rank gets to calculate local attention, first with `Q_i`, `K_i` and `V_i`, then with `K_j` and `V_j` from all other ranks. As each rank holds `Q`, `K` and `V` matrices that are sharded across the sequence dimension, the resulting matrices are smaller and can fit on a single GPU.

We can formalize this in the following pseudocode:
```python
comm_kernel = {"allgather": allgather, "alltoall": alltoall}[context_parallel_shard_rotation]
Qi, Ki, Vi = shard(Q, K, V, seq_dim)
attn[i] = attn(Qi, Ki, Vi)
for j in range(context_parallel_size):
    Kj, Vj = comm_kernel()
    attn[j] = attn(Qi, Kj, Vj)  # [batch, num_heads, seq_len // context_parallel_size, head_dim]

final_attn = combine(attn)
```
## all-to-all vs all-gather

### all-gather

So what's the difference between all-to-all and all-gather? With all-gather, the communication is very simple. After (well, before, since it usually takes longer) we compute the local attention `attn_i`, we launch an all-gather to collect the `Ks` and `Vs` from all other ranks. Once this communication is done, each rank has the `Ks` and `Vs` from all other ranks and can compute the attention with them sequentially.
In an ideal scenario, the all-gather finishes at the exact moment the calculation of `attn_i` is done. However, this never happens in practice, so the best real overlap is achieved when the full `attn_i` computation is overlapped with a part of the communication; then, to start the computation with `K_j` and `V_j`, we wait for the all-gather to finish.

### all-to-all

All-to-all, sometimes called `ring-rotation`, utilizes a ring-like communication pattern. After concluding the `attn_i` computation, an all-to-all is launched to send `K_i` and `V_i` to the neighbouring ranks. We then repeat this `context_parallel_size - 1` times, so that each rank sees all the shards of `K` and `V` from all other ranks once. In an ideal scenario, we prefetch shards `K_i+1` and `V_i+1` from the neighbouring rank and this communication is exactly overlapped with the computation of our current `attn_i`. Again, realistically, this perfect overlap doesn't ever happen. Given the nature of this approach, if we don't achieve perfect overlap, the penalty is much larger than with all-gather.
## How to choose the right rotation method?

In theory, all-to-all should be the better choice, though in practice it rarely is. Therefore, we default to all-gather, as it's more likely to achieve better performance. Extensive [benchmarks](https://discuss.pytorch.org/t/distributed-w-torchtitan-breaking-barriers-training-long-context-llms-with-1m-sequence-length-in-pytorch-using-context-parallel/215082) from the `torchtitan` team also show that all-to-all rarely outperforms all-gather. We still provide both options, though, as you might find one to be better for your use case.

You can directly see this issue in the profiler output in the image below:
<p align="center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/cp_all_to_all.png" alt="all-to-all profiler output" />
    <br>
    <em>Figure 2: In red you can see the idle time, while we wait for the all-to-all kernel to finish. Highlighted in the first blue bar, you can see that it takes ~250us to finish, which is repeated N-1 times for each attention call, where N is the context parallel size.</em>
</p>
## Why only FSDP2?

We only support context parallelism with `FSDP2`, as we create a joint mesh of `context_parallel_size` and `dp_shard_size` to utilize its full potential.
How it works: we shard the model across the joint mesh of size `cp_size * dp_shard_size`, which maximizes the memory savings.
This is a "free lunch" of sorts, as the `FSDP` communication is fully overlapped with the computation of attention, as shown in the images below.

<p align="center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/examples/fsdp2/cp_why_fsdp2.png" alt="why FSDP2+CP" />
    <br>
    <em>Figure 3: In blue rectangles (Stream 23), you can see that the pre-fetch of the `FSDP` shard is fully overlapped with the computation of attention (Stream 7), while in red rectangles (Stream 24), you can see that the all-gather kernel results in a bubble of idle time, in which our compute stream (7) is idle.</em>
</p>

In the figure above, you can also note the difference between all-to-all and all-gather. While in all-to-all (Figure 2) we launch a communication kernel N-1 times for each attention call, in all-gather (Figure 3) we launch a communication kernel only once. This results in a bigger bubble, but it only happens once per attention call, while in all-to-all, it happens N-1 times.
## Data dispatching in joint mesh

We make sure to dispatch the same batch of data to the whole `cp` subgroup, so that the results are correct (meaning each rank in a `cp` subgroup gets the same batch of data). However, we also dispatch different batches to each rank of the `dp_shard` group.
Imagine it like this:
```
# 8 GPUs, --dp_shard_size 4, --cp_size 2
# mesh = [[0, 1], [2, 3], [4, 5], [6, 7]]
# model is sharded across the whole mesh (each GPU holds 1/8 of the model)
# GPUs 0,1 = batch 0
# GPUs 2,3 = batch 1
# ... and so on.
```
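A toy illustration of the dispatch rule above, assuming ranks are grouped into `cp` subgroups of consecutive ranks as in the mesh shown:

```python
world_size, cp_size = 8, 2

for rank in range(world_size):
    # Every rank in the same cp subgroup shares a batch; different dp_shard groups get different batches.
    batch_index = rank // cp_size
    print(f"rank {rank} -> batch {batch_index}")
```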
# Executing and deferring jobs

When you run your usual script, instructions are executed in order. Using Accelerate to deploy your script on several GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be faster than others.
This instruction (`accelerator.wait_for_everyone()`) will block all the processes that arrive first until all the other processes have reached that point (if you run your script on just one GPU or CPU, this won't do anything).

A few example cases of when to use this utility are listed below:

<Tip>
## Downloading a Dataset

When downloading a dataset, you should download it first on the main process and then load the cached dataset afterward.

<Tip>
```python
with accelerator.main_process_first():
    # `datasets.map` and `tokenize_function` are assumed from the surrounding (elided) example
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["idx", "sentence1", "sentence2"],
    )
```
## Applying checks such as Early Stopping

To have a check that works with a flag set by a particular process, the `set_trigger` and `check_trigger` API should be used. Useful examples for doing so can include situations such as using early stopping and monitoring the loss (as each loss slightly differs on each process).

Call [`Accelerator.set_trigger`] when your condition has been met, and [`Accelerator.check_trigger`] when checking if that condition has been met in any process:

```python
for (x, y) in data_loader:
    logits = model(x)
    loss = loss_func(logits, y)
    # Assume `should_do_early_stopping` is a custom-defined function that returns a conditional
    if should_do_early_stopping(loss):
        accelerator.set_trigger()

    # Later in the training script when we need to check for the breakpoint
    if accelerator.check_trigger():
        break
```
docs/source/concept_guides/fsdp1_vs_fsdp2.md (new file, 105 lines)
# FSDP1 vs FSDP2

This guide explains the key differences between `FSDP1` and `FSDP2` and helps you migrate your existing code to use `FSDP2` with minimal changes.

## How is FSDP2 better than FSDP1?

First, we want to understand how `FSDP1` and `FSDP2` work internally to understand the differences between them. This also helps us understand the limitations of `FSDP1` and how `FSDP2` solves them.

We'll be discussing a scenario where we have a single `Layer` that contains 3 `Linear` layers and is wrapped using `FSDP` to be sharded across 2 GPUs.

<div align="center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/layer.png" alt="Layer">
</div>
### FSDP1

First, we have to understand the original `FSDP1` and the limitations it brings. It represents each `FSDP` module as a single `FlatParameter`, which is a single 1D tensor that contains all of the module's parameters, which then get sharded across ranks. I.e., if you wrap the `Layer` with `FSDP1`, you'd achieve something like this:

<div align="center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/fsdp1.png" alt="FSDP1">
</div>

You might notice a problem. The whole `Layer` gets flattened into a single `FlatParameter`, which then gets sharded across ranks. But if it's a single `FlatParameter` object, how do we store metadata? That is one of the limitations: properly storing per-parameter metadata such as `dtype`, `requires_grad`, etc. is not possible without some ugly hacks.
### FSDP2

This is why `FSDP2` was introduced. It doesn't use `FlatParameter`; instead, it uses `DTensor`, which is short for "Distributed Tensor". Each `DTensor` basically represents a vanilla `torch.Tensor` that has been sharded across ranks. It contains metadata about the original `torch.Tensor`, how it's sharded, what the [placement type](https://pytorch.org/docs/stable/distributed.tensor.html#module-torch.distributed.tensor.placement_types) is, and so on. This is why it's called `per-parameter sharding`. The following figure shows the difference:

<div align="center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/fsdp2.png" alt="FSDP2">
</div>

Each parameter of the original `Layer` is sharded across the 0th dimension and split between 2 GPUs. Now, each `Linear` layer is a separate `DTensor`, and storing metadata per parameter is possible and straightforward.
> [!TIP]
> In the image above, the tensors were sharded across the 1st dimension for the sake of fitting the image on the screen; in reality, they are sharded across the 0th dimension, as stated above.
## What does FSDP2 offer?

`FSDP2` is a new and improved version of PyTorch's fully sharded data parallel training API. Its main advantage is using `DTensor` to represent sharded parameters. Compared to `FSDP1`, it offers:

- A simpler internal implementation, where each `Parameter` is a separate `DTensor`
- Simple partial parameter freezing because of the above, which makes methods such as [`LoRA`](https://arxiv.org/abs/2106.09685) work out of the box (see the sketch after this list)
- With `DTensor`, `FSDP2` supports mixing `fp8` and other parameter types in the same model out of the box
- Faster and simpler checkpointing without extra communication across ranks using `SHARDED_STATE_DICT` and [`torch.distributed.checkpoint`](https://pytorch.org/docs/stable/distributed.checkpoint.html); this way, each rank only saves its own shard and the corresponding metadata
- For loading, it uses a `state_dict` of the sharded model to directly load the sharded parameters
- Support for asynchronous checkpointing, where parameters are first copied to CPU memory; after this, the main thread continues training while another thread stores the parameters on disk
- Memory efficiency and deterministic memory usage: `FSDP2` doesn't use `recordStream` anymore and uses stream-to-stream synchronization (for more technical details see [this forum post](https://dev-discuss.pytorch.org/t/fsdp-cudacachingallocator-an-outsider-newb-perspective/1486) and [this issue](https://github.com/pytorch/pytorch/issues/114299))
- In the future, optimizations of the communication patterns via `torch.compile` are planned, further improving performance and memory efficiency
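As an illustration of the partial parameter freezing mentioned in the list above: it is just regular PyTorch `requires_grad` handling, which `FSDP2` respects per parameter (the `lora_` naming below is only an example, not a convention enforced by any library):

```python
# Freeze everything except (hypothetically named) LoRA adapter parameters before wrapping/preparing the model.
for name, param in model.named_parameters():
    param.requires_grad = "lora_" in name
```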
## API Differences

We have already discussed the internal differences; now let's discuss the differences that you, as a user, will need to know.

Here are the main changes in configuration options when using `FSDP2` through the `accelerate` CLI:

Previous (`FSDP1`) | New (`FSDP2`) | What Changed
-- | -- | --
`--fsdp_sharding_strategy` | `--fsdp_reshard_after_forward` | replaces `--fsdp_sharding_strategy`, changed to `true` (previously `FULL_SHARD`) or `false` (previously `SHARD_GRAD_OP`)
`--fsdp_backward_prefetch` | \*\***REMOVED**\*\* | `FSDP2` uses the previous `BACKWARD_PRE` option by default, as only this allows communication and computation overlap
`--fsdp_forward_prefetch` | \*\***NOT YET IMPLEMENTED**\*\* | How to implement this is under active discussion; for now it is not supported in `FSDP2`
`--fsdp_sync_module_states` | \*\***REMOVED**\*\* | with `FSDP2`, this parameter becomes redundant
`--fsdp_cpu_ram_efficient_loading` | `--fsdp_cpu_ram_efficient_loading` | if `true`, `FSDP2` will similarly load the model only on rank 0, and then parameters get synced to other ranks; this is the same behavior as `FSDP1`, however, setting `--fsdp_sync_module_states` isn't required anymore
`--fsdp_state_dict_type` | `--fsdp_state_dict_type` | `LOCAL_STATE_DICT` becomes obsolete and with `FSDP2` `SHARDED_STATE_DICT` is the default option, which results in no extra communication and each rank saving its own shard; the other possible option is `FULL_STATE_DICT`, which results in extra communication and a spike in memory usage but saves the full model from rank 0
`--fsdp_use_orig_params` | \*\***REMOVED**\*\* | `FSDP2` uses a `DTensor` class in the background, which means it *always* uses the original parameters by default
\*\***NEW**\*\* | `--fsdp_version` | `1` is the default option, to not break existing code; set to `2` to use `FSDP2`

For all other options that remain unchanged, see the [`FSDP` documentation](../usage_guides/fsdp.md).
## How to Switch to FSDP2

### If using Python code:
Simply set `fsdp_version=2` when creating your plugin and replace options according to the table above.

```python
from accelerate import FullyShardedDataParallelPlugin, Accelerator

fsdp_plugin = FullyShardedDataParallelPlugin(
    fsdp_version=2,
    # other options...
)
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```

### If using YAML config:
Use our conversion tool:
```bash
accelerate to-fsdp2 --config_file config.yaml --output_file new_config.yaml
```

This will automatically convert all FSDP1 settings to their FSDP2 equivalents. Use `--overwrite` to update the existing file instead of creating a new one.
docs/source/concept_guides/fsdp_and_deepspeed.md (new file, 192 lines)
# FSDP vs DeepSpeed

Accelerate offers flexibility of training frameworks by integrating two extremely powerful tools for distributed training, namely [PyTorch FSDP](../usage_guides/fsdp) and [Microsoft DeepSpeed](../usage_guides/deepspeed). The aim of this tutorial is to draw parallels, as well as to outline potential differences, to empower the user to switch seamlessly between these two frameworks.

<Tip>

To switch between the frameworks, we recommend launching code with `accelerate launch`, passing in the correct config file with `--config_file`, or passing in the respective arguments directly for [FSDP and DeepSpeed](../package_reference/cli#accelerate-launch).

Example Accelerate configurations can be found here for [DeepSpeed](../usage_guides/deepspeed#accelerate-deepspeed-plugin) and [FSDP](../usage_guides/fsdp#how-it-works-out-of-the-box), or in the [example zoo under "Launch Configurations"](../usage_guides/explore).

</Tip>

<Tip warning={true}>

This tutorial is for single-node, multi-GPU scenarios only.

</Tip>
## Configuring Functionalities

Model tensors are split across different GPUs in an attempt to scale up model sizes; this is termed *sharding* in FSDP and *partitioning* in DeepSpeed. FSDP sharding and DeepSpeed ZeRO (partitioning) stages are configured by `--fsdp_sharding_strategy` and `--zero_stage`, respectively. In particular, FSDP `FULL_SHARD` maps to DeepSpeed ZeRO stage `3`; see this [comprehensive mapping between FSDP sharding and DeepSpeed ZeRO settings](../usage_guides/fsdp#mapping-between-fsdp-sharding-strategies-and-deepspeed-zero-stages). The table below summarizes and groups similar settings:

Group | Framework | Configuration | Example | Restrictions (if any)
--|--|--|--|--
sharding / partitioning | FSDP<br>DeepSpeed | `--fsdp_sharding_strategy`<br>`--zero_stage` | `1` (`FULL_SHARD`) <br>`3` |
offload | FSDP<br>DeepSpeed | `--fsdp_offload_params`<br>`--offload_param_device`<br>`--offload_optimizer_device` | `true`<br>`cpu`<br>`cpu` | all or nothing <br><br>
model loading | FSDP<br>DeepSpeed | <span style="white-space:nowrap;">`--fsdp_cpu_ram_efficient_loading`</span><br>`--zero3_init_flag` | `true`<br>`true` | <br>only ZeRO 3
efficient checkpointing | FSDP<br>DeepSpeed | `--fsdp_state_dict_type`<br>`--zero3_save_16bit_model` | `SHARDED_STATE_DICT`<br>`true` | <br>only ZeRO 3
weights prefetching | FSDP<br><br>DeepSpeed | `--fsdp_forward_prefetch`<br>`--fsdp_backward_prefetch`<br>None | `true`<br>`BACKWARD_PRE` | <br><br>
model | FSDP<br><br>DeepSpeed | `--fsdp_auto_wrap_policy`<br><span style="white-space:nowrap;">`--fsdp_transformer_layer_cls_to_wrap`</span><br>None | `TRANSFORMER_BASED_WRAP`<br><Layer Class> |<br>Usually not needed <br>Transparent to user.
parameters summoning | FSDP<br>DeepSpeed | `--fsdp_use_orig_params`<br>None | `true` | required for `torch.compile`<br>Transparent to user
parameters syncing | FSDP<br>DeepSpeed | `--fsdp_sync_module_states`<br>None | `true` |
training | FSDP<br>DeepSpeed | None<br>`--gradient_accumulation_steps`<br>`--gradient_clipping` | <br>`auto`<br>`auto` | Transparent to user

For detailed descriptions of the above, refer to the [`Accelerate` launch documentation](../package_reference/cli#accelerate-launch).
<Tip>

To access other DeepSpeed configurations, such as mixed precision settings, you need to pass in a `--deepspeed_config_file`; see the [documentation](../usage_guides/deepspeed#deepspeed-config-file).

DeepSpeed can also be configured via [`DeepSpeedPlugin`], e.g., `DeepSpeedPlugin.zero_stage` is the equivalent of `--zero_stage`, and `DeepSpeedPlugin.hf_ds_config` can be used to pass `--deepspeed_config_file`.

</Tip>

<Tip>

FSDP can also be configured via [`FullyShardedDataParallelPlugin`], e.g., `FullyShardedDataParallelPlugin.sharding_strategy` is the equivalent of `--fsdp_sharding_strategy` (see the sketch below).

</Tip>
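As a rough sketch of the two plugin-based routes from the tips above (pick one of the two; the option values are placeholders to adapt to your setup, and the exact accepted value types may vary between versions):

```python
from accelerate import Accelerator, DeepSpeedPlugin, FullyShardedDataParallelPlugin

# DeepSpeed route: roughly mirrors `--zero_stage 3` plus the gradient flags on the CLI.
deepspeed_plugin = DeepSpeedPlugin(zero_stage=3, gradient_accumulation_steps=1, gradient_clipping=1.0)
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)

# FSDP route: roughly mirrors `--fsdp_sharding_strategy FULL_SHARD` (use instead of the above, not together).
fsdp_plugin = FullyShardedDataParallelPlugin(sharding_strategy="FULL_SHARD")
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```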
### Checkpointing

Do note that FSDP can be configured via `--fsdp_state_dict_type` to save either full or sharded checkpoints.

<Tip>

For DeepSpeed Zero3, one could pass `--zero3_save_16bit_model true`, which conveniently consolidates the model to a single rank and saves; this is the FSDP equivalent of `fsdp_state_dict_type: FULL_STATE_DICT`.

</Tip>

<Tip warning={true}>

For large models, consolidating the model to a single rank can be very slow.

</Tip>

<Tip>

For quicker checkpointing, for FSDP use `fsdp_state_dict_type: SHARDED_STATE_DICT`, and for DeepSpeed Zero3 [use the `zero_to_fp32.py` script to post-convert sharded checkpoints](https://www.deepspeed.ai/tutorials/zero/#extracting-weights).

</Tip>
### Offloading

FSDP only allows *all-or-nothing* offload (i.e., either offload parameters, gradients, and optimizer states, or keep them all on the GPU), but DeepSpeed can offload parameters and optimizer states separately. Furthermore, DeepSpeed also supports [offloading to NVMe](https://www.deepspeed.ai/docs/config-json/#parameter-offloading).

### Prefetching

FSDP allows two prefetching configurations, `--fsdp_forward_prefetch` and `--fsdp_backward_prefetch`, to improve the overlap of communication and computation at the cost of extra memory; see the [FSDP documentation](https://pytorch.org/docs/stable/fsdp.html).
For DeepSpeed, prefetching is turned on when needed, depending on certain hyper-parameters like `stage3_param_persistence_threshold`, `stage3_max_reuse_distance`, etc., [that can be configured for Zero3](https://www.deepspeed.ai/docs/config-json/#parameter-offloading); `accelerate` may set these hyper-parameters automatically if you don't set them explicitly in the DeepSpeed config file.

<Tip>

For FSDP, set `fsdp_backward_prefetch: BACKWARD_PRE` for improved throughput if memory allows.

</Tip>

### Model Loading

While FSDP requires an explicit `--fsdp_cpu_ram_efficient_loading true` to activate efficient model loading, `transformers` will activate a similar feature whenever DeepSpeed Zero3 is used.

<Tip>

For FSDP, whenever you set `--fsdp_cpu_ram_efficient_loading true`, `accelerate` will automatically set `sync_module_states` to true.
For RAM-efficient loading, the weights are loaded only on a single rank, and thus require `sync_module_states` to broadcast the weights to the other ranks.

</Tip>
### Model

FSDP requires an explicit `--fsdp_auto_wrap_policy` for the algorithm to decide how to schedule the all-gather and reduce-scatter operations. For DeepSpeed this is transparent to the user.

<Tip>

For FSDP, simply set `fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP`. With the latest [`transformers`] versions, we try our best to figure out the suitable `fsdp_transformer_layer_cls_to_wrap` for HF transformers models. However, if you get an error regarding it, please specify this.

</Tip>

### Parameters Summoning

FSDP requires an explicit `--fsdp_use_orig_params` flag if using `torch.compile`, see [the PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html#module-torch.distributed.fsdp). For DeepSpeed this is transparent to the user.

<Tip>

For FSDP, when using `torch.compile` please set `fsdp_use_orig_params: True`.

</Tip>
## Training

DeepSpeed requires explicit `--gradient_accumulation_steps` and `--gradient_clipping` flags. For FSDP this is transparent to the user.

<Tip>

When using DeepSpeed, set `gradient_accumulation_steps: "auto"` and `gradient_clipping: "auto"` to automatically pick up values set in the [`Accelerator`] or [`TrainingArguments`] (if using `transformers`).

</Tip>
## On Differences in Data Precision Handling

To discuss how data precision is handled in both FSDP and DeepSpeed, it is instructive to first give an overview of how model parameters are handled in these frameworks. Before the model / optimizer parameters are distributed across GPUs, parameter preparation is involved to first "flatten" them to one-dimensional [`torch.Tensor`](https://pytorch.org/docs/stable/tensors.html#torch-tensor)s. The implementations of FSDP and DeepSpeed differ in the `dtype` in which these "flattened" parameters are stored, and there are ramifications with regard to how [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim)s allocate their `dtype`s. The table below outlines the processes for both frameworks; the "Local" column indicates the process occurring at a per-GPU level, therefore any memory overheads from upcasting should be understood to be amortized by the number of GPUs used.

<Tip>

As a rule of thumb, for stable training with automatic mixed precision, all the trainable parameters have to be in `torch.float32`.

</Tip>

Process | Local | Framework | Details
--|--|--|--
Loading, i.e., [`AutoModel.from_pretrained(..., torch_dtype=torch_dtype)`] | | |
Preparation, i.e., creation of "flat params" | ✅ | FSDP<br>DeepSpeed | created in `torch_dtype`.<br> disregards `torch_dtype`, created in `float32`.
Optimizer initialization | ✅ | FSDP<br>DeepSpeed | creates parameters in `torch_dtype`<br> creates parameters in `float32`
Training Step, i.e., forward, backward, reduction | | FSDP<br>DeepSpeed | follows [`MixedPrecision`](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.MixedPrecision)<br> follows `deepspeed_config_file` mixed precision settings.
Optimizer (Pre-Step) | ✅ | FSDP<br>DeepSpeed | upcasting (if any) to `torch_dtype`<br>upcasted to `float32`
Optimizer (Actual Step) | ✅ | FSDP<br>DeepSpeed | occurs in `torch_dtype` <br> occurs in `float32`.
<Tip warning={true}>

Therefore, when using DeepSpeed with a small number of GPUs, be aware of potentially significant memory overheads due to the upcasting during preparation.

</Tip>

<Tip>

With FSDP, in the absence of mixed precision, it is possible to operate the [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) in low-precision `torch_dtype`, which may be helpful when using a small number of GPUs.

</Tip>

<Tip warning={true}>

With mixed precision, FSDP and DeepSpeed will upcast in the model preparation step (c.f. the table above). But do note that FSDP will then save checkpoints in the upcasted precision; DeepSpeed may still save low-precision checkpoints if `--zero3_save_16bit_model` is specified.

</Tip>
To clarify the above table, consider the concrete examples below; the optimizer pre-step and actual step are combined for brevity. With FSDP it is possible to operate in the two modes shown below, but DeepSpeed can only operate in one.

Framework | Model Loading (`torch_dtype`) | Mixed Precision | Preparation (Local) | Training | Optimizer (Local)
--|--|--|--|--|--
FSDP | bf16 | default (none) | bf16 | bf16 | bf16
FSDP | bf16 | bf16 | fp32 | bf16 | fp32
DeepSpeed | bf16 | bf16 | fp32 | bf16 | fp32
docs/source/concept_guides/gradient_synchronization.md (new file, 184 lines)
|
||||||
|
|
||||||
|
PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.
|
||||||
|
This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints
|
||||||
|
when using the `ddp` module.
|
||||||
|
|
||||||
|
These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods.
|
||||||
|
This happens when the model is wrapped with `DistributedDataParallel`:
|
||||||
|
```python
|
||||||
|
import torch.nn as nn
|
||||||
|
from torch.nn.parallel import DistributedDataParallel
|
||||||
|
|
||||||
|
model = nn.Linear(10, 10)
|
||||||
|
ddp_model = DistributedDataParallel(model)
|
||||||
|
```
|
||||||
|
In Accelerate this conversion happens automatically when calling [`~Accelerator.prepare`] and passing in your model.
|
||||||
|
|
||||||
|
```diff
|
||||||
|
+ from accelerate import Accelerator
|
||||||
|
+ accelerator = Accelerator()
|
||||||
|
import torch.nn as nn
|
||||||
|
- from torch.nn.parallel import DistributedDataParallel
|
||||||
|
|
||||||
|
model = nn.Linear(10,10)
|
||||||
|
+ model = accelerator.prepare(model)
|
||||||
|
```
|
||||||
|
|
||||||
|
## The slowdown in gradient accumulation

You now understand that PyTorch adds hooks to the `forward` and `backward` methods of your PyTorch model when training in a distributed setup. But how does this risk slowing down your code?

In DDP (distributed data parallel), processes are expected to perform specific operations in a specific order at specific points, and these must also occur at roughly the same time before moving on.

The most direct example is when you update model parameters through `optimizer.step()`.
Without gradient accumulation, all instances of the model need to have their gradients computed, collated, and updated before moving on to the next batch of data.
When performing gradient accumulation, you accumulate `n` loss gradients and skip `optimizer.step()` until `n` batches have been reached. As all training processes only need to synchronize by the time `optimizer.step()` is called, without any modification to your training step, this needless inter-process communication can cause a significant slowdown.

How can you avoid this overhead?
## Solving the slowdown problem

Since you are skipping model parameter updates when training on these batches, their gradients do not need to be synchronized until the point where `optimizer.step()` is actually called.
PyTorch cannot automagically tell when you need to do this, but they do provide a tool to help through the [`no_sync`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel.no_sync) context manager that is added to your model after converting it to DDP.

Under this context manager, PyTorch will skip synchronizing the gradients when `.backward()` is called, and the first call to `.backward()` outside this context manager will trigger the synchronization. See an example below:
```python
ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer)

for index, batch in enumerate(dataloader):
    inputs, targets = batch
    # Trigger gradient synchronization on the last batch
    if index != (len(dataloader) - 1):
        with ddp_model.no_sync():
            # Gradients only accumulate
            outputs = ddp_model(inputs)
            loss = loss_func(outputs, targets)
            accelerator.backward(loss)
    else:
        # Gradients finally sync
        outputs = ddp_model(inputs)
        loss = loss_func(outputs, targets)
        accelerator.backward(loss)
        optimizer.step()
```
In Accelerate to make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!),
|
||||||
|
`ddp_model.no_sync` gets replaced with [`~Accelerator.no_sync`] and operates the same way:
|
||||||
|
|
||||||
|
```diff
|
||||||
|
ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer)
|
||||||
|
|
||||||
|
for index, batch in enumerate(dataloader):
|
||||||
|
inputs, targets = batch
|
||||||
|
# Trigger gradient synchronization on the last batch
|
||||||
|
if index != (len(dataloader)-1):
|
||||||
|
- with ddp_model.no_sync():
|
||||||
|
+ with accelerator.no_sync(model):
|
||||||
|
# Gradients only accumulate
|
||||||
|
outputs = ddp_model(inputs)
|
||||||
|
loss = loss_func(outputs, targets)
|
||||||
|
accelerator.backward(loss)
|
||||||
|
else:
|
||||||
|
# Gradients finally sync
|
||||||
|
outputs = ddp_model(inputs)
|
||||||
|
loss = loss_func(outputs)
|
||||||
|
accelerator.backward(loss)
|
||||||
|
optimizer.step()
|
||||||
|
optimizer.zero_grad()
|
||||||
|
```
|
||||||
|
|
||||||
|
As you may expect, the [`~Accelerator.accumulate`] function wraps around this conditional check by keeping track of the current batch number, leaving you with the final
|
||||||
|
gradient accumulation API:
|
||||||
|
|
||||||
|
```python
|
||||||
|
ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer)
|
||||||
|
|
||||||
|
for batch in dataloader:
|
||||||
|
with accelerator.accumulate(model):
|
||||||
|
optimizer.zero_grad()
|
||||||
|
inputs, targets = batch
|
||||||
|
outputs = model(inputs)
|
||||||
|
loss = loss_function(outputs, targets)
|
||||||
|
accelerator.backward(loss)
|
||||||
|
optimizer.step()
|
||||||
|
optimizer.zero_grad()
|
||||||
|
```
|
||||||
|
|
||||||
|
As a result, you should either use *`accelerator.accumulate` or `accelerator.no_sync`* when it comes to API choice.
|
||||||
|
|
||||||
|
## Just how much of a slowdown is there, and easy mistakes you can make
|
||||||
|
|
||||||
|
To set up a realistic example, consider the following setup:
|
||||||
|
|
||||||
|
* Two single-GPU T4 nodes and one node with two GPUs
|
||||||
|
* Each GPU is a T4, and are hosted on GCP
|
||||||
|
* The script used is a modification of the [NLP Example](https://github.com/muellerzr/timing_experiments/blob/main/baseline.py) script
|
||||||
|
* Batch size per GPU is 16, and gradients are accumulated every 4 steps
|
||||||
|
|
||||||
|
All scripts are available in [this repository](https://github.com/muellerzr/timing_experiments).
|
||||||
|
|
||||||
|
If not careful about gradient synchronization and GPU communication, a *large* amount of time can be wasted
|
||||||
|
from when these GPUs communicate to each other during unnecessary periods.
|
||||||
|
|
||||||
|
By how much?
|
||||||
|
|
||||||
|
Reference:
|
||||||
|
- Baseline: uses no synchronization practices discussed here
|
||||||
|
- `no_sync` improperly: `no_sync` only around the `backward` call, not the `forward`
|
||||||
|
- `no_sync`: using the `no_sync` pattern properly
|
||||||
|
- `accumulate`: using [`~Accelerator.accumulate`] properly
|
||||||
|
|
||||||
|
Below are the average seconds per batch iterating over 29 batches of data for each setup on both a single node and on the dual-node setup:
|
||||||
|
|
||||||
|
| | Baseline | `no_sync` improperly | `no_sync` | `accumulate`|
|
||||||
|
| :---------: | :-------: | :------------------: | :-------: | :---------: |
|
||||||
|
| Multi-Node | 2±0.01s | 2.13±0.08s | **0.91±0.11s** | **0.91±0.11s** |
|
||||||
|
| Single Node | 0.50±0.01s | 0.50±0.01s | **0.41±0.015s** | **0.41±0.015s** |
|
||||||
|
|
||||||
|
As you can see, if you are not careful about how you set up your gradient synchronization, you can get upwards of more than a 2x slowdown during training!
|
||||||
|
|
||||||
|
If you are worried about making sure everything is done properly, we highly recommend utilizing the [`~Accelerator.accumulate`] function and passing in
|
||||||
|
`gradient_accumulation_steps` or `gradient_accumulation_plugin` to the [`Accelerator`] object so Accelerate can handle this for you.
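
For example, a minimal sketch of letting Accelerate handle it (assuming the model, optimizer, dataloader, and loss function are defined elsewhere; the step count of 4 mirrors the benchmark setup above):

```python
from accelerate import Accelerator

# Accelerate decides on each batch whether to sync gradients or skip the communication
accelerator = Accelerator(gradient_accumulation_steps=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    with accelerator.accumulate(model):
        inputs, targets = batch
        loss = loss_function(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```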

### `no_sync` requires additional GPU memory when using FSDP

Be aware that not syncing gradients can have adverse effects while performing FSDP training. As warned in `torch`, the [`no_sync` context manager for FSDP](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.no_sync) will require additional memory.

Therefore, in memory-intensive situations while using FSDP, we recommend setting `sync_each_batch` to `True` in the [`~utils.GradientAccumulationPlugin`] to disable `no_sync`.
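
A minimal sketch of that configuration, assuming the rest of the training loop stays unchanged:

```python
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# Sync (and free) gradients every batch instead of holding them unsynced,
# trading the communication savings of `no_sync` for lower FSDP memory use.
plugin = GradientAccumulationPlugin(num_steps=16, sync_each_batch=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
```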

See the example below where we fine-tune Mixtral (47B parameters) on 8 A100-80GB GPUs. We see that even for a modest `gradient_accumulation_steps=2` we quickly go out-of-memory (OOM) if `no_sync` is enabled. Again, this is due to additional memory overheads due to FSDP's `no_sync`. However, if `no_sync` is disabled via `sync_each_batch=True`, then the memory consumption for `gradient_accumulation_steps=16` reverts to that of `gradient_accumulation_steps=1`.

| Model        | `no_sync` (accum=1) | `no_sync` (accum=2) | `no_sync` disabled (accum=16) |
| :----------: | :-----------------: | :-----------------: | :---------------------------: |
| mixtral 8x7B | 69G                 | OOM                 | 69G                           |

> [!WARNING]
> Disabling `no_sync` means there _will be a slowdown_ due to the extra data syncs, as explained in the earlier sections of this guide.
@@ -1,119 +0,0 @@
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Gradient Synchronization

PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.
This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints
when using the `ddp` module.

These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods.
This happens when the model is wrapped with `DistributedDataParallel`:

```python
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel

model = nn.Linear(10, 10)
ddp_model = DistributedDataParallel(model)
```

In 🤗 Accelerate this conversion happens automatically when calling [`~Accelerator.prepare`] and passing in your model.

```diff
+ from accelerate import Accelerator
+ accelerator = Accelerator()
import torch.nn as nn
- from torch.nn.parallel import DistributedDataParallel

model = nn.Linear(10,10)
+ model = accelerator.prepare(model)
```

## The slowdown in gradient accumulation

You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when
training in a distributed setup. But how does this risk slowing down your code?

In DDP (distributed data parallel), the specific order in which processes are performed and ran are expected
at specific points and these must also occur at roughly the same time before moving on.

The most direct example is when you update all of the parameters in a model through `.backward()`. All instances of the model
need to have updated their gradients, collated, and updated again before moving onto the next batch of data. But when performing
gradient accumulation, you accumulate `n` losses and skip `.backward()` until `n` batches have been reached. This
can cause a significant slowdown since all the processes need to communicate with them more times than needed. How
can you avoid this overhead?

## Solving the slowdown problem

Since you are skipping these batches, their gradients do not need to be synchronized until the point where `.backward()` is actually called.
PyTorch cannot automagically tell when you need to do this, but they do provide a tool to help through the [`no_sync`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel.no_sync) context manager
that is added to your model after converting it to DDP.

Under this context manager, PyTorch will skip synchronizing the gradients when `.backward()` is called, and the first call to `.backward()` outside this
context manager will trigger the synchronization. See an example below:
```python
ddp_model, dataloader = accelerator.prepare(model, dataloader)

for index, batch in enumerate(dataloader):
    inputs, targets = batch
    # Trigger gradient synchronization on the last batch
    if index != (len(dataloader) - 1):
        with ddp_model.no_sync():
            # Gradients only accumulate
            outputs = ddp_model(inputs)
            loss = loss_func(outputs)
            accelerator.backward(loss)
    else:
        # Gradients finally sync
        outputs = ddp_model(inputs)
        loss = loss_func(outputs)
        accelerator.backward(loss)
```

In 🤗 Accelerate to make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!),
`ddp_model.no_sync` gets replaced with [`~Accelerator.no_sync`] and operates the same way:

```diff
ddp_model, dataloader = accelerator.prepare(model, dataloader)

for index, batch in enumerate(dataloader):
    inputs, targets = batch
    # Trigger gradient synchronization on the last batch
    if index != (len(dataloader)-1):
-       with ddp_model.no_sync():
+       with accelerator.no_sync(model):
            # Gradients only accumulate
            outputs = ddp_model(inputs)
            loss = loss_func(outputs, targets)
            accelerator.backward(loss)
    else:
        # Gradients finally sync
        outputs = ddp_model(inputs)
        loss = loss_func(outputs)
        accelerator.backward(loss)
```

As you may expect, the [`~Accelerator.accumulate`] function wraps around this conditional check by keeping track of the current batch number, leaving you with the final
gradient accumulation API:

```python
ddp_model, dataloader = accelerator.prepare(model, dataloader)

for batch in dataloader:
    with accelerator.accumulate(model):
        optimizer.zero_grad()
        inputs, targets = batch
        outputs = model(inputs)
        loss = loss_function(outputs, targets)
        accelerator.backward(loss)
```

As a result, you should either use *`accelerator.accumulate` or `accelerator.no_sync`* when it comes to API choice.
docs/source/concept_guides/internal_mechanism.md (new file, 74 lines)
@@ -0,0 +1,74 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Accelerate's internal mechanisms

Internally, Accelerate works by first analyzing the environment in which the script is launched to determine which
kind of distributed setup is used, how many different processes there are, and which one the current script is in. All
that information is stored in the [`~AcceleratorState`].

This class is initialized the first time you instantiate an [`~Accelerator`] and performs any
specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of
[`~state.AcceleratorState`]. (The same can also be done with the [`PartialState`], a more barebones version that it inherits from.)

Then, when calling [`~Accelerator.prepare`], the library:

- wraps your model(s) in the container adapted for the distributed setup,
- wraps your optimizer(s) in an [`~optimizer.AcceleratedOptimizer`],
- wraps your scheduler(s) in an [`~scheduler.AcceleratedScheduler`],
- creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`] or [`~data_loader.DataLoaderDispatcher`].

While the model(s), optimizer(s), and scheduler(s) are just put in simple wrappers, the dataloader(s) are re-created. This is mostly
because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created, and the
library handles the sharding of your data between processes by changing that `batch_sampler` to yield one batch out of every
`num_processes` batches (if enabled).
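
As a minimal sketch of what this looks like from the user's side (the toy model, optimizer, and dataloader below are placeholders), the objects returned by [`~Accelerator.prepare`] are the wrappers described above:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
dataloader = DataLoader(TensorDataset(torch.randn(64, 10)), batch_size=8)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

# The optimizer is wrapped and the dataloader has been re-created for sharding
print(type(optimizer))   # accelerate.optimizer.AcceleratedOptimizer
print(type(dataloader))  # e.g. accelerate.data_loader.DataLoaderShard
```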

The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality:

- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any
  randomization (like shuffling) is done the exact same way across processes,
- it puts the batches on the proper device before yielding them (unless you have opted out of
  `device_placement=True`).

The [`~data_loader.DataLoaderDispatcher`] subclass differs from the [`~data_loader.DataLoaderShard`] in that when iterating through the `DataLoader`, all of the data starts from process 0 and is *then* split and sent off to each process, rather than this happening at the dataset level.

The random number generator synchronization will by default synchronize:

- the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6
- the main random number generator in PyTorch <= 1.5.1

You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main
[`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid
setting the same seed in the main random number generator in all processes.
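
For instance, a sketch of restricting synchronization to the samplers' local `generator` objects (illustrative only, since this mirrors the default behavior on recent PyTorch versions):

```python
from accelerate import Accelerator

# Only synchronize the samplers' local `generator` objects across processes,
# leaving each process's global torch RNG untouched.
accelerator = Accelerator(rng_types=["generator"])
```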

<Tip warning={true}>

Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random
artifacts you could have in your dataset (like random data augmentation) in the sense that all processes will get
the same random numbers from the torch random modules (so will apply the same random data augmentation if it's
controlled by torch).

</Tip>

<Tip>

The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local
`torch.Generator` object (in PyTorch >= 1.6); see the traditional `RandomSampler` as an example.

</Tip>
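
As a minimal sketch of that advice (the toy dataset and seed are placeholders), the sampler carries its own `torch.Generator` instead of relying on the global torch RNG, which is what lets the library synchronize it cleanly across processes:

```python
import torch
from torch.utils.data import DataLoader, RandomSampler, TensorDataset

dataset = TensorDataset(torch.arange(100).float().unsqueeze(1))  # placeholder dataset

generator = torch.Generator()
generator.manual_seed(42)  # placeholder seed

# Shuffling is driven by this local generator rather than the global torch RNG
sampler = RandomSampler(dataset, generator=generator)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=8)
```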

If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed and have passed `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`], these classes will directly inherit from `StatefulDataLoader` instead and maintain a `state_dict`.
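
A sketch of enabling that option, assuming `torchdata>=0.8.0` is installed:

```python
from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=True)
accelerator = Accelerator(dataloader_config=dataloader_config)

# Dataloaders returned by accelerator.prepare(...) now expose
# state_dict() / load_state_dict() for resumable iteration.
```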

For more details about the internals, see the [Internals page](../package_reference/torch_wrappers).

docs/source/concept_guides/low_precision_training.md (new file, 74 lines)
@@ -0,0 +1,74 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Low precision training methods

The release of new kinds of hardware led to the emergence of new training paradigms that better utilize them. Currently, this is in the form of training
in 8-bit precision using packages such as [TransformersEngine](https://github.com/NVIDIA/TransformerEngine) (TE) or [MS-AMP](https://github.com/Azure/MS-AMP/tree/main).

For an introduction to the topics discussed today, we recommend reviewing the [low-precision usage guide](../usage_guides/low_precision_training) as this documentation will reference it regularly.

## A Quick Chart

Below is a quick chart from the MS-AMP documentation showing the different bit-precisions for each solution during training:

Optimization Level | Computation(GEMM) | Comm | Weight | Master Weight | Weight Gradient | Optimizer States
-- | -- | -- | -- | -- | -- | --
FP16 AMP | FP16 | FP32 | FP32 | N/A | FP32 | FP32+FP32
Nvidia TE | FP8 | FP32 | FP32 | N/A | FP32 | FP32+FP32
MS-AMP O1 | FP8 | FP8 | FP16 | N/A | FP8 | FP32+FP32
MS-AMP O2 | FP8 | FP8 | FP16 | N/A | FP8 | FP8+FP16
MS-AMP O3 | FP8 | FP8 | FP8 | FP16 | FP8 | FP8+FP16

## `TransformersEngine`

`TransformersEngine` is the first solution for training in 8-bit floating point. It works by using drop-in replacement layers for certain modules in a model that utilize its FP8 engine to reduce the number of bits (such as 32 to 8) without degrading the final accuracy of the model.

Specifically, Accelerate will find and replace the following layers with `TransformersEngine` versions:

* `nn.LayerNorm` for `te.LayerNorm`
* `nn.Linear` for `te.Linear`

As a result, we wind up with a model that has most of its layers in BF16, while some layers are in FP8, reducing some of the memory.

Anecdotally, we have noticed that performance gains don't really start showing when using `TransformerEngine` until a large majority of the layers
in the model are made up of those two replaceable layers. As a result, only larger models have shown performance improvements when the number of parameters is around and upwards of a few billion.

`TransformerEngine` can receive many different arguments that customize how it performs FP8 calculations. A full list of the arguments is available below:

* `margin`: The margin to use for the gradient scaling.
* `interval`: The interval to use for how often the scaling factor is recomputed.
* `fp8_format`: The format to use for the FP8 recipe. Must be one of `HYBRID` or `E4M3`. (Generally `HYBRID` for training, `E4M3` for evaluation)
* `amax_history_len`: The length of the history to use for the scaling factor computation.
* `amax_compute_algo`: The algorithm to use for the scaling factor computation. Must be one of `max` or `most_recent`.
* `override_linear_precision`: Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMs in higher precision.

You can customize each of these as part of [`utils.FP8RecipeKwargs`] to help optimize performance of your models.
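
As a rough sketch of wiring these into a script (the values below are illustrative, not recommendations), the recipe arguments are passed through a kwargs handler when building the [`Accelerator`]:

```python
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

# Customize the TE FP8 recipe; any field left out keeps its default value
fp8_kwargs = FP8RecipeKwargs(fp8_format="HYBRID", amax_history_len=32, amax_compute_algo="max")
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[fp8_kwargs])
```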

As the chart mentioned earlier shows, TE simply casts the computation layers into FP8, while everything else is in FP32. As a result, this winds up utilizing the most memory, but does so with the benefit of guaranteeing the least amount of loss in end accuracy during training.

## `MS-AMP`

MS-AMP takes a different approach from `TransformersEngine` by providing three different optimization levels to convert more operations to FP8 or FP16.

* The base optimization level (`O1`) passes communications of the weights (such as in DDP) in FP8, stores the weights of the model in FP16, and leaves the optimizer states in FP32. The main benefit of this optimization level is that we can reduce the communication bandwidth by essentially half. Additionally, more GPU memory is saved due to half of everything being cast to FP8 and the weights being cast to FP16. Notably, both of the optimizer states remain in FP32.

* The second optimization level (`O2`) improves upon this by also reducing the precision of the optimizer states. One is in FP8 while the other is in FP16. Generally, it's been shown that this provides a net gain of no degraded end accuracy, increased training speed, and reduced memory, as now every state is either in FP16 or FP8.

* Finally, MS-AMP has a third optimization level (`O3`) which helps during DDP scenarios such as DeepSpeed. The weights of the model in memory are fully cast to FP8, and the master weights are now stored in FP16. This reduces memory by the highest factor, as now almost everything is in FP8 and only two states are left in FP16. Currently, only DeepSpeed versions up through 0.9.2 are supported, so this capability is not included in the Accelerate integration.

## Combining the two

More experiments need to be performed, but it's been noted that combining both MS-AMP and TransformersEngine can lead to the highest throughput by relying on NVIDIA's optimized FP8 operators and utilizing how MS-AMP reduces the memory overhead.
@@ -8,9 +8,12 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
 -->
 
-# Comparing performance between different device setups
+# Comparing performance across distributed setups
 
 Evaluating and comparing the performance from different setups can be quite tricky if you don't know what to look for.
 For example, you cannot run the same script with the same batch size across TPU, multi-GPU, and single-GPU with Accelerate
@@ -18,7 +21,7 @@ and expect your results to line up.
 
 But why?
 
-There's three reasons for this that this tutorial will cover:
+There are three reasons for this that this tutorial will cover:
 
 1. **Setting the right seeds**
 2. **Observed Batch Sizes**
@@ -26,10 +29,10 @@ There's three reasons for this that this tutorial will cover:
 
 ## Setting the Seed
 
-While this issue has not come up as much, make sure to use [`utils.set_seed`] to fully set the seed in all distributed cases so training will be reproducable:
+While this issue has not come up as much, make sure to use [`utils.set_seed`] to fully set the seed in all distributed cases so training will be reproducible:
 
 ```python
-from accelerate import set_seed
+from accelerate.utils import set_seed
 
 set_seed(42)
 ```
@@ -40,13 +43,13 @@ Why is this important? Under the hood this will set **5** different seed setting
 random.seed(seed)
 np.random.seed(seed)
 torch.manual_seed(seed)
-torch.cuda.manual_seed_all(seed)
+torch.cuda.manual_seed_all(seed)  # or torch.xpu.manual_seed_all, etc
 # ^^ safe to call this function even if cuda is not available
-if is_tpu_available():
+if is_torch_xla_available():
     xm.set_rng_state(seed)
 ```
 
-The random state, numpy's state, torch, torch's cuda state, and if TPUs are available torch_xla's cuda state.
+The random state, numpy's state, torch, torch's device state, and if TPUs are available torch_xla's cuda state.
 
 ## Observed Batch Sizes
 
@@ -58,7 +61,7 @@ The below table can be used as a quick reference to try out different batch size
 
 <Tip>
 
-In this example there are two GPUs for "Multi-GPU" and a TPU pod with 8 workers
+In this example, there are two GPUs for "Multi-GPU" and a TPU pod with 8 workers
 
 </Tip>
 
@@ -71,7 +74,7 @@ In this example there are two GPUs for "Multi-GPU" and a TPU pod with 8 workers
 
 ## Learning Rates
 
-As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/tlt-mi_archive/clara-train-sdk-v2.0/nvmidl/appendix/training_with_multiple_gpus.html)], the learning rate should be scaled *linearly* based on the number of devices present. The below
+As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/clara-train-sdk/pt/model.html#classification-models-multi-gpu-training)], the learning rate should be scaled *linearly* based on the number of devices present. The below
 snippet shows doing so with Accelerate:
 
 <Tip>
@@ -89,3 +92,12 @@ learning_rate *= accelerator.num_processes
 optimizer = AdamW(params=model.parameters(), lr=learning_rate)
 ```
+
+You will also find that `accelerate` will step the learning rate based on the number of processes being trained on. This is because
+of the observed batch size noted earlier. So in the case of 2 GPUs, the learning rate will be stepped twice as often as a single GPU
+to account for the batch size being twice as large (if no changes to the batch size on the single GPU instance are made).
+
+## Gradient Accumulation and Mixed Precision
+
+When using gradient accumulation and mixed precision, due to how gradient averaging works (accumulation) and the precision loss (mixed precision),
+some degradation in performance is expected. This will be explicitly seen when comparing the batch-wise loss between different compute
+setups. However, the overall loss, metric, and general performance at the end of training should be _roughly_ the same.
@@ -8,11 +8,14 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
 -->
 
-# Training on TPUs with 🤗 Accelerate
+# Training on TPUs
 
-Training on TPUs can be slightly different than training on multi-gpu, even with 🤗 Accelerate. This guide aims to show you
+Training on TPUs can be slightly different from training on multi-gpu, even with Accelerate. This guide aims to show you
 where you should be careful and why, as well as the best practices in general.
 
 ## Training in a Notebook
@@ -24,8 +27,8 @@ While on a TPU that last part is not as important, a critical part to understand
 When launching from the command-line, you perform **spawning**, where a python process is not currently running and you *spawn* a new process in. Since your Jupyter notebook is already
 utilizing a python process, you need to *fork* a new process from it to launch your code.
 
-Where this becomes important is in regards to declaring your model. On forked TPU processes, it is recommended that you instantiate your model *once* and pass this into your
-training function. This is different than training on GPUs where you create `n` models that have their gradients synced and back-propagated at certain moments. Instead one
+Where this becomes important is in regard to declaring your model. On forked TPU processes, it is recommended that you instantiate your model *once* and pass this into your
+training function. This is different than training on GPUs where you create `n` models that have their gradients synced and back-propagated at certain moments. Instead, one
 model instance is shared between all the nodes and it is passed back and forth. This is important especially when training on low-resource TPUs such as those provided in Kaggle kernels or
 on Google Colaboratory.
 
@@ -33,7 +36,7 @@ Below is an example of a training function passed to the [`notebook_launcher`] i
 
 <Tip>
 
-This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate/simple_nlp_example.ipynb) with slight
+This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb) with slight
 modifications for the sake of simplicity
 
 </Tip>
@@ -78,7 +81,7 @@ notebook_launcher(training_function)
 
 <Tip>
 
-The `notebook_launcher` will default to 8 processes if 🤗 Accelerate has been configured for a TPU
+The `notebook_launcher` will default to 8 processes if Accelerate has been configured for a TPU
 
 </Tip>
 
@@ -125,16 +128,16 @@ And finally calling the training function with:
 
 ## Mixed Precision and Global Variables
 
-As mentioned in the [mixed precision tutorial](../usage_guides/mixed_precision), 🤗 Accelerate supports fp16 and bf16, both of which can be used on TPUs.
+As mentioned in the [mixed precision tutorial](../usage_guides/mixed_precision), Accelerate supports fp16 and bf16, both of which can be used on TPUs.
 That being said, ideally `bf16` should be utilized as it is extremely efficient to use.
 
-There are two "layers" when using `bf16` and 🤗 Accelerate on TPUs, at the base level and at the operation level.
+There are two "layers" when using `bf16` and Accelerate on TPUs, at the base level and at the operation level.
 
 At the base level, this is enabled when passing `mixed_precision="bf16"` to `Accelerator`, such as:
 ```python
 accelerator = Accelerator(mixed_precision="bf16")
 ```
-By default this will cast `torch.float` and `torch.double` to `bfloat16` on TPUs.
+By default, this will cast `torch.float` and `torch.double` to `bfloat16` on TPUs.
 The specific configuration being set is an environmental variable of `XLA_USE_BF16` is set to `1`.
 
 There is a further configuration you can perform which is setting the `XLA_DOWNCAST_BF16` environmental variable. If set to `1`, then
@@ -161,4 +164,4 @@ new batch size after the first few iterations.
 
 Just because the memory is allocated does not mean it will be used or that the batch size will increase when going back to your training dataloader.
 
 </Tip>

docs/source/imgs/profile_export.png (new binary file, 105 KiB, not shown)
@@ -8,11 +8,14 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
 -->
 
 # Accelerate
 
-🤗 Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.
+Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.
 
 ```diff
 + from accelerate import Accelerator
@@ -34,7 +37,7 @@ specific language governing permissions and limitations under the License.
   scheduler.step()
 ```
 
-Built on `torch_xla` and `torch.distributed`, 🤗 Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms.
+Built on `torch_xla` and `torch.distributed`, Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms.
 Convert existing codebases to utilize [DeepSpeed](usage_guides/deepspeed), perform [fully sharded data parallelism](usage_guides/fsdp), and have automatic support for mixed-precision training!
 
 <Tip>
@@ -51,21 +54,21 @@ accelerate launch {my_script.py}
 
 <div class="mt-10">
 <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
-<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="/docs/accelerate/basic_tutorials/overview"
+<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./basic_tutorials/overview"
 ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div>
-<p class="text-gray-700">Learn the basics and become familiar with using 🤗 Accelerate. Start here if you are using 🤗 Accelerate for the first time!</p>
+<p class="text-gray-700">Learn the basics and become familiar with using Accelerate. Start here if you are using Accelerate for the first time!</p>
 </a>
-<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="/docs/accelerate/usage_guides/gradient_accumulation"
+<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./usage_guides/explore"
 ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
-<p class="text-gray-700">Practical guides to help you achieve a specific goal. Take a look at these guides to learn how to use 🤗 Accelerate to solve real-world problems.</p>
+<p class="text-gray-700">Practical guides to help you achieve a specific goal. Take a look at these guides to learn how to use Accelerate to solve real-world problems.</p>
 </a>
-<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="/docs/accelerate/concept_guides/gradient_synchronization"
+<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./concept_guides/gradient_synchronization"
 ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div>
 <p class="text-gray-700">High-level explanations for building a better understanding of important topics such as avoiding subtle nuances and pitfalls in distributed training and DeepSpeed.</p>
 </a>
-<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="/docs/accelerate/package_reference/accelerator"
+<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./package_reference/accelerator"
 ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
-<p class="text-gray-700">Technical descriptions of how 🤗 Accelerate classes and methods work.</p>
+<p class="text-gray-700">Technical descriptions of how Accelerate classes and methods work.</p>
 </a>
 </div>
 </div>
@@ -8,17 +8,19 @@ http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
+
+⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
+rendered properly in your Markdown viewer.
 -->
 
-# Logging with Accelerate
+# Accelerator
 
-Accelerate has its own logging utility to handle logging while in a distributed system.
-To utilize this replace cases of `logging` with `accelerate.logging`:
-```diff
-- import logging
-+ from accelerate.logging import get_logger
-- logger = logging.getLogger(__name__)
-+ logger = get_logger(__name__)
-```
+The [`Accelerator`] is the main class for enabling distributed training on any type of training setup. Read the [Add Accelerator to your code](../basic_tutorials/migration) tutorial to learn more about how to add the [`Accelerator`] to your script.
 
-[[autodoc]] logging.get_logger
+## Accelerator[[api]]
+
+[[autodoc]] Accelerator
+
+## Utilities
+
+[[autodoc]] accelerate.utils.gather_object
@@ -1,163 +0,0 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Accelerator

The [`Accelerator`] is the main class provided by 🤗 Accelerate.
It serves at the main entrypoint for the API.

## Quick adaptation of your code

To quickly adapt your script to work on any kind of setup with 🤗 Accelerate just:

1. Initialize an [`Accelerator`] object (that we will call `accelerator` throughout this page) as early as possible in your script.
2. Pass your dataloader(s), model(s), optimizer(s), and scheduler(s) to the [`~Accelerator.prepare`] method.
3. Remove all the `.cuda()` or `.to(device)` from your code and let the `accelerator` handle the device placement for you.

<Tip>

Step three is optional, but considered a best practice.

</Tip>

4. Replace `loss.backward()` in your code with `accelerator.backward(loss)`
5. Gather your predictions and labels before storing them or using them for metric computation using [`~Accelerator.gather`]

<Tip warning={true}>

Step five is mandatory when using distributed evaluation

</Tip>

In most cases this is all that is needed. The next section lists a few more advanced use cases and nice features
you should search for and replace by the corresponding methods of your `accelerator`:

## Advanced recommendations

### Printing

`print` statements should be replaced by [`~Accelerator.print`] to be printed once per process

```diff
- print("My thing I want to print!")
+ accelerator.print("My thing I want to print!")
```

### Executing processes

#### Once on a single server

For statements that should be executed once per server, use [`~Accelerator.is_local_main_process`]:

```python
if accelerator.is_local_main_process:
    do_thing_once_per_server()
```

A function can be wrapped using the [`~Accelerator.on_local_main_process`] function to achieve the same
behavior on a function's execution:

```python
@accelerator.on_local_main_process
def do_my_thing():
    "Something done once per server"
    do_thing_once_per_server()
```

#### Only ever once across all servers

For statements that should only ever be executed once, use [`~Accelerator.is_main_process`]:

```python
if accelerator.is_main_process:
    do_thing_once()
```

A function can be wrapped using the [`~Accelerator.on_main_process`] function to achieve the same
behavior on a function's execution:

```python
@accelerator.on_main_process
def do_my_thing():
    "Something done once per server"
    do_thing_once()
```

#### On specific processes

If a function should be ran on a specific overall or local process index, there are similar decorators
to achieve this:

```python
@accelerator.on_local_process(local_process_idx=0)
def do_my_thing():
    "Something done on process index 0 on each server"
    do_thing_on_index_zero_on_each_server()
```

```python
@accelerator.on_process(process_index=0)
def do_my_thing():
    "Something done on process index 0"
    do_thing_on_index_zero()
```

### Synchronicity control

Use [`~Accelerator.wait_for_everyone`] to make sure all processes join that point before continuing. (Useful before a model save for instance)

### Saving and loading

Use [`~Accelerator.unwrap_model`] before saving to remove all special model wrappers added during the distributed process.

```python
model = MyModel()
model = accelerator.prepare(model)
# Unwrap
model = accelerator.unwrap_model(model)
```

Use [`~Accelerator.save`] instead of `torch.save`:

```diff
state_dict = model.state_dict()
- torch.save(state_dict, "my_state.pkl")
+ accelerator.save(state_dict, "my_state.pkl")
```

### Operations

Use [`~Accelerator.clip_grad_norm_`] instead of ``torch.nn.utils.clip_grad_norm_`` and [`~Accelerator.clip_grad_value_`] instead of ``torch.nn.utils.clip_grad_value``

### Gradient Accumulation

To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a gradient_accumulation_steps.
This will also automatically ensure the gradients are synced or unsynced when on
multi-device training, check if the step should actually be performed, and auto-scale the loss:

```diff
- accelerator = Accelerator()
+ accelerator = Accelerator(gradient_accumulation_steps=2)

  for (input, label) in training_dataloader:
+     with accelerator.accumulate(model):
          predictions = model(input)
          loss = loss_function(predictions, labels)
          accelerator.backward(loss)
          optimizer.step()
          scheduler.step()
          optimizer.zero_grad()
```

## Overall API documentation:

[[autodoc]] Accelerator

docs/source/package_reference/big_modeling.md (new file, 110 lines)
@@ -0,0 +1,110 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Working with large models

## Dispatch and offload

### init_empty_weights

[[autodoc]] big_modeling.init_empty_weights

### cpu_offload

[[autodoc]] big_modeling.cpu_offload

### cpu_offload_with_hook

[[autodoc]] big_modeling.cpu_offload_with_hook

### disk_offload

[[autodoc]] big_modeling.disk_offload

### dispatch_model

[[autodoc]] big_modeling.dispatch_model

### load_checkpoint_and_dispatch

[[autodoc]] big_modeling.load_checkpoint_and_dispatch

### load_checkpoint_in_model

[[autodoc]] big_modeling.load_checkpoint_in_model

### infer_auto_device_map

[[autodoc]] utils.infer_auto_device_map

## Hooks

### ModelHook

[[autodoc]] hooks.ModelHook

### AlignDevicesHook

[[autodoc]] hooks.AlignDevicesHook

### SequentialHook

[[autodoc]] hooks.SequentialHook

### LayerwiseCastingHook

[[autodoc]] hooks.LayerwiseCastingHook

## Adding Hooks

### add_hook_to_module

[[autodoc]] hooks.add_hook_to_module

### attach_execution_device_hook

[[autodoc]] hooks.attach_execution_device_hook

### attach_align_device_hook

[[autodoc]] hooks.attach_align_device_hook

### attach_align_device_hook_on_blocks

[[autodoc]] hooks.attach_align_device_hook_on_blocks

### attach_layerwise_casting_hooks

[[autodoc]] big_modeling.attach_layerwise_casting_hooks

## Removing Hooks

### remove_hook_from_module

[[autodoc]] hooks.remove_hook_from_module

### remove_hook_from_submodules

[[autodoc]] hooks.remove_hook_from_submodules

## Utilities

### has_offloaded_params

[[autodoc]] utils.has_offloaded_params

### align_module_device

[[autodoc]] utils.align_module_device
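As a rough illustration of how these pieces fit together, the sketch below builds a model skeleton without allocating memory and then loads and dispatches a checkpoint across devices; the model definition and checkpoint path are placeholders, not part of the API reference above.

```python
import torch.nn as nn

from accelerate import init_empty_weights, load_checkpoint_and_dispatch

# Instantiate the architecture on the "meta" device, so no weight memory is used yet.
with init_empty_weights():
    model = nn.Sequential(nn.Linear(4096, 4096), nn.ReLU(), nn.Linear(4096, 4096))

# Load real weights from disk and split the modules across the available devices
# following an automatically inferred device map.
model = load_checkpoint_and_dispatch(
    model,
    checkpoint="path/to/checkpoint",  # placeholder path
    device_map="auto",
)
```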
@@ -1,41 +0,0 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Working with large models

## Dispatching and Offloading Models

[[autodoc]] big_modeling.init_empty_weights
[[autodoc]] big_modeling.cpu_offload
[[autodoc]] big_modeling.disk_offload
[[autodoc]] big_modeling.dispatch_model
[[autodoc]] big_modeling.load_checkpoint_and_dispatch

## Model Hooks

### Hook Classes

[[autodoc]] hooks.ModelHook
[[autodoc]] hooks.AlignDevicesHook
[[autodoc]] hooks.SequentialHook

### Adding Hooks

[[autodoc]] hooks.add_hook_to_module
[[autodoc]] hooks.attach_execution_device_hook
[[autodoc]] hooks.attach_align_device_hook
[[autodoc]] hooks.attach_align_device_hook_on_blocks

### Removing Hooks

[[autodoc]] hooks.remove_hook_from_module
[[autodoc]] hooks.remove_hook_from_submodules
335 docs/source/package_reference/cli.md Normal file
@@ -0,0 +1,335 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# The Command Line

Below is a list of all the available commands in 🤗 Accelerate with their parameters.

## accelerate config

**Command**:

`accelerate config` or `accelerate-config`

Launches a series of prompts to create and save a `default_config.yml` configuration file for your training system. Should
always be run first on your machine.

**Usage**:

```bash
accelerate config [arguments]
```

**Optional Arguments**:
* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
* `-h`, `--help` (`bool`) -- Show a help message and exit

## accelerate config default

**Command**:

`accelerate config default` or `accelerate-config default`

Create a default config file for Accelerate with only a few flags set.

**Usage**:

```bash
accelerate config default [arguments]
```

**Optional Arguments**:
* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
* `-h`, `--help` (`bool`) -- Show a help message and exit
* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.

## accelerate config update

**Command**:

`accelerate config update` or `accelerate-config update`

Update an existing config file with the latest defaults while maintaining the old configuration.

**Usage**:

```bash
accelerate config update [arguments]
```

**Optional Arguments**:
* `--config_file CONFIG_FILE` (`str`) -- The path to the config file to update. Will default to a file named default_config.yaml in the cache location, which is the content
of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
* `-h`, `--help` (`bool`) -- Show a help message and exit

## accelerate env

**Command**:

`accelerate env` or `accelerate-env` or `python -m accelerate.commands.env`

Lists the contents of the passed 🤗 Accelerate configuration file. Should always be used when opening an issue on the [GitHub repository](https://github.com/huggingface/accelerate).

**Usage**:

```bash
accelerate env [arguments]
```

**Optional Arguments**:
* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
* `-h`, `--help` (`bool`) -- Show a help message and exit

## accelerate launch

**Command**:

`accelerate launch` or `accelerate-launch` or `python -m accelerate.commands.launch`

Launches a specified script on a distributed system with the right parameters.

**Usage**:

```bash
accelerate launch [arguments] {training_script} --{training_script-argument-1} --{training_script-argument-2} ...
```

**Positional Arguments**:

- `{training_script}` -- The full path to the script to be launched in parallel
- `--{training_script-argument-1}` -- Arguments of the training script

**Optional Arguments**:

* `-h`, `--help` (`bool`) -- Show a help message and exit
* `--config_file CONFIG_FILE` (`str`) -- The config file to use for the default values in the launching script.
* `-m`, `--module` (`bool`) -- Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.
* `--no_python` (`bool`) -- Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.
* `--debug` (`bool`) -- Whether to print out the torch.distributed stack trace when something fails.
* `-q`, `--quiet` (`bool`) -- Silence subprocess errors from the launch stack trace to only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations).

The rest of these arguments are configured through `accelerate config` and are read in from the specified `--config_file` (or default configuration) for their
values. They can also be passed in manually.

**Hardware Selection Arguments**:

* `--cpu` (`bool`) -- Whether or not to force the training on the CPU.
* `--multi_gpu` (`bool`) -- Whether or not this should launch a distributed GPU training.
* `--tpu` (`bool`) -- Whether or not this should launch a TPU training.
* `--ipex` (`bool`) -- Whether or not this should launch an Intel PyTorch Extension (IPEX) training. **This argument is deprecated, will be removed in Accelerate v1.10**

**Resource Selection Arguments**:

The following arguments are useful for fine-tuning how available hardware should be used:

* `--mixed_precision {no,fp16,bf16,fp8}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.
* `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel.
* `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.
* `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. Can be tuned for optimal performance.
* `--enable_cpu_affinity` (`bool`) -- Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.

**Training Paradigm Arguments**:

The following arguments are useful for selecting which training paradigm to use.

* `--use_deepspeed` (`bool`) -- Whether or not to use DeepSpeed for training.
* `--use_fsdp` (`bool`) -- Whether or not to use FullyShardedDataParallel for training.
* `--use_megatron_lm` (`bool`) -- Whether or not to use Megatron-LM for training.
* `--use_xpu` (`bool`) -- Whether to use IPEX plugin to speed up training on XPU specifically. **This argument is deprecated and ignored, will be removed in Accelerate v1.10**

**Distributed GPU Arguments**:

The following arguments are only useful when `multi_gpu` is passed or multi-gpu training is configured through `accelerate config`:

* `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-separated list
* `--same_network` (`bool`) -- Whether all machines used for multinode training exist on the same local network.
* `--machine_rank` (`int`) -- The rank of the machine on which this script is launched.
* `--main_process_ip` (`str`) -- The IP address of the machine of rank 0.
* `--main_process_port` (`int`) -- The port to use to communicate with the machine of rank 0.
* `-t`, `--tee` (`str`) -- Tee std streams into a log file and also to console.
* `--log_dir` (`str`) -- Base directory to use for log files when using torchrun/torch.distributed.run as launcher. Use with --tee to redirect std streams into log files.
* `--role` (`str`) -- User-defined role for the workers.
* `--rdzv_backend` (`str`) -- The rendezvous method to use, such as 'static' (the default) or 'c10d'
* `--rdzv_conf` (`str`) -- Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).
* `--max_restarts` (`int`) -- Maximum number of worker group restarts before failing.
* `--monitor_interval` (`int`) -- Interval, in seconds, to monitor the state of workers.

**TPU Arguments**:

The following arguments are only useful when `tpu` is passed or TPU training is configured through `accelerate config`:

* `--tpu_cluster` (`bool`) -- Whether to use a GCP TPU pod for training.
* `--tpu_use_sudo` (`bool`) -- Whether to use `sudo` when running the TPU training script in each pod.
* `--vm` (`str`) -- List of single Compute VM instance names. If not provided we assume usage of instance groups. For TPU pods.
* `--env` (`str`) -- List of environment variables to set on the Compute VM instances. For TPU pods.
* `--main_training_function` (`str`) -- The name of the main function to be executed in your script (only for TPU training).
* `--downcast_bf16` (`bool`) -- Whether, when using bf16 precision on TPUs, both float and double tensors should be cast to bfloat16, or double tensors should remain as float32.

**DeepSpeed Arguments**:

The following arguments are only useful when `use_deepspeed` is passed or `deepspeed` is configured through `accelerate config`:

* `--deepspeed_config_file` (`str`) -- DeepSpeed config file.
* `--zero_stage` (`int`) -- DeepSpeed's ZeRO optimization stage.
* `--offload_optimizer_device` (`str`) -- Decides where (none|cpu|nvme) to offload optimizer states.
* `--offload_param_device` (`str`) -- Decides where (none|cpu|nvme) to offload parameters.
* `--offload_optimizer_nvme_path` (`str`) -- Decides the NVMe path to offload optimizer states.
* `--gradient_accumulation_steps` (`int`) -- Number of gradient_accumulation_steps used in your training script.
* `--gradient_clipping` (`float`) -- Gradient clipping value used in your training script.
* `--zero3_init_flag` (`str`) -- Decides whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with DeepSpeed ZeRO Stage-3.
* `--zero3_save_16bit_model` (`str`) -- Decides whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. Only applicable with DeepSpeed ZeRO Stage-3.
* `--deepspeed_hostfile` (`str`) -- DeepSpeed hostfile for configuring multi-node compute resources.
* `--deepspeed_exclusion_filter` (`str`) -- DeepSpeed exclusion filter string when using multi-node setup.
* `--deepspeed_inclusion_filter` (`str`) -- DeepSpeed inclusion filter string when using multi-node setup.
* `--deepspeed_multinode_launcher` (`str`) -- DeepSpeed multi-node launcher to use.
* `--deepspeed_moe_layer_cls_names` (`str`) -- Comma-separated list of transformer MoE layer class names (case-sensitive) to wrap, e.g., `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock`

**Fully Sharded Data Parallelism Arguments**:

The following arguments are only useful when `use_fsdp` is passed or Fully Sharded Data Parallelism is configured through `accelerate config`:

* `--fsdp_offload_params` (`str`) -- Decides whether (true|false) to offload parameters and gradients to CPU.
* `--fsdp_min_num_params` (`int`) -- FSDP's minimum number of parameters for Default Auto Wrapping.
* `--fsdp_sharding_strategy` (`int`) -- FSDP's Sharding Strategy.
* `--fsdp_auto_wrap_policy` (`str`) -- FSDP's auto wrap policy.
* `--fsdp_transformer_layer_cls_to_wrap` (`str`) -- Transformer layer class name (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block` ...
* `--fsdp_backward_prefetch_policy` (`str`) -- FSDP's backward prefetch policy.
* `--fsdp_state_dict_type` (`str`) -- FSDP's state dict type.
* `--fsdp_forward_prefetch` (`str`) -- FSDP forward prefetch.
* `--fsdp_use_orig_params` (`str`) -- If True, allows non-uniform `requires_grad` mixed in an FSDP unit.
* `--fsdp_cpu_ram_efficient_loading` (`str`) -- If true, only the first process loads the pretrained model checkpoint while all other processes have empty weights. When using this, `--fsdp_sync_module_states` needs to be True.
* `--fsdp_sync_module_states` (`str`) -- If true, each individually wrapped FSDP unit will broadcast module parameters from rank 0.
* `--fsdp_activation_checkpointing` (`bool`) -- Decides whether intermediate activations are freed during the forward pass and a checkpoint is left as a placeholder.

**Megatron-LM Arguments**:

The following arguments are only useful when `use_megatron_lm` is passed or Megatron-LM is configured through `accelerate config`:

* `--megatron_lm_tp_degree` (``) -- Megatron-LM's Tensor Parallelism (TP) degree.
* `--megatron_lm_pp_degree` (``) -- Megatron-LM's Pipeline Parallelism (PP) degree.
* `--megatron_lm_num_micro_batches` (``) -- Megatron-LM's number of micro batches when PP degree > 1.
* `--megatron_lm_sequence_parallelism` (``) -- Decides whether (true|false) to enable Sequence Parallelism when TP degree > 1.
* `--megatron_lm_recompute_activations` (``) -- Decides whether (true|false) to enable Selective Activation Recomputation.
* `--megatron_lm_use_distributed_optimizer` (``) -- Decides whether (true|false) to use a distributed optimizer which shards optimizer state and gradients across Data Parallel (DP) ranks.
* `--megatron_lm_gradient_clipping` (``) -- Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable).

**FP8 Arguments**:

* `--fp8_backend` (`str`) -- Choose a backend to train with FP8 (`te` or `msamp`)
* `--fp8_use_autocast_during_eval` (`bool`) -- Whether to use FP8 autocast during eval mode (useful only when `--fp8_backend=te` is passed). Generally better metrics are found when this is not passed.
* `--fp8_margin` (`int`) -- The margin to use for the gradient scaling (useful only when `--fp8_backend=te` is passed).
* `--fp8_interval` (`int`) -- The interval to use for how often the scaling factor is recomputed (useful only when `--fp8_backend=te` is passed).
* `--fp8_format` (`str`) -- The format to use for the FP8 recipe (useful only when `--fp8_backend=te` is passed).
* `--fp8_amax_history_len` (`int`) -- The length of the history to use for the scaling factor computation (useful only when `--fp8_backend=te` is passed).
* `--fp8_amax_compute_algo` (`str`) -- The algorithm to use for the scaling factor computation (useful only when `--fp8_backend=te` is passed).
* `--fp8_override_linear_precision` (`Tuple[bool, bool, bool]`) -- Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision.
* `--fp8_opt_level` (`str`) -- What level of 8-bit collective communication should be used with MS-AMP (useful only when `--fp8_backend=msamp` is passed)

**AWS SageMaker Arguments**:

The following arguments are only useful when training in SageMaker:

* `--aws_access_key_id AWS_ACCESS_KEY_ID` (`str`) -- The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job
* `--aws_secret_access_key AWS_SECRET_ACCESS_KEY` (`str`) -- The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job

## accelerate estimate-memory

**Command**:

`accelerate estimate-memory` or `accelerate-estimate-memory` or `python -m accelerate.commands.estimate`

Estimates the total vRAM needed to load a particular model hosted on the Hub, along with an estimate for training. Requires that `huggingface_hub` be installed.

<Tip>

When performing inference, typically add ≤20% to the result as overall allocation [as referenced here](https://blog.eleuther.ai/transformer-math/). We will have more extensive estimations in the future that will automatically be included in the calculation.

</Tip>

**Usage**:

```bash
accelerate estimate-memory {MODEL_NAME} --library_name {LIBRARY_NAME} --dtypes {dtype_1} {dtype_2} ...
```

**Required Arguments**:

* `MODEL_NAME` (`str`) -- The model name on the Hugging Face Hub

**Optional Arguments**:

* `--library_name {timm,transformers}` (`str`) -- The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub
* `--dtypes {float32,float16,int8,int4}` (`[{float32,float16,int8,int4} ...]`) -- The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`
* `--trust_remote_code` (`bool`) -- Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be passed for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.

## accelerate tpu-config

`accelerate tpu-config`

**Usage**:

```bash
accelerate tpu-config [arguments]
```

**Optional Arguments**:
* `-h`, `--help` (`bool`) -- Show a help message and exit

**Config Arguments**:

Arguments that can be configured through `accelerate config`.

* `--config_file` (`str`) -- Path to the config file to use for accelerate.
* `--tpu_name` (`str`) -- The name of the TPU to use. If not specified, will use the TPU specified in the config file.
* `--tpu_zone` (`str`) -- The zone of the TPU to use. If not specified, will use the zone specified in the config file.

**TPU Arguments**:

Arguments for options run inside the TPU.

* `--command_file` (`str`) -- The path to the file containing the commands to run on the pod on startup.
* `--command` (`str`) -- A command to run on the pod. Can be passed multiple times.
* `--install_accelerate` (`bool`) -- Whether to install accelerate on the pod. Defaults to False.
* `--accelerate_version` (`str`) -- The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.
* `--debug` (`bool`) -- If set, will print the command that would be run instead of running it.

## accelerate test

`accelerate test` or `accelerate-test`

Runs `accelerate/test_utils/test_script.py` to verify that 🤗 Accelerate has been properly configured on your system and runs.

**Usage**:

```bash
accelerate test [arguments]
```

**Optional Arguments**:
* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
* `-h`, `--help` (`bool`) -- Show a help message and exit
@@ -1,153 +0,0 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# The Command Line

Below is a list of all the available commands 🤗 Accelerate with their parameters

## accelerate config

**Command**:

`accelerate config` or `accelerate-config`

Launches a series of prompts to create and save a `default_config.yml` configuration file for your training system. Should
always be ran first on your machine.

**Usage**:

```bash
accelerate config [arguments]
```

**Optional Arguments**:
* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
* `-h`, `--help` (`bool`) -- Show a help message and exit

## accelerate env

**Command**:

`accelerate env` or `accelerate-env`

Lists the contents of the passed 🤗 Accelerate configuration file. Should always be used when opening an issue on the [GitHub repository](https://github.com/huggingface/accelerate).

**Usage**:

```bash
accelerate env [arguments]
```

**Optional Arguments**:
* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
* `-h`, `--help` (`bool`) -- Show a help message and exit

## accelerate launch

**Command**:

`accelerate launch` or `accelerate-launch`

Launches a specified script on a distributed system with the right parameters.

**Usage**:

```bash
accelerate launch [arguments] {training_script} --{training_script-argument-1} --{training_script-argument-2} ...
```

**Positional Arguments**:

- `{training_script}` -- The full path to the script to be launched in parallel
- `--{training_script-argument-1}` -- Arguments of the training script

**Optional Arguments**:

* `-h`, `--help` (`bool`) -- Show a help message and exit
* `--config_file CONFIG_FILE` (`str`)-- The config file to use for the default values in the launching script.
* `--cpu` (`bool`) -- Whether or not to force the training on the CPU.
* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on
Nvidia Ampere GPUs and PyTorch 1.10 or later.
* `--multi_gpu` (`bool`, defaults to `False`) -- Whether or not this should launch a distributed GPU training.
* `-m`, `--module` (`bool`) -- Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.
* `--no_python` (`bool`) -- Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.

The rest of these arguments are configured through `accelerate config` and are read in from the specified `--config_file` (or default configuration) for their
values. They can also be passed in manually.

**Machine Configuration Arguments**:

The following arguments are useful for customization of worker machines
* `--machine_rank MACHINE_RANK` (`int`) -- The rank of the machine on which this script is launched.
* `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.
* `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel.
* `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-seperated list
* `--main_process_ip MAIN_PROCESS_IP` (`str`) -- The IP address of the machine of rank 0.
* `--main_process_port MAIN_PROCESS_PORT` (`int`) -- The port to use to communicate with the machine of rank 0.
* `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. Can be tuned for optimal performance.

**DeepSpeed Arguments**:

The following arguments are only useful when `use_deepspeed` is passed:
* `--use_deepspeed` (`bool`) -- Whether to use deepspeed.
* `--deepspeed_config_file DEEPSPEED_CONFIG_FILE` (`str`) -- DeepSpeed config file.
* `--zero_stage ZERO_STAGE` (`str`) -- DeepSpeed's ZeRO optimization stage
* `--offload_optimizer_device OFFLOAD_OPTIMIZER_DEVICE` (`str`) -- Decides where (none|cpu|nvme) to offload optimizer states
* `--offload_param_device OFFLOAD_PARAM_DEVICE` (`str`) -- Decides where (none|cpu|nvme) to offload parameters
* `--gradient_accumulation_steps GRADIENT_ACCUMULATION_STEPS` (`int`) -- Number of gradient_accumulation_steps used in your training script
* `--gradient_clipping GRADIENT_CLIPPING` (`float`) -- gradient clipping value used in your training script

The following arguments are related to using ZeRO Stage-3
* `--zero3_init_flag ZERO3_INIT_FLAG` (`bool`) -- Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models
* `--zero3_save_16bit_model ZERO3_SAVE_16BIT_MODEL` (`bool`) -- Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3

**Fully Sharded Data Parallelism Arguments**:

The following arguments are only useful when `use_fdsp` is passed:
* `--use_fsdp` (`bool`) -- Whether to use fsdp.
* `--offload_params OFFLOAD_PARAMS` (`bool`) -- Decides Whether (true|false) to offload parameters and gradients to CPU.
* `--min_num_params MIN_NUM_PARAMS` (`int`) -- FSDP's minimum number of parameters for Default Auto Wrapping.
* `--sharding_strategy SHARDING_STRATEGY` (`str`) -- FSDP's Sharding Strategy.

**TPU Arguments**:

The following arguments are only useful when `tpu` is passed:
* `--tpu` (`bool`) - Whether or not this should launch a TPU training.
* `--main_training_function MAIN_TRAINING_FUNCTION` (`str`) -- The name of the main function to be executed in your script.

**AWS SageMaker Arguments**:

The following arguments are only useful when training in SageMaker
* `--aws_access_key_id AWS_ACCESS_KEY_ID` (`str`) -- The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job
* `--aws_secret_access_key AWS_SECRET_ACCESS_KEY` (`str`) -- The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job

## accelerate test

`accelerate test` or `accelerate-test`

Runs `accelerate/test_utils/test_script.py` to verify that 🤗 Accelerate has been properly configured on your system and runs.

**Usage**:

```bash
accelerate test [arguments]
```

**Optional Arguments**:
* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
(`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
* `-h`, `--help` (`bool`) -- Show a help message and exit
44 docs/source/package_reference/deepspeed.md Normal file
@@ -0,0 +1,44 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# DeepSpeed utilities

## DeepSpeedPlugin

[[autodoc]] utils.DeepSpeedPlugin

## get_active_deepspeed_plugin

[[autodoc]] utils.get_active_deepspeed_plugin

## DeepSpeedEngineWrapper

[[autodoc]] utils.deepspeed.DeepSpeedEngineWrapper

## DeepSpeedOptimizerWrapper

[[autodoc]] utils.deepspeed.DeepSpeedOptimizerWrapper

## DeepSpeedSchedulerWrapper

[[autodoc]] utils.deepspeed.DeepSpeedSchedulerWrapper

## DummyOptim

[[autodoc]] utils.deepspeed.DummyOptim

## DummyScheduler

[[autodoc]] utils.deepspeed.DummyScheduler
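A hedged sketch of how the dummy objects are typically used: when the optimizer and scheduler are defined in the DeepSpeed config file, these placeholders stand in for real PyTorch objects until `accelerator.prepare` swaps them out. The stand-in model and hyperparameter values below are illustrative only.

```python
import torch.nn as nn

from accelerate.utils import DummyOptim, DummyScheduler

model = nn.Linear(8, 8)  # stand-in model

# Placeholders; the real optimizer/scheduler settings come from the DeepSpeed
# config when `accelerator.prepare(...)` is called in a DeepSpeed-enabled run.
optimizer = DummyOptim(params=model.parameters(), lr=3e-4)
scheduler = DummyScheduler(optimizer, total_num_steps=1000, warmup_num_steps=100)
```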
38 docs/source/package_reference/fp8.md Normal file
@@ -0,0 +1,38 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# FP8

Below are functions and classes relating to the underlying FP8 implementation.

## FP8RecipeKwargs

[[autodoc]] utils.FP8RecipeKwargs

## convert_model

[[autodoc]] utils.convert_model

## has_transformer_engine_layers

[[autodoc]] utils.has_transformer_engine_layers

## contextual_fp8_autocast

[[autodoc]] utils.contextual_fp8_autocast

## apply_fp8_autowrap

[[autodoc]] utils.apply_fp8_autowrap
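As a rough usage sketch, an FP8 recipe is usually handed to the `Accelerator` as a kwargs handler; this assumes a TransformerEngine-capable GPU with `transformer-engine` installed, and the exact kwargs shown are illustrative.

```python
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

# "te" selects the TransformerEngine backend; "msamp" is the other documented backend.
fp8_kwargs = FP8RecipeKwargs(backend="te")
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[fp8_kwargs])
```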
46 docs/source/package_reference/fsdp.md Normal file
@@ -0,0 +1,46 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Fully Sharded Data Parallel utilities

## enable_fsdp_ram_efficient_loading

[[autodoc]] utils.enable_fsdp_ram_efficient_loading

## disable_fsdp_ram_efficient_loading

[[autodoc]] utils.disable_fsdp_ram_efficient_loading

## merge_fsdp_weights

[[autodoc]] utils.merge_fsdp_weights

## FullyShardedDataParallelPlugin

[[autodoc]] utils.FullyShardedDataParallelPlugin

## fsdp2_load_full_state_dict

[[autodoc]] utils.fsdp2_load_full_state_dict

## fsdp2_switch_optimizer_parameters

[[autodoc]] utils.fsdp2_switch_optimizer_parameters

## fsdp2_prepare_model

[[autodoc]] utils.fsdp2_prepare_model

## fsdp2_prepare_auto_wrap_policy
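For `merge_fsdp_weights`, a hedged sketch of merging sharded checkpoint files back into a single set of weights on disk; the directory names are placeholders for wherever the sharded state was saved.

```python
from accelerate.utils import merge_fsdp_weights

# Both paths are placeholders; safe_serialization=True writes safetensors files.
merge_fsdp_weights(
    checkpoint_dir="outputs/checkpoint/pytorch_model_fsdp_0",
    output_path="outputs/merged_model",
    safe_serialization=True,
)
```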
22 docs/source/package_reference/inference.md Normal file
@@ -0,0 +1,22 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Pipeline parallelism

Accelerate supports pipeline parallelism for large-scale training with the PyTorch [torch.distributed.pipelining](https://pytorch.org/docs/stable/distributed.pipelining.html) API.

## prepare_pippy

[[autodoc]] inference.prepare_pippy
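A hedged sketch of the API: `prepare_pippy` splits the model at inferred points and returns a wrapped model whose forward call runs the pipeline. The toy model and input are illustrative, and the script is expected to be started with `accelerate launch` so that the distributed environment exists.

```python
import torch
import torch.nn as nn

from accelerate.inference import prepare_pippy

model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 64))
example_input = torch.randn(8, 64)

# Split points are inferred automatically by tracing the example input.
model = prepare_pippy(model, split_points="auto", example_args=(example_input,))
output = model(example_input)
```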
@@ -8,18 +8,32 @@ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

-# Kwargs Handlers
+# Kwargs handlers

The following objects can be passed to the main [`Accelerator`] to customize how some PyTorch objects
related to distributed training or mixed precision are created.

## AutocastKwargs

[[autodoc]] AutocastKwargs

## DistributedDataParallelKwargs

[[autodoc]] DistributedDataParallelKwargs

## FP8RecipeKwargs

[[autodoc]] utils.FP8RecipeKwargs

## ProfileKwargs

[[autodoc]] utils.ProfileKwargs

## GradScalerKwargs

[[autodoc]] GradScalerKwargs

@@ -27,3 +41,7 @@ related to distributed training or mixed precision are created.
## InitProcessGroupKwargs

[[autodoc]] InitProcessGroupKwargs

## KwargsHandler

[[autodoc]] utils.KwargsHandler
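As a brief usage sketch (the specific values are illustrative), handlers are constructed once and passed to the `Accelerator`, which applies them when it builds the corresponding PyTorch objects:

```python
from datetime import timedelta

from accelerate import Accelerator, DistributedDataParallelKwargs, InitProcessGroupKwargs

# Forwarded to torch's DistributedDataParallel and init_process_group respectively.
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
pg_kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=1800))
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs, pg_kwargs])
```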
Some files were not shown because too many files have changed in this diff.