Mirror of https://github.com/huggingface/accelerate.git (synced 2025-11-16 23:40:42 +08:00)

Compare commits: v0.15.0 ... fix-genera (657 commits)
(Commit list: 657 SHAs, from c8f6f79199 through e9d15e5973.)
@ -15,10 +15,14 @@
"remoteEnv": {
"PYTHONPATH": "${containerEnv:PATH}:${containerWorkspaceFolder}"
},
"extensions": [
// Ensure we have IntelliSense in VSCode when running inside container
"ms-python.python"
],
"customizations": {
"vscode": {
"extensions": [
// Ensure we have IntelliSense in VSCode when running inside container
"ms-python.python"
]
}
},
"workspaceFolder": "/workspaces/accelerate",
// Need git for VSCode to color code modifications. Only runs when building environment.
"onCreateCommand": "apt-get update && apt-get install -y git && pip install -e '.[dev]'"

7 .github/ISSUE_TEMPLATE/bug-report.yml (vendored)
@ -1,6 +1,12 @@
|
||||
name: "\U0001F41B Bug Report"
|
||||
description: Submit a bug report to help us improve Accelerate
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to submit a bug report! 🐛
|
||||
If this is not a bug related to the Accelerate library directly, but instead a general question about your code or the library specifically please use the [forums](https://discuss.huggingface.co/c/accelerate/18).
|
||||
|
||||
- type: textarea
|
||||
id: system-info
|
||||
attributes:
|
||||
@ -55,4 +61,3 @@ body:
|
||||
attributes:
|
||||
label: Expected behavior
|
||||
description: "A clear and concise description of what you would expect to happen."
|
||||
render: Shell
|
||||
|
||||
47 .github/PULL_REQUEST_TEMPLATE.md (vendored, Normal file)
@ -0,0 +1,47 @@
|
||||
# What does this PR do?
|
||||
|
||||
<!--
|
||||
Congratulations! You've made it this far! You're not quite done yet though.
|
||||
|
||||
Once merged, your PR is going to appear in the release notes with the title you set, so make sure it's a great title that fully reflects the extent of your awesome contribution.
|
||||
|
||||
Then, please replace this with a description of the change and which issue is fixed (if applicable). Please also include relevant motivation and context. List any dependencies (if any) that are required for this change.
|
||||
|
||||
Once you're done, someone will review your PR shortly (see the section "Who can review?" below to tag some potential reviewers). They may suggest changes to make the code even better. If no one reviewed your PR after a week has passed, don't hesitate to post a new comment @-mentioning the same persons---sometimes notifications get lost.
|
||||
-->
|
||||
|
||||
<!-- Remove if not applicable -->
|
||||
|
||||
Fixes # (issue)
|
||||
|
||||
|
||||
## Before submitting
|
||||
- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
|
||||
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md#submitting-a-pull-request-pr),
|
||||
Pull Request section?
|
||||
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link
|
||||
to it if that's the case.
|
||||
- [ ] Did you make sure to update the documentation with your changes? Here are the
|
||||
[documentation guidelines](https://github.com/huggingface/accelerate/tree/main/docs), and
|
||||
[here are tips on formatting docstrings](https://github.com/huggingface/accelerate/tree/main/docs#writing-documentation---specification).
|
||||
- [ ] Did you write any new necessary tests?
|
||||
|
||||
|
||||
## Who can review?
|
||||
|
||||
Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
|
||||
members/contributors who may be interested in your PR.
|
||||
|
||||
<!-- Your PR will be replied to more quickly if you can figure out the right person to tag with @
|
||||
|
||||
If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
|
||||
|
||||
- Big modeling: @SunMarc
|
||||
- Fully-Sharded Data Parallism: @pacman100
|
||||
- DeepSpeed: @pacman100
|
||||
- Command Line Interface: @muellerzr
|
||||
- Documentation: @muellerzr
|
||||
- Core parts of the library: @muellerzr @BenjaminBossan
|
||||
- Maintained examples: @muellerzr or @pacman100
|
||||
|
||||
-->
|
||||
@ -15,50 +15,46 @@ jobs:
|
||||
outputs:
|
||||
version: ${{ steps.step1.outputs.version }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v3.1.0
|
||||
- id: step1
|
||||
run: echo "version=$(python setup.py --version)" >> $GITHUB_OUTPUT
|
||||
|
||||
version-cpu:
|
||||
name: "Latest Accelerate CPU [version]"
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: [self-hosted, intel-cpu, 8-cpu, ci]
|
||||
needs: get-version
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Build and Push CPU
|
||||
uses: docker/build-push-action@v2
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: ./docker/accelerate-cpu
|
||||
file: docker/accelerate-cpu/Dockerfile
|
||||
push: true
|
||||
tags: huggingface/accelerate-cpu:${{needs.get-version.outputs.version}}
|
||||
|
||||
version-cuda:
|
||||
name: "Latest Accelerate GPU [version]"
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
needs: get-version
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Build and Push GPU
|
||||
uses: docker/build-push-action@v2
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: ./docker/accelerate-gpu
|
||||
file: docker/accelerate-gpu/Dockerfile
|
||||
push: true
|
||||
tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}
|
||||
tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}}
|
||||
|
||||
9 .github/workflows/build_and_run_tests.yml (vendored)
@ -22,7 +22,7 @@ jobs:
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v22.2
|
||||
uses: tj-actions/changed-files@v41
|
||||
|
||||
- name: Was setup changed
|
||||
id: was_changed
|
||||
@ -42,4 +42,9 @@ jobs:
|
||||
run-merge-tests:
|
||||
needs: build-docker-containers
|
||||
if: always()
|
||||
uses: ./.github/workflows/run_merge_tests.yml
|
||||
uses: ./.github/workflows/run_merge_tests.yml
|
||||
|
||||
run-integration-tests:
|
||||
needs: build-docker-containers
|
||||
if: always()
|
||||
uses: ./.github/workflows/self_hosted_integration_tests.yml
|
||||
27 .github/workflows/build_docker_images.yml (vendored)
@ -13,42 +13,37 @@ concurrency:
|
||||
jobs:
|
||||
latest-cpu:
|
||||
name: "Latest Accelerate CPU [dev]"
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: [self-hosted, intel-cpu, 8-cpu, ci]
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Build and Push CPU
|
||||
uses: docker/build-push-action@v2
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: ./docker/accelerate-cpu
|
||||
file: docker/accelerate-cpu/Dockerfile
|
||||
push: true
|
||||
tags: huggingface/accelerate-cpu
|
||||
|
||||
latest-cuda:
|
||||
name: "Latest Accelerate GPU [dev]"
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: [self-hosted, nvidia-gpu, t4, ci]
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Build and Push GPU
|
||||
uses: docker/build-push-action@v2
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: ./docker/accelerate-gpu
|
||||
file: docker/accelerate-gpu/Dockerfile
|
||||
push: true
|
||||
tags: huggingface/accelerate-gpu
|
||||
tags: huggingface/accelerate-gpu
|
||||
|
||||
2 .github/workflows/build_documentation.yml (vendored)
@ -14,4 +14,4 @@ jobs:
|
||||
commit_sha: ${{ github.sha }}
|
||||
package: accelerate
|
||||
secrets:
|
||||
token: ${{ secrets.HUGGINGFACE_PUSH }}
|
||||
hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
|
||||
|
||||
13 .github/workflows/delete_doc_comment.yml (vendored)
@ -1,13 +0,0 @@
|
||||
name: Delete dev documentation
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [ closed ]
|
||||
|
||||
|
||||
jobs:
|
||||
delete:
|
||||
uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main
|
||||
with:
|
||||
pr_number: ${{ github.event.number }}
|
||||
package: accelerate
|
||||
56 .github/workflows/integration_tests.yml (vendored, Normal file)
@ -0,0 +1,56 @@
|
||||
# CI for specifically ensuring integrations work fine (`transformers` mainly)
|
||||
# Useful tips:
|
||||
# - New integrations to test should have its own job, and follow a strategy method where we check both
|
||||
# the pypi and github versions.
|
||||
# - When checking the latest release of the integration, use
|
||||
# git checkout $(git describe --tags `git rev-list --tags --max-count=1`) to get the latest release.
|
||||
|
||||
name: Integration Tests
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- "src/**"
|
||||
- "tests/**"
|
||||
- ".github/**"
|
||||
- "examples/**"
|
||||
- "setup.py"
|
||||
types: [opened, synchronize, reopened]
|
||||
|
||||
env:
|
||||
HF_HOME: ~/hf_cache
|
||||
|
||||
jobs:
|
||||
run-trainer-tests:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
steps:
|
||||
- uses: actions/checkout@v3.1.0
|
||||
- name: Set up python 3.8
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.8
|
||||
|
||||
- name: Install Accelerate from source
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
pip install -e .
|
||||
|
||||
- name: Clone and install transformers
|
||||
run: |
|
||||
cd ..
|
||||
git clone https://github.com/huggingface/transformers
|
||||
cd transformers
|
||||
pip install .[torch,testing]
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
pip freeze
|
||||
|
||||
- name: Run Trainer tests
|
||||
env:
|
||||
WANDB_DISABLED: true
|
||||
run: |
|
||||
cd ../transformers
|
||||
pytest -sv tests/trainer
|
||||
47 .github/workflows/nightly.yml (vendored)
@ -8,81 +8,104 @@ on:
|
||||
env:
|
||||
RUN_SLOW: "yes"
|
||||
IS_GITHUB_CI: "1"
|
||||
SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}
|
||||
|
||||
|
||||
jobs:
|
||||
run_all_tests_single_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
TEST_TYPE: "single_gpu"
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
working-directory: accelerate/
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Update clone & pip install
|
||||
run: |
|
||||
source activate accelerate
|
||||
git config --global --add safe.directory '*'
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-reportlog
|
||||
pip install pytest-reportlog tabulate
|
||||
|
||||
- name: Run test on GPUs
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
source activate accelerate
|
||||
make test
|
||||
|
||||
- name: Run examples on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
pip uninstall comet_ml -y
|
||||
make test_examples
|
||||
|
||||
- name: Generate Report
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
pip install slack_sdk tabulate
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run_all_tests_multi_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0,1"
|
||||
TEST_TYPE: "multi_gpu"
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
working-directory: accelerate/
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Update clone
|
||||
run: |
|
||||
source activate accelerate
|
||||
git config --global --add safe.directory '*'
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-reportlog
|
||||
pip install pytest-reportlog tabulate
|
||||
|
||||
- name: Run core and big modeling tests on GPUs
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
source activate accelerate
|
||||
make test_big_modeling
|
||||
make test_core
|
||||
make test_big_modeling
|
||||
make test_cli
|
||||
|
||||
- name: Run Integration tests on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
make test_integrations
|
||||
|
||||
- name: Run examples on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
pip uninstall comet_ml -y
|
||||
make test_examples
|
||||
|
||||
- name: Generate Report
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
pip install slack_sdk tabulate
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
|
||||
run-integration-tests:
|
||||
if: always()
|
||||
uses: ./.github/workflows/self_hosted_integration_tests.yml
|
||||
13 .github/workflows/quality.yml (vendored)
@ -6,12 +6,17 @@ jobs:
|
||||
quality:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python 3.7
|
||||
- uses: actions/checkout@v3.1.0
|
||||
- name: Set up Python 3.8
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.7
|
||||
python-version: 3.8
|
||||
- name: Install Python dependencies
|
||||
run: pip install -e .[quality]
|
||||
- name: Run Quality check
|
||||
run: make quality
|
||||
run: make quality
|
||||
- name: Check if failure
|
||||
if: ${{ failure() }}
|
||||
run: |
|
||||
echo "Quality check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and rerun 'make style; make quality;'" >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
|
||||
67 .github/workflows/run_merge_tests.yml (vendored)
@ -10,7 +10,7 @@ env:
|
||||
|
||||
jobs:
|
||||
run_all_tests_single_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
container:
|
||||
@ -18,72 +18,81 @@ jobs:
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
working-directory: accelerate/
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Update clone & pip install
|
||||
- name: Install accelerate
|
||||
run: |
|
||||
source activate accelerate
|
||||
git config --global --add safe.directory '*'
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
pip install -e .[testing,test_trackers]
|
||||
pip install pytest-reportlog
|
||||
source activate accelerate;
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e .[testing,test_trackers] -U;
|
||||
pip install pytest-reportlog tabulate ;
|
||||
|
||||
- name: Run CLI tests
|
||||
- name: Run CLI tests (use make cli)
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
source activate accelerate
|
||||
source activate accelerate;
|
||||
make test_cli
|
||||
|
||||
- name: Run test on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
source activate accelerate;
|
||||
make test
|
||||
- name: Run examples on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
pip uninstall comet_ml -y
|
||||
source activate accelerate;
|
||||
pip uninstall comet_ml -y;
|
||||
make test_examples
|
||||
|
||||
- name: Generate Report
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
pip install tabulate;
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run_all_tests_multi_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: 0,1
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
options: --gpus all --shm-size "16gb"
|
||||
defaults:
|
||||
run:
|
||||
working-directory: accelerate/
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Update clone
|
||||
run: |
|
||||
source activate accelerate
|
||||
git config --global --add safe.directory '*'
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
pip install -e .[testing,test_trackers]
|
||||
pip install pytest-reportlog
|
||||
|
||||
- name: Run CLI tests
|
||||
run: |
|
||||
source activate accelerate
|
||||
make test_cli
|
||||
source activate accelerate;
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e .[testing,test_trackers] -U;
|
||||
pip install pytest-reportlog tabulate
|
||||
|
||||
- name: Run test on GPUs
|
||||
working-directory: accelerate
|
||||
run: |
|
||||
source activate accelerate
|
||||
source activate accelerate;
|
||||
make test
|
||||
|
||||
- name: Run examples on GPUs
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate
|
||||
pip uninstall comet_ml -y
|
||||
source activate accelerate;
|
||||
pip uninstall comet_ml -y;
|
||||
make test_examples
|
||||
|
||||
- name: Generate Report
|
||||
working-directory: accelerate
|
||||
if: always()
|
||||
run: |
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
source activate accelerate;
|
||||
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
125 .github/workflows/self_hosted_integration_tests.yml (vendored, Normal file)
@ -0,0 +1,125 @@
|
||||
# CI for specifically ensuring integrations work fine (`transformers` mainly) on GPUs
|
||||
# Useful tips:
|
||||
# - `working-directory` should be set to the root of the repo, which is cloned on the actual CI runner.
|
||||
# It follows the directory structure of `actions-runner/_work/{repo_name}/{repo_name}/{cloned_repo} on
|
||||
# prem, but in Actions setting `working-directory` looks just in the `{repo_name}` level.
|
||||
# - New integrations to test should have its own job, and follow a strategy method where we check both
|
||||
# the pypi and github versions.
|
||||
# - Workflow call lets this be called from `build_and_run_tests.yml`
|
||||
# - When using a docker container, it's recommended to set `--shm-size`, we use 16gb.
|
||||
name: Integration Tests (push to "main")
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
HF_HOME: ~/hf_cache
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
run-trainer-tests:
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
options: --gpus all --shm-size "16gb"
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
cuda_visible_devices: [
|
||||
"0",
|
||||
"0,1"
|
||||
]
|
||||
steps:
|
||||
- name: Install transformers
|
||||
run: |
|
||||
source activate accelerate;
|
||||
git clone https://github.com/huggingface/transformers --depth 1;
|
||||
cd transformers;
|
||||
pip install .[torch,deepspeed-testing];
|
||||
cd ..;
|
||||
|
||||
- name: Install accelerate
|
||||
run: |
|
||||
source activate accelerate;
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }} ;
|
||||
pip install -e .[testing];
|
||||
pip uninstall comet_ml wandb dvclive -y
|
||||
cd ..;
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
source activate accelerate;
|
||||
pip freeze
|
||||
|
||||
- name: Run trainer tests
|
||||
working-directory: transformers/
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: ${{ matrix.cuda_visible_devices }}
|
||||
WANDB_DISABLED: true
|
||||
run: |
|
||||
source activate accelerate;
|
||||
pytest -sv tests/trainer
|
||||
|
||||
- name: Run deepspeed tests
|
||||
working-directory: transformers/
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: ${{ matrix.cuda_visible_devices }}
|
||||
WANDB_DISABLED: true
|
||||
if: always()
|
||||
run: |
|
||||
source activate accelerate;
|
||||
pytest -sv tests/deepspeed
|
||||
|
||||
- name: Run transformers examples tests
|
||||
working-directory: transformers/
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: ${{ matrix.cuda_visible_devices }}
|
||||
WANDB_DISABLED: true
|
||||
run: |
|
||||
source activate accelerate
|
||||
pip install -r examples/pytorch/_tests_requirements.txt
|
||||
pytest -sv examples/pytorch/test_accelerate_examples.py examples/pytorch/test_pytorch_examples.py
|
||||
|
||||
run-skorch-tests:
|
||||
container:
|
||||
image: huggingface/accelerate-gpu:latest
|
||||
options: --gpus all --shm-size "16gb"
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
steps:
|
||||
- name: Install accelerate
|
||||
run:
|
||||
source activate accelerate;
|
||||
git clone https://github.com/huggingface/accelerate;
|
||||
cd accelerate;
|
||||
git checkout ${{ github.sha }};
|
||||
pip install -e .[testing];
|
||||
cd ..
|
||||
|
||||
- name: Install skorch
|
||||
run: |
|
||||
source activate accelerate
|
||||
git clone https://github.com/skorch-dev/skorch;
|
||||
cd skorch;
|
||||
git config --global --add safe.directory '*'
|
||||
git checkout master && git pull
|
||||
pip install .[testing]
|
||||
pip install flaky
|
||||
|
||||
- name: Show installed libraries
|
||||
run: |
|
||||
source activate accelerate;
|
||||
pip freeze
|
||||
|
||||
- name: Run skorch tests
|
||||
working-directory: skorch/
|
||||
run: |
|
||||
source activate accelerate;
|
||||
pytest -sv -k TestAccelerate
|
||||
6 .github/workflows/stale.yml (vendored)
@ -13,12 +13,12 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3.1.0
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v1
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.7
|
||||
python-version: 3.8
|
||||
|
||||
- name: Install requirements
|
||||
run: |
|
||||
|
||||
10 .github/workflows/test.yml (vendored)
@ -23,7 +23,7 @@ jobs:
|
||||
matrix:
|
||||
pytorch-version: [
|
||||
latest,
|
||||
minimum
|
||||
minimum,
|
||||
]
|
||||
test-kind: [
|
||||
test_prod,
|
||||
@ -39,10 +39,10 @@ jobs:
|
||||
]
|
||||
steps:
|
||||
- uses: actions/checkout@v3.1.0
|
||||
- name: Set up python 3.7
|
||||
- name: Set up python 3.8
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.7
|
||||
python-version: 3.8
|
||||
|
||||
- name: Activate python cache
|
||||
uses: actions/cache@v3
|
||||
@ -58,8 +58,8 @@ jobs:
|
||||
if [[ ${{ matrix.test-kind }} = test_prod ]]; then pip install -e .[test_prod]; fi
|
||||
if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi
|
||||
if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi
|
||||
if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torch==1.6.0; fi
|
||||
pip install pytest-reportlog
|
||||
if [[ ${{ matrix.test-kind }} = minimum ]]; then pip install torch==1.10.0; fi
|
||||
pip install pytest-reportlog tabulate
|
||||
|
||||
- name: Run Tests
|
||||
env:
|
||||
|
||||
16 .github/workflows/upload_pr_documentation.yml (vendored, Normal file)
@ -0,0 +1,16 @@
|
||||
name: Upload PR Documentation
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: ["Build PR Documentation"]
|
||||
types:
|
||||
- completed
|
||||
|
||||
jobs:
|
||||
build:
|
||||
uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
|
||||
with:
|
||||
package_name: accelerate
|
||||
secrets:
|
||||
hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
|
||||
comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
|
||||
5 .gitignore (vendored)
@ -138,4 +138,7 @@ dmypy.json
.DS_Store

# More test things
wandb
wandb

# ruff
.ruff_cache

@ -152,7 +152,7 @@ Follow these steps to start contributing:
$ make test
```

`accelerate` relies on `black` and `isort` to format its source code
`accelerate` relies on `black` and `ruff` to format its source code
consistently. After you make changes, apply automatic style corrections and code verifications
that can't be automated in one go with:

@ -165,7 +165,7 @@ Follow these steps to start contributing:
$ make style
```

`accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality
`accelerate` also uses a few custom scripts to check for coding mistakes. Quality
control runs in CI, however you can also run the same checks with:

```bash

21 Makefile
@ -1,6 +1,6 @@
.PHONY: quality style test docs
.PHONY: quality style test docs utils

check_dirs := tests src examples benchmarks
check_dirs := tests src examples benchmarks utils

# Check that source code meets quality standards

@ -8,27 +8,26 @@ extra_quality_checks:
python utils/check_copies.py
python utils/check_dummies.py
python utils/check_repo.py
python utils/style_doc.py src/accelerate docs/source --max_len 119
doc-builder style src/accelerate docs/source --max_len 119

# this target runs checks on all files
quality:
black --check $(check_dirs)
isort --check-only $(check_dirs)
flake8 $(check_dirs)
python utils/style_doc.py src/accelerate docs/source --max_len 119 --check_only
black --required-version 23 --check $(check_dirs)
ruff $(check_dirs)
doc-builder style src/accelerate docs/source --max_len 119 --check_only

# Format source code automatically and check is there are any problems left that need manual fixing
style:
black $(check_dirs)
isort $(check_dirs)
python utils/style_doc.py src/accelerate docs/source --max_len 119
black --required-version 23 $(check_dirs)
ruff $(check_dirs) --fix
doc-builder style src/accelerate docs/source --max_len 119

# Run tests for the library
test:
python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_all.log",)

test_big_modeling:
python -m pytest -s -v ./tests/test_big_modeling.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_big_modeling.log",)
python -m pytest -s -v ./tests/test_big_modeling.py ./tests/test_modeling_utils.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_big_modeling.log",)

test_core:
python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \

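For contributors, these targets are typically exercised locally in roughly the following order (a sketch; it assumes the `[quality]` extra used by the CI quality job is installed, which provides `black`, `ruff`, and `doc-builder`):

```bash
pip install -e ".[quality]"   # same dependency set the CI quality job installs
make style                    # auto-format: black, ruff --fix, doc-builder style
make quality                  # check-only pass of the same tools, as run in CI
make test_core                # quicker subset of the tests; `make test` runs the broader suite
```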
52 README.md
@ -16,12 +16,12 @@ limitations under the License.

<p align="center">
<br>
<img src="docs/source/imgs/accelerate_logo.png" width="400"/>
<img src="https://raw.githubusercontent.com/huggingface/accelerate/main/docs/source/imgs/accelerate_logo.png" width="400"/>
<br>
<p>

<p align="center">
<!-- Uncomment when CircleCI is setup
<!-- Uncomment when CircleCI is set up
<a href="https://circleci.com/gh/huggingface/accelerate">
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master">
</a>
@ -91,7 +91,7 @@ Here is an example:
optimizer.step()
```

As you can see in this example, by adding 5-lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp16).
As you can see in this example, by adding 5-lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp8, fp16, bf16).
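For reference, the pattern that paragraph describes boils down to something like the minimal, self-contained sketch below (the toy model and data are made up purely for illustration):

```python
import torch
from accelerate import Accelerator

# Toy model and data so the sketch runs on its own; any PyTorch setup follows the same shape.
model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
dataset = torch.utils.data.TensorDataset(torch.randn(64, 10), torch.randn(64, 1))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=8)

accelerator = Accelerator()  # picks up the device / distributed setup from the environment
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

model.train()
for inputs, targets in dataloader:
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    accelerator.backward(loss)  # replaces the usual loss.backward()
    optimizer.step()
```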

In particular, the same code can then be run without modification on your local machine for debugging or your training environment.

@ -132,11 +132,11 @@ In particular, the same code can then be run without modification on your local
optimizer.step()
```

Want to learn more? Check out the [documentation](https://huggingface.co/docs/accelerate) or have look at our [examples](https://github.com/huggingface/accelerate/tree/main/examples).
Want to learn more? Check out the [documentation](https://huggingface.co/docs/accelerate) or have a look at our [examples](https://github.com/huggingface/accelerate/tree/main/examples).

## Launching script

🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.launch` or to write a specific launcher for TPU training!
🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.run` or to write a specific launcher for TPU training!
On your machine(s) just run:

```bash
@ -155,7 +155,17 @@ For instance, here is how you would run the GLUE example on the MRPC task (from
accelerate launch examples/nlp_example.py
```

This CLI tool is **optional**, and you can still use `python my_script.py` or `python -m torch.distributed.launch my_script.py` at your convenance.
This CLI tool is **optional**, and you can still use `python my_script.py` or `python -m torchrun my_script.py` at your convenience.

You can also directly pass in the arguments you would to `torchrun` as arguments to `accelerate launch` if you wish to not run` accelerate config`.

For example, here is how to launch on two GPUs:

```bash
accelerate launch --multi_gpu --num_processes 2 examples/nlp_example.py
```

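Putting this section together, a typical first run on a new machine looks roughly like the following sketch (`accelerate test` is the CLI's built-in sanity check; adapt the script path to your own project):

```bash
accelerate config    # answer the interactive questions once; writes a default config file
accelerate test      # optional: runs a short end-to-end check on the configured setup
accelerate launch examples/nlp_example.py
```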
To learn more, check the CLI documentation available [here](https://huggingface.co/docs/accelerate/package_reference/cli).

## Launching multi-CPU run using MPI

@ -168,15 +178,15 @@ mpirun -np 2 python examples/nlp_example.py

## Launching training using DeepSpeed

🤗 Accelerate supports training on single/multiple GPUs using DeepSpeed. To use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you desire to tweak your DeepSpeed related args from your python script, we provide you the `DeepSpeedPlugin`.
🤗 Accelerate supports training on single/multiple GPUs using DeepSpeed. To use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you desire to tweak your DeepSpeed related args from your Python script, we provide you the `DeepSpeedPlugin`.

```python
from accelerator import Accelerator, DeepSpeedPlugin
from accelerate import Accelerator, DeepSpeedPlugin

# deepspeed needs to know your gradient accumulation steps before hand, so don't forget to pass it
# deepspeed needs to know your gradient accumulation steps beforehand, so don't forget to pass it
# Remember you still need to do gradient accumulation by yourself, just like you would have done without deepspeed
deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)
accelerator = Accelerator(fp16=True, deepspeed_plugin=deepspeed_plugin)
accelerator = Accelerator(mixed_precision='fp16', deepspeed_plugin=deepspeed_plugin)

# How to save your 🤗 Transformer?
accelerator.wait_for_everyone()
@ -200,7 +210,7 @@ An example can be found in [this notebook](https://github.com/huggingface/notebo

## Why should I use 🤗 Accelerate?

You should use 🤗 Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library, In fact the whole API of 🤗 Accelerate is in one class, the `Accelerator` object.
You should use 🤗 Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library. In fact, the whole API of 🤗 Accelerate is in one class, the `Accelerator` object.

## Why shouldn't I use 🤗 Accelerate?

@ -208,18 +218,25 @@ You shouldn't use 🤗 Accelerate if you don't want to write a training loop you

## Frameworks using 🤗 Accelerate

If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around your training loop, some frameworks that are built on top of 🤗 Accelerate are listed below:
If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around its capabilities, some frameworks and libraries that are built on top of 🤗 Accelerate are listed below:

* [Amphion](https://github.com/open-mmlab/Amphion) is a toolkit for Audio, Music, and Speech Generation. Its purpose is to support reproducible research and help junior researchers and engineers get started in the field of audio, music, and speech generation research and development.
* [Animus](https://github.com/Scitator/animus) is a minimalistic framework to run machine learning experiments. Animus highlights common "breakpoints" in ML experiments and provides a unified interface for them within [IExperiment](https://github.com/Scitator/animus/blob/main/animus/core.py#L76).
* [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model train, and inference logic.
* [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model training, and inference logic.
* [fastai](https://github.com/fastai/fastai#installing) is a PyTorch framework for Deep Learning that simplifies training fast and accurate neural nets using modern best practices. fastai provides a [Learner](https://docs.fast.ai/learner.html#Learner) to handle the training, fine-tuning, and inference of deep learning algorithms.
* [Finetuner](https://github.com/jina-ai/finetuner) is a service that enables models to create higher-quality embeddings for semantic search, visual similarity search, cross-modal text<->image search, recommendation systems, clustering, duplication detection, anomaly detection, or other uses.
* [InvokeAI](https://github.com/invoke-ai/InvokeAI) is a creative engine for Stable Diffusion models, offering industry-leading WebUI, terminal usage support, and serves as the foundation for many commercial products.
* [Kornia](https://kornia.readthedocs.io/en/latest/get-started/introduction.html) is a differentiable library that allows classical computer vision to be integrated into deep learning models. Kornia provides a [Trainer](https://kornia.readthedocs.io/en/latest/x.html#kornia.x.Trainer) with the specific purpose to train and fine-tune the supported deep learning algorithms within the library.
* [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centred around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency; enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves!
* [Open Assistant](https://projects.laion.ai/Open-Assistant/) is a chat-based assistant that understands tasks, can interact with their party systems, and retrieve information dynamically to do so.
* [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centered around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency; enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves!
* [Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) is an open-source browser-based easy-to-use interface based on the Gradio library for Stable Diffusion.
* [torchkeras](https://github.com/lyhue1991/torchkeras) is a simple tool for training pytorch model just in a keras style, a dynamic and beautiful plot is provided in notebook to monitor your loss or metric.
* [transformers](https://github.com/huggingface/transformers) as a tool for helping train state-of-the-art machine learning models in PyTorch, Tensorflow, and JAX. (Accelerate is the backend for the PyTorch side).


## Installation

This repository is tested on Python 3.6+ and PyTorch 1.4.0+
This repository is tested on Python 3.8+ and PyTorch 1.10.0+

You should install 🤗 Accelerate in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).

@ -240,7 +257,8 @@ pip install accelerate
- multi-GPU on one node (machine)
- multi-GPU on several nodes (machines)
- TPU
- FP16 with native AMP (apex on the roadmap)
- FP16/BFloat16 mixed precision
- FP8 mixed precision with [Transformer Engine](https://github.com/NVIDIA/TransformerEngine)
- DeepSpeed support (Experimental)
- PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental)
- Megatron-LM support (Experimental)
@ -252,7 +270,7 @@ If you use 🤗 Accelerate in your publication, please cite it by using the foll
```bibtex
@Misc{accelerate,
title = {Accelerate: Training and inference at scale made simple, efficient and adaptable.},
author = {Sylvain Gugger, Lysandre Debut, Thomas Wolf, Philipp Schmid, Zachary Mueller, Sourab Mangrulkar},
author = {Sylvain Gugger and Lysandre Debut and Thomas Wolf and Philipp Schmid and Zachary Mueller and Sourab Mangrulkar and Marc Sun and Benjamin Bossan},
howpublished = {\url{https://github.com/huggingface/accelerate}},
year = {2022}
}

@ -16,12 +16,12 @@ import argparse
import time

import torch

import transformers
from accelerate.utils import compute_module_sizes
from measures_util import end_measure, log_measures, start_measure
from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer

from accelerate.utils import compute_module_sizes


DEFAULT_MODELS = {
"gpt-j-6b": {"is_causal": True, "model": "sgugger/sharded-gpt-j-6B", "tokenizer": "EleutherAI/gpt-j-6B"},

@ -2,9 +2,8 @@ import gc
import threading
import time

import torch

import psutil
import torch


class PeakCPUMemory:

@ -1,7 +1,7 @@
# Builds CPU-only Docker image of PyTorch
# Uses multi-staged approach to reduce size
# Stage 1
FROM python:3.7-slim as compile-image
FROM python:3.8-slim as compile-image

ARG DEBIAN_FRONTEND=noninteractive

@ -25,7 +25,7 @@ RUN python3 -m pip install --no-cache-dir \
--extra-index-url https://download.pytorch.org/whl/cpu

# Stage 2
FROM python:3.7-slim AS build-image
FROM python:3.8-slim AS build-image
COPY --from=compile-image /opt/venv /opt/venv
RUN useradd -ms /bin/bash user
USER user

@ -4,7 +4,7 @@
# Use base conda image to reduce time
FROM continuumio/miniconda3:latest AS compile-image
# Specify py version
ENV PYTHON_VERSION=3.7.3
ENV PYTHON_VERSION=3.8
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
@ -23,10 +23,12 @@ SHELL ["/bin/bash", "-c"]
RUN source activate accelerate && \
python3 -m pip install --no-cache-dir \
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \
--extra-index-url https://download.pytorch.org/whl/cu113
--extra-index-url https://download.pytorch.org/whl/cu117

RUN python3 -m pip install --no-cache-dir bitsandbytes

# Stage 2
FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04 AS build-image
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 AS build-image
COPY --from=compile-image /opt/conda /opt/conda
ENV PATH /opt/conda/bin:$PATH

267
docs/README.md
Normal file
@ -0,0 +1,267 @@
|
||||
<!---
|
||||
Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
|
||||
# Generating the documentation
|
||||
|
||||
To generate the documentation, you first have to build it. Several packages are necessary to build the doc,
|
||||
you can install them with the following command, at the root of the code repository:
|
||||
|
||||
```bash
|
||||
pip install -e ".[docs]"
|
||||
```
|
||||
|
||||
Then you need to install our special tool that builds the documentation:
|
||||
|
||||
```bash
|
||||
pip install git+https://github.com/huggingface/doc-builder
|
||||
```
|
||||
|
||||
---
|
||||
**NOTE**
|
||||
|
||||
You only need to generate the documentation to inspect it locally (if you're planning changes and want to
|
||||
check how they look before committing for instance). You don't have to commit the built documentation.
|
||||
|
||||
---
|
||||
|
||||
## Building the documentation
|
||||
|
||||
Once you have set up the `doc-builder` and additional packages, you can generate the documentation by
|
||||
typing the following command:
|
||||
|
||||
```bash
|
||||
doc-builder build accelerate docs/source/ --build_dir ~/tmp/test-build
|
||||
```
|
||||
|
||||
You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
|
||||
the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
|
||||
Markdown editor.
|
||||
|
||||
## Previewing the documentation
|
||||
|
||||
To preview the docs, first install the `watchdog` module with:
|
||||
|
||||
```bash
|
||||
pip install watchdog
|
||||
```
|
||||
|
||||
Then run the following command:
|
||||
|
||||
```bash
|
||||
doc-builder preview {package_name} {path_to_docs}
|
||||
```
|
||||
|
||||
For example:
|
||||
|
||||
```bash
|
||||
doc-builder preview accelerate docs/source/
|
||||
```
|
||||
|
||||
The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives.
|
||||
|
||||
---
|
||||
**NOTE**
|
||||
|
||||
The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again).
|
||||
|
||||
---
|
||||
|
||||
## Adding a new element to the navigation bar
|
||||
|
||||
Accepted files are Markdown (.md).
|
||||
|
||||
Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
|
||||
the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/accelerate/blob/main/docs/source/_toctree.yml) file.
|
||||
|
||||
## Renaming section headers and moving sections
|
||||
|
||||
It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media and it'd make for a much better user experience if users reading those months later could still easily navigate to the originally intended information.
|
||||
|
||||
Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
|
||||
|
||||
So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:
|
||||
|
||||
```
|
||||
Sections that were moved:
|
||||
|
||||
[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
|
||||
```
|
||||
and of course, if you moved it to another file, then:
|
||||
|
||||
```
|
||||
Sections that were moved:
|
||||
|
||||
[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
|
||||
```
|
||||
|
||||
Use the relative style to link to the new file so that the versioned docs continue to work.
|
||||
|
||||
|
||||
## Writing Documentation - Specification
|
||||
|
||||
The `huggingface/accelerate` documentation follows the
|
||||
[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings,
|
||||
although we can write them directly in Markdown.
|
||||
|
||||
### Adding a new tutorial
|
||||
|
||||
Adding a new tutorial or section is done in two steps:
|
||||
|
||||
- Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md).
|
||||
- Link that file in `./source/_toctree.yml` on the correct toc-tree.
|
||||
|
||||
Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so
|
||||
depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or
|
||||
four.
|
||||
|
||||
### Writing source documentation
|
||||
|
||||
Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names
|
||||
and objects like True, None, or any strings should usually be put in `code`.
|
||||
|
||||
When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool
|
||||
adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or
|
||||
function to be in the main package.
|
||||
|
||||
If you want to create a link to some internal class or function, you need to
|
||||
provide its path. For instance: \[\`utils.gather\`\]. This will be converted into a link with
|
||||
`utils.gather` in the description. To get rid of the path and only keep the name of the object you are
|
||||
linking to in the description, add a ~: \[\`~utils.gather\`\] will generate a link with `gather` in the description.
|
||||
|
||||
The same works for methods, so you can use either \[\`XXXClass.method\`\] or \[\`~XXXClass.method\`\].
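For instance, a hypothetical docstring using these link forms might look like this (the function name and wording are purely illustrative):

```python
def wait_then_gather(tensor):
    """
    Waits for all processes with [`Accelerator.wait_for_everyone`], then collects `tensor` across
    processes with [`~utils.gather`] (which renders simply as `gather` in the built docs).
    """
    ...
```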
|
||||
|
||||
#### Defining arguments in a method
|
||||
|
||||
Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and
|
||||
an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its
|
||||
description:
|
||||
|
||||
```
|
||||
Args:
|
||||
n_layers (`int`): The number of layers of the model.
|
||||
```
|
||||
|
||||
If the description is too long to fit in one line (more than 119 characters in total), another indentation is necessary
|
||||
before writing the description after the argument.
|
||||
|
||||
Finally, to maintain uniformity if any *one* description is too long to fit on one line, the
|
||||
rest of the parameters should follow suit and have an indentation before their description.
|
||||
|
||||
Here's an example showcasing everything so far:
|
||||
|
||||
```
|
||||
Args:
|
||||
gradient_accumulation_steps (`int`, *optional*, defaults to 1):
|
||||
The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with `Accelerator.accumulate`.
|
||||
cpu (`bool`, *optional*):
|
||||
Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force the execution on one process only.
|
||||
```
|
||||
|
||||
For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the
|
||||
following signature:
|
||||
|
||||
```
|
||||
def my_function(x: str = None, a: float = 1):
|
||||
```
|
||||
|
||||
then its documentation should look like this:
|
||||
|
||||
```
|
||||
Args:
|
||||
x (`str`, *optional*):
|
||||
This argument controls ... and has a description longer than 119 chars.
|
||||
a (`float`, *optional*, defaults to 1):
|
||||
This argument is used to ... and has a description longer than 119 chars.
|
||||
```
|
||||
|
||||
Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even
|
||||
if the first line describing your argument type and its default gets long, you can't break it on several lines. You can
|
||||
however write as many lines as you want in the indented description (see the example above with `gradient_accumulation_steps`).
|
||||
|
||||
#### Writing a multi-line code block
|
||||
|
||||
Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:
|
||||
|
||||
|
||||
````
|
||||
```python
|
||||
# first line of code
|
||||
# second line
|
||||
# etc
|
||||
```
|
||||
````
|
||||
|
||||
#### Writing a return block
|
||||
|
||||
The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
|
||||
The first line should be the type of the return, followed by a line return. No need to indent further for the elements
|
||||
building the return.
|
||||
|
||||
Here's an example of a single value return:
|
||||
|
||||
```
|
||||
Returns:
|
||||
`List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
|
||||
```
|
||||
|
||||
Here's an example of a tuple return, comprising several objects:
|
||||
|
||||
```
|
||||
Returns:
|
||||
`tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
|
||||
- **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
|
||||
Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
|
||||
- **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
|
||||
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
||||
```
|
||||
|
||||
## Styling the docstring
|
||||
|
||||
We have an automatic script running with the `make style` command that will make sure that:
|
||||
- the docstrings fully take advantage of the line width
|
||||
- all code examples are formatted using black, like the code of the Transformers library
|
||||
|
||||
This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's
|
||||
recommended to commit your changes before running `make style`, so you can revert the changes done by that script
|
||||
easily.
|
||||
|
||||
## Writing documentation examples
|
||||
|
||||
The syntax for Example docstrings can look as follows:
|
||||
|
||||
```
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> import time
|
||||
>>> from accelerate import Accelerator
|
||||
>>> accelerator = Accelerator()
|
||||
>>> if accelerator.is_main_process:
|
||||
... time.sleep(2)
|
||||
... else:
|
||||
... print("I'm waiting for the main process to finish its sleep...")
|
||||
>>> accelerator.wait_for_everyone()
|
||||
>>> # Should print on every process at the same time
|
||||
>>> print("Everyone is here")
|
||||
```
|
||||
```
|
||||
|
||||
The docstring should give a minimal, clear example of how the respective function
|
||||
is to be used in inference and also include the expected (ideally sensible)
|
||||
output.
|
||||
Often, readers will try out the example before even going through the function
|
||||
or class definitions. Therefore, it is of utmost importance that the example
|
||||
works as expected.
|
||||
@ -15,38 +15,58 @@
|
||||
title: Launching distributed code
|
||||
- local: basic_tutorials/notebook
|
||||
title: Launching distributed training from Jupyter Notebooks
|
||||
- local: basic_tutorials/troubleshooting
|
||||
title: Troubleshooting guide
|
||||
title: Tutorials
|
||||
- sections:
|
||||
- local: usage_guides/explore
|
||||
title: Start Here!
|
||||
- local: usage_guides/training_zoo
|
||||
title: Example Zoo
|
||||
- local: usage_guides/big_modeling
|
||||
title: How to perform inference on large models with small resources
|
||||
- local: usage_guides/model_size_estimator
|
||||
title: Knowing how big of a model you can fit into memory
|
||||
- local: usage_guides/quantization
|
||||
title: How to quantize model
|
||||
- local: usage_guides/distributed_inference
|
||||
title: How to perform distributed inference with normal resources
|
||||
- local: usage_guides/gradient_accumulation
|
||||
title: Performing gradient accumulation
|
||||
- local: usage_guides/fsdp
|
||||
title: Fully Sharded Data Parallelism
|
||||
- local: usage_guides/local_sgd
|
||||
title: Accelerating training with local SGD
|
||||
- local: usage_guides/checkpoint
|
||||
title: Saving and loading training states
|
||||
- local: usage_guides/deepspeed
|
||||
title: How to use DeepSpeed
|
||||
- local: usage_guides/tracking
|
||||
title: Using experiment trackers
|
||||
- local: usage_guides/big_modeling
|
||||
title: How to use large models with small resources
|
||||
- local: usage_guides/memory
|
||||
title: How to avoid CUDA Out-of-Memory
|
||||
- local: usage_guides/sagemaker
|
||||
title: Using 🤗 Accelerate on SageMaker
|
||||
- local: usage_guides/mps
|
||||
title: How to use Apple Silicon M1 GPUs
|
||||
- local: usage_guides/low_precision_training
|
||||
title: How to train in low precision (FP8)
|
||||
- local: usage_guides/deepspeed
|
||||
title: How to use DeepSpeed
|
||||
- local: usage_guides/fsdp
|
||||
title: How to use Fully Sharded Data Parallelism
|
||||
- local: usage_guides/megatron_lm
|
||||
title: How to use Megatron-LM
|
||||
- local: usage_guides/training_zoo
|
||||
title: 🤗 Accelerate Example Zoo
|
||||
- local: usage_guides/sagemaker
|
||||
title: How to use 🤗 Accelerate with SageMaker
|
||||
- local: usage_guides/ipex
|
||||
title: How to use 🤗 Accelerate with Intel® Extension for PyTorch for cpu
|
||||
title: How-To Guides
|
||||
- sections:
|
||||
- local: concept_guides/internal_mechanism
|
||||
title: 🤗 Accelerate's internal mechanism
|
||||
- local: concept_guides/big_model_inference
|
||||
title: Loading big models into memory
|
||||
- local: concept_guides/performance
|
||||
title: Comparing performance across distributed setups
|
||||
- local: concept_guides/gradient_synchronization
|
||||
title: Gradient synchronization
|
||||
- local: concept_guides/deferring_execution
|
||||
title: Executing and deferring jobs
|
||||
- local: concept_guides/gradient_synchronization
|
||||
title: Gradient synchronization
|
||||
- local: concept_guides/low_precision_training
|
||||
title: How training in low-precision environments is possible (FP8)
|
||||
- local: concept_guides/training_tpu
|
||||
title: TPU best practices
|
||||
title: Concepts and fundamentals
|
||||
@ -75,4 +95,6 @@
|
||||
title: Utility functions and classes
|
||||
- local: package_reference/megatron_lm
|
||||
title: Megatron-LM Utilities
|
||||
title: "Reference"
|
||||
- local: package_reference/fsdp
|
||||
title: Fully Sharded Data Parallelism Utilities
|
||||
title: "Reference"
|
||||
|
||||
@ -8,11 +8,14 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Installation and Configuration
|
||||
|
||||
Before you start, you will need to setup your environment, install the appropriate packages, and configure 🤗 Accelerate. 🤗 Accelerate is tested on **Python 3.7+**.
|
||||
Before you start, you will need to setup your environment, install the appropriate packages, and configure 🤗 Accelerate. 🤗 Accelerate is tested on **Python 3.8+**.
|
||||
|
||||
## Installing 🤗 Accelerate
|
||||
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Launching your 🤗 Accelerate scripts
|
||||
@ -36,7 +39,7 @@ for batch in training_dataloader:
|
||||
|
||||
But how do you run this code and have it utilize the special hardware available to it?
|
||||
|
||||
First you should rewrite the above code into a function, and make it callable as a script. For example:
|
||||
First, you should rewrite the above code into a function, and make it callable as a script. For example:
|
||||
|
||||
```diff
|
||||
from accelerate import Accelerator
|
||||
@ -61,7 +64,7 @@ First you should rewrite the above code into a function, and make it callable as
|
||||
+ main()
|
||||
```
|
||||
|
||||
Next you need to launch it with `accelerate launch`.
|
||||
Next, you need to launch it with `accelerate launch`.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
@ -74,7 +77,7 @@ Next you need to launch it with `accelerate launch`.
|
||||
## Using accelerate launch
|
||||
|
||||
🤗 Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`.
|
||||
This command wraps around all of the different commands needed to launch your script on various platforms, without you having to remember what each of them are.
|
||||
This command wraps around all of the different commands needed to launch your script on various platforms, without you having to remember what each of them is.
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -88,7 +91,7 @@ You can launch your script quickly by using:
|
||||
accelerate launch {script_name.py} --arg1 --arg2 ...
|
||||
```
|
||||
|
||||
Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal!
|
||||
Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterward like normal!
|
||||
|
||||
Since this runs the various torch spawn methods, all of the expected environment variables can be modified here as well.
|
||||
For example, here is how to use `accelerate launch` with a single GPU:
|
||||
@ -105,6 +108,12 @@ Here is how you would use all GPUs and train with mixed precision disabled:
|
||||
accelerate launch --multi_gpu {script_name.py} {--arg1} {--arg2} ...
|
||||
```
|
||||
|
||||
Or by specifying a number of GPUs to use:
|
||||
|
||||
```bash
|
||||
accelerate launch --num_processes=2 {script_name.py} {--arg1} {--arg2} ...
|
||||
```
|
||||
|
||||
To get more specific you should pass in the needed parameters yourself. For instance, here is how you
|
||||
would also launch that same script on two GPUs using mixed precision while avoiding all of the warnings:
|
||||
|
||||
@ -130,6 +139,30 @@ For a visualization of this difference, that earlier `accelerate launch` on mult
|
||||
MIXED_PRECISION="fp16" torchrun --nproc_per_node=2 --num_machines=1 {script_name.py} {--arg1} {--arg2} ...
|
||||
```
|
||||
|
||||
You can also launch your script by running the launch CLI as a python module itself, which lets you pass in other python-specific
|
||||
launching behaviors. To do so, use `accelerate.commands.launch` instead of `accelerate launch`:
|
||||
|
||||
```bash
|
||||
python -m accelerate.commands.launch --num_processes=2 {script_name.py} {--arg1} {--arg2}
|
||||
```
|
||||
|
||||
If you want to execute the script with any other python flags, you can pass them in as well, similar to `-m`. For example,
the command below enables unbuffered stdout and stderr:
|
||||
|
||||
```bash
|
||||
python -u -m accelerate.commands.launch --num_processes=2 {script_name.py} {--arg1} {--arg2}
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
You can run your code on CPU as well! This is helpful for debugging and testing purposes on toy models and datasets.
|
||||
|
||||
```bash
|
||||
accelerate launch --cpu {script_name.py} {--arg1} {--arg2}
|
||||
```
|
||||
|
||||
</Tip>
|
||||
|
||||
## Why you should always use `accelerate config`
|
||||
|
||||
Why is it useful to the point you should **always** run `accelerate config`?
|
||||
@ -175,4 +208,25 @@ use_cpu: false
|
||||
Launching a script from the location of that custom yaml file looks like the following:
|
||||
```bash
|
||||
accelerate launch --config_file {path/to/config/my_config_file.yaml} {script_name.py} {--arg1} {--arg2} ...
|
||||
```
|
||||
```
|
||||
|
||||
## Multi-node training
|
||||
Multi-node training with 🤗 Accelerate is similar to [multi-node training with torchrun](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). The simplest way to launch a multi-node training run is to do the following:
|
||||
|
||||
- Copy your codebase and data to all nodes. (or place them on a shared filesystem)
|
||||
- Setup your python packages on all nodes.
|
||||
- Run `accelerate config` on the main single node first. After specifying the number of nodes, you will be asked to specify the rank of each node (this will be 0 for the main/master node), along with the IP address and port for the main process. This is required for the worker nodes to communicate with the main process. Afterwards, you can copy or send this config file across all of your nodes, changing the `machine_rank` to 1, 2, 3, etc. so that you don't have to run `accelerate config` on each of them (or you can follow the directions for launching with `torchrun` directly instead).
|
||||
|
||||
Once you have done this, you can start your multi-node training run by running `accelerate launch` (or `torchrun`) on all nodes.
|
||||
|
||||
<Tip>
|
||||
It is required that the command be run on all nodes for everything to start, not just running it from the main node. You can use something like SLURM or a different process executor to wrap around this requirement and call everything from a single command.
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
It is recommended to use the intranet IP of your main node over the public IP for better latency. This is the `192.168.x.x` or the `172.x.x.x` address you see when you run `hostname -I` on the main node.
|
||||
|
||||
</Tip>
|
||||
|
||||
To get a better idea about multi-node training, check out our example for [multi-node training with FSDP](https://huggingface.co/blog/ram-efficient-pytorch-fsdp).
|
||||
@ -8,13 +8,16 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Migrating your code to 🤗 Accelerate
|
||||
|
||||
This tutorial will detail how to easily convert existing PyTorch code to use 🤗 Accelerate!
|
||||
You'll see that by just changing a few lines of code, 🤗 Accelerate can perform its magic and get you on
|
||||
your way towards running your code on distributed systems with ease!
|
||||
your way toward running your code on distributed systems with ease!
|
||||
|
||||
## The base training loop
|
||||
|
||||
@ -65,7 +68,7 @@ change the definition of `device` to come from [`Accelerator`]:
|
||||
|
||||
### Preparing your objects
|
||||
|
||||
Next you need to pass all of the important objects related to training into [`~Accelerator.prepare`]. 🤗 Accelerate will
|
||||
Next, you need to pass all of the important objects related to training into [`~Accelerator.prepare`]. 🤗 Accelerate will
|
||||
make sure everything is set up in the current environment for you to start training:
|
||||
|
||||
```
|
||||
@ -73,7 +76,7 @@ model, optimizer, training_dataloader, scheduler = accelerator.prepare(
|
||||
model, optimizer, training_dataloader, scheduler
|
||||
)
|
||||
```
|
||||
These objects are returned in the same order they were sent in with. By default when using `device_placement=True`, all of the objects that can be sent to the right device will be.
|
||||
These objects are returned in the same order they were sent in. By default when using `device_placement=True`, all of the objects that can be sent to the right device will be.
|
||||
If you need to work with data that isn't passed to [`~Accelerator.prepare`] but should be on the active device, you should pass in the `device` you made earlier.
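For example, a minimal sketch (the `extra_tensor` name is purely illustrative):

```python
import torch

# `device` is the accelerator device created earlier in this guide
extra_tensor = torch.ones(4)  # an illustrative tensor that was not passed to `prepare`
extra_tensor = extra_tensor.to(device)
```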
|
||||
|
||||
<Tip warning={true}>
|
||||
@ -121,3 +124,6 @@ for batch in training_dataloader:
|
||||
scheduler.step()
|
||||
```
|
||||
|
||||
## More Resources
|
||||
|
||||
To check out more ways on how to migrate to 🤗 Accelerate, check out our [interactive migration tutorial](https://huggingface.co/docs/accelerate/usage_guides/explore) which showcases other items that need to be watched for when using Accelerate and how to do so quickly.
|
||||
@ -8,9 +8,12 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Launching Multi-Node Training from a Jupyter Environment
|
||||
# Launching Multi-GPU Training from a Jupyter Environment
|
||||
|
||||
This tutorial teaches you how to fine-tune a computer vision model with 🤗 Accelerate from a Jupyter Notebook on a distributed system.
You will also learn how to set up a few requirements needed for ensuring your environment is configured properly, your data has been prepared properly, and finally how to launch training.
|
||||
@ -35,7 +38,7 @@ The following code will restart Jupyter after writing the configuration, as CUDA
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
CUDA can't be initialized more than once on a multi-node system. It's fine to debug in the notebook and have calls to CUDA, but in order to finally train a full cleanup and restart will need to be performed.
|
||||
CUDA can't be initialized more than once on a multi-GPU system. It's fine to debug in the notebook and have calls to CUDA, but in order to finally train a full cleanup and restart will need to be performed.
|
||||
|
||||
</Tip>
|
||||
|
||||
@ -153,7 +156,7 @@ def get_dataloaders(batch_size: int = 64):
|
||||
random_perm = np.random.permutation(len(fnames))
|
||||
cut = int(0.8 * len(fnames))
|
||||
train_split = random_perm[:cut]
|
||||
eval_split = random_perm[:cut]
|
||||
eval_split = random_perm[cut:]
|
||||
|
||||
# For training a simple RandomResizedCrop will be used
|
||||
train_tfm = Compose([RandomResizedCrop((224, 224), scale=(0.5, 1.0)), ToTensor()])
|
||||
@ -183,7 +186,7 @@ Here is a basic training loop for the animal classification problem:
|
||||
|
||||
<Tip>
|
||||
|
||||
The code has been split up to allow for explainations on each section. A full version that can be copy and pasted will be available at the end
|
||||
The code has been split up to allow for explanations on each section. A full version that can be copy and pasted will be available at the end
|
||||
|
||||
</Tip>
|
||||
|
||||
@ -337,11 +340,11 @@ def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64):
|
||||
mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None]
|
||||
std = torch.tensor(model.default_cfg["std"])[None, :, None, None]
|
||||
|
||||
# To make this constant available on the active device, set it to the accelerator device
|
||||
# To make these constants available on the active device, set it to the accelerator device
|
||||
mean = mean.to(accelerator.device)
|
||||
std = std.to(accelerator.device)
|
||||
|
||||
# Intantiate the optimizer
|
||||
# Instantiate the optimizer
|
||||
optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25)
|
||||
|
||||
# Instantiate the learning rate scheduler
|
||||
@ -398,6 +401,26 @@ args = ("fp16", 42, 64)
|
||||
notebook_launcher(training_loop, args, num_processes=2)
|
||||
```
|
||||
|
||||
In the case of running on multiple nodes, you need to set up a Jupyter session at each node and run the launching cell at the same time.
|
||||
|
||||
For an environment containing 2 nodes (computers) with 8 GPUs each and the main computer with an IP address of "172.31.43.8", it would look like so:
|
||||
|
||||
```python
|
||||
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=0, num_nodes=2, num_processes=8)
|
||||
```
|
||||
|
||||
And in the second Jupyter session on the other machine:
|
||||
|
||||
<Tip>
|
||||
|
||||
Notice how the `node_rank` has changed
|
||||
|
||||
</Tip>
|
||||
|
||||
```python
|
||||
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=1, num_nodes=2, num_processes=8)
|
||||
```
|
||||
|
||||
In the case of running on the TPU, it would look like so:
|
||||
|
||||
```python
|
||||
@ -420,10 +443,17 @@ epoch 4: 94.71
|
||||
|
||||
And that's it!
|
||||
|
||||
## Debugging
|
||||
|
||||
A common issue when running the `notebook_launcher` is hitting a "CUDA has already been initialized" error. This usually stems
from an import or prior code in the notebook that makes a call to the PyTorch `torch.cuda` sublibrary. To help narrow down what went wrong,
you can launch the `notebook_launcher` with `ACCELERATE_DEBUG_MODE=yes` in your environment, and an additional check
will be made when spawning that a regular process can be created and utilize CUDA without issue. (Your CUDA code can still be run afterwards.)
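As a rough sketch, assuming `training_loop` and `args` are defined as in the sections above, this can also be set from inside the notebook before launching:

```python
import os

# Ask Accelerate to run its extra CUDA sanity check when spawning processes
os.environ["ACCELERATE_DEBUG_MODE"] = "yes"

from accelerate import notebook_launcher

notebook_launcher(training_loop, args, num_processes=2)
```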
|
||||
|
||||
## Conclusion
|
||||
|
||||
This notebook showed how to perform distributed training from inside of a Jupyter Notebook. Some key notes to remember:
|
||||
|
||||
- Make sure to save any code that uses CUDA (or CUDA imports) for the function passed to [`notebook_launcher`]
|
||||
- Set the `num_processes` to be the number of devices used for training (such as number of GPUs, CPUs, TPUs, etc)
|
||||
- If using the TPU, declare your model outside the training loop function
|
||||
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Overview
|
||||
222
docs/source/basic_tutorials/troubleshooting.md
Normal file
@ -0,0 +1,222 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Troubleshooting guide
|
||||
|
||||
This guide aims to provide you with the tools and knowledge required to navigate some common issues. However,
as 🤗 Accelerate continuously evolves and the use cases and setups are diverse, you might encounter an issue not covered in this
guide. If the suggestions listed in this guide do not cover your situation, please refer to the final section of
the guide, [Asking for Help](#ask-for-help), to learn where to find help with your specific issue.
|
||||
|
||||
## Logging
|
||||
|
||||
When facing an error, logging can help narrow down where it is coming from. In a distributed setup with multiple processes,
|
||||
logging can be a challenge, but 🤗 Accelerate provides a utility that streamlines the logging process and ensures that
|
||||
logs are synchronized and managed effectively across the distributed setup.
|
||||
|
||||
To troubleshoot an issue, use `accelerate.logging` instead of the standard Python `logging` module:
|
||||
|
||||
```diff
|
||||
- import logging
|
||||
+ from accelerate.logging import get_logger
|
||||
- logger = logging.getLogger(__name__)
|
||||
+ logger = get_logger(__name__)
|
||||
```
|
||||
|
||||
To set the log level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`), export it as the `ACCELERATE_LOG_LEVEL` environment variable,
or pass it as `log_level` to `get_logger`:
|
||||
|
||||
```python
|
||||
from accelerate.logging import get_logger
|
||||
|
||||
logger = get_logger(__name__, log_level="INFO")
|
||||
```
|
||||
|
||||
By default, the log is called on main processes only. To call it on all processes, pass `main_process_only=False`.
|
||||
If a log should be called on all processes and in order, also pass `in_order=True`.
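For example, a minimal sketch combining these options (the messages themselves are illustrative):

```python
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()
logger = get_logger(__name__, log_level="INFO")

# Logged once, from the main process only (default behavior)
logger.info("Starting training")

# Logged by every process
logger.info(f"Hello from process {accelerator.process_index}", main_process_only=False)

# Logged by every process, in rank order
logger.info("Checkpoint reached", main_process_only=False, in_order=True)
```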
|
||||
|
||||
## Hanging code and timeout errors
|
||||
|
||||
### Mismatched tensor shapes
|
||||
|
||||
If your code seems to be hanging for a significant amount of time on a distributed setup, a common cause is mismatched shapes of tensors on different
|
||||
devices.
|
||||
|
||||
When running scripts in a distributed fashion, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are
|
||||
necessary to grab tensors across devices to perform operations on them collectively. These (and other) functions rely on
|
||||
`torch.distributed` performing a `gather` operation, which requires that tensors have the **exact same shape** across all processes.
|
||||
When the tensor shapes don't match, you will experience hanging code and eventually hit a timeout exception.
|
||||
|
||||
If you suspect this to be the case, use Accelerate's operational debug mode to immediately catch the issue.
|
||||
|
||||
The recommended way to enable Accelerate's operational debug mode is during `accelerate config` setup.
|
||||
Alternative ways to enable debug mode are:
|
||||
|
||||
* From the CLI:
|
||||
|
||||
```bash
|
||||
accelerate launch --debug {my_script.py} --arg1 --arg2
|
||||
```
|
||||
|
||||
* As an environmental variable (which avoids the need for `accelerate launch`):
|
||||
|
||||
```bash
|
||||
ACCELERATE_DEBUG_MODE="1" torchrun {my_script.py} --arg1 --arg2
|
||||
```
|
||||
|
||||
* Manually changing the `config.yaml` file:
|
||||
|
||||
```diff
|
||||
compute_environment: LOCAL_MACHINE
|
||||
+debug: true
|
||||
```
|
||||
|
||||
Once you enable the debug mode, you should get a similar traceback that points to the tensor shape mismatch issue:
|
||||
|
||||
```py
|
||||
Traceback (most recent call last):
|
||||
File "/home/zach_mueller_huggingface_co/test.py", line 18, in <module>
|
||||
main()
|
||||
File "/home/zach_mueller_huggingface_co/test.py", line 15, in main
|
||||
broadcast_tensor = broadcast(tensor)
|
||||
File "/home/zach_mueller_huggingface_co/accelerate/src/accelerate/utils/operations.py", line 303, in wrapper
|
||||
accelerate.utils.operations.DistributedOperationException:
|
||||
|
||||
Cannot apply desired operation due to shape mismatches. All shapes across devices must be valid.
|
||||
|
||||
Operation: `accelerate.utils.operations.broadcast`
|
||||
Input shapes:
|
||||
- Process 0: [1, 5]
|
||||
- Process 1: [1, 2, 5]
|
||||
```
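If the shape mismatch is expected (for example, variable-length outputs), one possible remedy, sketched below with illustrative shapes, is to pad tensors to a common shape before gathering with [`~Accelerator.pad_across_processes`]:

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()

# Each process may produce a different sequence length (illustrative values)
logits = torch.randn(8, 10 + accelerator.process_index)

# Pad to the largest length across processes, then gather safely
padded = accelerator.pad_across_processes(logits, dim=1, pad_index=0)
gathered = accelerator.gather(padded)
```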
|
||||
|
||||
### Early stopping leads to hanging
|
||||
|
||||
When doing early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss),
|
||||
it may not be synchronized across all of them. As a result, a break can happen on process 0 but not on process 1.
|
||||
This will cause the code to hang indefinitely until a timeout occurs.
|
||||
|
||||
If you have early stopping conditionals, use `set_breakpoint` and `check_breakpoint` methods to make sure all the processes
|
||||
are ended correctly:
|
||||
|
||||
```py
|
||||
# Assume `should_do_breakpoint` is a custom defined function that returns a conditional,
|
||||
# and that conditional might be true only on process 1
|
||||
if should_do_breakpoint(loss):
|
||||
accelerator.set_breakpoint()
|
||||
|
||||
# Later in the training script when we need to check for the breakpoint
|
||||
if accelerator.check_breakpoint():
|
||||
break
|
||||
```
|
||||
|
||||
### Hanging on low kernel versions on Linux
|
||||
|
||||
This is a known issue. On Linux with kernel version < 5.5, hanging processes have been reported. To avoid
|
||||
encountering this problem, we recommend upgrading your system to a later kernel version.
|
||||
|
||||
## CUDA out of memory
|
||||
|
||||
One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory",
|
||||
as the entire script needs to be restarted, progress is lost, and typically a developer would want to simply
|
||||
start their script and let it run.
|
||||
|
||||
To address this problem, `Accelerate` offers a utility `find_executable_batch_size` that is heavily based on [toma](https://github.com/BlackHC/toma).
|
||||
The utility retries code that fails due to OOM (out-of-memory) conditions and lowers batch sizes automatically.
|
||||
|
||||
### find_executable_batch_size
|
||||
|
||||
This algorithm operates with exponential decay, halving the batch size after each failed run of the
|
||||
training script. To use it, restructure your training function to include an inner function that includes this wrapper,
|
||||
and build your dataloaders inside it. At a minimum, this could look like 4 new lines of code.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
The inner function *must* take in the batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for us.
|
||||
|
||||
</Tip>
|
||||
|
||||
It should also be noted that anything which will consume CUDA memory and is passed to the `accelerator` **must** be declared inside the inner function,
|
||||
such as models and optimizers.
|
||||
|
||||
```diff
|
||||
def training_function(args):
|
||||
accelerator = Accelerator()
|
||||
|
||||
+ @find_executable_batch_size(starting_batch_size=args.batch_size)
|
||||
+ def inner_training_loop(batch_size):
|
||||
+ nonlocal accelerator # Ensure they can be used in our context
|
||||
+ accelerator.free_memory() # Free all lingering references
|
||||
model = get_model()
|
||||
model.to(accelerator.device)
|
||||
optimizer = get_optimizer()
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
lr_scheduler = get_scheduler(
|
||||
optimizer,
|
||||
num_training_steps=len(train_dataloader)*num_epochs
|
||||
)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
train(model, optimizer, train_dataloader, lr_scheduler)
|
||||
validate(model, eval_dataloader)
|
||||
+ inner_training_loop()
|
||||
```
|
||||
|
||||
To find out more, check the documentation [here](../package_reference/utilities#accelerate.find_executable_batch_size).
|
||||
|
||||
## Non-reproducible results between device setups
|
||||
|
||||
If you have changed the device setup and are observing different model performance, this is likely due to the fact that
|
||||
you have not updated your script when moving from one setup to another. The same script with the same batch size across TPU,
|
||||
multi-GPU, and single-GPU with Accelerate will have different results.
|
||||
|
||||
For example, if you were previously training on a single GPU with a batch size of 16, when moving to two GPU setup,
|
||||
you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate,
|
||||
the batch size passed to the dataloader is the **batch size per GPU**.
|
||||
|
||||
To make sure you can reproduce the results between the setups, make sure to use the same seed, adjust the batch size
|
||||
accordingly, and consider scaling the learning rate.
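As a minimal sketch of the batch-size adjustment (the seed and the effective batch size of 64 are illustrative values):

```python
from accelerate import Accelerator
from accelerate.utils import set_seed

accelerator = Accelerator()
set_seed(42)  # use the same seed on every process and across setups

# Keep the *effective* batch size constant across setups:
# effective batch size = per-device batch size * number of processes
effective_batch_size = 64
per_device_batch_size = effective_batch_size // accelerator.num_processes
```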
|
||||
|
||||
For more details and a quick reference for batch sizes, check out the [Comparing performance between different device setups](../concept_guides/performance) guide.
|
||||
|
||||
## Performance issues on different GPUs
|
||||
|
||||
If your multi-GPU setup consists of different GPUs, you may hit some limitations:
|
||||
|
||||
- There may be an imbalance in GPU memory between the GPUs. In this case, the GPU with smaller memory will limit the batch size or the size of the model that can be loaded onto the GPUs.
|
||||
- If you are using GPUs with different performance profiles, the performance will be driven by the slowest GPU that you are using as the other GPUs will have to wait for it to complete its workload.
|
||||
|
||||
Vastly different GPUs within the same setup can lead to performance bottlenecks.
|
||||
|
||||
## Ask for help
|
||||
|
||||
If the above troubleshooting tools and advice did not help you resolve your issue, reach out for help to the community
|
||||
and the team.
|
||||
|
||||
### Forums
|
||||
|
||||
Ask for help on the Hugging Face forums - post your question in the [🤗 Accelerate category](https://discuss.huggingface.co/c/accelerate/18).
|
||||
Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved!
|
||||
|
||||
### Discord
|
||||
|
||||
Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you.
|
||||
|
||||
### GitHub Issues
|
||||
|
||||
Create an Issue on the 🤗 Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you suspect
|
||||
to have found a bug related to the library. Include context regarding the bug and details about your distributed setup
|
||||
to help us better figure out what's wrong and how we can fix it.
|
||||
@ -8,11 +8,14 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Handling big models
|
||||
# Handling big models for inference
|
||||
|
||||
When loading a pretrained model in PyTorch, the usual workflow looks like this:
|
||||
When loading a pre-trained model in PyTorch, the usual workflow looks like this:
|
||||
|
||||
```py
|
||||
import torch
|
||||
@ -27,11 +30,11 @@ In plain English, those steps are:
|
||||
2. Load the model weights (in a dictionary usually called a state dict) from the disk
|
||||
3. Load those weights inside the model
|
||||
|
||||
While this works very well for regularly sized models, this workflow has some clear limitations when we deal with a huge model: in step 1, we load a full version of the model in RAM, and spend some time randomly initializing the weights (which will be discarded in step 3). In step 2, we load another full version of the model in RAM, with the pretrained weights. If you're loading a model with 6 billions parameters, this means you will need 24GB of RAM for each copy of the model, so 48GB in total (half of it to load the model in FP16).
|
||||
While this works very well for regularly sized models, this workflow has some clear limitations when we deal with a huge model: in step 1, we load a full version of the model in RAM, and spend some time randomly initializing the weights (which will be discarded in step 3). In step 2, we load another full version of the model in RAM, with the pre-trained weights. If you're loading a model with 6 billion parameters, this means you will need 24GB of RAM for each copy of the model, so 48GB in total (half of it to load the model in FP16).
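As a quick, purely illustrative back-of-the-envelope check of those numbers:

```python
num_params = 6_000_000_000      # a 6B-parameter model
bytes_per_fp32_param = 4        # full precision weights

one_copy_gb = num_params * bytes_per_fp32_param / 1e9
print(one_copy_gb)              # ~24 GB, and the workflow above briefly holds two such copies
```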
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
This API is quite new and still in its experimental stage. While we strive to provide a stable API, it's possible some small parts of the public API will change in the future.
|
||||
This API is quite new and still in its experimental stage. While we strive to provide a stable API, it's possible some small parts of the public API will change in the future.
|
||||
|
||||
</Tip>
|
||||
|
||||
@ -43,7 +46,7 @@ While this works very well for regularly sized models, this workflow has some cl
|
||||
|
||||
### Instantiating an empty model
|
||||
|
||||
The first tool 🤗 Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM, so that step 1 can be done on models of any size. Here is how it works:
|
||||
The first tool 🤗 Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM so that step 1 can be done on models of any size. Here is how it works:
|
||||
|
||||
```py
|
||||
from accelerate import init_empty_weights
|
||||
@ -59,7 +62,7 @@ with init_empty_weights():
|
||||
model = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
|
||||
```
|
||||
|
||||
initializes an empty model with a bit more than 100B parameters. Behind the scenes, this relies on the meta device introduced in PyTorch 1.9. During the initialization under the context manager, each time a parameter is created, it is instantly moved on that device.
|
||||
initializes an empty model with a bit more than 100B parameters. Behind the scenes, this relies on the meta device introduced in PyTorch 1.9. During the initialization under the context manager, each time a parameter is created, it is instantly moved to that device.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
@ -69,9 +72,9 @@ initializes an empty model with a bit more than 100B parameters. Behind the scen
|
||||
|
||||
### Sharded checkpoints
|
||||
|
||||
It's possible your model is so big that even a single copy won't fit in RAM. That doesn't mean it can't be loaded: if you have one or several GPUs, this is more memory available to store your model. In this case, it's better if your checkpoint is split in several smaller files that we call checkpoint shards.
|
||||
It's possible your model is so big that even a single copy won't fit in RAM. That doesn't mean it can't be loaded: if you have one or several GPUs, this is more memory available to store your model. In this case, it's better if your checkpoint is split into several smaller files that we call checkpoint shards.
|
||||
|
||||
🤗 Accelerate will handle sharded checkpoints as long as you follow the following format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in the JSON format that contains a dictionary mapping parameter names to the file containing their weights. For instance we could have a folder containing:
|
||||
🤗 Accelerate will handle sharded checkpoints as long as you follow the following format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in the JSON format that contains a dictionary mapping parameter names to the file containing their weights. You can easily shard your model with [`~Accelerator.save_model`]. For instance, we could have a folder containing:
|
||||
|
||||
```bash
|
||||
first_state_dict.bin
|
||||
@ -96,44 +99,65 @@ and `first_state_dict.bin` containing the weights for `"linear1.weight"` and `"l
|
||||
|
||||
The second tool 🤗 Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard.
|
||||
|
||||
Here is how we can use this to load the [GPT-J-6B](https://huggingface.co/EleutherAI/gpt-j-6B) model. You clone the sharded version of this model with:
|
||||
If you want to use big model inference with 🤗 Transformers models, check out this [documentation](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading).
|
||||
|
||||
Here is how we can use this to load the [GPT2-1.5B](https://huggingface.co/marcsun13/gpt2-xl-linear-sharded) model.
|
||||
|
||||
Let's download the sharded version of this model.
|
||||
|
||||
```bash
|
||||
git clone https://huggingface.co/sgugger/sharded-gpt-j-6B
|
||||
cd sharded-gpt-j-6B
|
||||
git-lfs install
|
||||
git pull
|
||||
pip install huggingface_hub
|
||||
```
|
||||
|
||||
then we can initialize the model with
|
||||
```py
|
||||
from huggingface_hub import snapshot_download
|
||||
checkpoint = "marcsun13/gpt2-xl-linear-sharded"
|
||||
weights_location = snapshot_download(repo_id=checkpoint)
|
||||
```
|
||||
|
||||
In order to initialize the model, we will use the minGPT library.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/karpathy/minGPT.git
|
||||
pip install minGPT/
|
||||
```
|
||||
|
||||
```py
|
||||
from accelerate import init_empty_weights
|
||||
from transformers import AutoConfig, AutoModelForCausalLM
|
||||
from mingpt.model import GPT
|
||||
|
||||
checkpoint = "EleutherAI/gpt-j-6B"
|
||||
config = AutoConfig.from_pretrained(checkpoint)
|
||||
model_config = GPT.get_default_config()
|
||||
model_config.model_type = 'gpt2-xl'
|
||||
model_config.vocab_size = 50257
|
||||
model_config.block_size = 1024
|
||||
|
||||
with init_empty_weights():
|
||||
model = AutoModelForCausalLM.from_config(config)
|
||||
model = GPT(model_config)
|
||||
```
|
||||
|
||||
and load the checkpoint we just downloaded with:
|
||||
Then, load the checkpoint we just downloaded with:
|
||||
|
||||
```py
|
||||
from accelerate import load_checkpoint_and_dispatch
|
||||
|
||||
model = load_checkpoint_and_dispatch(
|
||||
model, "sharded-gpt-j-6B", device_map="auto", no_split_module_classes=["GPTJBlock"]
|
||||
model, checkpoint=weights_location, device_map="auto", no_split_module_classes=['Block']
|
||||
)
|
||||
```
|
||||
|
||||
By passing `device_map="auto"`, we tell 🤗 Accelerate to determine automatically where to put each layer of the model depending on the available resources:
|
||||
- first we use the maximum space available on the GPU(s)
|
||||
- first, we use the maximum space available on the GPU(s)
|
||||
- if we still need space, we store the remaining weights on the CPU
|
||||
- if there is not enough RAM, we store the remaining weights on the hard drive as memory-mapped tensors
|
||||
|
||||
`no_split_module_classes=["GPTJBlock"]` indicates that the modules that are `GPTJBlock` should not be split on different devices. You should set here all blocks that include a residual connection of some kind.
|
||||
|
||||
#### `no_split_module_classes`
|
||||
|
||||
This parameter will indicate that some of the modules with the name `"Block"` should not be split across different devices. You should set here all blocks that
|
||||
include a residual connection of some kind.
|
||||
|
||||
|
||||
#### The `device_map`
|
||||
|
||||
You can see the `device_map` that 🤗 Accelerate picked by accessing the `hf_device_map` attribute of your model:
|
||||
|
||||
@ -143,43 +167,34 @@ model.hf_device_map
|
||||
|
||||
```python out
|
||||
{'transformer.wte': 0,
|
||||
'transformer.wpe': 0,
|
||||
'transformer.drop': 0,
|
||||
'transformer.h.0': 0,
|
||||
'transformer.h.1': 0,
|
||||
'transformer.h.2': 0,
|
||||
'transformer.h.3': 0,
|
||||
'transformer.h.4': 0,
|
||||
'transformer.h.5': 0,
|
||||
'transformer.h.6': 0,
|
||||
'transformer.h.7': 0,
|
||||
'transformer.h.8': 0,
|
||||
'transformer.h.9': 0,
|
||||
'transformer.h.10': 0,
|
||||
'transformer.h.11': 0,
|
||||
'transformer.h.12': 0,
|
||||
'transformer.h.13': 0,
|
||||
'transformer.h.14': 0,
|
||||
'transformer.h.15': 0,
|
||||
'transformer.h.16': 0,
|
||||
'transformer.h.17': 0,
|
||||
'transformer.h.18': 0,
|
||||
'transformer.h.19': 0,
|
||||
'transformer.h.20': 0,
|
||||
'transformer.h.21': 0,
|
||||
'transformer.h.22': 0,
|
||||
'transformer.h.23': 0,
|
||||
...
|
||||
'transformer.h.21': 0,
|
||||
'transformer.h.22': 1,
|
||||
'transformer.h.23': 1,
|
||||
'transformer.h.24': 1,
|
||||
'transformer.h.25': 1,
|
||||
'transformer.h.26': 1,
|
||||
'transformer.h.27': 1,
|
||||
'transformer.ln_f': 1,
|
||||
...
|
||||
'transformer.h.47': 1,
|
||||
'transformer.ln_f': 1,
|
||||
'lm_head': 1}
|
||||
```
|
||||
|
||||
You can also design your `device_map` yourself, if you prefer to explicitly decide where each layer should be. In this case, the command above becomes:
|
||||
It's fully possible to create your own device map for the layers to use as well, specifying the GPU device to use (a number), `"cpu"`, or `"disk"` and pass this in:
|
||||
|
||||
```python
|
||||
device_map = {
|
||||
"transformer.wte": "cpu",
|
||||
"transformer.wpe": 0,
|
||||
"transformer.drop": "cpu",
|
||||
"transformer.h.0": "disk"
|
||||
}
|
||||
|
||||
model = load_checkpoint_and_dispatch(
|
||||
model, checkpoint=weights_location, device_map=device_map
|
||||
)
|
||||
|
||||
```py
|
||||
model = load_checkpoint_and_dispatch(model, "sharded-gpt-j-6B", device_map=my_device_map)
|
||||
```
|
||||
|
||||
### Run the model
|
||||
@ -187,31 +202,30 @@ model = load_checkpoint_and_dispatch(model, "sharded-gpt-j-6B", device_map=my_de
|
||||
Now that we have done this, our model lies across several devices, and maybe the hard drive. But it can still be used as a regular PyTorch model:
|
||||
|
||||
```py
|
||||
from transformers import AutoTokenizer
|
||||
from mingpt.bpe import BPETokenizer
|
||||
tokenizer = BPETokenizer()
|
||||
inputs = tokenizer("Hello, my name is").to(0)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
|
||||
inputs = tokenizer("Hello, my name is", return_tensors="pt")
|
||||
inputs = inputs.to(0)
|
||||
output = model.generate(inputs["input_ids"])
|
||||
tokenizer.decode(output[0].tolist())
|
||||
outputs = model.generate(inputs, max_new_tokens=10, do_sample=False)[0]
|
||||
tokenizer.decode(outputs.cpu().squeeze())
|
||||
```
|
||||
|
||||
Behind the scenes, 🤗 Accelerate added hooks to the model, so that:
|
||||
- at each layer, the inputs are put on the right device (so even if your model is spread across several GPUs, it works)
|
||||
- for the weights offloaded on the CPU, they are put on a GPU just before the forward pass, and cleaned up just after
|
||||
- for the weights offloaded on the hard drive, they are loaded in RAM then put on a GPU just before the forward pass, and cleaned up just after
|
||||
- for the weights offloaded on the CPU, they are put on a GPU just before the forward pass and cleaned up just after
|
||||
- for the weights offloaded on the hard drive, they are loaded in RAM then put on a GPU just before the forward pass and cleaned up just after
|
||||
|
||||
This way, you model can run for inference even if it doesn't fit on one of the GPUs or the CPU RAM!
|
||||
This way, your model can run for inference even if it doesn't fit on one of the GPUs or the CPU RAM!
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
This only supports inference of your model, not training. Most of the computation happens behind `torch.no_grad()` context managers to avoid spending some GPU memory with intermediate activations.
|
||||
This only supports the inference of your model, not training. Most of the computation happens behind `torch.no_grad()` context managers to avoid spending some GPU memory with intermediate activations.
|
||||
|
||||
</Tip>
|
||||
|
||||
### Designing a device map
|
||||
|
||||
You can let 🤗 Accelerate handle the device map computation by setting `device_map` to one of the supported options (`"auto"`, `"balanced"`, `"balanced_low_0"`, `"sequential"`) or create one yourself, if you want more control over where each layer should go.
|
||||
You can let 🤗 Accelerate handle the device map computation by setting `device_map` to one of the supported options (`"auto"`, `"balanced"`, `"balanced_low_0"`, `"sequential"`) or create one yourself if you want more control over where each layer should go.
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -221,7 +235,7 @@ You can let 🤗 Accelerate handle the device map computation by setting `device
|
||||
|
||||
All the options will produce the same result when you don't have enough GPU memory to accommodate the whole model (which is to fit everything that can on the GPU, then offload weights on the CPU or even on the disk if there is not enough RAM).
|
||||
|
||||
When you have more GPU memory available than the model size, here the difference between each option:
|
||||
When you have more GPU memory available than the model size, here is the difference between each option:
|
||||
- `"auto"` and `"balanced"` evenly split the model on all available GPUs, making it possible for you to use a batch size greater than 1.
|
||||
- `"balanced_low_0"` evenly splits the model on all GPUs except the first one, and only puts on GPU 0 what does not fit on the others. This option is great when you need to use GPU 0 for some processing of the outputs, like when using the `generate` function for Transformers models
|
||||
- `"sequential"` will fit what it can on GPU 0, then move on GPU 1 and so forth (so won't use the last GPUs if it doesn't need to).
|
||||
@ -232,9 +246,9 @@ When you have more GPU memory available than the model size, here the difference
|
||||
|
||||
</Tip>
|
||||
|
||||
First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `"cpu"` key for the maximum RAM you want used for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `"10GiB"` or `"10GB"`.
|
||||
First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `"cpu"` key for the maximum RAM you want to use for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `"10GiB"` or `"10GB"`.
|
||||
|
||||
Here is an example where we don't want to use more than 10GiB on each of two GPUs and no more than 30GiB of CPU RAM for the model weights:
|
||||
Here is an example where we don't want to use more than 10GiB on each of the two GPUs and no more than 30GiB of CPU RAM for the model weights:
|
||||
|
||||
```python
|
||||
from accelerate import infer_auto_device_map
|
||||
@ -246,18 +260,18 @@ device_map = infer_auto_device_map(my_model, max_memory={0: "10GiB", 1: "10GiB",
|
||||
|
||||
When a first allocation happens in PyTorch, it loads CUDA kernels which take about 1-2GB of memory depending on the GPU. Therefore, you always have less usable memory than the actual size of the GPU. To see how much memory is actually used, do `torch.ones(1).cuda()` and look at the memory usage.
|
||||
|
||||
Therefore when you create memory maps with `max_memory` make sure to adjust the avaialble memory accordingly to avoid out-of-memory errors.
|
||||
Therefore, when you create memory maps with `max_memory`, make sure to adjust the available memory accordingly to avoid out-of-memory errors.
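For example, a quick way to see this overhead (a sketch assuming a CUDA GPU is available):

```python
import torch

torch.ones(1).cuda()  # triggers loading of the CUDA context and kernels on GPU 0
print(f"Allocated by tensors: {torch.cuda.memory_allocated() / 1024**2:.1f} MiB")
print(f"Reserved by PyTorch:  {torch.cuda.memory_reserved() / 1024**2:.1f} MiB")
# Compare with `nvidia-smi`: the difference is roughly the CUDA context overhead
# you should subtract from the values you pass to `max_memory`.
```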
|
||||
|
||||
</Tip>
|
||||
|
||||
Additionally, if you do some additional operations with your outputs without placing them back on the CPU (for instance inside the `generate` method of Transformers) and if you placed your inputs on a GPU, that GPU will consume more memory than the others (Accelerate always place the output back to the device of the input). Therefore if you would like to optimize the maximum batch size and you have many GPUs, give the first GPU less memory. For example, with BLOOM-176B on 8x80 A100 setup the close to ideal map is:
|
||||
Additionally, if you do some additional operations with your outputs without placing them back on the CPU (for instance inside the `generate` method of Transformers) and if you placed your inputs on a GPU, that GPU will consume more memory than the others (Accelerate always place the output back to the device of the input). Therefore if you would like to optimize the maximum batch size and you have many GPUs, give the first GPU less memory. For example, with BLOOM-176B on 8x80 A100 setup, the close-to-ideal map is:
|
||||
|
||||
```python
|
||||
max_memory = {0: "30GIB", 1: "46GIB", 2: "46GIB", 3: "46GIB", 4: "46GIB", 5: "46GIB", 6: "46GIB", 7: "46GIB"}
|
||||
```
|
||||
As you can see, we gave the remaining 7 GPUs ~50% more memory than GPU 0.
|
||||
|
||||
If you opt to fully design the `device_map` yourself, it should be a dictionary with keys being module names of your model and values being a valid device identifier (for instance an integer for the GPUs) or `"cpu"` for CPU offload, `"disk"` for disk offload. The keys need to cover the whole model, you can then define your device map as you wish: for instance if your model has two blocks (let's say `block1` and `block2`) which each contain three linear layers (let's say `linear1`, `linear2` and `linear3`), a valid device map can be:
|
||||
If you opt to fully design the `device_map` yourself, it should be a dictionary with keys being module names of your model and values being a valid device identifier (for instance an integer for the GPUs), `"cpu"` for CPU offload, or `"disk"` for disk offload. The keys need to cover the whole model; you can then define your device map as you wish: for instance, if your model has two blocks (let's say `block1` and `block2`) which each contain three linear layers (let's say `linear1`, `linear2` and `linear3`), a valid device map can be:
|
||||
|
||||
```python
|
||||
device_map = {"block1": 0, "block2": 1}
|
||||
@ -281,14 +295,47 @@ device_map = {"block1": 0, "block2.linear1": 1, "block2.linear2": 1}
|
||||
|
||||
</Tip>
|
||||
|
||||
## CPU offload only
|
||||
|
||||
If you want to offload your model to the CPU, you can use [`cpu_offload`]. As a result, all parameters of the model will be offloaded and only one copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that state dict and put on the execution device as they are needed, then offloaded again.
|
||||
|
||||
```python
|
||||
cpu_offload(model, execution_device)
|
||||
```
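As a more complete sketch with a toy model (the model and devices here are stand-ins for your own):

```python
import torch
from torch import nn
from accelerate import cpu_offload

model = nn.Sequential(nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 10))
cpu_offload(model, execution_device=torch.device("cuda:0"))

# The weights stay on the CPU and are streamed to cuda:0 layer by layer during the forward pass.
with torch.no_grad():
    out = model(torch.randn(2, 512, device="cuda:0"))
```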
|
||||
|
||||
You can also use [`cpu_offload_with_hook`]. This function offloads a model to the CPU and puts it back on an execution device when executed. The difference with [`cpu_offload`] is that the model stays on the execution device after the forward pass and is only offloaded again when the `offload` method of the returned `hook` is called. Furthermore, [`cpu_offload_with_hook`] is more performant but saves less memory. It is useful for pipelines running a model in a loop:
|
||||
|
||||
```python
|
||||
model_1, hook_1 = cpu_offload_with_hook(model_1, execution_device)
|
||||
model_2, hook_2 = cpu_offload_with_hook(model_2, execution_device, prev_module_hook=hook_1)
|
||||
model_3, hook_3 = cpu_offload_with_hook(model_3, execution_device, prev_module_hook=hook_2)
|
||||
|
||||
hid_1 = model_1(input)
|
||||
for i in range(50):
|
||||
# model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
|
||||
hid_2 = model_2(hid_1)
|
||||
# model2 is offloaded to the CPU just before this forward.
|
||||
    hid_3 = model_3(hid_2)
|
||||
|
||||
# For model3, you need to manually call the hook offload method.
|
||||
hook_3.offload()
|
||||
```
|
||||
|
||||
## Disk offload only
|
||||
|
||||
To perform disk offload, you can use [`disk_offload`]. As a result, all parameters of the model will be offloaded as memory-mapped arrays in a given folder. During the forward pass, parameters will be accessed from that folder and put on the execution device as they are needed, then offloaded again.
|
||||
|
||||
```python
|
||||
disk_offload(model, offload_dir, execution_device)
|
||||
```
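A similar sketch with a toy model, using a temporary folder for the memory-mapped weights:

```python
import tempfile

import torch
from torch import nn
from accelerate import disk_offload

model = nn.Sequential(nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 10))
offload_dir = tempfile.mkdtemp()  # any writable folder works
disk_offload(model, offload_dir=offload_dir, execution_device=torch.device("cuda:0"))

with torch.no_grad():
    out = model(torch.randn(2, 512, device="cuda:0"))
```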
|
||||
|
||||
## Limits and further development
|
||||
|
||||
We are aware of the current limitations in the API:
|
||||
|
||||
- While this could theoretically work on just one CPU with potential disk offload, you need at least one GPU to run this API. This will be fixed in further development.
|
||||
- [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) tries to maximize GPU and CPU RAM it sees available when you execute it. While PyTorch is very good at managing GPU RAM efficiently (and giving it back when not needed), it's not entirely true with Python and CPU RAM. Therefore, an automatically computed device map might be too intense on the CPU. Move a few modules to the disk device if you get crashes due to lack of RAM.
|
||||
- [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) tries to maximize GPU and CPU RAM it sees available when you execute it. While PyTorch is very good at managing GPU RAM efficiently (and giving it back when not needed), it's not entirely true with Python and CPU RAM. Therefore, an automatically computed device map might be too intense on the CPU. Move a few modules to the disk device if you get crashes due to a lack of RAM.
|
||||
- [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) attributes devices sequentially (to avoid moving things back and forth) so if your first layer is bigger than the size of the GPU you have, it will end up with everything on the CPU/Disk.
|
||||
- [`load_checkpoint_and_dispatch`] and [`load_checkpoint_in_model`] do not perform any check on the correctness of your state dict compared to your model at the moment (this will be fixed in a future version), so you may get some weird errors if trying to load a checkpoint with mismatched or missing keys.
|
||||
- The model parallelism used when your model is split on several GPUs is naive and not optimized, meaning that only one GPU works at a given time while the others sit idle.
|
||||
- When weights are offloaded on the CPU/hard drive, there is no pre-fetching (yet, we will work on this for future versions), which means the weights are put on the GPU only when they are needed and not before.
|
||||
- Hard-drive offloading might be very slow if the hardware you run on does not have fast communication between disk and CPU (like NVMes).
|
||||
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Deferring Executions
|
||||
@ -27,7 +30,7 @@ accelerator.wait_for_everyone()
|
||||
This instruction will block all the processes that arrive first until all the other processes have reached that
|
||||
point (if you run your script on just one GPU or CPU, this won't do anything).
|
||||
|
||||
A few example cases for when to use this utility are listed below:
|
||||
A few example cases of when to use this utility are listed below:
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -38,7 +41,7 @@ A few example cases for when to use this utility are listed below:
|
||||
|
||||
## Downloading a Dataset
|
||||
|
||||
When downloading a dataset, you should download it first on the main process and then loading the cached dataset in afterwards
|
||||
When downloading a dataset, you should download it first on the main process and then load the cached dataset afterward
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -104,4 +107,24 @@ with accelerator.main_process_first():
|
||||
batched=True,
|
||||
remove_columns=["idx", "sentence1", "sentence2"],
|
||||
)
|
||||
```
|
||||
|
||||
## Applying checks such as Early Stopping
|
||||
|
||||
To have a check that works with a flag set by a particular process, use the `set_trigger` and `check_trigger` API. Useful examples
|
||||
include situations such as early stopping based on monitoring the loss (as the loss differs slightly on each process).
|
||||
|
||||
Call [`Accelerator.set_trigger`] when your condition has been met, and [`Accelerator.check_trigger`] when checking if that condition has been met in any process:
|
||||
|
||||
```python
|
||||
for (x,y) in data_loader:
|
||||
logits = model(x)
|
||||
loss = loss_func(logits, y)
|
||||
# Assume `should_do_early_stopping` is a custom defined function that returns a conditional
|
||||
if should_do_early_stopping(loss):
|
||||
accelerator.set_trigger()
|
||||
|
||||
# Later in the training script when we need to check for the breakpoint
|
||||
if accelerator.check_trigger():
|
||||
break
|
||||
```
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Gradient Synchronization
|
||||
@ -45,22 +48,30 @@ training in a distributed setup. But how does this risk slowing down your code?
|
||||
In DDP (distributed data parallel), processes are expected to perform specific operations in a specific order
|
||||
at specific points, and these must also occur at roughly the same time on all processes before moving on.
|
||||
|
||||
The most direct example is when you update all of the parameters in a model through `.backward()`. All instances of the model
|
||||
need to have updated their gradients, collated, and updated again before moving onto the next batch of data. But when performing
|
||||
gradient accumulation, you accumulate `n` losses and skip `.backward()` until `n` batches have been reached. This
|
||||
can cause a significant slowdown since all the processes need to communicate with them more times than needed. How
|
||||
can you avoid this overhead?
|
||||
The most direct example is when you update model parameters through
|
||||
`optimizer.step()`.
|
||||
Without gradient accumulation, all instances of the model need to have updated
|
||||
their gradients computed, collated, and updated before moving on to the next
|
||||
batch of data.
|
||||
When performing gradient accumulation, you accumulate `n` loss gradients and
|
||||
skip `optimizer.step()` until `n` batches have been reached. As all training
|
||||
processes only need to synchronize by the time `optimizer.step()` is called,
|
||||
without any modification to your training step, this needless inter-process
|
||||
communication can cause a significant slowdown.
|
||||
|
||||
How can you avoid this overhead?
|
||||
|
||||
## Solving the slowdown problem
|
||||
|
||||
Since you are skipping these batches, their gradients do not need to be synchronized until the point where `.backward()` is actually called.
|
||||
Since you are skipping model parameter updates when training on these batches, their gradients do not need to be synchronized until the point where `optimizer.step()` is actually called.
|
||||
PyTorch cannot automagically tell when you need to do this, but they do provide a tool to help through the [`no_sync`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel.no_sync) context manager
|
||||
that is added to your model after converting it to DDP.
|
||||
|
||||
Under this context manager, PyTorch will skip synchronizing the gradients when `.backward()` is called, and the first call to `.backward()` outside this
|
||||
Under this context manager, PyTorch will skip synchronizing the gradients when
|
||||
`.backward()` is called, and the first call to `.backward()` outside this
|
||||
context manager will trigger the synchronization. See an example below:
|
||||
```python
|
||||
ddp_model, dataloader = accelerator.prepare(model, dataloader)
|
||||
ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer)
|
||||
|
||||
for index, batch in enumerate(dataloader):
|
||||
inputs, targets = batch
|
||||
@ -76,13 +87,14 @@ for index, batch in enumerate(dataloader):
|
||||
outputs = ddp_model(inputs)
|
||||
loss = loss_func(outputs)
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
```
|
||||
|
||||
In 🤗 Accelerate, to make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!),
|
||||
`ddp_model.no_sync` gets replaced with [`~Accelerator.no_sync`] and operates the same way:
|
||||
|
||||
```diff
|
||||
ddp_model, dataloader = accelerator.prepare(model, dataloader)
|
||||
ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer)
|
||||
|
||||
for index, batch in enumerate(dataloader):
|
||||
inputs, targets = batch
|
||||
@ -99,13 +111,15 @@ In 🤗 Accelerate to make this an API that can be called no matter the training
|
||||
outputs = ddp_model(inputs)
|
||||
loss = loss_func(outputs)
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
As you may expect, the [`~Accelerator.accumulate`] function wraps around this conditional check by keeping track of the current batch number, leaving you with the final
|
||||
gradient accumulation API:
|
||||
|
||||
```python
|
||||
ddp_model, dataloader = accelerator.prepare(model, dataloader)
|
||||
ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer)
|
||||
|
||||
for batch in dataloader:
|
||||
with accelerator.accumulate(model):
|
||||
@ -114,6 +128,42 @@ for batch in dataloader:
|
||||
outputs = model(inputs)
|
||||
loss = loss_function(outputs, targets)
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
As a result, you should use either *`accelerator.accumulate`* or *`accelerator.no_sync`* when it comes to API choice.
|
||||
|
||||
|
||||
## Just how much of a slowdown is there, and easy mistakes you can make
|
||||
|
||||
To set up a realistic example, consider the following setup:
|
||||
|
||||
* Two single-GPU T4 nodes and one node with two GPUs
|
||||
* Each GPU is a T4, and all are hosted on GCP
|
||||
* The script used is a modification of the [NLP Example](https://github.com/muellerzr/timing_experiments/blob/main/baseline.py) script
|
||||
* Batch size per GPU is 16, and gradients are accumulated every 4 steps
|
||||
|
||||
All scripts are available in [this repository](https://github.com/muellerzr/timing_experiments).
|
||||
|
||||
If you are not careful about gradient synchronization and GPU communication, a *large* amount of time can be wasted
|
||||
by having these GPUs communicate with each other during unnecessary periods.
|
||||
|
||||
By how much?
|
||||
|
||||
Reference:
|
||||
- Baseline: uses no synchronization practices discussed here
|
||||
- `no_sync` improperly: `no_sync` only around the `backward` call, not the `forward`
|
||||
- `no_sync`: using the `no_sync` pattern properly
|
||||
- `accumulate`: using [`~Accelerator.accumulate`] properly
|
||||
|
||||
Below are the average seconds per batch iterating over 29 batches of data for each setup, on both a single node and the dual-node setup:
|
||||
|
||||
| | Baseline | `no_sync` improperly | `no_sync` | `accumulate`|
|
||||
| :---------: | :-------: | :------------------: | :-------: | :---------: |
|
||||
| Multi-Node | 2±0.01s | 2.13±0.08s | **0.91±0.11s** | **0.91±0.11s** |
|
||||
| Single Node | 0.50±0.01s | 0.50±0.01s | **0.41±0.015s** | **0.41±0.015s** |
|
||||
|
||||
As you can see, if you are not careful about how you set up your gradient synchronization, you can get more than a 2x slowdown during training!
|
||||
|
||||
If you are worried about making sure everything is done properly, we highly recommend utilizing the [`~Accelerator.accumulate`] function and passing in
|
||||
`gradient_accumulation_steps` or `gradient_accumulation_plugin` to the [`Accelerator`] object so Accelerate can handle this for you.
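For reference, a minimal sketch of that recommended setup (assuming `model`, `optimizer`, `dataloader`, and `loss_function` are already defined):

```python
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    with accelerator.accumulate(model):
        inputs, targets = batch
        outputs = model(inputs)
        loss = loss_function(outputs, targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```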
|
||||
72
docs/source/concept_guides/internal_mechanism.md
Normal file
72
docs/source/concept_guides/internal_mechanism.md
Normal file
@ -0,0 +1,72 @@
|
||||
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# 🤗 Accelerate's internal mechanisms
|
||||
|
||||
Internally, 🤗 Accelerate works by first analyzing the environment in which the script is launched to determine which
|
||||
kind of distributed setup is used, how many different processes there are and which one the current script is in. All
|
||||
that information is stored in the [`~AcceleratorState`].
|
||||
|
||||
This class is initialized the first time you instantiate an [`~Accelerator`] and performs any
|
||||
specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of
|
||||
[`~state.AcceleratorState`]. (The same can also be done with the [`PartialState`], a more barebones version from which it inherits.)
|
||||
|
||||
Then, when calling [`~Accelerator.prepare`], the library:
|
||||
|
||||
- wraps your model(s) in the container adapted for the distributed setup,
|
||||
- wraps your optimizer(s) in an [`~optimizer.AcceleratedOptimizer`],
|
||||
- wraps your scheduler(s) in an [`~scheduler.AcceleratedScheduler`]
|
||||
- creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`] or [`~data_loader.DataLoaderDispatcher`]
|
||||
|
||||
While the model(s), optimizer(s), and scheduler(s) are just put in simple wrappers, the dataloader(s) are re-created. This is mostly
|
||||
because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created and the
|
||||
library handles the sharding of your data between processes by changing that `batch_sampler` to yield every other
|
||||
`num_processes` batches (if enabled).
|
||||
|
||||
The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality:
|
||||
|
||||
- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any
|
||||
randomization (like shuffling) is done the exact same way across processes.
|
||||
- it puts the batches on the proper device before yielding them (unless you have opted out of
|
||||
`device_placement=True`).
|
||||
|
||||
The [`~data_loader.DataLoaderDispatcher`] subclass differs from the [`~data_loader.DataLoaderShard`] in that when iterating through the `DataLoader`, the data all starts from process 0 and is *then* split and sent off to each process rather than this happening at the dataset level.
|
||||
|
||||
The random number generator synchronization will by default synchronize:
|
||||
|
||||
- the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6
|
||||
- the main random number generator in PyTorch <=1.5.1
|
||||
|
||||
You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main
|
||||
[`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid
|
||||
setting the same seed in the main random number generator in all processes.
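For instance, a minimal sketch of restricting synchronization to the sampler's local generator:

```python
from accelerate import Accelerator

# Only synchronize the local `torch.Generator` of the sampler (PyTorch >= 1.6),
# leaving the main torch random number generator of each process untouched.
accelerator = Accelerator(rng_types=["generator"])
```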
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random
|
||||
artifacts you could have in your dataset (like random data augmentation) in the sense that all processes will get
|
||||
the same random numbers from the torch random modules (so will apply the same random data augmentation if it's
|
||||
controlled by torch).
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local
|
||||
`torch.Generator` object (in PyTorch >= 1.6), see the traditional `RandomSampler`, as an example.
|
||||
|
||||
</Tip>
|
||||
|
||||
For more details about the internals, see the [Internals page](package_reference/torch_wrappers).
|
||||
74
docs/source/concept_guides/low_precision_training.md
Normal file
74
docs/source/concept_guides/low_precision_training.md
Normal file
@ -0,0 +1,74 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Low Precision Training Methods
|
||||
|
||||
The release of new kinds of hardware led to the emergence of new training paradigms that better utilize them. Currently, this is in the form of training
|
||||
in 8-bit precision using packages such as [TransformersEngine](https://github.com/NVIDIA/TransformerEngine) (TE) or [MS-AMP](https://github.com/Azure/MS-AMP/tree/main).
|
||||
|
||||
For an introduction to the topics discussed today, we recommend reviewing the [low-precision usage guide](../usage_guides/low_precision_training.md) as this documentation will reference it regularly.
|
||||
|
||||
## A Quick Chart
|
||||
|
||||
Below is a quick chart from the MS-AMP documentation showing the different bit-precisions for each solution during training:
|
||||
|
||||
Optimization Level | Computation(GEMM) | Comm | Weight | Master Weight | Weight Gradient | Optimizer States
|
||||
-- | -- | -- | -- | -- | -- | --
|
||||
FP16 AMP | FP16 | FP32 | FP32 | N/A | FP32 | FP32+FP32
|
||||
Nvidia TE | FP8 | FP32 | FP32 | N/A | FP32 | FP32+FP32
|
||||
MS-AMP O1 | FP8 | FP8 | FP16 | N/A | FP8 | FP32+FP32
|
||||
MS-AMP O2 | FP8 | FP8 | FP16 | N/A | FP8 | FP8+FP16
|
||||
MS-AMP O3 | FP8 | FP8 | FP8 | FP16 | FP8 | FP8+FP16
|
||||
|
||||
## `TransformersEngine`
|
||||
|
||||
`TransformersEngine` is the first solution for training in 8-bit floating point. It works by using drop-in replacement layers for certain modules in a model that utilize its FP8 engine to reduce the number of bits (such as from 32 to 8) without degrading the final accuracy of the model.
|
||||
|
||||
Specifically, 🤗 Accelerate will find and replace the following layers with `TransformersEngine` versions:
|
||||
|
||||
* `nn.LayerNorm` for `te.LayerNorm`
|
||||
* `nn.Linear` for `te.Linear`
|
||||
|
||||
As a result, we wind up with a model that has most of its layers in BF16, while some layers are in FP8, reducing some of the memory usage.
|
||||
|
||||
Anecdotally, we have noticed that performance gains don't really start showing when using `TransformerEngine` until a large majority of the layers
|
||||
in the model are made up of those two replaceable layers. As a result, only larger models, with a parameter count of around a few billion and upwards, have shown performance improvements.
|
||||
|
||||
`TransformerEngine` can receive many different arguments that customize how it performs FP8 calculations. A full list of the arguments and what they do is available below:
|
||||
|
||||
* `margin`: The margin to use for the gradient scaling.
|
||||
* `interval`: The interval to use for how often the scaling factor is recomputed.
|
||||
* `fp8_format`: The format to use for the FP8 recipe. Must be one of `E4M3` or `HYBRID`.
|
||||
* `amax_history_len`: The length of the history to use for the scaling factor computation.
|
||||
* `amax_compute_algo`: The algorithm to use for the scaling factor computation. Must be one of `max` or `most_recent`.
|
||||
* `override_linear_precision`: Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision.
|
||||
|
||||
You can customize each of these as part of [`utils.FP8RecipeKwargs`] to help optimize performance of your models.
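For example, a sketch of passing a custom recipe (the argument names mirror the list above; exact support depends on your TransformerEngine version):

```python
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

fp8_kwargs = FP8RecipeKwargs(fp8_format="HYBRID", amax_history_len=32, amax_compute_algo="max")
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[fp8_kwargs])
```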
|
||||
|
||||
As we can see in the chart mentioned earlier, TE simply casts the computation layers into FP8, while everything else is in FP32. As a result, this winds up utilizing the most memory, but does so with the benefit of guaranteeing the least amount of loss in end accuracy during training.
|
||||
|
||||
## `MS-AMP`
|
||||
|
||||
MS-AMP takes a different approach from `TransformersEngine` by providing three different optimization levels to convert more operations to FP8 or FP16.
|
||||
|
||||
* The base optimization level (`O1`) passes communications of the weights (such as in DDP) in FP8, stores the weights of the model in FP16, and leaves the optimizer states in FP32. The main benefit of this optimization level is that we can reduce the communication bandwidth by essentially half. Additionally, more GPU memory is saved due to half of everything being cast to FP8 and the weights being cast to FP16. Notably, both of the optimizer states remain in FP32.
|
||||
|
||||
* The second optimization level (`O2`) improves upon this by also reducing the precision of the optimizer states. One is in FP8 while the other is in FP16. Generally, it's been shown that this provides a net gain: no degradation in end accuracy, increased training speed, and reduced memory, as now every state is either in FP16 or FP8.
|
||||
|
||||
* Finally, MS-AMP has a third optimization level (`O3`) which helps during DDP scenarios such as DeepSpeed. The weights of the model in memory are fully cast to FP8, and the master weights are now stored in FP16. This reduces memory by the highest factor, as now almost everything is in FP8 and only two states are left in FP16. Currently, only DeepSpeed versions up through 0.9.2 are supported, so this capability is not included in the 🤗 Accelerate integration.
|
||||
|
||||
## Combining the two
|
||||
|
||||
More experiments need to be performed, but it's been noted that combining both MS-AMP and TransformersEngine can lead to the highest throughput, by relying on NVIDIA's optimized FP8 operators and utilizing how MS-AMP reduces the memory overhead.
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Comparing performance between different device setups
|
||||
@ -18,7 +21,7 @@ and expect your results to line up.
|
||||
|
||||
But why?
|
||||
|
||||
There's three reasons for this that this tutorial will cover:
|
||||
There are three reasons for this that this tutorial will cover:
|
||||
|
||||
1. **Setting the right seeds**
|
||||
2. **Observed Batch Sizes**
|
||||
@ -26,10 +29,10 @@ There's three reasons for this that this tutorial will cover:
|
||||
|
||||
## Setting the Seed
|
||||
|
||||
While this issue has not come up as much, make sure to use [`utils.set_seed`] to fully set the seed in all distributed cases so training will be reproducable:
|
||||
While this issue has not come up as much, make sure to use [`utils.set_seed`] to fully set the seed in all distributed cases so training will be reproducible:
|
||||
|
||||
```python
|
||||
from accelerate import set_seed
|
||||
from accelerate.utils import set_seed
|
||||
|
||||
set_seed(42)
|
||||
```
|
||||
@ -58,7 +61,7 @@ The below table can be used as a quick reference to try out different batch size
|
||||
|
||||
<Tip>
|
||||
|
||||
In this example there are two GPUs for "Multi-GPU" and a TPU pod with 8 workers
|
||||
In this example, there are two GPUs for "Multi-GPU" and a TPU pod with 8 workers
|
||||
|
||||
</Tip>
|
||||
|
||||
@ -71,7 +74,7 @@ In this example there are two GPUs for "Multi-GPU" and a TPU pod with 8 workers
|
||||
|
||||
## Learning Rates
|
||||
|
||||
As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/tlt-mi_archive/clara-train-sdk-v2.0/nvmidl/appendix/training_with_multiple_gpus.html)], the learning rate should be scaled *linearly* based on the number of devices present. The below
|
||||
As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/clara-train-sdk/pt/model.html#classification-models-multi-gpu-training)], the learning rate should be scaled *linearly* based on the number of devices present. The below
|
||||
snippet shows doing so with Accelerate:
|
||||
|
||||
<Tip>
|
||||
@ -89,3 +92,12 @@ learning_rate *= accelerator.num_processes
|
||||
optimizer = AdamW(params=model.parameters(), lr=learning_rate)
|
||||
```
|
||||
|
||||
You will also find that `accelerate` will step the learning rate based on the number of processes being trained on. This is because
|
||||
of the observed batch size noted earlier. So in the case of 2 GPUs, the learning rate will be stepped twice as often as on a single GPU
|
||||
to account for the batch size being twice as large (if no changes to the batch size on the single GPU instance are made).
|
||||
|
||||
## Gradient Accumulation and Mixed Precision
|
||||
|
||||
When using gradient accumulation and mixed precision, due to how gradient averaging works (accumulation) and the precision loss (mixed precision),
|
||||
some degradation in performance is expected. This will be explicitly seen when comparing the batch-wise loss between different compute
|
||||
setups. However, the overall loss, metric, and general performance at the end of training should be _roughly_ the same.
|
||||
@ -8,11 +8,14 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Training on TPUs with 🤗 Accelerate
|
||||
|
||||
Training on TPUs can be slightly different than training on multi-gpu, even with 🤗 Accelerate. This guide aims to show you
|
||||
Training on TPUs can be slightly different from training on multi-gpu, even with 🤗 Accelerate. This guide aims to show you
|
||||
where you should be careful and why, as well as the best practices in general.
|
||||
|
||||
## Training in a Notebook
|
||||
@ -24,8 +27,8 @@ While on a TPU that last part is not as important, a critical part to understand
|
||||
When launching from the command-line, you perform **spawning**, where a python process is not currently running and you *spawn* a new process in. Since your Jupyter notebook is already
|
||||
utilizing a python process, you need to *fork* a new process from it to launch your code.
|
||||
|
||||
Where this becomes important is in regards to declaring your model. On forked TPU processes, it is recommended that you instantiate your model *once* and pass this into your
|
||||
training function. This is different than training on GPUs where you create `n` models that have their gradients synced and back-propagated at certain moments. Instead one
|
||||
Where this becomes important is in regard to declaring your model. On forked TPU processes, it is recommended that you instantiate your model *once* and pass this into your
|
||||
training function. This is different than training on GPUs where you create `n` models that have their gradients synced and back-propagated at certain moments. Instead, one
|
||||
model instance is shared between all the nodes and it is passed back and forth. This is important especially when training on low-resource TPUs such as those provided in Kaggle kernels or
|
||||
on Google Colaboratory.
|
||||
|
||||
@ -33,7 +36,7 @@ Below is an example of a training function passed to the [`notebook_launcher`] i
|
||||
|
||||
<Tip>
|
||||
|
||||
This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate/simple_nlp_example.ipynb) with slight
|
||||
This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb) with slight
|
||||
modifications for the sake of simplicity
|
||||
|
||||
</Tip>
|
||||
@ -134,7 +137,7 @@ At the base level, this is enabled when passing `mixed_precision="bf16"` to `Acc
|
||||
```python
|
||||
accelerator = Accelerator(mixed_precision="bf16")
|
||||
```
|
||||
By default this will cast `torch.float` and `torch.double` to `bfloat16` on TPUs.
|
||||
By default, this will cast `torch.float` and `torch.double` to `bfloat16` on TPUs.
|
||||
Specifically, this sets the environment variable `XLA_USE_BF16` to `1`.
|
||||
|
||||
There is a further configuration you can perform, which is setting the `XLA_DOWNCAST_BF16` environment variable. If set to `1`, then
|
||||
@ -161,4 +164,4 @@ new batch size after the first few iterations.
|
||||
|
||||
Just because the memory is allocated does not mean it will be used or that the batch size will increase when going back to your training dataloader.
|
||||
|
||||
</Tip>
|
||||
</Tip>
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Accelerate
|
||||
@ -55,7 +58,7 @@ accelerate launch {my_script.py}
|
||||
><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div>
|
||||
<p class="text-gray-700">Learn the basics and become familiar with using 🤗 Accelerate. Start here if you are using 🤗 Accelerate for the first time!</p>
|
||||
</a>
|
||||
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./usage_guides/gradient_accumulation"
|
||||
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./usage_guides/explore"
|
||||
><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
|
||||
<p class="text-gray-700">Practical guides to help you achieve a specific goal. Take a look at these guides to learn how to use 🤗 Accelerate to solve real-world problems.</p>
|
||||
</a>
|
||||
@ -8,12 +8,15 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Accelerator
|
||||
|
||||
The [`Accelerator`] is the main class provided by 🤗 Accelerate.
|
||||
It serves at the main entrypoint for the API.
|
||||
It serves as the main entry point for the API.
|
||||
|
||||
## Quick adaptation of your code
|
||||
|
||||
@ -45,7 +48,7 @@ you should search for and replace by the corresponding methods of your `accelera
|
||||
|
||||
### Printing
|
||||
|
||||
`print` statements should be replaced by [`~Accelerator.print`] to be printed once per process
|
||||
`print` statements should be replaced by [`~Accelerator.print`] to be printed once per process:
|
||||
|
||||
```diff
|
||||
- print("My thing I want to print!")
|
||||
@ -113,25 +116,55 @@ def do_my_thing():
|
||||
|
||||
### Synchronicity control
|
||||
|
||||
Use [`~Accelerator.wait_for_everyone`] to make sure all processes join that point before continuing. (Useful before a model save for instance)
|
||||
Use [`~Accelerator.wait_for_everyone`] to make sure all processes join that point before continuing. (Useful before a model save for instance).
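For instance, a minimal sketch of using it right before saving a checkpoint (the file name is a placeholder; the saving helpers are covered in the next section):

```python
# Make sure every process has finished its work (e.g. the last optimizer step)
# before writing a checkpoint.
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
accelerator.save(unwrapped_model.state_dict(), "my_state.pkl")
```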
|
||||
|
||||
### Saving and loading
|
||||
|
||||
Use [`~Accelerator.unwrap_model`] before saving to remove all special model wrappers added during the distributed process.
|
||||
|
||||
```python
|
||||
model = MyModel()
|
||||
model = accelerator.prepare(model)
|
||||
# Unwrap
|
||||
model = accelerator.unwrap_model(model)
|
||||
```
|
||||
|
||||
Use [`~Accelerator.save`] instead of `torch.save`:
|
||||
Use [`~Accelerator.save_model`] instead of `torch.save` to save a model. It will remove all model wrappers added during the distributed process, get the state_dict of the model and save it. The state_dict will be in the same precision as the model being trained.
|
||||
|
||||
```diff
|
||||
state_dict = model.state_dict()
|
||||
- torch.save(state_dict, "my_state.pkl")
|
||||
+ accelerator.save(state_dict, "my_state.pkl")
|
||||
+ accelerator.save_model(model, save_directory)
|
||||
```
|
||||
|
||||
[`~Accelerator.save_model`] can also save a model into sharded checkpoints or with safetensors format.
|
||||
Here is an example:
|
||||
|
||||
```python
|
||||
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
|
||||
```
|
||||
|
||||
#### 🤗 Transformers models
|
||||
|
||||
If you are using models from the [🤗 Transformers](https://huggingface.co/docs/transformers/) library, you can use the `.save_pretrained()` method.
|
||||
|
||||
```python
|
||||
from transformers import AutoModel
|
||||
|
||||
model = AutoModel.from_pretrained("bert-base-cased")
|
||||
model = accelerator.prepare(model)
|
||||
|
||||
# ...fine-tune with PyTorch...
|
||||
|
||||
unwrapped_model = accelerator.unwrap_model(model)
|
||||
unwrapped_model.save_pretrained(
|
||||
"path/to/my_model_directory",
|
||||
is_main_process=accelerator.is_main_process,
|
||||
save_function=accelerator.save,
|
||||
)
|
||||
```
|
||||
|
||||
This will ensure your model stays compatible with other 🤗 Transformers functionality like the `.from_pretrained()` method.
|
||||
|
||||
```python
|
||||
from transformers import AutoModel
|
||||
|
||||
model = AutoModel.from_pretrained("path/to/my_model_directory")
|
||||
```
|
||||
|
||||
### Operations
|
||||
@ -157,7 +190,22 @@ multi-device training, check if the step should actually be performed, and auto-
|
||||
scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
#### GradientAccumulationPlugin
|
||||
[[autodoc]] utils.GradientAccumulationPlugin
|
||||
|
||||
|
||||
Instead of passing `gradient_accumulation_steps`, you can instantiate a `GradientAccumulationPlugin` and pass it to the [`Accelerator`]'s `__init__`
|
||||
as `gradient_accumulation_plugin`. You can only pass one of `gradient_accumulation_plugin` or `gradient_accumulation_steps`; passing both will raise an error.
|
||||
```diff
|
||||
from accelerate.utils import GradientAccumulationPlugin
|
||||
|
||||
gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2)
|
||||
- accelerator = Accelerator()
|
||||
+ accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin)
|
||||
```
|
||||
|
||||
In addition to the number of steps, this also lets you configure whether or not you adjust your learning rate scheduler to account for the change in steps due to accumulation.
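For example, a sketch that keeps the scheduler stepping at its original cadence (assuming the `adjust_scheduler` flag behaves as described above):

```python
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# Accumulate over 2 steps, but do not stretch the learning rate scheduler to match.
plugin = GradientAccumulationPlugin(num_steps=2, adjust_scheduler=False)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
```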
|
||||
|
||||
## Overall API documentation:
|
||||
|
||||
[[autodoc]] Accelerator
|
||||
[[autodoc]] Accelerator
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Working with large models
|
||||
@ -16,9 +19,12 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
[[autodoc]] big_modeling.init_empty_weights
|
||||
[[autodoc]] big_modeling.cpu_offload
|
||||
[[autodoc]] big_modeling.cpu_offload_with_hook
|
||||
[[autodoc]] big_modeling.disk_offload
|
||||
[[autodoc]] big_modeling.dispatch_model
|
||||
[[autodoc]] big_modeling.load_checkpoint_and_dispatch
|
||||
[[autodoc]] big_modeling.load_checkpoint_in_model
|
||||
[[autodoc]] utils.infer_auto_device_map
|
||||
|
||||
## Model Hooks
|
||||
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# The Command Line
|
||||
@ -83,7 +86,7 @@ accelerate config update [arguments]
|
||||
|
||||
**Command**:
|
||||
|
||||
`accelerate env` or `accelerate-env`
|
||||
`accelerate env` or `accelerate-env` or `python -m accelerate.commands.env`
|
||||
|
||||
Lists the contents of the passed 🤗 Accelerate configuration file. Should always be used when opening an issue on the [GitHub repository](https://github.com/huggingface/accelerate).
|
||||
|
||||
@ -103,7 +106,7 @@ accelerate env [arguments]
|
||||
|
||||
**Command**:
|
||||
|
||||
`accelerate launch` or `accelerate-launch`
|
||||
`accelerate launch` or `accelerate-launch` or `python -m accelerate.commands.launch`
|
||||
|
||||
Launches a specified script on a distributed system with the right parameters.
|
||||
|
||||
@ -125,6 +128,8 @@ accelerate launch [arguments] {training_script} --{training_script-argument-1} -
|
||||
* `-m`, `--module` (`bool`) -- Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.
|
||||
* `--no_python` (`bool`) -- Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.
|
||||
* `--debug` (`bool`) -- Whether to print out the torch.distributed stack trace when something fails.
|
||||
* `-q`, `--quiet` (`bool`) -- Silence subprocess errors from the launch stack trace to only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations).
|
||||
|
||||
|
||||
The rest of these arguments are configured through `accelerate config` and are read in from the specified `--config_file` (or default configuration) for their
|
||||
values. They can also be passed in manually.
|
||||
@ -133,8 +138,8 @@ values. They can also be passed in manually.
|
||||
|
||||
* `--cpu` (`bool`) -- Whether or not to force the training on the CPU.
|
||||
* `--multi_gpu` (`bool`) -- Whether or not this should launch a distributed GPU training.
|
||||
* `--mps` (`bool`) -- Whether or not this should use MPS-enabled GPU device on MacOS machines.
|
||||
* `--tpu` (`bool`) -- Whether or not this should launch a TPU training.
|
||||
* `--ipex` (`bool`) -- Whether or not this should launch an Intel Pytorch Extension (IPEX) training.
|
||||
|
||||
**Resource Selection Arguments**:
|
||||
|
||||
@ -152,6 +157,7 @@ The following arguments are useful for selecting which training paradigm to use.
|
||||
* `--use_deepspeed` (`bool`) -- Whether or not to use DeepSpeed for training.
|
||||
* `--use_fsdp` (`bool`) -- Whether or not to use FullyShardedDataParallel for training.
|
||||
* `--use_megatron_lm` (`bool`) -- Whether or not to use Megatron-LM for training.
|
||||
* `--use_xpu` (`bool`) -- Whether to use IPEX plugin to speed up training on XPU specifically.
|
||||
|
||||
**Distributed GPU Arguments**:
|
||||
|
||||
@ -162,6 +168,7 @@ The following arguments are only useful when `multi_gpu` is passed or multi-gpu
|
||||
* `--machine_rank MACHINE_RANK` (`int`) -- The rank of the machine on which this script is launched.
|
||||
* `--main_process_ip MAIN_PROCESS_IP` (`str`) -- The IP address of the machine of rank 0.
|
||||
* `--main_process_port MAIN_PROCESS_PORT` (`int`) -- The port to use to communicate with the machine of rank 0.
|
||||
* `--rdzv_backend` (`str`) -- The rendezvous method to use, such as "static" or "c10d"
|
||||
* `--rdzv_conf` (`str`) -- Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).
|
||||
* `--max_restarts` (`int`) -- Maximum number of worker group restarts before failing.
|
||||
* `--monitor_interval` (`float`) -- Interval, in seconds, to monitor the state of workers.
|
||||
@ -192,7 +199,7 @@ The following arguments are only useful when `use_deepspeed` is passed or `deeps
|
||||
|
||||
**Fully Sharded Data Parallelism Arguments**:
|
||||
|
||||
The following arguments are only useful when `use_fdsp` is passed or Fully Sharded Data Parallelism is configured through `accelerate config`:
|
||||
The following arguments are only useful when `use_fsdp` is passed or Fully Sharded Data Parallelism is configured through `accelerate config`:
|
||||
|
||||
* `--fsdp_offload_params` (`str`) -- Decides Whether (true|false) to offload parameters and gradients to CPU.
|
||||
* `--fsdp_min_num_params` (`int`) -- FSDP's minimum number of parameters for Default Auto Wrapping.
|
||||
@ -211,7 +218,7 @@ The following arguments are only useful when `use_megatron_lm` is passed or Mega
|
||||
* `--megatron_lm_num_micro_batches` (``) -- Megatron-LM's number of micro batches when PP degree > 1.
|
||||
* `--megatron_lm_sequence_parallelism` (``) -- Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1.
|
||||
* `--megatron_lm_recompute_activations` (``) -- Decides Whether (true|false) to enable Selective Activation Recomputation.
|
||||
* `--megatron_lm_use_distributed_optimizer` (``) -- Decides Whether (true|false) to use distributed optimizer which shards optimizer state and gradients across Data Pralellel (DP) ranks.
|
||||
* `--megatron_lm_use_distributed_optimizer` (``) -- Decides Whether (true|false) to use distributed optimizer which shards optimizer state and gradients across Data Parallel (DP) ranks.
|
||||
* `--megatron_lm_gradient_clipping` (``) -- Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable).
|
||||
|
||||
**AWS SageMaker Arguments**:
|
||||
@ -221,6 +228,36 @@ The following arguments are only useful when training in SageMaker
|
||||
* `--aws_access_key_id AWS_ACCESS_KEY_ID` (`str`) -- The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job
|
||||
* `--aws_secret_access_key AWS_SECRET_ACCESS_KEY` (`str`) -- The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job
|
||||
|
||||
## accelerate estimate-memory
|
||||
|
||||
**Command**:
|
||||
|
||||
`accelerate estimate-memory` or `accelerate-estimate-memory` or `python -m accelerate.commands.estimate`
|
||||
|
||||
Estimates the total vRAM needed to load a particular model hosted on the Hub, with an estimate for training. Requires that `huggingface_hub` be installed.
|
||||
|
||||
<Tip>
|
||||
|
||||
When performing inference, typically add ≤20% to the result as overall allocation [as referenced here](https://blog.eleuther.ai/transformer-math/). We will have more extensive estimations in the future that will automatically be included in the calculation.
|
||||
|
||||
</Tip>
|
||||
|
||||
**Usage**:
|
||||
|
||||
```bash
|
||||
accelerate estimate-memory {MODEL_NAME} --library_name {LIBRARY_NAME} --dtypes {dtype_1} {dtype_2} ...
|
||||
```
|
||||
|
||||
**Required Arguments**:
|
||||
|
||||
* `MODEL_NAME` (`str`)-- The model name on the Hugging Face Hub
|
||||
|
||||
**Optional Arguments**:
|
||||
|
||||
* `--library_name {timm,transformers}` (`str`) -- The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub
|
||||
* `--dtypes {float32,float16,int8,int4}` (`[{float32,float16,int8,int4} ...]`) -- The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`
|
||||
* `--trust_remote_code` (`bool`) -- Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be passed for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.
|
||||
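For example, a concrete invocation following the usage above (using `bert-base-cased` purely as an illustrative model name) might look like:

```bash
accelerate estimate-memory bert-base-cased --library_name transformers --dtypes float32 float16
```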
## accelerate tpu-config

`accelerate tpu-config`
@@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Utilities for DeepSpeed
docs/source/package_reference/fsdp.md (new file, 18 lines)

@@ -0,0 +1,18 @@

<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Utilities for Fully Sharded Data Parallelism

[[autodoc]] utils.FullyShardedDataParallelPlugin
@@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Kwargs Handlers

@@ -15,11 +18,18 @@ specific language governing permissions and limitations under the License.

The following objects can be passed to the main [`Accelerator`] to customize how some PyTorch objects
related to distributed training or mixed precision are created.
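As a minimal sketch of how a handler is passed in (the `find_unused_parameters` value is purely illustrative), a kwargs handler goes to the [`Accelerator`] through its `kwargs_handlers` argument:

```python
from accelerate import Accelerator, DistributedDataParallelKwargs

# Let DDP tolerate parameters that receive no gradient during the forward pass
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
```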
## AutocastKwargs

[[autodoc]] AutocastKwargs

## DistributedDataParallelKwargs

[[autodoc]] DistributedDataParallelKwargs

## FP8RecipeKwargs

[[autodoc]] utils.FP8RecipeKwargs

## GradScalerKwargs

[[autodoc]] GradScalerKwargs
@@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Launchers
docs/source/package_reference/logging.md (new file, 21 lines)

@@ -0,0 +1,21 @@

<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Logging with Accelerate

Refer to the [Troubleshooting guide](../usage_guides/troubleshooting#logging) or to the example below to learn
how to use 🤗 Accelerate's logger.

[[autodoc]] logging.get_logger
@@ -1,34 +0,0 @@

<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Logging with Accelerate

Accelerate has its own logging utility to handle logging while in a distributed system.
To utilize this replace cases of `logging` with `accelerate.logging`:

```diff
- import logging
+ from accelerate.logging import get_logger
- logger = logging.getLogger(__name__)
+ logger = get_logger(__name__)
```

## Setting the log level

The log level can be set with the `ACCELERATE_LOG_LEVEL` environment variable or by passing
`log_level` to `get_logger`:

```python
from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="INFO")
```

[[autodoc]] logging.get_logger
@@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Utilities for Megatron-LM
@@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Stateful Classes

@@ -18,6 +21,8 @@ instances share the same state, which is initialized on the first instantiation.

These classes are immutable and store information about certain configurations or
states.

[[autodoc]] state.PartialState

[[autodoc]] state.AcceleratorState

[[autodoc]] state.GradientState
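As a rough sketch of how these stateful classes are typically queried (attribute names as exposed by `PartialState`; the print statement is illustrative):

```python
from accelerate import PartialState

state = PartialState()
# Every instance shares the same underlying state
if state.is_main_process:
    print(f"{state.num_processes} process(es), running on device {state.device}")
```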
@@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Wrapper classes for torch Dataloaders, Optimizers, and Schedulers

@@ -18,6 +21,7 @@ when calling [`~Accelerator.prepare`].

## Datasets and DataLoaders

[[autodoc]] data_loader.prepare_data_loader
[[autodoc]] data_loader.skip_first_batches

[[autodoc]] data_loader.BatchSamplerShard
[[autodoc]] data_loader.IterableDatasetShard
@@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Experiment Tracking

@@ -24,3 +27,9 @@ specific language governing permissions and limitations under the License.

    - __init__
[[autodoc]] tracking.CometMLTracker
    - __init__
[[autodoc]] tracking.AimTracker
    - __init__
[[autodoc]] tracking.MLflowTracker
    - __init__
[[autodoc]] tracking.ClearMLTracker
    - __init__
docs/source/package_reference/utilities.md (new file, 240 lines)

@@ -0,0 +1,240 @@
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Helpful Utilities

Below are a variety of utility functions that 🤗 Accelerate provides, broken down by use-case.

## Constants

Constants used throughout 🤗 Accelerate for reference

The following are constants used when utilizing [`Accelerator.save_state`]

`utils.MODEL_NAME`: `"pytorch_model"`
`utils.OPTIMIZER_NAME`: `"optimizer"`
`utils.RNG_STATE_NAME`: `"random_states"`
`utils.SCALER_NAME`: `"scaler.pt"`
`utils.SCHEDULER_NAME`: `"scheduler"`

The following are constants used when utilizing [`Accelerator.save_model`]

`utils.WEIGHTS_NAME`: `"pytorch_model.bin"`
`utils.SAFE_WEIGHTS_NAME`: `"model.safetensors"`
`utils.WEIGHTS_INDEX_NAME`: `"pytorch_model.bin.index.json"`
`utils.SAFE_WEIGHTS_INDEX_NAME`: `"model.safetensors.index.json"`
## Data Classes

These are basic dataclasses used throughout 🤗 Accelerate and they can be passed in as parameters.

### Standalone

These are standalone dataclasses used for checks, such as the type of distributed system being used

[[autodoc]] utils.ComputeEnvironment

[[autodoc]] utils.DistributedType

[[autodoc]] utils.DynamoBackend

[[autodoc]] utils.LoggerType

[[autodoc]] utils.PrecisionType

[[autodoc]] utils.RNGType

[[autodoc]] utils.SageMakerDistributedType

### Kwargs

These are configurable arguments for specific interactions throughout the PyTorch ecosystem that Accelerate handles under the hood.

[[autodoc]] utils.AutocastKwargs

[[autodoc]] utils.DistributedDataParallelKwargs

[[autodoc]] utils.FP8RecipeKwargs

[[autodoc]] utils.GradScalerKwargs

[[autodoc]] utils.InitProcessGroupKwargs
## Plugins

These are plugins that can be passed to the [`Accelerator`] object. While they are defined elsewhere in the documentation,
for convenience all of them are available to see here:

[[autodoc]] utils.DeepSpeedPlugin

[[autodoc]] utils.FullyShardedDataParallelPlugin

[[autodoc]] utils.GradientAccumulationPlugin

[[autodoc]] utils.MegatronLMPlugin

[[autodoc]] utils.TorchDynamoPlugin

## Configurations

These are classes which can be configured and passed through to the appropriate integration

[[autodoc]] utils.BnbQuantizationConfig

[[autodoc]] utils.ProjectConfiguration

## Environmental Variables

These are environmental variables that can be enabled for different use cases

* `ACCELERATE_DEBUG_MODE` (`str`): Whether to run accelerate in debug mode. More info available [here](../usage_guides/debug.md).
## Data Manipulation and Operations

These include data operations that mimic the same `torch` ops but can be used on distributed processes.

[[autodoc]] utils.broadcast

[[autodoc]] utils.broadcast_object_list

[[autodoc]] utils.concatenate

[[autodoc]] utils.convert_outputs_to_fp32

[[autodoc]] utils.convert_to_fp32

[[autodoc]] utils.gather

[[autodoc]] utils.gather_object

[[autodoc]] utils.listify

[[autodoc]] utils.pad_across_processes

[[autodoc]] utils.recursively_apply

[[autodoc]] utils.reduce

[[autodoc]] utils.send_to_device

[[autodoc]] utils.slice_tensors
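A hedged sketch of how these mirror their `torch` counterparts across processes (tensor shapes and variable names are purely illustrative):

```python
import torch
from accelerate import Accelerator
from accelerate.utils import gather, send_to_device

accelerator = Accelerator()

# Place a (possibly nested) batch on this process's device and compute something local...
batch = send_to_device({"x": torch.randn(8, 3)}, accelerator.device)
local_sum = batch["x"].sum().reshape(1)

# ...then collect the per-process results onto every process
all_sums = gather(local_sum)  # one element per process after gathering
```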
## Environment Checks

These functionalities check the state of the current working environment including information about the operating system itself, what it can support, and if particular dependencies are installed.

[[autodoc]] utils.is_bf16_available

[[autodoc]] utils.is_ipex_available

[[autodoc]] utils.is_mps_available

[[autodoc]] utils.is_npu_available

[[autodoc]] utils.is_torch_version

[[autodoc]] utils.is_tpu_available

[[autodoc]] utils.is_xpu_available

## Environment Manipulation

[[autodoc]] utils.patch_environment

[[autodoc]] utils.clear_environment

[[autodoc]] utils.write_basic_config

When setting up 🤗 Accelerate for the first time, rather than running `accelerate config` [`~utils.write_basic_config`] can be used as an alternative for quick configuration.
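A minimal sketch (the chosen mixed precision value is illustrative):

```python
from accelerate.utils import write_basic_config

# Writes a default config file for this machine without the interactive `accelerate config` prompts
write_basic_config(mixed_precision="bf16")
```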
## Memory

[[autodoc]] utils.find_executable_batch_size
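This utility is used as a decorator around an inner training function; a hedged sketch of the documented pattern (the starting batch size and function body are illustrative):

```python
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def inner_training_loop(batch_size):
    # On a CUDA out-of-memory error, the batch size is halved and this function is retried
    print(f"Trying batch size {batch_size}")
    ...

inner_training_loop()
```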
## Modeling

These utilities relate to interacting with PyTorch models

[[autodoc]] utils.calculate_maximum_sizes

[[autodoc]] utils.compute_module_sizes

[[autodoc]] utils.extract_model_from_parallel

[[autodoc]] utils.get_balanced_memory

[[autodoc]] utils.get_max_layer_size

[[autodoc]] utils.infer_auto_device_map

[[autodoc]] utils.load_checkpoint_in_model

[[autodoc]] utils.load_offloaded_weights

[[autodoc]] utils.load_state_dict

[[autodoc]] utils.offload_state_dict

[[autodoc]] utils.retie_parameters

[[autodoc]] utils.set_module_tensor_to_device

[[autodoc]] utils.shard_checkpoint
## Parallel

These include general utilities that should be used when working in parallel.

[[autodoc]] utils.extract_model_from_parallel

[[autodoc]] utils.save

[[autodoc]] utils.wait_for_everyone

## Random

These utilities relate to setting and synchronizing of all the random states.

[[autodoc]] utils.set_seed

[[autodoc]] utils.synchronize_rng_state

[[autodoc]] utils.synchronize_rng_states

## PyTorch XLA

These include utilities that are useful while using PyTorch with XLA.

[[autodoc]] utils.install_xla

## Loading model weights

These include utilities that are useful to load checkpoints.

[[autodoc]] utils.load_checkpoint_in_model

## Quantization

These include utilities that are useful to quantize a model.

[[autodoc]] utils.load_and_quantize_model
@ -1,95 +0,0 @@
|
||||
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Helpful Utilities
|
||||
|
||||
Below are a variety of utility functions that 🤗 Accelerate provides, broken down by use-case.
|
||||
|
||||
## Data Classes
|
||||
|
||||
These are basic dataclasses used throughout 🤗 Accelerate and they can be passed in as parameters.
|
||||
|
||||
[[autodoc]] utils.DistributedType
|
||||
|
||||
[[autodoc]] utils.LoggerType
|
||||
|
||||
[[autodoc]] utils.PrecisionType
|
||||
|
||||
## Data Manipulation and Operations
|
||||
|
||||
These include data operations that mimic the same `torch` ops but can be used on distributed processes.
|
||||
|
||||
[[autodoc]] utils.broadcast
|
||||
|
||||
[[autodoc]] utils.concatenate
|
||||
|
||||
[[autodoc]] utils.gather
|
||||
|
||||
[[autodoc]] utils.pad_across_processes
|
||||
|
||||
[[autodoc]] utils.reduce
|
||||
|
||||
[[autodoc]] utils.send_to_device
|
||||
|
||||
## Environment Checks
|
||||
|
||||
These functionalities check the state of the current working environment including information about the operating system itself, what it can support, and if particular dependencies are installed.
|
||||
|
||||
[[autodoc]] utils.is_bf16_available
|
||||
|
||||
[[autodoc]] utils.is_torch_version
|
||||
|
||||
[[autodoc]] utils.is_tpu_available
|
||||
|
||||
## Environment Configuration
|
||||
|
||||
[[autodoc]] utils.write_basic_config
|
||||
|
||||
When setting up 🤗 Accelerate for the first time, rather than running `accelerate config` [~utils.write_basic_config] can be used as an alternative for quick configuration.
|
||||
|
||||
## Memory
|
||||
|
||||
[[autodoc]] utils.get_max_memory
|
||||
|
||||
[[autodoc]] utils.find_executable_batch_size
|
||||
|
||||
## Modeling
|
||||
|
||||
These utilities relate to interacting with PyTorch models
|
||||
|
||||
[[autodoc]] utils.extract_model_from_parallel
|
||||
|
||||
[[autodoc]] utils.get_max_layer_size
|
||||
|
||||
[[autodoc]] utils.offload_state_dict
|
||||
|
||||
|
||||
## Parallel
|
||||
|
||||
These include general utilities that should be used when working in parallel.
|
||||
|
||||
[[autodoc]] utils.extract_model_from_parallel
|
||||
|
||||
[[autodoc]] utils.save
|
||||
|
||||
[[autodoc]] utils.wait_for_everyone
|
||||
|
||||
|
||||
## Random
|
||||
|
||||
These utilities relate to setting and synchronizing of all the random states.
|
||||
|
||||
[[autodoc]] utils.set_seed
|
||||
|
||||
[[autodoc]] utils.synchronize_rng_state
|
||||
|
||||
[[autodoc]] utils.synchronize_rng_states
|
||||
@ -8,17 +8,27 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Quick tour
|
||||
|
||||
Let's have a look at the 🤗 Accelerate main features and traps to avoid.
|
||||
This guide aims to help you get started with 🤗 Accelerate quickly. It covers the essential steps you need to take to
|
||||
enable distributed training, as well as the adjustments that you need to make in some common scenarios.
|
||||
|
||||
## Main use
|
||||
To help you navigate, the guide is split into two sections:
|
||||
* [Getting Started with 🤗 Accelerate](#getting-started-with--accelerate): start here to learn how to modify your script to enable distributed training with 🤗 Accelerate
|
||||
* [Common adaptations to the base case](#common-adaptations-to-the-base-case): check out this section for common deviations from the baseline scenario and what adjustments may need to be made to support them.
|
||||
|
||||
To use 🤗 Accelerate in your own script, you have to change four things:
|
||||
## Getting started with 🤗 Accelerate
|
||||
|
||||
1. Import the [`Accelerator`] main class and instantiate one in an `accelerator` object:
|
||||
### Enable distributed training in your script
|
||||
|
||||
To use 🤗 Accelerate in your own training script, you have to modify four things:
|
||||
|
||||
1. Import the [`Accelerator`] main class and instantiate one in an `accelerator` object.
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
@ -26,27 +36,27 @@ from accelerate import Accelerator
|
||||
accelerator = Accelerator()
|
||||
```
|
||||
|
||||
This should happen as early as possible in your training script as it will initialize everything necessary for
|
||||
distributed training. You don't need to indicate the kind of environment you are in (just one machine with a GPU, one
|
||||
machines with several GPUs, several machines with multiple GPUs or a TPU), the library will detect this automatically.
|
||||
Add this at the beginning of your training script as it will initialize everything necessary for distributed training.
|
||||
You don't need to indicate the kind of environment you are in (a single machine with a GPU, a machine with several GPUs,
|
||||
or several machines with multiple GPUs or a TPU), the library will detect this automatically.
|
||||
|
||||
2. Remove the call `.to(device)` or `.cuda()` for your model and input data. The `accelerator` object
|
||||
will handle this for you and place all those objects on the right device for you. If you know what you're doing, you
|
||||
can leave those `.to(device)` calls but you should use the device provided by the `accelerator` object:
|
||||
`accelerator.device`.
|
||||
2. Remove the `.to(device)` or `.cuda()` calls for your model and input data.
|
||||
|
||||
To fully deactivate the automatic device placement, pass along `device_placement=False` when initializing your
|
||||
[`Accelerator`].
|
||||
The `accelerator` object will handle placing these objects on the right device for you.
|
||||
If you choose to leave those `.to(device)` calls, make sure to use the device provided by the `accelerator` object: `accelerator.device`.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
If you place your objects manually on the proper device, be careful to create your optimizer after putting your
|
||||
You can fully deactivate the automatic device placement by passing along `device_placement=False` when
|
||||
initializing the [`Accelerator`].
|
||||
However, if you place your objects manually on the proper device, be careful to create your optimizer after putting your
|
||||
model on `accelerator.device` or your training will fail on TPU.
|
||||
|
||||
</Tip>
|
||||
|
||||
3. Pass all objects relevant to training (optimizer, model, training dataloader, learning rate scheduler) to the
|
||||
[`~Accelerator.prepare`] method. This will make sure everything is ready for training.
|
||||
3. Pass all PyTorch objects relevant to training (optimizer, model, dataloader(s), learning rate scheduler) to the
|
||||
[`~Accelerator.prepare`] method as soon as these objects are created, before starting your actual
|
||||
training loop:
|
||||
|
||||
```python
|
||||
model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
||||
@ -54,60 +64,42 @@ model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
||||
)
|
||||
```
|
||||
|
||||
In particular, your training dataloader will be sharded across all GPUs/TPU cores available so that each one sees a
|
||||
different portion of the training dataset. Also, the random states of all processes will be synchronized at the
|
||||
beginning of each iteration through your dataloader, to make sure the data is shuffled the same way (if you decided to
|
||||
use `shuffle=True` or any kind of random sampler).
|
||||
**Important notes**:
|
||||
|
||||
* You should always pass the learning rate scheduler to [`~Accelerator.prepare`], however if the scheduler should *not* be stepped at each optimization step, pass `step_with_optimizer=False` to the [`Accelerator`] init.
|
||||
* While you can send your dataloader to [`~Accelerator.prepare`] on its own (and there are cases for doing so, such as distributed inference), it's best to send it to [`~Accelerator.prepare`] together with the model and optimizer.
|
||||
* If you wish to run distributed evaluation, send your validation dataloader to [`~Accelerator.prepare`] as well. There are some nuances to distributed validation, check the [Distributed evaluation](#add-distributed-evaluation) section of the guide.
|
||||
* Any instruction using your training dataloader length (for instance if you want to log the number of total training
|
||||
steps) should go after the call to [`~Accelerator.prepare`].
|
||||
|
||||
Passing `DataLoader` objects to the [`~Accelerator.prepare`] method ensures that your dataloader will be sharded across
|
||||
all GPUs/TPU cores available so that each one sees a different portion of the training dataset. In other words, if there are 8 processes and a dataset of 64 items, each process will see 8 of these items per iteration. Also, the random states
|
||||
of all processes will be synchronized at the beginning of each iteration through your dataloader, to make sure the data
|
||||
is shuffled the same way (if you decided to use `shuffle=True` or any kind of random sampler).
|
||||
|
||||
<Tip>
|
||||
|
||||
The actual batch size for your training will be the number of devices used multiplied by the batch size you set in
|
||||
your script: for instance training on 4 GPUs with a batch size of 16 set when creating the training dataloader will
|
||||
train at an actual batch size of 64.
|
||||
|
||||
</Tip>
|
||||
|
||||
Alternatively, you can use the option `split_batches=True` when creating initializing your
|
||||
[`Accelerator`], in which case the batch size will always stay the same, whether your run your
|
||||
script on 1, 2, 4 or 64 GPUs.
|
||||
|
||||
You should execute this instruction as soon as all objects for training are created, before starting your actual
|
||||
training loop.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
You should only pass the learning rate scheduler to [`~Accelerator.prepare`] when the scheduler needs to be stepped
|
||||
at each optimizer step.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
your script. For instance, training on 4 GPUs with a batch size of 16 set when creating the training dataloader will
|
||||
train at an actual batch size of 64 (4 * 16).
|
||||
If you want the batch size remain the same regardless of how many GPUs the script is run on, you can use the
|
||||
option `split_batches=True` when creating and initializing [`Accelerator`].
|
||||
Your training dataloader may change length when going through this method: if you run on X GPUs, it will have its
|
||||
length divided by X (since your actual batch size will be multiplied by X), unless you set
|
||||
`split_batches=True`.
|
||||
|
||||
</Tip>
|
||||
|
||||
Any instruction using your training dataloader length (for instance if you want to log the number of total training
|
||||
steps) should go after the call to [`~Accelerator.prepare`].
|
||||
|
||||
You can perfectly send your dataloader to [`~Accelerator.prepare`] on its own, but it's best to send the
|
||||
model and optimizer to [`~Accelerator.prepare`] together.
|
||||
|
||||
You may or may not want to send your validation dataloader to [`~Accelerator.prepare`], depending on
|
||||
whether you want to run distributed evaluation or not (see below).
|
||||
|
||||
4. Replace the line `loss.backward()` by `accelerator.backward(loss)`.
|
||||
4. Replace the `loss.backward()` line with `accelerator.backward(loss)`.
|
||||
|
||||
And you're all set! With all these changes, your script will run on your local machine as well as on multiple GPUs or a
|
||||
TPU! You can either use your favorite tool to launch the distributed training, or you can use the 🤗 Accelerate
|
||||
launcher.
|
||||
|
||||
### Add distributed evaluation
|
||||
|
||||
## Distributed evaluation
|
||||
|
||||
You can perform regular evaluation in your training script, if you leave your validation dataloader out of the
|
||||
You can perform regular evaluation in your training script if you leave your validation dataloader out of the
|
||||
[`~Accelerator.prepare`] method. In this case, you will need to put the input data on the
|
||||
`accelerator.device` manually.
|
||||
|
||||
@ -118,9 +110,9 @@ method:
|
||||
validation_dataloader = accelerator.prepare(validation_dataloader)
|
||||
```
|
||||
|
||||
As for your training dataloader, it will mean that (should you run your script on multiple devices) each device will
|
||||
only see part of the evaluation data. This means you will need to group your predictions together. This is very easy to
|
||||
do with the [`~Accelerator.gather_for_metrics`] method.
|
||||
Same as with your training dataloader, each device will only see part of the evaluation data should you run your script
|
||||
on multiple devices. This means you will need to group your predictions together which you can do with
|
||||
the [`~Accelerator.gather_for_metrics`] method.
|
||||
|
||||
```python
|
||||
for inputs, targets in validation_dataloader:
|
||||
@ -139,11 +131,9 @@ for inputs, targets in validation_dataloader:
|
||||
|
||||
</Tip>
|
||||
|
||||
Any instruction using your training dataloader length (for instance if you need the number of total training steps
|
||||
to create a learning rate scheduler) should go after the call to [`~Accelerator.prepare`].
|
||||
|
||||
Some data at the end of the dataset may be duplicated so the batch can be divided equally among all workers. As a result, metrics
|
||||
should be calculated through the [`~Accelerator.gather_for_metrics`] method to automatically remove the duplicated data while gathering.
|
||||
Some data at the end of the dataset may be duplicated so the batch can be divided equally among all workers. As a result,
|
||||
metrics should be calculated through the [`~Accelerator.gather_for_metrics`] method to automatically remove the duplicated
|
||||
data while gathering and provide a more accurate metric.
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -162,37 +152,35 @@ should be calculated through the [`~Accelerator.gather_for_metrics`] method to a
|
||||
|
||||
</Tip>
|
||||
|
||||
### Launch your distributed script
|
||||
|
||||
You can use the regular commands to launch your distributed training (like `torch.distributed.launch` for
|
||||
PyTorch), they are fully compatible with 🤗 Accelerate. The only caveat here is that 🤗 Accelerate uses the environment
|
||||
to determine all useful information, so `torch.distributed.launch` should be used with the flag `--use_env`.
|
||||
You can use the regular commands to launch your distributed training (like `torch.distributed.run` for
|
||||
PyTorch) - they are fully compatible with 🤗 Accelerate.
|
||||
|
||||
🤗 Accelerate also provides a CLI tool that unifies all launchers, so you only have to remember one command. To use it,
|
||||
just run:
|
||||
Alternatively, 🤗 Accelerate provides a CLI tool that unifies all launchers, so you only have to remember one command. \
|
||||
To use it, run a quick configuration setup first on your machine and answer the questions:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
on your machine and reply to the questions asked. This will save a *default_config.yaml* file in your cache folder for
|
||||
🤗 Accelerate. That cache folder is (with decreasing order of priority):
|
||||
At the end of the setup, a *default_config.yaml* file will be saved in your cache folder for 🤗 Accelerate. That cache
|
||||
folder is (with decreasing order of priority):
|
||||
|
||||
- The content of your environment variable `HF_HOME` suffixed with *accelerate*.
|
||||
- If it does not exist, the content of your environment variable `XDG_CACHE_HOME` suffixed with
|
||||
*huggingface/accelerate*.
|
||||
- If this does not exist either, the folder *~/.cache/huggingface/accelerate*.
|
||||
|
||||
You can also specify with the flag `--config_file` the location of the file you want to save.
|
||||
|
||||
Once this is done, you can test everything is going well on your setup by running:
|
||||
By specifying the `--config_file` flag you can specify an alternative location of the configuration file.
|
||||
Once the configuration setup is complete, you can test your setup by running:
|
||||
|
||||
```bash
|
||||
accelerate test
|
||||
```
|
||||
|
||||
This will launch a short script that will test the distributed environment. If it runs fine, you are ready for the next
|
||||
step!
|
||||
This will launch a short script that will test the distributed environment. If it runs without issues, you are ready for
|
||||
the next step!
|
||||
|
||||
Note that if you specified a location for the config file in the previous step, you need to pass it here as well:
|
||||
|
||||
@ -206,25 +194,29 @@ Now that this is done, you can run your script with the following command:
|
||||
accelerate launch path_to_script.py --args_for_the_script
|
||||
```
|
||||
|
||||
If you stored the config file in a non-default location, you can indicate it to the launcher like this:
|
||||
|
||||
```bash
|
||||
accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script
|
||||
```
|
||||
|
||||
You can also override any of the arguments determined by your config file.
|
||||
To see the complete list of parameters that you can pass in, run `accelerate launch -h`.
|
||||
You can override any of the arguments determined by your config file. To see the complete list of parameters that you
|
||||
can pass in, run `accelerate launch -h`. (And further niche argument help by passing in partial commands, such as `accelerate launch --multi_gpu -h` for all `multi_gpu` args)
|
||||
|
||||
Check out the [Launch tutorial](basic_tutorials/launch) for more information about launching your scripts.
|
||||
|
||||
## Common modifications of the base case
|
||||
|
||||
## Launching training from a notebook
|
||||
The previous section covers the minimal essential steps to move a training script into a distributed setup with 🤗 Accelerate.
|
||||
Here we describe common modifications/deviations from the base case scenario and the adjustments you need to make to accommodate for them.
|
||||
|
||||
In Accelerate 0.3.0, a new [`notebook_launcher`] has been introduced to help you launch your training
|
||||
function from a notebook. This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training
|
||||
on several GPUs (if the machine on which you are running your notebook has them).
|
||||
### Launch distributed training from a notebook
|
||||
|
||||
Just define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a
|
||||
Accelerate has a [`notebook_launcher`] to help you launch your training function from a
|
||||
notebook. This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training on several GPUs and machines
|
||||
(if the machine on which you are running your notebook has them).
|
||||
|
||||
Define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a
|
||||
cell with the following code:
|
||||
|
||||
```python
|
||||
@ -240,10 +232,9 @@ notebook_launcher(training_function)
|
||||
|
||||
</Tip>
|
||||
|
||||
Check out the [Notebook Launcher tutorial](basic_tutorials/notebook) for more information about training on TPUs.
|
||||
|
||||
|
||||
### Specifics of training on TPU
|
||||
|
||||
If you want to launch your script on TPUs, there are a few caveats you should be aware of. Behind the scenes, the TPUs
|
||||
will create a graph of all the operations happening in your training step (forward pass, backward pass and optimizer
|
||||
@ -282,12 +273,7 @@ passed your model to [`~Accelerator.prepare`]) will break the tying. You will ne
|
||||
after. You can find an example of this in the [run_clm_no_trainer](https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py) script in
|
||||
the Transformers repository.
|
||||
|
||||
Check out the [TPU tutorial](concept_guides/training_tpu) for more information about training on TPUs.
|
||||
|
||||
|
||||
## Other caveats
|
||||
|
||||
We list here all smaller issues you could have in your script conversion and how to resolve them.
|
||||
Check out the [TPU tutorial](concept_guides/training_tpu) for more information about training on TPUs.
|
||||
|
||||
### Execute a statement only on one processes
|
||||
|
||||
@ -321,14 +307,14 @@ For printing statements you only want executed once per machine, you can just re
|
||||
`accelerator.print`.
|
||||
|
||||
|
||||
### Defer execution on multiple GPUs
|
||||
|
||||
When you run your usual script, instructions are executed in order. Using 🤗 Accelerate to deploy your script on several
|
||||
GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be
|
||||
faster than others.
|
||||
|
||||
You might need to wait for all processes to have reached a certain point before executing a given instruction. For
|
||||
instance, you shouldn't save a model before making sure every process is done with training. To do this, add the
|
||||
following line in your code:
|
||||
|
||||
```
|
||||
@ -339,38 +325,59 @@ This instruction will block all the processes that arrive first until all the ot
|
||||
point (if you run your script on just one GPU or CPU, this won't do anything).
|
||||
|
||||
|
||||
### Save/load a model in a distributed setup

Saving the model you trained might need a bit of adjustment: first you should wait for all processes to reach that
point in the script as shown above, and then, you should unwrap your model before saving it. This is because when going
through the [`~Accelerator.prepare`] method, your model may have been placed inside a bigger model,
which deals with the distributed training. This in turn means that saving your model state dictionary without taking
any precaution will take that potential extra layer into account, and you will end up with weights you can't load back
in your base model. The [`~Accelerator.save_model`] method will help you to achieve that. It will unwrap your model and save
the model state dictionary.

Here is an example:

```
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory)
```

The [`~Accelerator.save_model`] method can also save a model into sharded checkpoints or with safetensors format:

```python
accelerator.wait_for_everyone()
accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True)
```

If your script contains logic to load a checkpoint, we also recommend you load your weights in the unwrapped model
(this is only useful if you use the load function after making your model go through
[`~Accelerator.prepare`]). Here is an example:

```python
unwrapped_model = accelerator.unwrap_model(model)
path_to_checkpoint = os.path.join(save_directory, "pytorch_model.bin")
unwrapped_model.load_state_dict(torch.load(path_to_checkpoint))
```

Note that since all the model parameters are references to tensors, this will load your weights inside `model`.

If you want to load a sharded checkpoint or a checkpoint with safetensors format into the model with a specific `device`,
we recommend you load it with the [`~utils.load_checkpoint_in_model`] function. Here's an example:

```python
load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"": device})
```
|
||||
|
||||
|
||||
### Save/load entire states

When training your model, you may want to save the current state of the model, optimizer, random generators, and potentially
learning rate schedulers to be restored in the _same script_.
You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so.

To further customize where and how states are saved through [`~Accelerator.save_state`] the [`~utils.ProjectConfiguration`] class can be used. For example
if `automatic_checkpoint_naming` is enabled each saved checkpoint will then be located at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.

If you have registered any other stateful items to be stored through [`~Accelerator.register_for_checkpointing`] they will also be saved and/or loaded.
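A minimal sketch, with the checkpoint directory name chosen purely for illustration:

```python
# Save model, optimizer, RNG, and (optionally) scheduler state in one call
accelerator.save_state("checkpoints/step_1000")

# ...later in the same script, restore everything from that location
accelerator.load_state("checkpoints/step_1000")
```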
|
||||
|
||||
<Tip>
|
||||
@ -380,19 +387,19 @@ If you have registered any other stateful items to be stored through [`~Accelera
|
||||
</Tip>
|
||||
|
||||
|
||||
### Use gradient clipping
|
||||
|
||||
If you are using gradient clipping in your script, you should replace the calls to
|
||||
`torch.nn.utils.clip_grad_norm_` or `torch.nn.utils.clip_grad_value_` with [`~Accelerator.clip_grad_norm_`]
|
||||
and [`~Accelerator.clip_grad_value_`] respectively.
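A hedged sketch of what this looks like inside a training loop (assuming `model`, `optimizer`, and the dataloader have already gone through [`~Accelerator.prepare`]; the max norm of 1.0 is chosen purely for illustration):

```python
for inputs, targets in train_dataloader:
    optimizer.zero_grad()
    loss = loss_function(model(inputs), targets)
    accelerator.backward(loss)
    # Instead of torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
    accelerator.clip_grad_norm_(model.parameters(), max_norm=1.0)
    optimizer.step()
```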
|
||||
|
||||
|
||||
### Train with mixed precision
|
||||
|
||||
If you are running your training in Mixed Precision with 🤗 Accelerate, you will get the best result with your loss being
|
||||
computed inside your model (like in Transformer models for instance). Every computation outside of the model will be
|
||||
executed in full precision (which is generally what you want for loss computation, especially if it involves a
|
||||
softmax). However, you might want to put your loss computation inside the [`~Accelerator.autocast`] context manager:
|
||||
|
||||
```
|
||||
with accelerator.autocast():
|
||||
@ -413,7 +420,7 @@ if not accelerator.optimizer_step_was_skipped:
|
||||
lr_scheduler.step()
|
||||
```
|
||||
|
||||
### Use gradient accumulation
|
||||
|
||||
To perform gradient accumulation use [`~Accelerator.accumulate`] and specify a `gradient_accumulation_steps`.
|
||||
This will also automatically ensure the gradients are synced or unsynced when on multi-device training, check if the step should
|
||||
@ -432,70 +439,3 @@ for input, label in training_dataloader:
|
||||
scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
### DeepSpeed
|
||||
|
||||
DeepSpeed support is experimental, so the underlying API will evolve in the near future and may have some slight
|
||||
breaking changes. In particular, 🤗 Accelerate does not support DeepSpeed config you have written yourself yet, this
|
||||
will be added in a next version.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
The [`notebook_launcher`] does not support the DeepSpeed integration yet.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Internal mechanism
|
||||
|
||||
Internally, the library works by first analyzing the environment in which the script is launched to determine which
|
||||
kind of distributed setup is used, how many different processes there are and which one the current script is in. All
|
||||
that information is stored in the [`~AcceleratorState`].
|
||||
|
||||
This class is initialized the first time you instantiate an [`~Accelerator`] as well as performing any
|
||||
specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of
|
||||
[`~state.AcceleratorState`].
|
||||
|
||||
Then, when calling [`~Accelerator.prepare`], the library:
|
||||
|
||||
- wraps your model(s) in the container adapted for the distributed setup,
|
||||
- wraps your optimizer(s) in a [`~optimizer.AcceleratedOptimizer`],
|
||||
- creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`].
|
||||
|
||||
While the model(s) and optimizer(s) are just put in simple wrappers, the dataloader(s) are re-created. This is mostly
|
||||
because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created and the
|
||||
library handles the sharding of your data between processes by changing that `batch_sampler` to yield every other
|
||||
`num_processes` batches.
|
||||
|
||||
The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality:
|
||||
|
||||
- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any
|
||||
randomization (like shuffling) is done the exact same way across processes.
|
||||
- it puts the batches on the proper device before yielding them (unless you have opted out of
|
||||
`device_placement=True`).
|
||||
|
||||
The random number generator synchronization will by default synchronize:
|
||||
|
||||
- the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6
|
||||
- the main random number generator in PyTorch <=1.5.1
|
||||
|
||||
You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main
|
||||
[`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid
|
||||
setting the same seed in the main random number generator in all processes.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random
|
||||
artifacts you could have in your dataset (like random data augmentation) in the sense that all processes will get
|
||||
the same random numbers from the torch random modules (so will apply the same random data augmentation if it's
|
||||
controlled by torch).
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local
|
||||
`torch.Generator` object (in PyTorch >= 1.6), see the traditional `RandomSampler`, as an example.
|
||||
|
||||
</Tip>
|
||||
|
||||
For more details about the internals, see the [Internals page](package_reference/torch_wrappers).
|
||||
150
docs/source/usage_guides/big_modeling.md
Normal file
150
docs/source/usage_guides/big_modeling.md
Normal file
@ -0,0 +1,150 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Handling big models for inference
|
||||
|
||||
One of the biggest advancements 🤗 Accelerate provides is the concept of [large model inference](../concept_guides/big_model_inference) wherein you can perform *inference* on models that cannot fully fit on your graphics card.
|
||||
|
||||
This tutorial will be broken down into two parts showcasing how to use both 🤗 Accelerate and 🤗 Transformers (a higher API-level) to make use of this idea.
|
||||
|
||||
## Using 🤗 Accelerate
|
||||
|
||||
For these tutorials, we'll assume a typical workflow for loading your model in such that:
|
||||
|
||||
```py
|
||||
import torch
|
||||
|
||||
my_model = ModelClass(...)
|
||||
state_dict = torch.load(checkpoint_file)
|
||||
my_model.load_state_dict(state_dict)
|
||||
```
|
||||
|
||||
Note that here we assume that `ModelClass` is a model that takes up more video-card memory than what can fit on your device (be it `mps` or `cuda`).
|
||||
|
||||
The first step is to init an empty skeleton of the model which won't take up any RAM using the [`init_empty_weights`] context manager:
|
||||
|
||||
```py
|
||||
from accelerate import init_empty_weights
|
||||
with init_empty_weights():
|
||||
my_model = ModelClass(...)
|
||||
```
|
||||
|
||||
With this `my_model` currently is "parameterless", hence leaving the smaller footprint than what one would normally get loading this onto the CPU directly.
|
||||
|
||||
Next we need to load in the weights to our model so we can perform inference.
|
||||
|
||||
For this we will use [`load_checkpoint_and_dispatch`], which as the name implies will load a checkpoint inside your empty model and dispatch the weights for each layer across all the devices you have available (GPU/MPS and CPU RAM).
|
||||
|
||||
To determine how this `dispatch` can be performed, generally specifying `device_map="auto"` will be good enough as 🤗 Accelerate
|
||||
will attempt to fill all the space in your GPU(s), then loading them to the CPU, and finally if there is not enough RAM it will be loaded to the disk (the absolute slowest option).
|
||||
|
||||
<Tip>
|
||||
|
||||
For more details on designing your own device map, see this section of the [concept guide](../concept_guide/big_model_inference#designing-a-device-map)
|
||||
|
||||
</Tip>
|
||||
|
||||
See an example below:
|
||||
|
||||
```py
|
||||
from accelerate import load_checkpoint_and_dispatch
|
||||
|
||||
model = load_checkpoint_and_dispatch(
|
||||
model, checkpoint=checkpoint_file, device_map="auto"
|
||||
)
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
If there are certain "chunks" of layers that shouldn't be split, you can pass them in as `no_split_module_classes`. Read more about it [here](../concept_guides/big_model_inference#loading-weights)
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
Also to save on memory (such as if the `state_dict` will not fit in RAM), a model's weights can be divided and split into multiple checkpoint files. Read more about it [here](../concept_guides/big_model_inference#sharded-checkpoints)
|
||||
|
||||
</Tip>
|
||||
|
||||
Now that the model is dispatched fully, you can perform inference as normal with the model:
|
||||
|
||||
```py
|
||||
input = torch.randn(2,3)
|
||||
input = input.to("cuda")
|
||||
output = model(input)
|
||||
```
|
||||
|
||||
What will happen now is each time the input gets passed through a layer, it will be sent from the CPU to the GPU (or disk to CPU to GPU), the output is calculated, and then the layer is pulled back off the GPU going back down the line. While this adds some overhead to the inference being performed, through this method it is possible to run **any size model** on your system, as long as the largest layer is capable of fitting on your GPU.
|
||||
|
||||
<Tip>
|
||||
|
||||
Multiple GPUs can be utilized, however this is considered "model parallelism" and as a result only one GPU will be active at a given moment, waiting for the prior one to send it the output. You should launch your script normally with `python`
|
||||
and not need `torchrun`, `accelerate launch`, etc.
|
||||
|
||||
</Tip>
|
||||
|
||||
For a visual representation of this, check out the animation below:
|
||||
|
||||
<Youtube id="MWCSGj9jEAo" />
|
||||
|
||||
### Complete Example
|
||||
|
||||
Below is the full example showcasing what we performed above:
|
||||
|
||||
```py
|
||||
import torch
|
||||
from accelerate import init_empty_weights, load_checkpoint_and_dispatch
|
||||
|
||||
with init_empty_weights():
|
||||
model = MyModel(...)
|
||||
|
||||
model = load_checkpoint_and_dispatch(
|
||||
model, checkpoint=checkpoint_file, device_map="auto"
|
||||
)
|
||||
|
||||
input = torch.randn(2, 3)
|
||||
input = input.to("cuda")
|
||||
output = model(input)
|
||||
```
|
||||
|
||||
## Using 🤗 Transformers, 🤗 Diffusers, and other 🤗 Open Source Libraries
|
||||
|
||||
Libraries that support 🤗 Accelerate big model inference include all of the earlier logic in their `from_pretrained` constructors.
|
||||
|
||||
These operate by specifying a string representing the model to download from the [🤗 Hub](https://hf.co/models) and then denoting `device_map="auto"` along with a few extra parameters.
|
||||
|
||||
As a brief example, we will look at using `transformers` and loading in Big Science's T0pp model.
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForSeq2SeqLM
|
||||
|
||||
model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto")
|
||||
```
|
||||
|
||||
After loading the model, the initial steps from before to prepare a model have all been performed and the model is fully
ready to make use of all the resources in your machine. Through these constructors, you can also save *more* memory by
specifying the precision the model is loaded in, through the `torch_dtype` parameter, such as:
|
||||
|
||||
```py
|
||||
import torch

from transformers import AutoModelForSeq2SeqLM
|
||||
|
||||
model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto", torch_dtype=torch.float16)
|
||||
```
|
||||
|
||||
To learn more about this, check out the 🤗 Transformers documentation available [here](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading).
|
||||
|
||||
## Where to go from here
|
||||
|
||||
For a much more detailed look at big model inference, be sure to check out the [Conceptual Guide on it](../concept_guides/big_model_inference).
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Checkpointing
|
||||
@ -17,27 +20,31 @@ saving and loading the model, optimizer, RNG generators, and the GradScaler. Ins
|
||||
- Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location
|
||||
- Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state`
|
||||
|
||||
To further customize where and how states are saved through [`~Accelerator.save_state`], the [`~utils.ProjectConfiguration`] class can be used. For example,
if `automatic_checkpoint_naming` is enabled, each saved checkpoint will be located at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.
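
As a minimal sketch of this (the `project_dir` value is just an example), the configuration is passed to the [`Accelerator`] through `project_config`:

```python
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

# Checkpoints will land in my/save/path/checkpoints/checkpoint_{i}
config = ProjectConfiguration(automatic_checkpoint_naming=True)
accelerator = Accelerator(project_dir="my/save/path", project_config=config)

accelerator.save_state()
```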
|
||||
|
||||
It should be noted that the expectation is that those states come from the same training script; they should not be from two separate scripts.
|
||||
|
||||
- By using [`~Accelerator.register_for_checkpointing`], you can register custom objects to be automatically stored or loaded from the two prior functions,
|
||||
so long as the object has a `state_dict` **and** a `load_state_dict` functionality. This could include objects such as a learning rate scheduler.
|
||||
|
||||
|
||||
Below is a brief example using checkpointing to save and reload a state during training:
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
import torch
|
||||
|
||||
accelerator = Accelerator(project_dir="my/save/path")
|
||||
|
||||
my_scheduler = torch.optim.lr_scheduler.StepLR(my_optimizer, step_size=1, gamma=0.99)
|
||||
my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)
|
||||
|
||||
# Register the LR scheduler
|
||||
accelerator.register_for_checkpointing(my_scheduler)
|
||||
|
||||
# Save the starting state
|
||||
accelerator.save_state()
|
||||
|
||||
device = accelerator.device
|
||||
my_model.to(device)
|
||||
@ -55,6 +62,35 @@ for epoch in range(num_epochs):
|
||||
my_optimizer.step()
|
||||
my_scheduler.step()
|
||||
|
||||
# Restore the previous state
accelerator.load_state("my/save/path/checkpointing/checkpoint_0")
|
||||
```
|
||||
|
||||
## Restoring the state of the DataLoader
|
||||
|
||||
After resuming from a checkpoint, it may also be desirable to resume from a particular point in the active `DataLoader` if
|
||||
the state was saved during the middle of an epoch. You can use [`~Accelerator.skip_first_batches`] to do so.
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
|
||||
accelerator = Accelerator(project_dir="my/save/path")
|
||||
|
||||
train_dataloader = accelerator.prepare(train_dataloader)
|
||||
accelerator.load_state("my_state")
|
||||
|
||||
# Assume the checkpoint was saved 100 steps into the epoch
|
||||
skipped_dataloader = accelerator.skip_first_batches(train_dataloader, 100)
|
||||
|
||||
# After the first iteration, go back to `train_dataloader`
|
||||
|
||||
# First epoch
|
||||
for batch in skipped_dataloader:
|
||||
# Do something
|
||||
pass
|
||||
|
||||
# Second epoch
|
||||
for batch in train_dataloader:
|
||||
# Do something
|
||||
pass
|
||||
```
|
||||
@ -8,11 +8,14 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# DeepSpeed
|
||||
|
||||
[DeepSpeed](https://github.com/microsoft/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Some of the salient optimizations are:
|
||||
|
||||
1. Optimizer state partitioning (ZeRO stage 1)
|
||||
2. Gradient partitioning (ZeRO stage 2)
|
||||
@ -20,28 +23,29 @@ specific language governing permissions and limitations under the License.
|
||||
4. Custom mixed precision training handling
|
||||
5. A range of fast CUDA-extension-based optimizers
|
||||
6. ZeRO-Offload to CPU and Disk/NVMe
|
||||
7. Hierarchical partitioning of model parameters (ZeRO++)
|
||||
|
||||
ZeRO-Offload has its own dedicated paper: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840). And NVMe-support is described in the paper [ZeRO-Infinity: Breaking the GPU
|
||||
Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857).
|
||||
|
||||
DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no use to inference.
|
||||
|
||||
DeepSpeed ZeRO-3 can be used for inference as well since it allows huge models to be loaded on multiple GPUs, which
|
||||
won't be possible on a single GPU.
|
||||
|
||||
🤗 Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:
|
||||
|
||||
1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config`. You just supply your custom config file or use our template. Most of
this document is focused on this feature. This supports all the core features of DeepSpeed and gives the user a lot of flexibility.
The user may have to change a few lines of code depending on the config.
2. Integration via `deepspeed_plugin`. This supports a subset of the DeepSpeed features and uses default options for the rest of the configurations.
The user need not change any code, and this option is good for those who are fine with most of the default settings of DeepSpeed (a minimal sketch follows below).
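
As a rough sketch of the second option (the values below are purely illustrative, and the script is still started with `accelerate launch`), a `DeepSpeedPlugin` can be built in code and handed to the [`Accelerator`]:

```python
from accelerate import Accelerator, DeepSpeedPlugin

# Anything not set on the plugin falls back to DeepSpeed's defaults
deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)
accelerator = Accelerator(mixed_precision="fp16", deepspeed_plugin=deepspeed_plugin)
```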
|
||||
|
||||
## What is integrated?
|
||||
|
||||
Training:
|
||||
|
||||
1. 🤗 Accelerate integrates all features of DeepSpeed ZeRO. This includes all the ZeRO stages 1, 2 and 3 as well as ZeRO-Offload, ZeRO-Infinity (which can offload to disk/NVMe) and ZeRO++.
|
||||
Below is a short description of Data Parallelism using ZeRO - Zero Redundancy Optimizer along with a diagram from this [blog post](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/)
|
||||

|
||||
|
||||
@ -57,7 +61,9 @@ Below is a short description of Data Parallelism using ZeRO - Zero Redundancy Op
|
||||
|
||||
e. **Param Offload**: Offloads the model parameters to CPU/Disk building on top of ZeRO Stage 3

f. **Hierarchical Partitioning**: Enables efficient multi-node training with data-parallel training across nodes and ZeRO-3 sharding within a node, built on top of ZeRO Stage 3.

<u>Note</u>: With respect to Disk Offload, the disk should be an NVMe for decent speed but it technically works on any disk
|
||||
|
||||
Inference:
|
||||
|
||||
@ -71,8 +77,8 @@ Inference:
|
||||
**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Installation details](https://github.com/microsoft/DeepSpeed#installation)
|
||||
for more information.
|
||||
|
||||
We will first look at the easy-to-use integration via `accelerate config`,
followed by the more flexible and feature-rich `deepspeed config file` integration.
|
||||
|
||||
### Accelerate DeepSpeed Plugin
|
||||
On your machine(s) just run:
|
||||
@ -154,7 +160,7 @@ Currently, `Accelerate` supports following config through the CLI:
|
||||
`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.
|
||||
`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3.
|
||||
`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.
|
||||
`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training.
|
||||
```
|
||||
To be able to tweak more options, you will need to use a DeepSpeed config file.
|
||||
|
||||
@ -165,8 +171,8 @@ On your machine(s) just run:
|
||||
accelerate config
|
||||
```
|
||||
|
||||
and answer the questions asked. It will ask whether you want to use a config file for DeepSpeed, to which you answer yes
and provide the path to the DeepSpeed config file.
|
||||
This will generate a config file that will be used automatically to properly set the
|
||||
default options when doing
|
||||
|
||||
@ -346,17 +352,38 @@ accelerate launch examples/by_feature/deepspeed_with_config_support.py \
|
||||
--report_to "wandb"\
|
||||
```
|
||||
|
||||
**ZeRO++ Config Example**
|
||||
You can use the features of ZeRO++ by using the appropriate config parameters. Note that ZeRO++ is an extension of ZeRO Stage 3. Here is how the config file can be modified, from [DeepSpeed's ZeRO++ tutorial](https://www.deepspeed.ai/tutorials/zeropp/):
|
||||
|
||||
```json
|
||||
{
|
||||
"zero_optimization": {
|
||||
"stage": 3,
|
||||
"reduce_bucket_size": "auto",
|
||||
|
||||
"zero_quantized_weights": true,
|
||||
"zero_hpz_partition_size": 8,
|
||||
"zero_quantized_gradients": true,
|
||||
|
||||
"contiguous_gradients": true,
|
||||
"overlap_comm": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
For hierarchical partitioning, the partition size `zero_hpz_partition_size` should ideally be set to the number of GPUs per node. (For example, the above config file assumes 8 GPUs per node)
|
||||
|
||||
**Important code changes when using DeepSpeed Config File**
|
||||
|
||||
1. DeepSpeed Optimizers and Schedulers. For more information on these,
|
||||
see the [DeepSpeed Optimizers](https://deepspeed.readthedocs.io/en/latest/optimizers.html) and [DeepSpeed Schedulers](https://deepspeed.readthedocs.io/en/latest/schedulers.html) documentation.
|
||||
We will look at the changes needed in the code when using these.
|
||||
|
||||
a. DS Optim + DS Scheduler: The case when both `optimizer` and `scheduler` keys are present in the DeepSpeed config file.
In this situation, those will be used and the user has to use `accelerate.utils.DummyOptim` and `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom optimizers and schedulers in their code.
|
||||
Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
|
||||
```python
|
||||
# Creates Dummy Optimizer if `optimizer` was specified in the config file else creates Adam Optimizer
|
||||
optimizer_cls = (
|
||||
torch.optim.AdamW
|
||||
if accelerator.state.deepspeed_plugin is None
|
||||
@ -365,7 +392,7 @@ We will look at the changes needed in the code when using these.
|
||||
)
|
||||
optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate)
|
||||
|
||||
# Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler
|
||||
if (
|
||||
accelerator.state.deepspeed_plugin is None
|
||||
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
|
||||
@ -385,16 +412,217 @@ We will look at the changes needed in the code when using these.
|
||||
In this situation, no code changes are needed from the user and this is the case when using integration via DeepSpeed Plugin.
|
||||
In the above example we can see that the code remains unchanged if the `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.
|
||||
|
||||
c. Custom Optim + DS Scheduler: The case when only `scheduler` key is present in the DeepSpeed config file.
In this situation, the user has to use `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom scheduler in their code.
|
||||
|
||||
d. DS Optim + Custom Scheduler: The case when only `optimizer` key is present in the DeepSpeed config file.
|
||||
This will result in an error because you can only use DS Scheduler when using DS Optim.
|
||||
|
||||
2. Notice the `auto` values in the above example DeepSpeed config files. These are automatically handled by the `prepare` method
based on the model, dataloaders, dummy optimizer and dummy schedulers provided to the `prepare` method.
Only the `auto` fields specified in the above examples are handled by the `prepare` method and the rest have to be explicitly specified by the user.
|
||||
|
||||
The `auto` values are calculated as:
|
||||
|
||||
- `reduce_bucket_size`: `hidden_size * hidden_size`
|
||||
- `stage3_prefetch_bucket_size`: `0.9 * hidden_size * hidden_size`
|
||||
- `stage3_param_persistence_threshold`: `10 * hidden_size`
|
||||
|
||||
For the `auto` feature to work for these 3 config entries - Accelerate will use `model.config.hidden_size` or `max(model.config.hidden_sizes)` as `hidden_size`. If neither of these is available, the launching will fail and you will have to set these 3 config entries manually. Remember the first 2 config entries are the communication buffers - the larger they are the more efficient the comms will be, and the larger they are the more GPU memory they will consume, so it's a tunable performance trade-off.
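
As a concrete illustration, for a hypothetical model with `hidden_size = 4096`, filling these three entries in manually (rounding `0.9 * 4096 * 4096` down) would amount to:

```json
"zero_optimization": {
    "reduce_bucket_size": 16777216,
    "stage3_prefetch_bucket_size": 15099494,
    "stage3_param_persistence_threshold": 40960
}
```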
|
||||
|
||||
|
||||
**Things to note when using DeepSpeed Config File**
|
||||
|
||||
Below is a sample script using `deepspeed_config_file` in different scenarios.
|
||||
|
||||
Code `test.py`:
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
from accelerate.state import AcceleratorState
|
||||
|
||||
|
||||
def main():
|
||||
accelerator = Accelerator()
|
||||
accelerator.print(f"{AcceleratorState()}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
```
|
||||
|
||||
**Scenario 1**: A manually tampered accelerate config file that has `deepspeed_config_file` along with other entries.
|
||||
|
||||
1. Content of the `accelerate` config:
|
||||
|
||||
```yaml
|
||||
command_file: null
|
||||
commands: null
|
||||
compute_environment: LOCAL_MACHINE
|
||||
deepspeed_config:
|
||||
gradient_accumulation_steps: 1
|
||||
gradient_clipping: 1.0
|
||||
offload_optimizer_device: 'cpu'
|
||||
offload_param_device: 'cpu'
|
||||
zero3_init_flag: true
|
||||
zero3_save_16bit_model: true
|
||||
zero_stage: 3
|
||||
deepspeed_config_file: 'ds_config.json'
|
||||
distributed_type: DEEPSPEED
|
||||
downcast_bf16: 'no'
|
||||
dynamo_backend: 'NO'
|
||||
fsdp_config: {}
|
||||
gpu_ids: null
|
||||
machine_rank: 0
|
||||
main_process_ip: null
|
||||
main_process_port: null
|
||||
main_training_function: main
|
||||
megatron_lm_config: {}
|
||||
num_machines: 1
|
||||
num_processes: 2
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_name: null
|
||||
tpu_zone: null
|
||||
use_cpu: false
|
||||
```
|
||||
|
||||
2. `ds_config.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"bf16": {
|
||||
"enabled": true
|
||||
},
|
||||
"zero_optimization": {
|
||||
"stage": 3,
|
||||
"stage3_gather_16bit_weights_on_model_save": false,
|
||||
"offload_optimizer": {
|
||||
"device": "none"
|
||||
},
|
||||
"offload_param": {
|
||||
"device": "none"
|
||||
}
|
||||
},
|
||||
"gradient_clipping": 1.0,
|
||||
"train_batch_size": "auto",
|
||||
"train_micro_batch_size_per_gpu": "auto",
|
||||
"gradient_accumulation_steps": 10,
|
||||
"steps_per_print": 2000000
|
||||
}
|
||||
```
|
||||
|
||||
3. Output of `accelerate launch test.py`:
|
||||
|
||||
```bash
|
||||
ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored:
|
||||
['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device',
|
||||
'zero3_save_16bit_model', 'mixed_precision'].
|
||||
Please specify them appropriately in the DeepSpeed config file.
|
||||
If you are using an accelerate config file, remove others config variables mentioned in the above specified list.
|
||||
The easiest method is to create a new config following the questionnaire via `accelerate config`.
|
||||
It will only ask for the necessary config variables when using `deepspeed_config_file`.
|
||||
```
|
||||
|
||||
**Scenario 2**: Use the solution suggested by the error to create a new accelerate config and check that no ambiguity error is thrown.
|
||||
|
||||
1. Run `accelerate config`:
|
||||
|
||||
```bash
|
||||
$ accelerate config
|
||||
-------------------------------------------------------------------------------------------------------------------------------
|
||||
In which compute environment are you running?
|
||||
This machine
|
||||
-------------------------------------------------------------------------------------------------------------------------------
|
||||
Which type of machine are you using?
|
||||
multi-GPU
|
||||
How many different machines will you use (use more than 1 for multi-node training)? [1]:
|
||||
Do you wish to optimize your script with torch dynamo?[yes/NO]:
|
||||
Do you want to use DeepSpeed? [yes/NO]: yes
|
||||
Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes
|
||||
Please enter the path to the json DeepSpeed config file: ds_config.json
|
||||
Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes
|
||||
How many GPU(s) should be used for distributed training? [1]:4
|
||||
accelerate configuration saved at ds_config_sample.yaml
|
||||
```
|
||||
|
||||
2. Content of the `accelerate` config:
|
||||
|
||||
```yaml
|
||||
compute_environment: LOCAL_MACHINE
|
||||
deepspeed_config:
|
||||
deepspeed_config_file: ds_config.json
|
||||
zero3_init_flag: true
|
||||
distributed_type: DEEPSPEED
|
||||
downcast_bf16: 'no'
|
||||
dynamo_backend: 'NO'
|
||||
fsdp_config: {}
|
||||
machine_rank: 0
|
||||
main_training_function: main
|
||||
megatron_lm_config: {}
|
||||
num_machines: 1
|
||||
num_processes: 4
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
use_cpu: false
|
||||
```
|
||||
|
||||
3. Output of `accelerate launch test.py`:
|
||||
|
||||
```bash
|
||||
Distributed environment: DEEPSPEED Backend: nccl
|
||||
Num processes: 4
|
||||
Process index: 0
|
||||
Local process index: 0
|
||||
Device: cuda:0
|
||||
Mixed precision type: bf16
|
||||
ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}
|
||||
```
|
||||
|
||||
**Scenario 3**: Set the `accelerate launch` command arguments related to DeepSpeed to `"auto"` in the DeepSpeed configuration file and check that things work as expected.
|
||||
|
||||
1. New `ds_config.json` with `"auto"` for the `accelerate launch` DeepSpeed command arguments:
|
||||
|
||||
```json
|
||||
{
|
||||
"bf16": {
|
||||
"enabled": "auto"
|
||||
},
|
||||
"zero_optimization": {
|
||||
"stage": "auto",
|
||||
"stage3_gather_16bit_weights_on_model_save": "auto",
|
||||
"offload_optimizer": {
|
||||
"device": "auto"
|
||||
},
|
||||
"offload_param": {
|
||||
"device": "auto"
|
||||
}
|
||||
},
|
||||
"gradient_clipping": "auto",
|
||||
"train_batch_size": "auto",
|
||||
"train_micro_batch_size_per_gpu": "auto",
|
||||
"gradient_accumulation_steps": "auto",
|
||||
"steps_per_print": 2000000
|
||||
}
|
||||
```
|
||||
|
||||
2. Output of `accelerate launch --mixed_precision="fp16" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device="cpu" --offload_optimizer_device="nvme" --zero3_save_16bit_model="true" test.py`:
|
||||
|
||||
```bash
|
||||
Distributed environment: DEEPSPEED Backend: nccl
|
||||
Num processes: 4
|
||||
Process index: 0
|
||||
Local process index: 0
|
||||
Device: cuda:0
|
||||
Mixed precision type: fp16
|
||||
ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}}
|
||||
```
|
||||
|
||||
**Note**:
|
||||
1. The remaining `"auto"` values are handled in the `accelerator.prepare()` call as explained in point 2 of
`Important code changes when using DeepSpeed Config File`.
|
||||
2. Only when `gradient_accumulation_steps` is `auto`, the value passed while creating `Accelerator` object via `Accelerator(gradient_accumulation_steps=k)` will be used. When using DeepSpeed Plugin, the value from it will be used and it will overwrite the value passed while creating Accelerator object.
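
A small sketch of the second point: the value below only takes effect if the DeepSpeed config file sets `gradient_accumulation_steps` to `"auto"`:

```python
from accelerate import Accelerator

# Used only when the DeepSpeed config file has "gradient_accumulation_steps": "auto";
# otherwise the value from the config file or the DeepSpeed plugin wins.
accelerator = Accelerator(gradient_accumulation_steps=4)
```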
|
||||
|
||||
## Saving and loading
|
||||
|
||||
1. Saving and loading of models is unchanged for ZeRO Stage-1 and Stage-2.
|
||||
@ -404,7 +632,7 @@ ZeRO Stage-3 has 2 options:
|
||||
|
||||
a. Saving the entire 16bit model weights to directly load later on using `model.load_state_dict(torch.load("pytorch_model.bin"))`.
|
||||
For this, either set `zero_optimization.stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed Config file or set
|
||||
`zero3_save_16bit_model` to True in DeepSpeed Plugin.
|
||||
**Note that this option requires consolidation of the weights on one GPU, so it can be slow and memory demanding; only use this feature when needed.**
|
||||
Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
|
||||
```python
|
||||
@ -433,10 +661,10 @@ ZeRO Stage-3 has 2 options:
|
||||
logging.info(f"Success {status_msg}")
|
||||
else:
|
||||
logging.warning(f"Failure {status_msg}")
|
||||
```
|
||||
```
|
||||
This will create ZeRO model and optimizer partitions along with `zero_to_fp32.py` script in checkpoint directory.
|
||||
You can use this script to do offline consolidation.
|
||||
It requires no configuration files or GPUs. Here is an example of its usage:
|
||||
You can use this script to do offline consolidation.
|
||||
It requires no configuration files or GPUs. Here is an example of its usage:
|
||||
```bash
|
||||
$ cd /path/to/checkpoint_dir
|
||||
$ ./zero_to_fp32.py . pytorch_model.bin
|
||||
@ -460,7 +688,7 @@ ZeRO Stage-3 has 2 options:
|
||||
Note that all these functions require ~2x memory (general RAM) of the size of the final checkpoint.
|
||||
|
||||
## ZeRO Inference
|
||||
DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity.
|
||||
It uses the same ZeRO protocol as training, but it doesn't use an optimizer or a learning rate scheduler, and only stage 3 is relevant.
|
||||
With accelerate integration, you just need to prepare the model and dataloader as shown below:
|
||||
|
||||
@ -468,11 +696,11 @@ With accelerate integration, you just need to prepare the model and dataloader a
|
||||
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
|
||||
```
|
||||
|
||||
## Few caveats to be aware of
|
||||
|
||||
1. Current integration doesn’t support Pipeline Parallelism of DeepSpeed.
|
||||
2. Current integration doesn’t support `mpu`, limiting the tensor parallelism which is supported in Megatron-LM.
|
||||
3. Current integration doesn’t support multiple models.
|
||||
|
||||
|
||||
## DeepSpeed Resources
|
||||
|
||||
@ -488,7 +716,8 @@ Papers:
|
||||
- [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054)
|
||||
- [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840)
|
||||
- [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857)
|
||||
- [ZeRO++: Extremely Efficient Collective Communication for Giant Model Training](https://arxiv.org/abs/2306.10209)
|
||||
|
||||
Finally, please remember that 🤗 `Accelerate` only integrates DeepSpeed, therefore if you
|
||||
have any problems or questions with regards to DeepSpeed usage, please, file an issue with [DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues).
|
||||
|
||||
docs/source/usage_guides/distributed_inference.md
@ -0,0 +1,136 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Distributed Inference with 🤗 Accelerate
|
||||
|
||||
Distributed inference is a common use case, especially with natural language processing (NLP) models. Users often want to
|
||||
send a number of different prompts, each to a different GPU, and then get the results back. This also has use cases
outside of just NLP; however, for this tutorial we will focus on this idea of each GPU receiving a different prompt
and then returning the results.
|
||||
|
||||
## The Problem
|
||||
|
||||
Normally when doing this, users send the model to a specific device to load it from the CPU, and then move each prompt to a different device.
|
||||
|
||||
A basic pipeline using the `diffusers` library might look something like so:
|
||||
|
||||
```python
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
|
||||
```
|
||||
This is then followed by performing inference based on the specific prompt:
|
||||
|
||||
```python
|
||||
def run_inference(rank, world_size):
|
||||
dist.init_process_group("nccl", rank=rank, world_size=world_size)
|
||||
pipe.to(rank)
|
||||
|
||||
if torch.distributed.get_rank() == 0:
|
||||
prompt = "a dog"
|
||||
elif torch.distributed.get_rank() == 1:
|
||||
prompt = "a cat"
|
||||
|
||||
result = pipe(prompt).images[0]
|
||||
result.save(f"result_{rank}.png")
|
||||
```
|
||||
One will notice how we have to check the rank to know what prompt to send, which can be a bit tedious.
|
||||
|
||||
A user might then also think that with 🤗 Accelerate, using the `Accelerator` to prepare a dataloader for such a task might also be
|
||||
a simple way to manage this. (To learn more, check out the relevant section in the [Quick Tour](../quicktour#distributed-evaluation))
|
||||
|
||||
Can it manage it? Yes. Does it add unneeded extra code however: also yes.
|
||||
|
||||
## The Solution
|
||||
|
||||
With 🤗 Accelerate, we can simplify this process by using the [`Accelerator.split_between_processes`] context manager (which also exists in `PartialState` and `AcceleratorState`).
|
||||
This function will automatically split whatever data you pass to it (be it a prompt, a set of tensors, a dictionary of the prior data, etc.) across all the processes (with the potential
for padding) for you to use right away.
|
||||
|
||||
Let's rewrite the above example using this context manager:
|
||||
|
||||
```python
|
||||
import torch

from accelerate import PartialState  # Can also be Accelerator or AcceleratorState
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
|
||||
distributed_state = PartialState()
|
||||
pipe.to(distributed_state.device)
|
||||
|
||||
# Assume two processes
|
||||
with distributed_state.split_between_processes(["a dog", "a cat"]) as prompt:
|
||||
result = pipe(prompt).images[0]
|
||||
result.save(f"result_{distributed_state.process_index}.png")
|
||||
```
|
||||
|
||||
And then to launch the code, we can use 🤗 Accelerate:
|
||||
|
||||
If you have generated a config file with `accelerate config`:
|
||||
|
||||
```bash
|
||||
accelerate launch distributed_inference.py
|
||||
```
|
||||
|
||||
If you have a specific config file you want to use:
|
||||
|
||||
```bash
|
||||
accelerate launch --config_file my_config.json distributed_inference.py
|
||||
```
|
||||
|
||||
Or if you don't want to make any config files and want to launch on two GPUs:
|
||||
|
||||
> Note: You will get some warnings about values being guessed based on your system. To remove these you can do `accelerate config default` or go through `accelerate config` to create a config file.
|
||||
|
||||
```bash
|
||||
accelerate launch --num_processes 2 distributed_inference.py
|
||||
```
|
||||
|
||||
We've now reduced the boilerplate code needed to split this data to a few lines of code quite easily.
|
||||
|
||||
But what if we have an odd distribution of prompts to GPUs? For example, what if we have 3 prompts, but only 2 GPUs?
|
||||
|
||||
Under the context manager, the first GPU would receive the first two prompts and the second GPU the third, ensuring that
|
||||
all prompts are split and no overhead is needed.
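
Continuing the earlier sketch (same `pipe` and `distributed_state` as above), that scenario looks like:

```python
# Assume two processes and three prompts
with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"]) as prompt:
    # Process 0 receives ["a dog", "a cat"]; process 1 receives ["a chicken"]
    results = pipe(prompt).images
```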
|
||||
|
||||
*However*, what if we then wanted to do something with the results of *all the GPUs*? (Say gather them all and perform some kind of post processing)
|
||||
You can pass in `apply_padding=True` to ensure that the lists of prompts are padded to the same length, with extra data being taken
|
||||
from the last sample. This way all GPUs will have the same number of prompts, and you can then gather the results.
|
||||
|
||||
<Tip>
|
||||
|
||||
This is only needed when trying to perform an action such as gathering the results, where the data on each device
|
||||
needs to be the same length. Basic inference does not require this.
|
||||
|
||||
</Tip>
|
||||
|
||||
For instance:
|
||||
|
||||
```python
|
||||
import torch

from accelerate import PartialState  # Can also be Accelerator or AcceleratorState
|
||||
from diffusers import DiffusionPipeline
|
||||
|
||||
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
|
||||
distributed_state = PartialState()
|
||||
pipe.to(distributed_state.device)
|
||||
|
||||
# Assume two processes
|
||||
with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"], apply_padding=True) as prompt:
|
||||
result = pipe(prompt).images
|
||||
```
|
||||
|
||||
On the first GPU, the prompts will be `["a dog", "a cat"]`, and on the second GPU it will be `["a chicken", "a chicken"]`.
|
||||
Make sure to drop the final sample, as it will be a duplicate of the previous one.
|
||||
docs/source/usage_guides/explore.md
@ -0,0 +1,51 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Learning how to incorporate 🤗 Accelerate features quickly!
|
||||
|
||||
Please use the interactive tool below to help you get started with learning about a particular
|
||||
feature of 🤗 Accelerate and how to utilize it! It will provide you with a code diff, an explanation
|
||||
towards what is going on, as well as provide you with some useful links to explore more within
|
||||
the documentation!
|
||||
|
||||
Most code examples start from the following python code before integrating 🤗 Accelerate in some way:
|
||||
|
||||
```python
|
||||
for batch in dataloader:
|
||||
optimizer.zero_grad()
|
||||
inputs, targets = batch
|
||||
inputs = inputs.to(device)
|
||||
targets = targets.to(device)
|
||||
outputs = model(inputs)
|
||||
loss = loss_function(outputs, targets)
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
scheduler.step()
|
||||
```
|
||||
|
||||
<div class="block dark:hidden">
|
||||
<iframe
|
||||
src="https://hf-accelerate-accelerate-examples.hf.space?__theme=light"
|
||||
width="850"
|
||||
height="1600"
|
||||
></iframe>
|
||||
</div>
|
||||
<div class="hidden dark:block">
|
||||
<iframe
|
||||
src="https://hf-accelerate-accelerate-examples.hf.space?__theme=dark"
|
||||
width="850"
|
||||
height="1600"
|
||||
></iframe>
|
||||
</div>
|
||||
docs/source/usage_guides/fsdp.md
@ -0,0 +1,170 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Fully Sharded Data Parallel
|
||||
|
||||
To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model.
|
||||
This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters.
|
||||
To read more about it and the benefits, check out the [Fully Sharded Data Parallel blog](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/).
|
||||
We have integrated PyTorch's latest Fully Sharded Data Parallel (FSDP) training feature.
|
||||
All you need to do is enable it through the config.
|
||||
|
||||
## How it works out of the box
|
||||
|
||||
On your machine(s) just run:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
and answer the questions asked. This will generate a config file that will be used automatically to properly set the
|
||||
default options when doing
|
||||
|
||||
```bash
|
||||
accelerate launch my_script.py --args_to_my_script
|
||||
```
|
||||
|
||||
For instance, here is how you would run `examples/nlp_example.py` (from the root of the repo) with FSDP enabled:
|
||||
|
||||
```yaml
|
||||
compute_environment: LOCAL_MACHINE
|
||||
debug: false
|
||||
distributed_type: FSDP
|
||||
downcast_bf16: 'no'
|
||||
fsdp_config:
|
||||
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
||||
fsdp_backward_prefetch_policy: BACKWARD_PRE
|
||||
fsdp_forward_prefetch: false
|
||||
fsdp_cpu_ram_efficient_loading: true
|
||||
fsdp_offload_params: false
|
||||
fsdp_sharding_strategy: FULL_SHARD
|
||||
fsdp_state_dict_type: SHARDED_STATE_DICT
|
||||
fsdp_sync_module_states: true
|
||||
fsdp_transformer_layer_cls_to_wrap: BertLayer
|
||||
fsdp_use_orig_params: true
|
||||
machine_rank: 0
|
||||
main_training_function: main
|
||||
mixed_precision: bf16
|
||||
num_machines: 1
|
||||
num_processes: 2
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_env: []
|
||||
tpu_use_cluster: false
|
||||
tpu_use_sudo: false
|
||||
use_cpu: false
|
||||
```
|
||||
|
||||
```bash
|
||||
accelerate launch examples/nlp_example.py
|
||||
```
|
||||
|
||||
Currently, `Accelerate` supports the following config through the CLI:
|
||||
|
||||
`fsdp_sharding_strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has full copy)
|
||||
|
||||
`fsdp_offload_params`: Decides whether to offload parameters and gradients to CPU
|
||||
|
||||
`fsdp_auto_wrap_policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP
|
||||
|
||||
`fsdp_transformer_layer_cls_to_wrap`: Only applicable for 🤗 Transformers. When using `fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP`, a user may provide a comma-separated string of transformer layer class names (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput`. This is important because submodules that share weights (e.g., embedding layers) should not end up in different FSDP wrapped units. Using this policy, wrapping happens for each block containing Multi-Head Attention followed by a couple of MLP layers. Remaining layers, including the shared embeddings, are conveniently wrapped in the same outermost FSDP unit. Therefore, use this for transformer-based models. You can rely on `model._no_split_modules` for 🤗 Transformers models by answering `yes` to the question about using the model's `_no_split_modules` to wrap; it will then try to use `model._no_split_modules` when possible.
|
||||
|
||||
`fsdp_min_num_params`: minimum number of parameters when using `fsdp_auto_wrap_policy=SIZE_BASED_WRAP`.
|
||||
|
||||
`fsdp_backward_prefetch_policy`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH
|
||||
|
||||
`fsdp_forward_prefetch`: If True, then FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass. Should only be used for static-graph models since the prefetching follows the first iteration's execution order, i.e., if the sub-modules' order changes dynamically during the model's execution, do not enable this feature.
|
||||
|
||||
`fsdp_state_dict_type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT
|
||||
|
||||
`fsdp_use_orig_params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. This setting is useful in cases such as parameter-efficient fine-tuning as discussed in [this post](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). This option also allows one to have multiple optimizer param groups. This should be `True` when creating an optimizer before preparing/wrapping the model with FSDP.
|
||||
|
||||
`fsdp_cpu_ram_efficient_loading`: Only applicable for 🤗 Transformers models. If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. This should be set to False if you experience errors when loading the pretrained 🤗 Transformers model via the `from_pretrained` method. When this setting is True, `fsdp_sync_module_states` also must be True, otherwise all the processes except the main process would have random weights, leading to unexpected behaviour during training.
|
||||
|
||||
`fsdp_sync_module_states`: If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0.
|
||||
|
||||
|
||||
For additional and more nuanced control, you can specify other FSDP parameters via `FullyShardedDataParallelPlugin`.
When creating the `FullyShardedDataParallelPlugin` object, pass it the parameters that weren't part of the accelerate config or that you want to override.
The FSDP parameters will be picked based on the accelerate config file or launch command arguments, and the other parameters that you pass directly to the `FullyShardedDataParallelPlugin` object will set/override those.
|
||||
|
||||
Below is an example:
|
||||
|
||||
```py
|
||||
from accelerate import Accelerator, FullyShardedDataParallelPlugin
|
||||
from torch.distributed.fsdp.fully_sharded_data_parallel import FullOptimStateDictConfig, FullStateDictConfig
|
||||
|
||||
fsdp_plugin = FullyShardedDataParallelPlugin(
|
||||
state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),
|
||||
optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False),
|
||||
)
|
||||
|
||||
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
|
||||
```
|
||||
|
||||
## Saving and loading
|
||||
|
||||
The new recommended way of checkpointing when using FSDP models is to use `SHARDED_STATE_DICT` as `StateDictType` when setting up the accelerate config.
|
||||
Below is the code snippet to save using the `save_state` utility of accelerate.
|
||||
|
||||
```py
|
||||
accelerator.save_state("ckpt")
|
||||
```
|
||||
|
||||
Inspect the checkpoint folder to see model and optimizer as shards per process:
|
||||
```
|
||||
ls ckpt
|
||||
# optimizer_0 pytorch_model_0 random_states_0.pkl random_states_1.pkl scheduler.bin
|
||||
|
||||
cd ckpt
|
||||
|
||||
ls optimizer_0
|
||||
# __0_0.distcp __1_0.distcp
|
||||
|
||||
ls pytorch_model_0
|
||||
# __0_0.distcp __1_0.distcp
|
||||
```
|
||||
|
||||
To load them back for resuming the training, use the `load_state` utility of accelerate:
|
||||
|
||||
```py
|
||||
accelerator.load_state("ckpt")
|
||||
```
|
||||
|
||||
When using transformers `save_pretrained`, pass `state_dict=accelerator.get_state_dict(model)` to save the model state dict.
|
||||
Below is an example:
|
||||
|
||||
```diff
|
||||
unwrapped_model.save_pretrained(
|
||||
args.output_dir,
|
||||
is_main_process=accelerator.is_main_process,
|
||||
save_function=accelerator.save,
|
||||
+ state_dict=accelerator.get_state_dict(model),
|
||||
)
|
||||
```
|
||||
|
||||
### State Dict
|
||||
|
||||
`accelerator.get_state_dict` will call the underlying `model.state_dict` implementation using `FullStateDictConfig(offload_to_cpu=True, rank0_only=True)` context manager to get the state dict only for rank 0 and it will be offloaded to CPU.
|
||||
|
||||
You can then pass this state dict into the `save_pretrained` method via its `state_dict` argument. There are several modes for `StateDictType` and `FullStateDictConfig` that you can use to control the behavior of `state_dict`. For more information, see the [PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html).
|
||||
|
||||
## A few caveats to be aware of
|
||||
|
||||
- In case of multiple models, pass the optimizers to the prepare call in the same order as corresponding models else `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour.
|
||||
- This feature is incompatible with `--predict_with_generate` in the `run_translation.py` script of 🤗 `Transformers` library.
|
||||
|
||||
For more control, users can leverage the `FullyShardedDataParallelPlugin`. After creating an instance of this class, users can pass it to the Accelerator class instantiation.
|
||||
For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.
|
||||
@ -1,125 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Fully Sharded Data Parallel
|
||||
|
||||
To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model.
|
||||
This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters.
|
||||
To read more about it and the benefits, check out the [Fully Sharded Data Parallel blog](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/).
|
||||
We have integrated the latest PyTorch's Fully Sharded Data Parallel (FSDP) training feature.
|
||||
All you need to do is enable it through the config.
|
||||
|
||||
## How it works out of the box
|
||||
|
||||
On your machine(s) just run:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
and answer the questions asked. This will generate a config file that will be used automatically to properly set the
|
||||
default options when doing
|
||||
|
||||
```bash
|
||||
accelerate launch my_script.py --args_to_my_script
|
||||
```
|
||||
|
||||
For instance, here is how you would run the NLP example (from the root of the repo) with FSDP enabled:
|
||||
|
||||
```bash
|
||||
compute_environment: LOCAL_MACHINE
|
||||
deepspeed_config: {}
|
||||
distributed_type: FSDP
|
||||
downcast_bf16: 'no'
|
||||
fsdp_config:
|
||||
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
||||
fsdp_backward_prefetch_policy: BACKWARD_PRE
|
||||
fsdp_offload_params: false
|
||||
fsdp_sharding_strategy: 1
|
||||
fsdp_state_dict_type: FULL_STATE_DICT
|
||||
fsdp_transformer_layer_cls_to_wrap: GPT2Block
|
||||
machine_rank: 0
|
||||
main_process_ip: null
|
||||
main_process_port: null
|
||||
main_training_function: main
|
||||
mixed_precision: 'no'
|
||||
num_machines: 1
|
||||
num_processes: 2
|
||||
use_cpu: false
|
||||
```
|
||||
|
||||
```bash
|
||||
accelerate launch examples/nlp_example.py
|
||||
```
|
||||
|
||||
Currently, `Accelerate` supports the following config through the CLI:
|
||||
|
||||
```bash
|
||||
`Sharding Strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD
|
||||
`Offload Params`: Decides Whether to offload parameters and gradients to CPU
|
||||
`Auto Wrap Policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP
|
||||
`Transformer Layer Class to Wrap`: When using `TRANSFORMER_BASED_WRAP`, user specifies transformer layer class name (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block`...
|
||||
`Min Num Params`: minimum number of parameters when using `SIZE_BASED_WRAP`
|
||||
`Backward Prefetch`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH
|
||||
`State Dict Type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT
|
||||
```
|
||||
|
||||
## A few caveats to be aware of
|
||||
|
||||
- PyTorch FSDP auto wraps sub-modules, flattens the parameters and shards the parameters in place.
|
||||
Due to this, any optimizer created before model wrapping gets broken and occupies more memory.
|
||||
Hence, it is highly recommended and efficient to prepare the model before creating the optimizer.
|
||||
`Accelerate` will automatically wrap the model and create an optimizer for you in case of single model with a warning message.
|
||||
> FSDP Warning: When using FSDP, it is efficient and recommended to call prepare for the model before creating the optimizer
|
||||
|
||||
However, below is the recommended way to prepare model and optimizer while using FSDP:
|
||||
|
||||
```diff
|
||||
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
|
||||
+ model = accelerator.prepare(model)
|
||||
|
||||
optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)
|
||||
|
||||
- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
- )
|
||||
|
||||
+ optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
+ optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
+ )
|
||||
```
|
||||
|
||||
- In case of a single model, if you have created the optimizer with multiple parameter groups and called prepare with them together,
|
||||
then the parameter groups will be lost and the following warning is displayed:
|
||||
> FSDP Warning: When using FSDP, several parameter groups will be conflated into
|
||||
> a single one due to nested module wrapping and parameter flattening.
|
||||
|
||||
This is because parameter groups created before wrapping will have no meaning post wrapping due to parameter flattening of nested FSDP modules into 1D arrays (which can consume many layers).
|
||||
For instance, below are the named parameters of an FSDP model on GPU 0 (When using 2 GPUs. Around 55M (110M/2) params in 1D arrays as this will have the 1st shard of the parameters).
|
||||
Here, if one has applied no weight decay for [bias, LayerNorm.weight] the named parameters of an unwrapped BERT model,
|
||||
it can't be applied to the below FSDP wrapped model as there are no named parameters with either of those strings and
|
||||
the parameters of those layers are concatenated with parameters of various other layers.
|
||||
```
|
||||
{
|
||||
'_fsdp_wrapped_module.flat_param': torch.Size([494209]),
|
||||
'_fsdp_wrapped_module._fpw_module.bert.embeddings.word_embeddings._fsdp_wrapped_module.flat_param': torch.Size([11720448]),
|
||||
'_fsdp_wrapped_module._fpw_module.bert.encoder._fsdp_wrapped_module.flat_param': torch.Size([42527232])
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
- In case of multiple models, it is necessary to prepare the models before creating optimizers or else it will throw an error.
|
||||
Then pass the optimizers to the prepare call in the same order as corresponding models else `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour.
|
||||
- This feature is incompatible with `--predict_with_generate` in the `run_translation.py` script of 🤗 `Transformers` library.
|
||||
|
||||
For more control, users can leverage the `FullyShardedDataParallelPlugin`. After creating an instance of this class, users can pass it to the Accelerator class instantiation.
|
||||
For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Performing gradient accumulation with 🤗 Accelerate
|
||||
@ -72,7 +75,7 @@ First the code shown earlier will be converted to utilize 🤗 Accelerate withou
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the [Concepts tutorial](../concept_guides/gradient_synchronization)!
|
||||
|
||||
</Tip>
|
||||
|
||||
@ -87,6 +90,9 @@ of steps to perform before each call to `step()` and how to automatically adjust
|
||||
+ accelerator = Accelerator(gradient_accumulation_steps=2)
|
||||
```
|
||||
|
||||
Alternatively, you can pass in a `gradient_accumulation_plugin` parameter to the [`Accelerator`] object's `__init__`, which will allow you to further customize the gradient accumulation behavior.
|
||||
Read more about that in the [GradientAccumulationPlugin](../package_reference/accelerator#accelerate.utils.GradientAccumulationPlugin) docs.
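For example, the plugin-based setup below is roughly equivalent to passing `gradient_accumulation_steps=2` directly (a sketch; see the plugin docs above for the full set of options):

```python
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# Roughly equivalent to Accelerator(gradient_accumulation_steps=2),
# with room to customize additional accumulation behavior on the plugin.
plugin = GradientAccumulationPlugin(num_steps=2)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
```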
|
||||
|
||||
From here you can use the [`~Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!
|
||||
You just wrap it around the entire training part of our code:
|
||||
|
||||
@ -111,11 +117,37 @@ You can remove all the special checks for the step number and the loss adjustmen
|
||||
|
||||
As you can see the [`Accelerator`] is able to keep track of the batch number you are on and it will automatically know whether to step through the prepared optimizer and how to adjust the loss.
|
||||
|
||||
<Tip>
|
||||
|
||||
Typically with gradient accumulation, you would need to adjust the number of steps to reflect the change in total batches you are
|
||||
training on. 🤗 Accelerate automagically does this for you by default. Behind the scenes we instantiate a [`GradientAccumulationPlugin`] configured to do this.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
The [`state.GradientState`] is sync'd with the active dataloader being iterated upon. As such it assumes naively that when we have reached the end of the dataloader everything will sync and a step will be performed. To disable this, set `sync_with_dataloader` to be `False` in the [`GradientAccumulationPlugin`]:
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import GradientAccumulationPlugin
|
||||
|
||||
plugin = GradientAccumulationPlugin(sync_with_dataloader=False)
|
||||
accelerator = Accelerator(..., gradient_accumulation_plugin=plugin)
|
||||
```
|
||||
|
||||
</Tip>
|
||||
|
||||
## The finished code
|
||||
|
||||
Below is the finished implementation for performing gradient accumulation with 🤗 Accelerate
|
||||
|
||||
```python
|
||||
from accelerate import Accelerator
|
||||
accelerator = Accelerator(gradient_accumulation_steps=2)
|
||||
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
|
||||
model, optimizer, training_dataloader, scheduler
|
||||
)
|
||||
for batch in training_dataloader:
|
||||
with accelerator.accumulate(model):
|
||||
inputs, targets = batch
|
||||
@ -127,4 +159,74 @@ for batch in training_dataloader:
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
It's important that **only one forward/backward** should be done inside the context manager `with accelerator.accumulate(model)`.
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
To learn more about what magic this wraps around, read the [Gradient Synchronization concept guide](../concept_guides/gradient_synchronization)
|
||||
|
||||
|
||||
## Self-contained example
|
||||
|
||||
Here is a self-contained example that you can run to see gradient accumulation in action with 🤗 Accelerate:
|
||||
|
||||
```python
|
||||
import torch
|
||||
import copy
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import set_seed
|
||||
from torch.utils.data import TensorDataset, DataLoader
|
||||
|
||||
# seed
|
||||
set_seed(0)
|
||||
|
||||
# define toy inputs and labels
|
||||
x = torch.tensor([1., 2., 3., 4., 5., 6., 7., 8.])
|
||||
y = torch.tensor([2., 4., 6., 8., 10., 12., 14., 16.])
|
||||
gradient_accumulation_steps = 4
|
||||
batch_size = len(x) // gradient_accumulation_steps
|
||||
|
||||
# define dataset and dataloader
|
||||
dataset = TensorDataset(x, y)
|
||||
dataloader = DataLoader(dataset, batch_size=batch_size)
|
||||
|
||||
# define model, optimizer and loss function
|
||||
model = torch.zeros((1, 1), requires_grad=True)
|
||||
model_clone = copy.deepcopy(model)
|
||||
criterion = torch.nn.MSELoss()
|
||||
model_optimizer = torch.optim.SGD([model], lr=0.02)
|
||||
accelerator = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)
|
||||
model, model_optimizer, dataloader = accelerator.prepare(model, model_optimizer, dataloader)
|
||||
model_clone_optimizer = torch.optim.SGD([model_clone], lr=0.02)
|
||||
print(f"initial model weight is {model.mean().item():.5f}")
|
||||
print(f"initial model weight is {model_clone.mean().item():.5f}")
|
||||
for i, (inputs, labels) in enumerate(dataloader):
|
||||
with accelerator.accumulate(model):
|
||||
inputs = inputs.view(-1, 1)
|
||||
print(i, inputs.flatten())
|
||||
labels = labels.view(-1, 1)
|
||||
outputs = inputs @ model
|
||||
loss = criterion(outputs, labels)
|
||||
accelerator.backward(loss)
|
||||
model_optimizer.step()
|
||||
model_optimizer.zero_grad()
|
||||
loss = criterion(x.view(-1, 1) @ model_clone, y.view(-1, 1))
|
||||
model_clone_optimizer.zero_grad()
|
||||
loss.backward()
|
||||
model_clone_optimizer.step()
|
||||
print(f"w/ accumulation, the final model weight is {model.mean().item():.5f}")
|
||||
print(f"w/o accumulation, the final model weight is {model_clone.mean().item():.5f}")
|
||||
```
|
||||
```
|
||||
initial model weight is 0.00000
|
||||
initial model weight is 0.00000
|
||||
0 tensor([1., 2.])
|
||||
1 tensor([3., 4.])
|
||||
2 tensor([5., 6.])
|
||||
3 tensor([7., 8.])
|
||||
w/ accumulation, the final model weight is 2.04000
|
||||
w/o accumulation, the final model weight is 2.04000
|
||||
```
|
||||
174
docs/source/usage_guides/ipex.md
Normal file
174
docs/source/usage_guides/ipex.md
Normal file
@ -0,0 +1,174 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Intel® Extension for PyTorch
|
||||
|
||||
[IPEX](https://github.com/intel/intel-extension-for-pytorch) is optimized for CPUs with AVX-512 or above, and functionally works for CPUs with only AVX2. So, it is expected to bring performance benefits for Intel CPU generations with AVX-512 or above, while CPUs with only AVX2 (e.g., AMD CPUs or older Intel CPUs) might see better performance under IPEX, but this is not guaranteed. IPEX provides performance optimizations for CPU training with both Float32 and BFloat16. The usage of BFloat16 is the main focus of the following sections.
|
||||
|
||||
Low precision data type BFloat16 has been natively supported on the 3rd Generation Xeon® Scalable Processors (aka Cooper Lake) with AVX512 instruction set and will be supported on the next generation of Intel® Xeon® Scalable Processors with Intel® Advanced Matrix Extensions (Intel® AMX) instruction set with further boosted performance. The Auto Mixed Precision for CPU backend has been enabled since PyTorch-1.10. At the same time, the support of Auto Mixed Precision with BFloat16 for CPU and BFloat16 optimization of operators has been massively enabled in Intel® Extension for PyTorch, and partially upstreamed to PyTorch master branch. Users can get better performance and user experience with IPEX Auto Mixed Precision.
|
||||
|
||||
## IPEX installation:
|
||||
|
||||
IPEX releases follow PyTorch releases; to install via pip:
|
||||
|
||||
| PyTorch Version | IPEX version |
|
||||
| :---------------: | :----------: |
|
||||
| 2.0 | 2.0.0 |
|
||||
| 1.13 | 1.13.0 |
|
||||
| 1.12 | 1.12.300 |
|
||||
| 1.11 | 1.11.200 |
|
||||
| 1.10 | 1.10.100 |
|
||||
|
||||
```
|
||||
pip install intel_extension_for_pytorch==<version_name> -f https://developer.intel.com/ipex-whl-stable-cpu
|
||||
```
|
||||
|
||||
Check the [IPEX installation guide](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/installation.html) for more installation approaches.
|
||||
|
||||
|
||||
## How It Works For Training Optimization on CPU
|
||||
|
||||
🤗 Accelerate has integrated [IPEX](https://github.com/intel/intel-extension-for-pytorch); all you need to do is enable it through the config.
|
||||
|
||||
**Scenario 1**: Acceleration of non-distributed CPU training
|
||||
|
||||
Run <u>accelerate config</u> on your machine:
|
||||
|
||||
```bash
|
||||
$ accelerate config
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
In which compute environment are you running?
|
||||
This machine
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
Which type of machine are you using?
|
||||
No distributed training
|
||||
Do you want to run your training on CPU only (even if a GPU / Apple Silicon device is available)? [yes/NO]:yes
|
||||
Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:yes
|
||||
Do you wish to optimize your script with torch dynamo?[yes/NO]:NO
|
||||
Do you want to use DeepSpeed? [yes/NO]: NO
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
Do you wish to use FP16 or BF16 (mixed precision)?
|
||||
bf16
|
||||
```
|
||||
This will generate a config file that will be used automatically to properly set the
|
||||
default options when doing
|
||||
|
||||
```bash
|
||||
accelerate launch my_script.py --args_to_my_script
|
||||
```
|
||||
|
||||
For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with IPEX enabled.
|
||||
The `default_config.yaml` that is generated after `accelerate config`:
|
||||
|
||||
```bash
|
||||
compute_environment: LOCAL_MACHINE
|
||||
distributed_type: 'NO'
|
||||
downcast_bf16: 'no'
|
||||
ipex_config:
|
||||
ipex: true
|
||||
machine_rank: 0
|
||||
main_training_function: main
|
||||
mixed_precision: bf16
|
||||
num_machines: 1
|
||||
num_processes: 1
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_env: []
|
||||
tpu_use_cluster: false
|
||||
tpu_use_sudo: false
|
||||
use_cpu: true
|
||||
```
|
||||
```bash
|
||||
accelerate launch examples/nlp_example.py
|
||||
```
|
||||
|
||||
**Scenario 2**: Acceleration of distributed CPU training
|
||||
We use Intel oneCCL for communication, combined with the Intel® MPI library, to deliver flexible, efficient, scalable cluster messaging on Intel® architecture. You can refer to [this guide](https://huggingface.co/docs/transformers/perf_train_cpu_many) for installation instructions.
|
||||
|
||||
Run <u>accelerate config</u> on your machine (node0):
|
||||
|
||||
```bash
|
||||
$ accelerate config
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
In which compute environment are you running?
|
||||
This machine
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
Which type of machine are you using?
|
||||
multi-CPU
|
||||
How many different machines will you use (use more than 1 for multi-node training)? [1]: 4
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
What is the rank of this machine?
|
||||
0
|
||||
What is the IP address of the machine that will host the main process? 36.112.23.24
|
||||
What is the port you will use to communicate with the main process? 29500
|
||||
Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: yes
|
||||
Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:yes
|
||||
Do you wish to optimize your script with torch dynamo?[yes/NO]:NO
|
||||
How many CPU(s) should be used for distributed training? [1]:16
|
||||
-----------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
Do you wish to use FP16 or BF16 (mixed precision)?
|
||||
bf16
|
||||
```
|
||||
For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with IPEX enabled for distributed CPU training.
|
||||
|
||||
The `default_config.yaml` that is generated after `accelerate config`:
|
||||
```bash
|
||||
compute_environment: LOCAL_MACHINE
|
||||
distributed_type: MULTI_CPU
|
||||
downcast_bf16: 'no'
|
||||
ipex_config:
|
||||
ipex: true
|
||||
machine_rank: 0
|
||||
main_process_ip: 36.112.23.24
|
||||
main_process_port: 29500
|
||||
main_training_function: main
|
||||
mixed_precision: bf16
|
||||
num_machines: 4
|
||||
num_processes: 16
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_env: []
|
||||
tpu_use_cluster: false
|
||||
tpu_use_sudo: false
|
||||
use_cpu: true
|
||||
```
|
||||
|
||||
Set the following environment variables and use Intel MPI to launch the training.
|
||||
|
||||
On node0, you need to create a configuration file that contains the IP addresses of each node (for example, `hostfile`) and pass that configuration file path as an argument.
|
||||
```bash
|
||||
$ cat hostfile
|
||||
xxx.xxx.xxx.xxx #node0 ip
|
||||
xxx.xxx.xxx.xxx #node1 ip
|
||||
xxx.xxx.xxx.xxx #node2 ip
|
||||
xxx.xxx.xxx.xxx #node3 ip
|
||||
```
|
||||
Now, run the following command on node0 and **16DDP** (16 processes) will be enabled across node0, node1, node2, and node3 with BF16 mixed precision:
|
||||
```bash
|
||||
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
|
||||
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
|
||||
export CCL_WORKER_COUNT=1
|
||||
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
|
||||
export CCL_ATL_TRANSPORT=ofi
|
||||
mpirun -f hostfile -n 16 -ppn 4 accelerate launch examples/nlp_example.py
|
||||
```
|
||||
|
||||
## Related Resources
|
||||
|
||||
- [Project's github](https://github.com/intel/intel-extension-for-pytorch)
|
||||
- [API docs](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/api_doc.html)
|
||||
- [Tuning guide](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/tuning_guide.html)
|
||||
- [Blogs & Publications](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/blogs_publications.html)
|
||||
|
||||
108
docs/source/usage_guides/local_sgd.md
Normal file
108
docs/source/usage_guides/local_sgd.md
Normal file
@ -0,0 +1,108 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Using Local SGD with 🤗 Accelerate
|
||||
|
||||
Local SGD is a technique for distributed training where gradients are not synchronized every step. Thus, each process updates its own version of the model weights, and after a given number of steps these weights are synchronized by averaging across all processes. This improves communication efficiency and can lead to a substantial training speed-up, especially when a computer lacks a fast interconnect such as NVLink.
|
||||
Unlike gradient accumulation (where improving communication efficiency requires increasing the effective batch size), Local SGD does not require changing a batch size or a learning rate / schedule. However, if necessary, Local SGD can be combined with gradient accumulation as well.
|
||||
|
||||
In this tutorial you will see how to quickly set up Local SGD with 🤗 Accelerate. Compared to a standard Accelerate setup, this requires only two extra lines of code.
|
||||
|
||||
This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches:
|
||||
|
||||
```python
|
||||
device = "cuda"
|
||||
model.to(device)
|
||||
|
||||
gradient_accumulation_steps = 2
|
||||
|
||||
for index, batch in enumerate(training_dataloader):
|
||||
inputs, targets = batch
|
||||
inputs = inputs.to(device)
|
||||
targets = targets.to(device)
|
||||
outputs = model(inputs)
|
||||
loss = loss_function(outputs, targets)
|
||||
loss = loss / gradient_accumulation_steps
|
||||
loss.backward()
|
||||
if (index + 1) % gradient_accumulation_steps == 0:
|
||||
optimizer.step()
|
||||
scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
```
|
||||
|
||||
## Converting it to 🤗 Accelerate
|
||||
|
||||
First, the code shown earlier will be converted to use 🤗 Accelerate with neither a LocalSGD nor a gradient accumulation helper:
|
||||
|
||||
```diff
|
||||
+ from accelerate import Accelerator
|
||||
+ accelerator = Accelerator()
|
||||
|
||||
+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(
|
||||
+ model, optimizer, training_dataloader, scheduler
|
||||
+ )
|
||||
|
||||
for index, batch in enumerate(training_dataloader):
|
||||
inputs, targets = batch
|
||||
- inputs = inputs.to(device)
|
||||
- targets = targets.to(device)
|
||||
outputs = model(inputs)
|
||||
loss = loss_function(outputs, targets)
|
||||
loss = loss / gradient_accumulation_steps
|
||||
+ accelerator.backward(loss)
|
||||
if (index+1) % gradient_accumulation_steps == 0:
|
||||
optimizer.step()
|
||||
scheduler.step()
|
||||
```
|
||||
|
||||
## Letting 🤗 Accelerate handle model synchronization
|
||||
|
||||
All that is left now is to let 🤗 Accelerate handle model parameter synchronization **and** the gradient accumulation for us. For simplicity let us assume we need to synchronize every 8 steps. This is
|
||||
achieved by adding one `with LocalSGD` statement and one call to `local_sgd.step()` after every optimizer step:
|
||||
|
||||
```diff
|
||||
+local_sgd_steps=8
|
||||
|
||||
+with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
|
||||
for batch in training_dataloader:
|
||||
with accelerator.accumulate(model):
|
||||
inputs, targets = batch
|
||||
outputs = model(inputs)
|
||||
loss = loss_function(outputs, targets)
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
+ local_sgd.step()
|
||||
```
|
||||
|
||||
Under the hood, the Local SGD code **disables** automatic gradient synchronization (but accumulation still works as expected!). Instead it averages model parameters every `local_sgd_steps` steps (as well as at the end of the training loop).
|
||||
|
||||
## Limitations
|
||||
|
||||
The current implementation works only with basic multi-GPU (or multi-CPU) training, without, e.g., [DeepSpeed](https://github.com/microsoft/DeepSpeed).
|
||||
|
||||
## References
|
||||
|
||||
Although we are not aware of the true origins of this simple approach, the idea of local SGD is quite old and goes
|
||||
back to at least:
|
||||
|
||||
Zhang, J., De Sa, C., Mitliagkas, I., & Ré, C. (2016). [Parallel SGD: When does averaging help?. arXiv preprint
|
||||
arXiv:1606.07365.](https://arxiv.org/abs/1606.07365)
|
||||
|
||||
We credit the term Local SGD to the following paper (but there might be earlier references we are not aware of).
|
||||
|
||||
Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little." ICLR 2019-International Conference on
|
||||
Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767)
|
||||
92
docs/source/usage_guides/low_precision_training.md
Normal file
92
docs/source/usage_guides/low_precision_training.md
Normal file
@ -0,0 +1,92 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Low Precision Training Methods
|
||||
|
||||
🤗 Accelerate provides integrations to train with lower precision methods on supported hardware through the `TransformersEngine` and `MS-AMP` packages. This documentation will help guide you through what hardware is supported, how to configure your [`Accelerator`] to leverage the low precision methods, and what you can expect when training.
|
||||
|
||||
## What training on FP8 means
|
||||
|
||||
To explore more of the nitty-gritty in training in FP8 with PyTorch and 🤗 Accelerate, check out the [concept_guide](../concept_guides/low_precision_training.md) on why this can be difficult. But essentially rather than training in BF16, some (or all) aspects of training a model can be performed using 8 bits instead of 16. The challenge is doing so without degrading final performance.
|
||||
|
||||
This is only enabled on specific NVIDIA hardware, namely:
|
||||
|
||||
* Anything after the 3000 series consumer graphics cards (such as the 4090)
|
||||
* Hopper-based GPU architectures (such as the `H100` and `H200`)
|
||||
|
||||
The result is some reduction in the memory used (as we've cut the needed memory in half for some parts of training), and an increase in throughput *should* also be seen for larger models that can replace certain layers with FP8-enabled ones.
|
||||
|
||||
## Configuring the Accelerator
|
||||
|
||||
Currently two different backends for FP8 are supported (`TransformersEngine` and `MS-AMP`), each with different capabilities and configurations.
|
||||
|
||||
To use either, the same core API is used. Just pass `mixed_precision="fp8"` to either the [`Accelerator`], during `accelerate config` when prompted about mixed precision, or as part of your `config.yaml` file in the `mixed_precision` key:
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
accelerator = Accelerator(mixed_precision="fp8")
|
||||
```
|
||||
|
||||
By default, if `MS-AMP` is available in your environment, 🤗 Accelerate will automatically utilize it as a backend. To specify it yourself (and customize other parts of the FP8 mixed precision setup), you can utilize the [`utils.FP8RecipeKwargs`]:
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import FP8RecipeKwargs
|
||||
kwargs = [FP8RecipeKwargs(backend="msamp")]
|
||||
# Or to specify the backend as `TransformersEngine` even if MS-AMP is installed
|
||||
# kwargs = [FP8RecipeKwargs(backend="te")]
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
|
||||
```
|
||||
|
||||
## Configuring MS-AMP
|
||||
|
||||
Of the two, `MS-AMP` is traditionally the easier one to configure as there is only a single argument: the optimization level.
|
||||
|
||||
Currently two levels of optimization are supported in the 🤗 Accelerate integration, `"O1"` and `"O2"` (using the letter 'o', not zero).
|
||||
|
||||
* `"O1"` will cast the weight gradients and `all_reduce` communications to happen in 8-bit, while the rest are done in 16 bit. This reduces the general GPU memory usage and speeds up communication bandwidths.
|
||||
* `"O2"` will also cast first-order optimizer states into 8 bit, while the second order states are in FP16. (Currently just the `Adam` optimizer is supported). This tries it's best to minimize final accuracy degradation and will save the highest potential memory.
|
||||
|
||||
To specify an optimization level, pass it to the [`utils.FP8RecipeKwargs`] handler by setting the `optimization_level` argument:
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import FP8RecipeKwargs
|
||||
kwargs = [FP8RecipeKwargs(backend="msamp", optimization_level="O2")]
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
|
||||
```
|
||||
|
||||
## Configuring TransformersEngine
|
||||
|
||||
TransformersEngine has much more available for customizing how and what FP8 calculations are performed. A full list of supported arguments and what they mean is available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html); however, they are restated as part of the [`utils.FP8RecipeKwargs`] docstring for your convenience.
|
||||
|
||||
🤗 Accelerate tries to set sensible defaults, but exploring and tweaking the various parameters yourself can potentially lead to better performance.
|
||||
|
||||
To use it, specify `backend="te"` and modify any of the arguments you want as part of your kwarg handler:
|
||||
|
||||
```{python}
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import FP8RecipeKwargs
|
||||
kwargs = [FP8RecipeKwargs(backend="te", ...)]
|
||||
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
|
||||
```
|
||||
|
||||
## Further Reading
|
||||
|
||||
To learn more about training in FP8 please check out the following resources:
|
||||
|
||||
* [Our concept guide](../concept_guides/low_precision_training.md) with more details about both TransformersEngine and MS-AMP
|
||||
* [The `transformers-engine` documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html)
|
||||
* [The `MS-AMP` documentation](https://azure.github.io/MS-AMP/docs/)
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
|
||||
@ -110,12 +113,12 @@ pip install git+https://github.com/huggingface/Megatron-LM.git
|
||||
## Accelerate Megatron-LM Plugin
|
||||
|
||||
Important features are directly supported via the `accelerate config` command.
|
||||
An example of the corresponding questions for using Megatron-LM features is shown below:
|
||||
|
||||
```bash
|
||||
:~$ accelerate config --config_file "megatron_gpt_config.yaml"
|
||||
In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 0
|
||||
Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU): 2
|
||||
How many different machines will you use (use more than 1 for multi-node training)? [1]:
|
||||
Do you want to use DeepSpeed? [yes/NO]:
|
||||
Do you want to use FullyShardedDataParallel? [yes/NO]:
|
||||
@ -125,7 +128,7 @@ Do you want to enable Sequence Parallelism? [YES/no]:
|
||||
What is the Pipeline Parallelism degree/size? [1]:2
|
||||
What is the number of micro-batches? [1]:2
|
||||
Do you want to enable selective activation recomputation? [YES/no]:
|
||||
Do you want to use distributed optimizer which shards optimizer state and gradients across data parallel ranks? [YES/no]:
|
||||
What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]:
|
||||
How many GPU(s) should be used for distributed training? [1]:4
|
||||
Do you wish to use FP16 or BF16 (mixed precision)? [NO/fp16/bf16]: bf16
|
||||
@ -290,6 +293,7 @@ You will implement the `accelerate.utils.AbstractTrainStep` or inherit from thei
|
||||
```python
|
||||
from accelerate.utils import MegatronLMDummyScheduler, GPTTrainStep, avg_losses_across_data_parallel_group
|
||||
|
||||
|
||||
# Custom loss function for the Megatron model
|
||||
class GPTTrainStepWithCustomLoss(GPTTrainStep):
|
||||
def __init__(self, megatron_args, **kwargs):
|
||||
@ -351,8 +355,8 @@ def main():
|
||||
|
||||
2. For using the Megatron-LM datasets, a few more changes are required. Dataloaders for these datasets
|
||||
are available only on rank 0 of each tensor parallel group. As such, there are ranks where the dataloader won't be
available and this requires tweaks to the training loop. Being able to do all this shows how
flexible and extensible 🤗 Accelerate is. The changes required are as follows.
|
||||
|
||||
a. For Megatron-LM indexed datasets, we need to use `MegatronLMDummyDataLoader`
|
||||
and pass the required dataset args to it such as `data_path`, `seq_length` etc.
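A sketch of what this can look like is shown below; the argument names are illustrative and assume a typical training script's `args`:

```python
# Sketch: build a dummy dataloader for Megatron-LM indexed datasets and pass
# the required dataset arguments to it (the exact names here are assumptions).
from accelerate.utils import MegatronLMDummyDataLoader

megatron_dataloader_config = {
    "data_path": args.data_path,
    "splits_string": args.splits_string,
    "seq_length": args.block_size,
    "micro_batch_size": args.per_device_train_batch_size,
}
megatron_dataloader = MegatronLMDummyDataLoader(**megatron_dataloader_config)
```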
|
||||
@ -543,7 +547,7 @@ The `model(**batch_data)` call return loss(es) averaged across the data parallel
|
||||
This is fine for most cases wherein pre-training jobs are run using Megatron-LM features and
|
||||
you can easily compute the `perplexity` using the loss.
|
||||
For the GPT model, returning logits in addition to loss(es) is supported.
|
||||
These logits aren't gathered across data parallel ranks. Use `accelerator.utils.gather_across_data_parallel_groups`
|
||||
to gather logits across data parallel ranks. These logits along with labels can be used for computing various
|
||||
performance metrics.
|
||||
|
||||
@ -1,55 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Memory Utilities
|
||||
|
||||
One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory",
|
||||
as the entire script needs to be restarted, progress is lost, and typically a developer would want to simply
|
||||
start their script and let it run.
|
||||
|
||||
`Accelerate` provides a utility heavily based on [toma](https://github.com/BlackHC/toma) to give this capability.
|
||||
|
||||
## find_executable_batch_size
|
||||
|
||||
This algorithm operates with exponential decay, decreasing the batch size in half after each failed run on some
|
||||
training script. To use it, restructure your training function to include an inner function that includes this wrapper,
|
||||
and build your dataloaders inside it. At a minimum, this could look like 4 new lines of code.
|
||||
> Note: The inner function *must* take in the batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for us
|
||||
|
||||
It should also be noted that anything which will consume CUDA memory and be passed to the `accelerator` **must** be declared inside the inner function,
|
||||
such as models and optimizers.
|
||||
|
||||
```diff
|
||||
def training_function(args):
|
||||
accelerator = Accelerator()
|
||||
|
||||
+ @find_executable_batch_size(starting_batch_size=args.batch_size)
|
||||
+ def inner_training_loop(batch_size):
|
||||
+ nonlocal accelerator # Ensure they can be used in our context
|
||||
+ accelerator.free_memory() # Free all lingering references
|
||||
model = get_model()
|
||||
model.to(accelerator.device)
|
||||
optimizer = get_optimizer()
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
lr_scheduler = get_scheduler(
|
||||
optimizer,
|
||||
num_training_steps=len(train_dataloader)*num_epochs
|
||||
)
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
train(model, optimizer, train_dataloader, lr_scheduler)
|
||||
validate(model, eval_dataloader)
|
||||
+ inner_training_loop()
|
||||
```
|
||||
|
||||
To find out more, check the documentation [here](../package_reference/utilities#accelerate.find_executable_batch_size).
|
||||
137
docs/source/usage_guides/model_size_estimator.md
Normal file
137
docs/source/usage_guides/model_size_estimator.md
Normal file
@ -0,0 +1,137 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Understanding how big of a model can fit on your machine
|
||||
|
||||
One very difficult aspect when exploring potential models to use on your machine is knowing just how big of a model will *fit* into memory with your current graphics card (such as loading the model onto CUDA).
|
||||
|
||||
To help alleviate this, 🤗 Accelerate has a CLI interface through `accelerate estimate-memory`. This tutorial will
|
||||
help walk you through using it, what to expect, and at the end link to the interactive demo hosted on the 🤗 Hub which will
|
||||
even let you post those results directly on the model repo!
|
||||
|
||||
Currently we support searching for models that can be used in `timm` and `transformers`.
|
||||
|
||||
<Tip>
|
||||
|
||||
This API will load the model into memory on the `meta` device, so we are not actually downloading
|
||||
and loading the full weights of the model into memory, nor do we need to. As a result it's
|
||||
perfectly fine to measure 8 billion parameter models (or more), without having to worry about
|
||||
if your CPU can handle it!
|
||||
|
||||
</Tip>
|
||||
|
||||
## Gradio Demos
|
||||
|
||||
Below are a few gradio demos related to what was described above. The first is the official Hugging Face memory estimation space, utilizing Accelerate directly:
|
||||
|
||||
<div class="block dark:hidden">
|
||||
<iframe
|
||||
src="https://hf-accelerate-model-memory-usage.hf.space?__theme=light"
|
||||
width="850"
|
||||
height="1600"
|
||||
></iframe>
|
||||
</div>
|
||||
<div class="hidden dark:block">
|
||||
<iframe
|
||||
src="https://hf-accelerate-model-memory-usage.hf.space?__theme=dark"
|
||||
width="850"
|
||||
height="1600"
|
||||
></iframe>
|
||||
</div>
|
||||
|
||||
A community member has taken the idea and expanded it further, allowing you to filter models directly and see if you can run a particular LLM given GPU constraints and LoRA configurations. To play with it, see [here](https://huggingface.co/spaces/Vokturz/can-it-run-llm) for more details.
|
||||
|
||||
## The Command
|
||||
|
||||
When using `accelerate estimate-memory`, you need to pass in the name of the model you want to use, potentially the framework
|
||||
that model utilizes (if it can't be found automatically), and the data types you want the model to be loaded in with.
|
||||
|
||||
For example, here is how we can calculate the memory footprint for `bert-base-cased`:
|
||||
|
||||
```bash
|
||||
accelerate estimate-memory bert-base-cased
|
||||
```
|
||||
|
||||
This will download the `config.json` for `bert-base-cased`, load the model on the `meta` device, and report back how much space
|
||||
it will use:
|
||||
|
||||
Memory Usage for loading `bert-base-cased`:
|
||||
|
||||
| dtype | Largest Layer | Total Size | Training using Adam |
|
||||
|---------|---------------|------------|---------------------|
|
||||
| float32 | 84.95 MB | 418.18 MB | 1.61 GB |
|
||||
| float16 | 42.47 MB | 206.59 MB | 826.36 MB |
|
||||
| int8 | 21.24 MB | 103.29 MB | 413.18 MB |
|
||||
| int4 | 10.62 MB | 51.65 MB | 206.59 MB |
|
||||
|
||||
By default it will return all the supported dtypes (`int4` through `float32`), but if you are interested in specific ones these can be filtered.
|
||||
|
||||
### Specific libraries
|
||||
|
||||
If the source library cannot be determined automatically (like it could in the case of `bert-base-cased`), a library name can
|
||||
be passed in.
|
||||
|
||||
```bash
|
||||
accelerate estimate-memory HuggingFaceM4/idefics-80b-instruct --library_name transformers
|
||||
```
|
||||
|
||||
Memory Usage for loading `HuggingFaceM4/idefics-80b-instruct`:
|
||||
|
||||
| dtype | Largest Layer | Total Size | Training using Adam |
|
||||
|---------|---------------|------------|---------------------|
|
||||
| float32 | 3.02 GB | 297.12 GB | 1.16 TB |
|
||||
| float16 | 1.51 GB | 148.56 GB | 594.24 GB |
|
||||
| int8 | 772.52 MB | 74.28 GB | 297.12 GB |
|
||||
| int4 | 386.26 MB | 37.14 GB | 148.56 GB |
|
||||
|
||||
|
||||
```bash
|
||||
accelerate estimate-memory timm/resnet50.a1_in1k --library_name timm
|
||||
```
|
||||
|
||||
Memory Usage for loading `timm/resnet50.a1_in1k`:
|
||||
|
||||
| dtype | Largest Layer | Total Size | Training using Adam |
|
||||
|---------|---------------|------------|---------------------|
|
||||
| float32 | 9.0 MB | 97.7 MB | 390.78 MB |
|
||||
| float16 | 4.5 MB | 48.85 MB | 195.39 MB |
|
||||
| int8 | 2.25 MB | 24.42 MB | 97.7 MB |
|
||||
| int4 | 1.12 MB | 12.21 MB | 48.85 MB |
|
||||
|
||||
### Specific dtypes
|
||||
|
||||
As mentioned earlier, while we return `int4` through `float32` by default, any dtype can be used from `float32`, `float16`, `int8`, and `int4`.
|
||||
|
||||
To do so, pass them in after specifying `--dtypes`:
|
||||
|
||||
```bash
|
||||
accelerate estimate-memory bert-base-cased --dtypes float32 float16
|
||||
```
|
||||
|
||||
Memory Usage for loading `bert-base-cased`:
|
||||
|
||||
| dtype | Largest Layer | Total Size | Training using Adam |
|
||||
|---------|---------------|------------|---------------------|
|
||||
| float32 | 84.95 MB | 413.18 MB | 1.61 GB |
|
||||
| float16 | 42.47 MB | 206.59 MB | 826.36 MB |
|
||||
|
||||
## Caveats with this calculator
|
||||
|
||||
This calculator will tell you how much memory is needed to purely load the model in, *not* to perform inference.
|
||||
|
||||
This calculation is accurate within a few % of the actual value, so it is a very good view of just how much memory it will take. For instance loading `bert-base-cased` actually takes `413.68 MB` when loaded on CUDA in full precision, and the calculator estimates `413.18 MB`.
|
||||
|
||||
When performing inference you can expect to add up to an additional 20% as found by [EleutherAI](https://blog.eleuther.ai/transformer-math/). We'll be conducting research into finding a more accurate estimate to these values, and will update
|
||||
this calculator once done.
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Accelerated PyTorch Training on Mac
|
||||
@ -31,41 +34,10 @@ please follow this nice medium article [GPU-Acceleration Comes to PyTorch on M1
|
||||
|
||||
|
||||
## How it works out of the box
|
||||
It is enabled by default on macOS machines with MPS-enabled Apple Silicon GPUs.
|
||||
To disable it, pass the `--cpu` flag to the `accelerate launch` command or answer the corresponding question when answering the `accelerate config` questionnaire.
|
||||
|
||||
On your machine(s) just run:
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
and answer the questions asked, specifically choose `MPS` for the query:
|
||||
|
||||
```
|
||||
Which type of machine are you using?.
|
||||
```
|
||||
|
||||
This will generate a config file that will be used automatically to properly set
|
||||
the default options when doing `accelerate launch`, such as the one shown below:
|
||||
|
||||
```bash
|
||||
compute_environment: LOCAL_MACHINE
|
||||
deepspeed_config: {}
|
||||
distributed_type: MPS
|
||||
downcast_bf16: 'no'
|
||||
fsdp_config: {}
|
||||
machine_rank: 0
|
||||
main_process_ip: null
|
||||
main_process_port: null
|
||||
main_training_function: main
|
||||
mixed_precision: 'no'
|
||||
num_machines: 1
|
||||
num_processes: 1
|
||||
use_cpu: false
|
||||
```
|
||||
|
||||
|
||||
You can directly run the following script to test it out on MPS enabled Apple Silicon machines:
|
||||
```bash
|
||||
accelerate launch /examples/cv_example.py --data_dir images
|
||||
```
|
||||
136
docs/source/usage_guides/quantization.md
Normal file
136
docs/source/usage_guides/quantization.md
Normal file
@ -0,0 +1,136 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Quantization
|
||||
|
||||
## `bitsandbytes` Integration
|
||||
|
||||
🤗 Accelerate brings `bitsandbytes` quantization to your model. You can now load any PyTorch model in 8-bit or 4-bit with a few lines of code.
|
||||
|
||||
If you want to use 🤗 Transformers models with `bitsandbytes`, you should follow this [documentation](https://huggingface.co/docs/transformers/main_classes/quantization).
|
||||
|
||||
To learn more about how the `bitsandbytes` quantization works, check out the blog posts on [8-bit quantization](https://huggingface.co/blog/hf-bitsandbytes-integration) and [4-bit quantization](https://huggingface.co/blog/4bit-transformers-bitsandbytes).
|
||||
|
||||
### Pre-Requisites
|
||||
You will need to install the following requirements:
|
||||
|
||||
- Install `bitsandbytes` library
|
||||
```bash
|
||||
pip install bitsandbytes
|
||||
```
|
||||
- Install latest `accelerate` from source
|
||||
```bash
|
||||
pip install git+https://github.com/huggingface/accelerate.git
|
||||
```
|
||||
- Install `minGPT` and `huggingface_hub` to run examples
|
||||
```bash
|
||||
git clone https://github.com/karpathy/minGPT.git
|
||||
pip install minGPT/
|
||||
pip install huggingface_hub
|
||||
```
|
||||
|
||||
### How it works
|
||||
|
||||
First, we need to initialize our model. To save memory, we can initialize an empty model using the context manager [`init_empty_weights`].
|
||||
|
||||
Let's take the GPT2 model from minGPT library.
|
||||
```py
|
||||
from accelerate import init_empty_weights
|
||||
from mingpt.model import GPT
|
||||
|
||||
model_config = GPT.get_default_config()
|
||||
model_config.model_type = 'gpt2-xl'
|
||||
model_config.vocab_size = 50257
|
||||
model_config.block_size = 1024
|
||||
|
||||
with init_empty_weights():
|
||||
empty_model = GPT(model_config)
|
||||
```
|
||||
|
||||
Then, we need to get the path to the weights of your model. The path can be the state_dict file (e.g. "pytorch_model.bin") or a folder containing the sharded checkpoints.
|
||||
|
||||
```py
|
||||
from huggingface_hub import snapshot_download
|
||||
weights_location = snapshot_download(repo_id="marcsun13/gpt2-xl-linear-sharded")
|
||||
```
|
||||
|
||||
Finally, you need to set your quantization configuration with [`~utils.BnbQuantizationConfig`].
|
||||
|
||||
Here's an example for 8-bit quantization:
|
||||
```py
|
||||
from accelerate.utils import BnbQuantizationConfig
|
||||
bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold = 6)
|
||||
```
|
||||
|
||||
Here's an example for 4-bit quantization:
|
||||
```py
|
||||
from accelerate.utils import BnbQuantizationConfig
|
||||
bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4")
|
||||
```
|
||||
|
||||
To quantize your empty model with the selected configuration, you need to use [`~utils.load_and_quantize_model`].
|
||||
|
||||
```py
|
||||
from accelerate.utils import load_and_quantize_model
|
||||
quantized_model = load_and_quantize_model(empty_model, weights_location=weights_location, bnb_quantization_config=bnb_quantization_config, device_map = "auto")
|
||||
```
|
||||
|
||||
### Saving and loading 8-bit model
|
||||
|
||||
You can save your 8-bit model with accelerate using [`~Accelerator.save_model`].
|
||||
|
||||
```py
|
||||
from accelerate import Accelerator
|
||||
accelerate = Accelerator()
|
||||
new_weights_location = "path/to/save_directory"
|
||||
accelerate.save_model(quantized_model, new_weights_location)
|
||||
|
||||
quantized_model_from_saved = load_and_quantize_model(empty_model, weights_location=new_weights_location, bnb_quantization_config=bnb_quantization_config, device_map = "auto")
|
||||
```
|
||||
|
||||
Note that 4-bit model serialization is currently not supported.
|
||||
|
||||
### Offload modules to cpu and disk
|
||||
|
||||
You can offload some modules to CPU/disk if you don't have enough space on your GPU(s) to store the entire model.
|
||||
This uses big model inference under the hood. Check this [documentation](https://huggingface.co/docs/accelerate/usage_guides/big_modeling) for more details.
|
||||
|
||||
For 8-bit quantization, the selected modules will be converted to 8-bit precision.
|
||||
|
||||
For 4-bit quantization, the selected modules will be kept in `torch_dtype` that the user passed in `BnbQuantizationConfig`. We will add support to convert these offloaded modules in 4-bit when 4-bit serialization will be possible.
|
||||
|
||||
You just need to pass a custom `device_map` in order to offload modules to CPU/disk. The offloaded modules will be dispatched to the GPU when needed. Here's an example:
|
||||
|
||||
```py
|
||||
device_map = {
|
||||
"transformer.wte": 0,
|
||||
"transformer.wpe": 0,
|
||||
"transformer.drop": 0,
|
||||
"transformer.h": "cpu",
|
||||
"transformer.ln_f": "disk",
|
||||
"lm_head": "disk",
|
||||
}
|
||||
```
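With such a map, the quantization call stays the same apart from the `device_map`; the sketch below also assumes an `offload_folder` argument for the disk-offloaded weights, as in big model inference:

```py
# Sketch: quantize with a custom device_map that offloads some modules to
# CPU/disk; disk-offloaded weights are written to the offload folder.
quantized_model = load_and_quantize_model(
    empty_model,
    weights_location=weights_location,
    bnb_quantization_config=bnb_quantization_config,
    device_map=device_map,
    offload_folder="offload",
)
```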
|
||||
### Fine-tune a quantized model
|
||||
|
||||
It is not possible to perform pure 8-bit or 4-bit training on these models. However, you can train these models by leveraging parameter-efficient fine-tuning methods (PEFT), for example by training adapters on top of them. Please have a look at the [peft](https://github.com/huggingface/peft) library for more details.
|
||||
|
||||
Currently, you can't add adapters on top of any quantized model. However, with the official support of adapters in 🤗 Transformers models, you can fine-tune quantized models. If you want to finetune a 🤗 Transformers model, follow this [documentation](https://huggingface.co/docs/transformers/main_classes/quantization) instead. Check out this [demo](https://colab.research.google.com/drive/1VoYNfYDKcKRQRor98Zbf2-9VQTtGJ24k?usp=sharing) on how to fine-tune a 4-bit 🤗 Transformers model.
|
||||
|
||||
Note that you don’t need to pass `device_map` when loading the model for training. It will automatically load your model on your GPU. Please note that `device_map=auto` should be used for inference only.
|
||||
|
||||
### Example demo - running GPT2 1.5b on a Google Colab
|
||||
|
||||
Check out the Google Colab [demo](https://colab.research.google.com/drive/1T1pOgewAWVpR9gKpaEWw4orOrzPFb3yM?usp=sharing) for running quantized models on a GPT2 model. The GPT2-1.5B model checkpoint is in FP32 and uses 6GB of memory. After quantization, it uses 1.6GB with 8-bit modules and 1.2GB with 4-bit modules.
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Amazon SageMaker
|
||||
@ -160,10 +163,43 @@ use_cpu: false
|
||||
want to use different/other Python packages you can do this by adding them to the `requirements.txt`. These packages
|
||||
will be installed before your training script is started.
|
||||
|
||||
### Local Training: SageMaker Local mode
|
||||
|
||||
The local mode in the SageMaker SDK allows you to run your training script locally inside the HuggingFace DLC (Deep Learning container)
|
||||
or using your custom container image. This is useful for debugging and testing your training script inside the final container environment.
|
||||
Local mode uses Docker compose (*Note: Docker Compose V2 is not supported yet*). The SDK will handle the authentication against ECR
|
||||
to pull the DLC to your local environment. You can emulate CPU (single and multi-instance) and GPU (single instance) SageMaker training jobs.
|
||||
|
||||
To use local mode, you need to set your `ec2_instance_type` to `local`.
|
||||
|
||||
```yaml
|
||||
ec2_instance_type: local
|
||||
```
|
||||
|
||||
### Advanced configuration
|
||||
|
||||
The configuration allows you to override parameters for the [Estimator](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html).
|
||||
These settings have to be applied in the config file and are not part of `accelerate config`. You can control many additional aspects of the training job, e.g. use Spot instances, enable network isolation and many more.
|
||||
|
||||
```yaml
|
||||
additional_args:
|
||||
# enable network isolation to restrict internet access for containers
|
||||
enable_network_isolation: True
|
||||
```
|
||||
|
||||
You can find all available configuration options [here](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html).
|
||||
|
||||
### Use Spot Instances
|
||||
|
||||
You can use Spot Instances e.g. using (see [Advanced configuration](#advanced-configuration)):
|
||||
```yaml
|
||||
additional_args:
|
||||
use_spot_instances: True
|
||||
max_wait: 86400
|
||||
```
|
||||
|
||||
*Note: Spot Instances can be terminated, in which case training must be continued from a checkpoint. This is not handled in 🤗 Accelerate out of the box. Contact us if you would like this feature.*
|
||||
|
||||
### Remote scripts: Use scripts located on Github
|
||||
|
||||
*undecided if feature is needed. Contact us if you would like this feature.*
|
||||
@ -8,6 +8,9 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Tracking
|
||||
@ -17,12 +20,15 @@ There are a large number of experiment tracking API's available, however getting
|
||||
|
||||
## Integrated Trackers
|
||||
|
||||
Currently `Accelerate` supports seven trackers out-of-the-box:
|
||||
|
||||
- TensorBoard
|
||||
- WandB
|
||||
- CometML
|
||||
- Aim
|
||||
- MLFlow
|
||||
- ClearML
|
||||
- DVCLive
|
||||
|
||||
To use any of them, pass in the selected type(s) to the `log_with` parameter in [`Accelerator`]:
```python
|
||||
@ -83,6 +89,16 @@ for iteration in config["num_iterations"]:
|
||||
accelerator.end_training()
|
||||
```
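
A minimal, self-contained sketch of this flow (choosing a tracker, storing the run configuration, logging each step, and ending the run) could look like the following; the project name, hyperparameters, and loss values are purely illustrative:

```python
from accelerate import Accelerator

# Choose an integrated tracker via `log_with`; a list such as
# ["wandb", "tensorboard"] also works. TensorBoard needs a directory,
# hence `project_dir`.
accelerator = Accelerator(log_with="tensorboard", project_dir=".")

config = {"num_iterations": 5, "learning_rate": 1e-2}
# Store the starting hyperparameters and initialize the tracker run
accelerator.init_trackers("example_project", config=config)

for iteration in range(config["num_iterations"]):
    loss = 1.0 / (iteration + 1)  # placeholder for a real training loss
    accelerator.log({"training_loss": loss}, step=iteration)

# Let all trackers finish cleanly
accelerator.end_training()
```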

If a tracker requires a directory to save data to, such as `TensorBoard`, then pass the directory path to `project_dir`. The `project_dir` parameter is useful
when it needs to be combined with other settings in the [`~utils.ProjectConfiguration`] data class. For example, you can save the TensorBoard data to `project_dir` while everything else is logged to the `logging_dir` parameter of [`~utils.ProjectConfiguration`]:

```python
accelerator = Accelerator(log_with="tensorboard", project_dir=".")

# use with ProjectConfiguration
config = ProjectConfiguration(project_dir=".", logging_dir="another/directory")
accelerator = Accelerator(log_with="tensorboard", project_config=config)
```

|
||||
## Implementing Custom Trackers
|
||||
|
||||
@ -105,9 +121,12 @@ Every tracker must implement three functions and have three properties:
|
||||
- This should be implemented as a `@property` function
|
||||
- Should return the internal tracking mechanism the library uses, such as the `run` object for `wandb`.
|
||||
|
||||
A brief example can be seen below with an integration with Weights and Biases, containing only the relevant information:
|
||||
Each method should also utilize the [`state.PartialState`] class if the logger should only be executed on the main process for instance.
|
||||
|
||||
A brief example can be seen below with an integration with Weights and Biases, containing only the relevant information and logging just on
|
||||
the main process:
|
||||
```python
|
||||
from accelerate.tracking import GeneralTracker
|
||||
from accelerate.tracking import GeneralTracker, on_main_process
|
||||
from typing import Optional
|
||||
|
||||
import wandb
|
||||
@ -117,6 +136,7 @@ class MyCustomTracker(GeneralTracker):
|
||||
name = "wandb"
|
||||
requires_logging_directory = False
|
||||
|
||||
@on_main_process
|
||||
def __init__(self, run_name: str):
|
||||
self.run_name = run_name
|
||||
run = wandb.init(self.run_name)
|
||||
@ -125,9 +145,11 @@ class MyCustomTracker(GeneralTracker):
|
||||
def tracker(self):
|
||||
return self.run.run
|
||||
|
||||
@on_main_process
|
||||
def store_init_configuration(self, values: dict):
|
||||
wandb.config(values)
|
||||
|
||||
@on_main_process
|
||||
def log(self, values: dict, step: Optional[int] = None):
|
||||
wandb.log(values, step=step)
|
||||
```
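
A custom tracker defined this way can then be passed to `log_with`, either on its own or alongside the built-in ones; a short sketch (the run name is illustrative):

```python
from accelerate import Accelerator

# "my_run" is just an example run name for the custom tracker above
tracker = MyCustomTracker("my_run")

# Use the custom tracker on its own...
accelerator = Accelerator(log_with=tracker)

# ...or mix it with any of the built-in trackers
accelerator = Accelerator(log_with=[tracker, "all"])
```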
|
||||
@ -161,16 +183,26 @@ wandb_tracker = accelerator.get_tracker("wandb")
|
||||
|
||||
From there you can interact with `wandb`'s `run` object like normal:
|
||||
|
||||
<Tip warning={true}>
|
||||
Make sure to only interact with trackers on the main process!
|
||||
```python
|
||||
wandb_run.log_artifact(some_artifact_to_log)
|
||||
```
|
||||
|
||||
<Tip>
|
||||
Trackers built in Accelerate will automatically execute on the correct process,
|
||||
so if a tracker is only meant to be run on the main process it will do so
|
||||
automatically.
|
||||
</Tip>
|
||||
|
||||
If you want to truly remove Accelerate's wrapping entirely, you can
|
||||
achieve the same outcome with:
|
||||
|
||||
```python
|
||||
if accelerator.is_main_process:
|
||||
wandb_run.log_artifact(some_artifact_to_log)
|
||||
wandb_tracker = accelerator.get_tracker("wandb", unwrap=True)
|
||||
with accelerator.on_main_process:
|
||||
wandb_tracker.log_artifact(some_artifact_to_log)
|
||||
```
|
||||
|
||||
|
||||
## When a wrapper cannot work
|
||||
|
||||
If a library has an API that does not follow a strict `.log` with an overall dictionary such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement:
|
||||
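
As a rough sketch of that pattern (the `Experiment` class below is a hypothetical stand-in for such a third-party run object, not a real Neptune.AI API):

```python
from accelerate import Accelerator


class Experiment:
    """Hypothetical stand-in for a tracker whose API is not a flat `.log(dict)`."""

    def log_metric(self, name, value, step=None):
        print(f"{name}={value} (step={step})")


accelerator = Accelerator()
experiment = Experiment()

for step in range(10):
    loss = 0.1 * step  # placeholder for a real training loss
    # Only the main process should talk to the tracker
    if accelerator.is_main_process:
        experiment.log_metric("train/loss", loss, step=step)
```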
180 docs/source/usage_guides/training_zoo.md Normal file
@ -0,0 +1,180 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Example Zoo
|
||||
|
||||
Below is a non-exhaustive list of tutorials and scripts showcasing 🤗 Accelerate
|
||||
|
||||
## Official Accelerate Examples:
|
||||
|
||||
### Basic Examples
|
||||
|
||||
These examples showcase the base features of Accelerate and are a great starting point
|
||||
|
||||
- [Barebones NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py)
|
||||
- [Barebones distributed NLP example in a Jupyter Notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb)
|
||||
- [Barebones computer vision example](https://github.com/huggingface/accelerate/blob/main/examples/cv_example.py)
|
||||
- [Barebones distributed computer vision example in a Jupyter Notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_cv_example.ipynb)
|
||||
- [Using Accelerate in Kaggle](https://www.kaggle.com/code/muellerzr/multi-gpu-and-accelerate)
|
||||
|
||||
### Feature Specific Examples
|
||||
|
||||
These examples showcase specific features that the Accelerate framework offers
|
||||
|
||||
- [Automatic memory-aware gradient accumulation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/automatic_gradient_accumulation.py)
|
||||
- [Checkpointing states](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/checkpointing.py)
|
||||
- [Cross validation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/cross_validation.py)
|
||||
- [DeepSpeed](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/deepspeed_with_config_support.py)
|
||||
- [Fully Sharded Data Parallelism](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/fsdp_with_peak_mem_tracking.py)
|
||||
- [Gradient accumulation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/gradient_accumulation.py)
|
||||
- [Memory-aware batch size finder](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/memory.py)
|
||||
- [Metric Computation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/multi_process_metrics.py)
|
||||
- [Using Trackers](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/tracking.py)
|
||||
- [Using Megatron-LM](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/megatron_lm_gpt_pretraining.py)
|
||||
|
||||
### Full Examples
|
||||
|
||||
These examples showcase every feature in Accelerate at once that was shown in "Feature Specific Examples"
|
||||
|
||||
- [Complete NLP example](https://github.com/huggingface/accelerate/blob/main/examples/complete_nlp_example.py)
|
||||
- [Complete computer vision example](https://github.com/huggingface/accelerate/blob/main/examples/complete_cv_example.py)
|
||||
- [Very complete and extensible vision example showcasing SLURM, Hydra, and advanced usage of the framework](https://github.com/yuvalkirstain/PickScore)
|
||||
- [Causal language model fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_clm_no_trainer.py)
|
||||
- [Masked language model fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_no_trainer.py)
|
||||
- [Speech pretraining example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py)
|
||||
- [Translation fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/translation/run_translation_no_trainer.py)
|
||||
- [Text classification fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue_no_trainer.py)
|
||||
- [Semantic segmentation fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py)
|
||||
- [Question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_no_trainer.py)
|
||||
- [Beam search question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py)
|
||||
- [Multiple choice question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/multiple-choice/run_swag_no_trainer.py)
|
||||
- [Named entity recognition fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/token-classification/run_ner_no_trainer.py)
|
||||
- [Image classification fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/image-classification/run_image_classification_no_trainer.py)
|
||||
- [Summarization fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/summarization/run_summarization_no_trainer.py)
|
||||
- [End-to-end examples on how to use AWS SageMaker integration of Accelerate](https://github.com/huggingface/notebooks/blob/main/sagemaker/22_accelerate_sagemaker_examples/README.md)
|
||||
- [Megatron-LM examples for various NLP tasks](https://github.com/pacman100/accelerate-megatron-test)
|
||||
|
||||
## Integration Examples
|
||||
|
||||
These are tutorials from libraries that integrate with 🤗 Accelerate:
|
||||
|
||||
> Don't find your integration here? Make a PR to include it!
|
||||
|
||||
### Amphion
|
||||
- [Training Text-to-Speech Models with Amphion](https://github.com/open-mmlab/Amphion/blob/main/egs/tts/README.md)
|
||||
- [Training Singing Voice Conversion Models with Amphion](https://github.com/open-mmlab/Amphion/blob/main/egs/svc/README.md)
|
||||
- [Training Vocoders with Amphion](https://github.com/open-mmlab/Amphion/blob/main/egs/vocoder/README.md)
|
||||
|
||||
### Catalyst
|
||||
|
||||
- [Distributed training tutorial with Catalyst](https://catalyst-team.github.io/catalyst/tutorials/ddp.html)
|
||||
|
||||
### DALLE2-pytorch
|
||||
|
||||
- [Fine-tuning DALLE2](https://github.com/lucidrains/DALLE2-pytorch#usage)
|
||||
|
||||
### 🤗 diffusers
|
||||
|
||||
- [Performing textual inversion with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion)
|
||||
- [Training DreamBooth with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)
|
||||
|
||||
### fastai
|
||||
|
||||
- [Distributed training from Jupyter Notebooks with fastai](https://docs.fast.ai/tutorial.distributed.html)
|
||||
- [Basic distributed training examples with fastai](https://docs.fast.ai/examples/distributed_app_examples.html)
|
||||
|
||||
### GradsFlow
|
||||
|
||||
- [Auto Image Classification with GradsFlow](https://docs.gradsflow.com/en/latest/examples/nbs/01-ImageClassification/)
|
||||
|
||||
### imagen-pytorch
|
||||
|
||||
- [Fine-tuning Imagen](https://github.com/lucidrains/imagen-pytorch#usage)
|
||||
|
||||
### Kornia
|
||||
|
||||
- [Fine-tuning vision models with Kornia's Trainer](https://kornia.readthedocs.io/en/latest/get-started/training.html)
|
||||
|
||||
### PyTorch Accelerated
|
||||
|
||||
- [Quickstart distributed training tutorial with PyTorch Accelerated](https://pytorch-accelerated.readthedocs.io/en/latest/quickstart.html)
|
||||
|
||||
### PyTorch3D
|
||||
|
||||
- [Perform Deep Learning with 3D data](https://pytorch3d.org/tutorials/)
|
||||
|
||||
### Stable-Dreamfusion
|
||||
|
||||
- [Training with Stable-Dreamfusion to convert text to a 3D model](https://colab.research.google.com/drive/1MXT3yfOFvO0ooKEfiUUvTKwUkrrlCHpF?usp=sharing)
|
||||
|
||||
### Tez
|
||||
|
||||
- [Leaf disease detection with Tez and Accelerate](https://www.kaggle.com/code/abhishek/tez-faster-and-easier-training-for-leaf-detection/notebook)
|
||||
|
||||
### trlx
|
||||
|
||||
- [How to implement a sentiment learning task with trlx](https://github.com/CarperAI/trlx#example-how-to-add-a-task)
|
||||
|
||||
### Comfy-UI
|
||||
|
||||
- [Using large Stable Diffusion models in low-VRAM settings with Accelerate](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_management.py#L291-L296)
|
||||
|
||||
|
||||
## In Science
|
||||
|
||||
Below is a non-exhaustive list of papers utilizing 🤗 Accelerate.
|
||||
|
||||
> Don't find your paper here? Make a PR to include it!
|
||||
|
||||
* Yuval Kirstain, Adam Polyak, Uriel Singer, Shahbuland Matiana, Joe Penna, Omer Levy: “Pick-a-Pic: An Open Dataset of User Preferences for Text-to-Image Generation”, 2023; [arXiv:2305.01569](http://arxiv.org/abs/2305.01569).
|
||||
* Lei Wang, Wanyu Xu, Yihuai Lan, Zhiqiang Hu, Yunshi Lan, Roy Ka-Wei Lee, Ee-Peng Lim: “Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models”, 2023; [arXiv:2305.04091](http://arxiv.org/abs/2305.04091).
|
||||
* Arthur Câmara, Claudia Hauff: “Moving Stuff Around: A study on efficiency of moving documents into memory for Neural IR models”, 2022; [arXiv:2205.08343](http://arxiv.org/abs/2205.08343).
|
||||
* Ying Sheng, Lianmin Zheng, Binhang Yuan, Zhuohan Li, Max Ryabinin, Daniel Y. Fu, Zhiqiang Xie, Beidi Chen, Clark Barrett, Joseph E. Gonzalez, Percy Liang, Christopher Ré, Ion Stoica, Ce Zhang: “High-throughput Generative Inference of Large Language Models with a Single GPU”, 2023; [arXiv:2303.06865](http://arxiv.org/abs/2303.06865).
|
||||
* Peter Melchior, Yan Liang, ChangHoon Hahn, Andy Goulding: “Autoencoding Galaxy Spectra I: Architecture”, 2022; [arXiv:2211.07890](http://arxiv.org/abs/2211.07890).
|
||||
* Jiaao Chen, Aston Zhang, Mu Li, Alex Smola, Diyi Yang: “A Cheaper and Better Diffusion Language Model with Soft-Masked Noise”, 2023; [arXiv:2304.04746](http://arxiv.org/abs/2304.04746).
|
||||
* Ayaan Haque, Matthew Tancik, Alexei A. Efros, Aleksander Holynski, Angjoo Kanazawa: “Instruct-NeRF2NeRF: Editing 3D Scenes with Instructions”, 2023; [arXiv:2303.12789](http://arxiv.org/abs/2303.12789).
|
||||
* Luke Melas-Kyriazi, Christian Rupprecht, Iro Laina, Andrea Vedaldi: “RealFusion: 360° Reconstruction of Any Object from a Single Image”, 2023; [arXiv:2302.10663](http://arxiv.org/abs/2302.10663).
|
||||
* Xiaoshi Wu, Keqiang Sun, Feng Zhu, Rui Zhao, Hongsheng Li: “Better Aligning Text-to-Image Models with Human Preference”, 2023; [arXiv:2303.14420](http://arxiv.org/abs/2303.14420).
|
||||
* Yongliang Shen, Kaitao Song, Xu Tan, Dongsheng Li, Weiming Lu, Yueting Zhuang: “HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in HuggingFace”, 2023; [arXiv:2303.17580](http://arxiv.org/abs/2303.17580).
|
||||
* Yue Yang, Wenlin Yao, Hongming Zhang, Xiaoyang Wang, Dong Yu, Jianshu Chen: “Z-LaVI: Zero-Shot Language Solver Fueled by Visual Imagination”, 2022; [arXiv:2210.12261](http://arxiv.org/abs/2210.12261).
|
||||
* Sheng-Yen Chou, Pin-Yu Chen, Tsung-Yi Ho: “How to Backdoor Diffusion Models?”, 2022; [arXiv:2212.05400](http://arxiv.org/abs/2212.05400).
|
||||
* Junyoung Seo, Wooseok Jang, Min-Seop Kwak, Jaehoon Ko, Hyeonsu Kim, Junho Kim, Jin-Hwa Kim, Jiyoung Lee, Seungryong Kim: “Let 2D Diffusion Model Know 3D-Consistency for Robust Text-to-3D Generation”, 2023; [arXiv:2303.07937](http://arxiv.org/abs/2303.07937).
|
||||
* Or Patashnik, Daniel Garibi, Idan Azuri, Hadar Averbuch-Elor, Daniel Cohen-Or: “Localizing Object-level Shape Variations with Text-to-Image Diffusion Models”, 2023; [arXiv:2303.11306](http://arxiv.org/abs/2303.11306).
|
||||
* Dídac Surís, Sachit Menon, Carl Vondrick: “ViperGPT: Visual Inference via Python Execution for Reasoning”, 2023; [arXiv:2303.08128](http://arxiv.org/abs/2303.08128).
|
||||
* Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, Qifeng Chen: “FateZero: Fusing Attentions for Zero-shot Text-based Video Editing”, 2023; [arXiv:2303.09535](http://arxiv.org/abs/2303.09535).
|
||||
* Sean Welleck, Jiacheng Liu, Ximing Lu, Hannaneh Hajishirzi, Yejin Choi: “NaturalProver: Grounded Mathematical Proof Generation with Language Models”, 2022; [arXiv:2205.12910](http://arxiv.org/abs/2205.12910).
|
||||
* Elad Richardson, Gal Metzer, Yuval Alaluf, Raja Giryes, Daniel Cohen-Or: “TEXTure: Text-Guided Texturing of 3D Shapes”, 2023; [arXiv:2302.01721](http://arxiv.org/abs/2302.01721).
|
||||
* Puijin Cheng, Li Lin, Yijin Huang, Huaqing He, Wenhan Luo, Xiaoying Tang: “Learning Enhancement From Degradation: A Diffusion Model For Fundus Image Enhancement”, 2023; [arXiv:2303.04603](http://arxiv.org/abs/2303.04603).
|
||||
* Shun Shao, Yftah Ziser, Shay Cohen: “Erasure of Unaligned Attributes from Neural Representations”, 2023; [arXiv:2302.02997](http://arxiv.org/abs/2302.02997).
|
||||
* Seonghyeon Ye, Hyeonbin Hwang, Sohee Yang, Hyeongu Yun, Yireun Kim, Minjoon Seo: “In-Context Instruction Learning”, 2023; [arXiv:2302.14691](http://arxiv.org/abs/2302.14691).
|
||||
* Shikun Liu, Linxi Fan, Edward Johns, Zhiding Yu, Chaowei Xiao, Anima Anandkumar: “Prismer: A Vision-Language Model with An Ensemble of Experts”, 2023; [arXiv:2303.02506](http://arxiv.org/abs/2303.02506).
|
||||
* Haoyu Chen, Zhihua Wang, Yang Yang, Qilin Sun, Kede Ma: “Learning a Deep Color Difference Metric for Photographic Images”, 2023; [arXiv:2303.14964](http://arxiv.org/abs/2303.14964).
|
||||
* Van-Hoang Le, Hongyu Zhang: “Log Parsing with Prompt-based Few-shot Learning”, 2023; [arXiv:2302.07435](http://arxiv.org/abs/2302.07435).
|
||||
* Keito Kudo, Yoichi Aoki, Tatsuki Kuribayashi, Ana Brassard, Masashi Yoshikawa, Keisuke Sakaguchi, Kentaro Inui: “Do Deep Neural Networks Capture Compositionality in Arithmetic Reasoning?”, 2023; [arXiv:2302.07866](http://arxiv.org/abs/2302.07866).
|
||||
* Ruoyao Wang, Peter Jansen, Marc-Alexandre Côté, Prithviraj Ammanabrolu: “Behavior Cloned Transformers are Neurosymbolic Reasoners”, 2022; [arXiv:2210.07382](http://arxiv.org/abs/2210.07382).
|
||||
* Martin Wessel, Tomáš Horych, Terry Ruas, Akiko Aizawa, Bela Gipp, Timo Spinde: “Introducing MBIB -- the first Media Bias Identification Benchmark Task and Dataset Collection”, 2023; [arXiv:2304.13148](http://arxiv.org/abs/2304.13148). DOI: [10.1145/3539618.3591882](https://dx.doi.org/10.1145/3539618.3591882).
|
||||
* Hila Chefer, Yuval Alaluf, Yael Vinker, Lior Wolf, Daniel Cohen-Or: “Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models”, 2023; [arXiv:2301.13826](http://arxiv.org/abs/2301.13826).
|
||||
* Marcio Fonseca, Yftah Ziser, Shay B. Cohen: “Factorizing Content and Budget Decisions in Abstractive Summarization of Long Documents”, 2022; [arXiv:2205.12486](http://arxiv.org/abs/2205.12486).
|
||||
* Tianxing He, Jingyu Zhang, Tianle Wang, Sachin Kumar, Kyunghyun Cho, James Glass, Yulia Tsvetkov: “On the Blind Spots of Model-Based Evaluation Metrics for Text Generation”, 2022; [arXiv:2212.10020](http://arxiv.org/abs/2212.10020).
|
||||
* Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, Yoav Shoham: “In-Context Retrieval-Augmented Language Models”, 2023; [arXiv:2302.00083](http://arxiv.org/abs/2302.00083).
|
||||
* Dacheng Li, Rulin Shao, Hongyi Wang, Han Guo, Eric P. Xing, Hao Zhang: “MPCFormer: fast, performant and private Transformer inference with MPC”, 2022; [arXiv:2211.01452](http://arxiv.org/abs/2211.01452).
|
||||
* Baolin Peng, Michel Galley, Pengcheng He, Chris Brockett, Lars Liden, Elnaz Nouri, Zhou Yu, Bill Dolan, Jianfeng Gao: “GODEL: Large-Scale Pre-Training for Goal-Directed Dialog”, 2022; [arXiv:2206.11309](http://arxiv.org/abs/2206.11309).
|
||||
* Egil Rønningstad, Erik Velldal, Lilja Øvrelid: “Entity-Level Sentiment Analysis (ELSA): An exploratory task survey”, 2023, Proceedings of the 29th International Conference on Computational Linguistics, 2022, pages 6773-6783; [arXiv:2304.14241](http://arxiv.org/abs/2304.14241).
|
||||
* Charlie Snell, Ilya Kostrikov, Yi Su, Mengjiao Yang, Sergey Levine: “Offline RL for Natural Language Generation with Implicit Language Q Learning”, 2022; [arXiv:2206.11871](http://arxiv.org/abs/2206.11871).
|
||||
* Zhiruo Wang, Shuyan Zhou, Daniel Fried, Graham Neubig: “Execution-Based Evaluation for Open-Domain Code Generation”, 2022; [arXiv:2212.10481](http://arxiv.org/abs/2212.10481).
|
||||
* Minh-Long Luu, Zeyi Huang, Eric P. Xing, Yong Jae Lee, Haohan Wang: “Expeditious Saliency-guided Mix-up through Random Gradient Thresholding”, 2022; [arXiv:2212.04875](http://arxiv.org/abs/2212.04875).
|
||||
* Jun Hao Liew, Hanshu Yan, Daquan Zhou, Jiashi Feng: “MagicMix: Semantic Mixing with Diffusion Models”, 2022; [arXiv:2210.16056](http://arxiv.org/abs/2210.16056).
|
||||
* Yaqing Wang, Subhabrata Mukherjee, Xiaodong Liu, Jing Gao, Ahmed Hassan Awadallah, Jianfeng Gao: “LiST: Lite Prompted Self-training Makes Parameter-Efficient Few-shot Learners”, 2021; [arXiv:2110.06274](http://arxiv.org/abs/2110.06274).
|
||||
@ -1,117 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Example Zoo
|
||||
|
||||
Below contains a non-exhuastive list of tutorials and scripts showcasing Accelerate
|
||||
|
||||
## Official Accelerate Examples:
|
||||
|
||||
### Basic Examples
|
||||
|
||||
These examples showcase the base features of Accelerate and are a great starting point
|
||||
|
||||
- [Barebones NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py)
|
||||
- [Barebones distributed NLP example in a Jupyter Notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb)
|
||||
- [Barebones computer vision example](https://github.com/huggingface/accelerate/blob/main/examples/cv_example.py)
|
||||
- [Barebones distributed computer vision example in a Jupyter Notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_cv_example.ipynb)
|
||||
- [Using Accelerate in Kaggle](https://www.kaggle.com/code/muellerzr/multi-gpu-and-accelerate)
|
||||
|
||||
### Feature Specific Examples
|
||||
|
||||
These examples showcase specific features that the Accelerate framework offers
|
||||
|
||||
- [Automatic memory-aware gradient accumulation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/automatic_gradient_accumulation.py)
|
||||
- [Checkpointing states](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/checkpointing.py)
|
||||
- [Cross validation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/cross_validation.py)
|
||||
- [DeepSpeed](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/deepspeed_with_config_support.py)
|
||||
- [Fully Sharded Data Parallelism](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/fsdp_with_peak_mem_tracking.py)
|
||||
- [Gradient accumulation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/gradient_accumulation.py)
|
||||
- [Memory-aware batch size finder](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/memory.py)
|
||||
- [Metric Computation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/multi_process_metrics.py)
|
||||
- [Using Trackers](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/tracking.py)
|
||||
- [Using Megatron-LM](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/megatron_lm_gpt_pretraining.py)
|
||||
|
||||
### Full Examples
|
||||
|
||||
These examples showcase every feature in Accelerate at once that was shown in "Feature Specific Examples"
|
||||
|
||||
- [Complete NLP example](https://github.com/huggingface/accelerate/blob/main/examples/complete_nlp_example.py)
|
||||
- [Complete computer vision example](https://github.com/huggingface/accelerate/blob/main/examples/complete_cv_example.py)
|
||||
- [Causal language model fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_clm_no_trainer.py)
|
||||
- [Masked language model fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_no_trainer.py)
|
||||
- [Speech pretraining example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py)
|
||||
- [Translation fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/translation/run_translation_no_trainer.py)
|
||||
- [Text classification fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue_no_trainer.py)
|
||||
- [Semantic segmentation fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py)
|
||||
- [Question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_no_trainer.py)
|
||||
- [Beam search question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py)
|
||||
- [Multiple choice question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/multiple-choice/run_swag_no_trainer.py)
|
||||
- [Named entity recognition fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/token-classification/run_ner_no_trainer.py)
|
||||
- [Image classification fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/image-classification/run_image_classification_no_trainer.py)
|
||||
- [Summarization fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/summarization/run_summarization_no_trainer.py)
|
||||
- [End-to-end examples on how to use AWS SageMaker integration of Accelerate](https://github.com/huggingface/notebooks/blob/main/sagemaker/22_accelerate_sagemaker_examples/README.md)
|
||||
- [Megatron-LM examples for various NLp tasks](https://github.com/pacman100/accelerate-megatron-test)
|
||||
|
||||
## Integration Examples
|
||||
|
||||
These are tutorials from libraries that integrate with 🤗 Accelerate:
|
||||
|
||||
### Catalyst
|
||||
|
||||
- [Distributed training tutorial with Catalyst](https://catalyst-team.github.io/catalyst/tutorials/ddp.html)
|
||||
|
||||
### DALLE2-pytorch
|
||||
|
||||
- [Fine-tuning DALLE2](https://github.com/lucidrains/DALLE2-pytorch#usage)
|
||||
|
||||
### 🤗 diffusers
|
||||
|
||||
- [Performing textual inversion with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion)
|
||||
- [Training DreamBooth with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)
|
||||
|
||||
### fastai
|
||||
|
||||
- [Distributed training from Jupyter Notebooks with fastai](https://docs.fast.ai/tutorial.distributed.html)
|
||||
- [Basic distributed training examples with fastai](https://docs.fast.ai/examples/distributed_app_examples.html)
|
||||
|
||||
### GradsFlow
|
||||
|
||||
- [Auto Image Classification with GradsFlow](https://docs.gradsflow.com/en/latest/examples/nbs/01-ImageClassification/)
|
||||
|
||||
### imagen-pytorch
|
||||
|
||||
- [Fine-tuning Imagen](https://github.com/lucidrains/imagen-pytorch#usage)
|
||||
|
||||
### Kornia
|
||||
|
||||
- [Fine-tuning vision models with Kornia's Trainer](https://kornia.readthedocs.io/en/latest/get-started/training.html)
|
||||
|
||||
### PyTorch Accelerated
|
||||
|
||||
- [Quickstart distributed training tutorial with PyTorch Accelerated](https://pytorch-accelerated.readthedocs.io/en/latest/quickstart.html)
|
||||
|
||||
### PyTorch3D
|
||||
|
||||
- [Perform Deep Learning with 3D data](https://pytorch3d.org/tutorials/)
|
||||
|
||||
### Stable-Dreamfusion
|
||||
|
||||
- [Training with Stable-Dreamfusion to convert text to a 3D model](https://colab.research.google.com/drive/1MXT3yfOFvO0ooKEfiUUvTKwUkrrlCHpF?usp=sharing)
|
||||
|
||||
### Tez
|
||||
|
||||
- [Leaf disease detection with Tez and Accelerate](https://www.kaggle.com/code/abhishek/tez-faster-and-easier-training-for-leaf-detection/notebook)
|
||||
|
||||
### trlx
|
||||
|
||||
- [How to implement a sentiment learning task with trlx](https://github.com/CarperAI/trlx#example-how-to-add-a-task)
|
||||
@ -51,22 +51,22 @@ To run it in each of these various modes, use the following commands:
|
||||
python ./nlp_example.py # from a server with a GPU
|
||||
```
|
||||
- with fp16 (mixed-precision)
|
||||
* from any server by passing `fp16=True` to the `Accelerator`.
|
||||
* from any server by passing `mixed_precision=fp16` to the `Accelerator`.
|
||||
```bash
|
||||
python ./nlp_example.py --fp16
|
||||
python ./nlp_example.py --mixed_precision fp16
|
||||
```
|
||||
* from any server with Accelerate launcher
|
||||
```bash
|
||||
accelerate launch --fp16 ./nlp_example.py
|
||||
accelerate launch --mixed_precision fp16 ./nlp_example.py
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher
|
||||
```bash
|
||||
accelerate config # This will create a config file on your server
|
||||
accelerate launch ./nlp_example.py # This will run the script on your server
|
||||
```
|
||||
* With traditional PyTorch launcher
|
||||
* With traditional PyTorch launcher (`python -m torch.distributed.run` can be used instead of `torchrun`)
|
||||
```bash
|
||||
python -m torch.distributed.launch --nproc_per_node 2 --use_env ./nlp_example.py
|
||||
torchrun --nproc_per_node 2 ./nlp_example.py
|
||||
```
|
||||
- multi GPUs, multi node (several machines, using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher, on each machine:
|
||||
@ -74,18 +74,15 @@ To run it in each of these various modes, use the following commands:
|
||||
accelerate config # This will create a config file on each server
|
||||
accelerate launch ./nlp_example.py # This will run the script on each server
|
||||
```
|
||||
* With PyTorch launcher only
|
||||
* With PyTorch launcher only (`python -m torch.distributed.run` can be used instead of `torchrun`). Run this command on each node:
|
||||
```bash
|
||||
python -m torch.distributed.launch --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 0 \
|
||||
--master_addr master_node_ip_address \
|
||||
./nlp_example.py # On the first server
|
||||
python -m torch.distributed.launch --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 1 \
|
||||
--master_addr master_node_ip_address \
|
||||
./nlp_example.py # On the second server
|
||||
torchrun \ # python -m torch.distributed.run
|
||||
--nproc_per_node 2 \
|
||||
--nnodes 2 \
|
||||
--rdzv_id 2299 \ # A unique job id
|
||||
--rdzv_backend c10d \
|
||||
--rdzv_endpoint master_node_ip_address:29500 \
|
||||
./nlp_example.py
|
||||
```
|
||||
- (multi) TPUs
|
||||
* With Accelerate config and launcher
|
||||
@ -139,47 +136,44 @@ To run it in each of these various modes, use the following commands:
|
||||
python ./cv_example.py # from a server with a GPU
|
||||
```
|
||||
- with fp16 (mixed-precision)
|
||||
* from any server by passing `fp16=True` to the `Accelerator`.
|
||||
* from any server by passing `mixed_precision=fp16` to the `Accelerator`.
|
||||
```bash
|
||||
python ./cv_example.py --data_dir path_to_data --fp16
|
||||
python ./cv_example.py --data_dir path_to_data --mixed_precision fp16
|
||||
```
|
||||
* from any server with Accelerate launcher
|
||||
```bash
|
||||
accelerate launch --fp16 ./cv_example.py --data_dir path_to_data
|
||||
accelerate launch --mixed_precision fp16 ./cv_example.py --data_dir path_to_data
|
||||
- multi GPUs (using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher
|
||||
```bash
|
||||
accelerate config # This will create a config file on your server
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on your server
|
||||
accelerate config --config_file config.yaml # This will create a config file on your server to `config.yaml`
|
||||
accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on your server
|
||||
```
|
||||
* With traditional PyTorch launcher
|
||||
* With traditional PyTorch launcher (`python -m torch.distributed.run` can be used instead of `torchrun`)
|
||||
```bash
|
||||
python -m torch.distributed.launch --nproc_per_node 2 --use_env ./cv_example.py --data_dir path_to_data
|
||||
torchrun --nproc_per_node 2 ./cv_example.py --data_dir path_to_data
|
||||
```
|
||||
- multi GPUs, multi node (several machines, using PyTorch distributed mode)
|
||||
* With Accelerate config and launcher, on each machine:
|
||||
```bash
|
||||
accelerate config # This will create a config file on each server
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
accelerate config --config_file config.yaml # This will create a config file on your server to `config.yaml`
|
||||
accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
```
|
||||
* With PyTorch launcher only
|
||||
* With PyTorch launcher only (`python -m torch.distributed.run` can be used instead of `torchrun`). Run this command on each node:
|
||||
```bash
|
||||
python -m torch.distributed.launch --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 0 \
|
||||
--master_addr master_node_ip_address \
|
||||
./cv_example.py --data_dir path_to_data # On the first server
|
||||
python -m torch.distributed.launch --nproc_per_node 2 \
|
||||
--use_env \
|
||||
--node_rank 1 \
|
||||
--master_addr master_node_ip_address \
|
||||
./cv_example.py --data_dir path_to_data # On the second server
|
||||
torchrun \ # python -m torch.distributed.run
|
||||
--nproc_per_node 2 \
|
||||
--nnodes 2 \
|
||||
--rdzv_id 2299 \ # A unique job id
|
||||
--rdzv_backend c10d \
|
||||
--rdzv_endpoint master_node_ip_address:29500 \
|
||||
./cv_example.py --data_dir path_to_data
|
||||
```
|
||||
- (multi) TPUs
|
||||
* With Accelerate config and launcher
|
||||
```bash
|
||||
accelerate config # This will create a config file on your TPU server
|
||||
accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
accelerate config --config_file config.yaml # This will create a config file on your server to `config.yaml`
|
||||
accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on each server
|
||||
```
|
||||
* In PyTorch:
|
||||
Add an `xmp.spawn` line in your script as you usually do.
|
||||
@ -190,7 +184,29 @@ To run it in each of these various modes, use the following commands:
|
||||
|
||||
### Using AWS SageMaker integration
|
||||
- [Examples showcasing AWS SageMaker integration of 🤗 Accelerate.](https://github.com/pacman100/accelerate-aws-sagemaker)
|
||||
|
||||
|
||||
|
||||
## Simple Multi-GPU Hardware Launcher
|
||||
|
||||
[multigpu_remote_launcher.py](./multigpu_remote_launcher.py) is a minimal script that demonstrates launching accelerate
|
||||
on multiple remote GPUs, and with automatic hardware environment and dependency setup for reproducibility. You can
|
||||
easily customize the training function used, training arguments, hyperparameters, and type of compute hardware, and then
|
||||
run the script to automatically launch multi GPU training on remote hardware.
|
||||
|
||||
This script uses [Runhouse](https://github.com/run-house/runhouse) to launch on self-hosted hardware (e.g. in your own
|
||||
cloud account or on-premise cluster) but there are other options for running remotely as well. Runhouse can be installed
|
||||
with `pip install runhouse`, and you can refer to
|
||||
[hardware setup](https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup)
|
||||
for hardware setup instructions, or this
|
||||
[Colab tutorial](https://colab.research.google.com/drive/1qVwYyLTCPYPSdz9ZX7BZl9Qm0A3j7RJe) for a more in-depth walkthrough.
|
||||
|
||||
## SLURM Scripts
|
||||
In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) and [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we present two scripts for running the examples on a machine with [SLURM](https://slurm.schedmd.com/documentation.html) workload manager.
|
||||
|
||||
In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) the only parameter in the launcher that needs to be modified is `--num_processes`, which determines the number of GPUs we will use. In this case, using the environment variable `$SLURM_GPUS`, we indicate that we want to utilize all the GPUs available on the node we have requested.
|
||||
|
||||
In [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many GPUs we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), the `--main_process_ip`, which will be the address of the master node, and the `--main_process_port`.
|
||||
|
||||
## Finer Examples
|
||||
|
||||
While the first two scripts are extremely barebones when it comes to what you can do with accelerate, more advanced features are documented in two other locations.
|
||||
|
||||
@ -19,7 +19,7 @@ Adjustments to each script from the base `nlp_example.py` file can be found quic
|
||||
|
||||
All following scripts also accept these arguments in addition to their added ones.
|
||||
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.run`), such as:
|
||||
|
||||
```bash
|
||||
accelerate launch ../nlp_example.py --mixed_precision fp16 --cpu 0
|
||||
@ -34,7 +34,7 @@ accelerate launch ../nlp_example.py --mixed_precision fp16 --cpu 0
|
||||
- `output_dir`, where saved state folders should be saved to, default is current working directory
|
||||
- `resume_from_checkpoint`, what checkpoint folder to resume from. ("epoch_0", "step_22", ...)
|
||||
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.run`), such as:
|
||||
|
||||
(Note, `resume_from_checkpoint` assumes that we've run the script for one epoch with the `--checkpointing_steps epoch` flag)
|
||||
|
||||
@ -48,7 +48,7 @@ accelerate launch ./checkpointing.py --checkpointing_steps epoch output_dir "che
|
||||
- Arguments available:
|
||||
- `num_folds`, the number of folds the training dataset should be split into.
|
||||
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.run`), such as:
|
||||
|
||||
```bash
|
||||
accelerate launch ./cross_validation.py --num_folds 2
|
||||
@ -61,7 +61,7 @@ accelerate launch ./cross_validation.py --num_folds 2
|
||||
- Arguments available:
|
||||
- `with_tracking`, whether to load in all available experiment trackers from the environment.
|
||||
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.run`), such as:
|
||||
|
||||
```bash
|
||||
accelerate launch ./tracking.py --with_tracking
|
||||
@ -73,8 +73,19 @@ accelerate launch ./tracking.py --with_tracking
|
||||
- Arguments available:
|
||||
- `gradient_accumulation_steps`, the number of steps to perform before the gradients are accumulated and the optimizer and scheduler are stepped + zero_grad
|
||||
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as:
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.run`), such as:
|
||||
|
||||
```bash
|
||||
accelerate launch ./gradient_accumulation.py --gradient_accumulation_steps 5
|
||||
```
|
||||
```
|
||||
|
||||
### LocalSGD (`local_sgd.py`)
|
||||
- Shows how to use `Accelerator.no_sync` to prevent gradient averaging in a distributed setup. However, unlike gradient accumulation, this method does not change the effective batch size. Local SGD can be combined with gradient accumulation.
|
||||
|
||||
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.run`), such as:
|
||||
|
||||
```bash
|
||||
accelerate launch ./local_sgd.py --local_sgd_steps 4
|
||||
```
|
||||
|
||||
|
||||
|
||||
@ -14,17 +14,17 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import torch
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
# New Code #
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.utils import find_executable_batch_size
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator
|
||||
from accelerate.utils import find_executable_batch_size
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate,
|
||||
@ -84,10 +84,20 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -214,8 +224,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -15,15 +15,15 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate,
|
||||
@ -86,9 +86,22 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -203,13 +216,15 @@ def training_function(config, args):
|
||||
# Now we train the model
|
||||
for epoch in range(starting_epoch, num_epochs):
|
||||
model.train()
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
# New Code #
|
||||
# We need to skip steps until we reach the resumed step during the first epoch
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch:
|
||||
if resume_step is not None and step < resume_step:
|
||||
overall_step += 1
|
||||
continue
|
||||
# New Code #
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
|
||||
# We need to skip steps until we reach the resumed step
|
||||
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
|
||||
overall_step += resume_step
|
||||
else:
|
||||
# After the first iteration though, we need to go back to the original dataloader
|
||||
active_dataloader = train_dataloader
|
||||
for step, batch in enumerate(active_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
outputs = model(**batch)
|
||||
@ -269,8 +284,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -15,20 +15,20 @@
|
||||
import argparse
|
||||
from typing import List
|
||||
|
||||
import evaluate
|
||||
import numpy as np
|
||||
import torch
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import DatasetDict, load_dataset
|
||||
|
||||
# New Code #
|
||||
# We'll be using StratifiedKFold for this example
|
||||
from sklearn.model_selection import StratifiedKFold
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate,
|
||||
@ -106,9 +106,22 @@ def get_fold_dataloaders(
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -250,8 +263,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -31,16 +31,12 @@ import random
|
||||
from itertools import chain
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import datasets
|
||||
import torch
|
||||
import transformers
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.logging import get_logger
|
||||
from accelerate.utils import DummyOptim, DummyScheduler, set_seed
|
||||
from datasets import load_dataset
|
||||
from huggingface_hub import Repository
|
||||
from torch.utils.data import DataLoader
|
||||
from tqdm.auto import tqdm
|
||||
from transformers import (
|
||||
CONFIG_MAPPING,
|
||||
@ -55,6 +51,10 @@ from transformers import (
|
||||
from transformers.utils import get_full_repo_name
|
||||
from transformers.utils.versions import require_version
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.logging import get_logger
|
||||
from accelerate.utils import DummyOptim, DummyScheduler, set_seed
|
||||
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
@ -220,7 +220,7 @@ def parse_args():
|
||||
default="all",
|
||||
help=(
|
||||
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
|
||||
' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
|
||||
' `"wandb"`, `"comet_ml"`, and `"dvclive"`. Use `"all"` (default) to report to all integrations.'
|
||||
"Only applicable when `--with_tracking` is passed."
|
||||
),
|
||||
)
|
||||
@ -243,39 +243,6 @@ def parse_args():
|
||||
return args
|
||||
|
||||
|
||||
# New Code #
|
||||
def checkpoint_model(checkpoint_folder, ckpt_id, model, epoch, last_global_step, **kwargs):
|
||||
"""Utility function for checkpointing model + optimizer dictionaries
|
||||
The main purpose for this is to be able to resume training from that instant again
|
||||
"""
|
||||
checkpoint_state_dict = {
|
||||
"epoch": epoch,
|
||||
"last_global_step": last_global_step,
|
||||
}
|
||||
# Add extra kwargs too
|
||||
checkpoint_state_dict.update(kwargs)
|
||||
|
||||
success = model.save_checkpoint(checkpoint_folder, ckpt_id, checkpoint_state_dict)
|
||||
status_msg = f"checkpointing: checkpoint_folder={checkpoint_folder}, ckpt_id={ckpt_id}"
|
||||
if success:
|
||||
logging.info(f"Success {status_msg}")
|
||||
else:
|
||||
logging.warning(f"Failure {status_msg}")
|
||||
return
|
||||
|
||||
|
||||
# New Code #
|
||||
def load_training_checkpoint(model, load_dir, tag=None, **kwargs):
|
||||
"""Utility function for checkpointing model + optimizer dictionaries
|
||||
The main purpose for this is to be able to resume training from that instant again
|
||||
"""
|
||||
_, checkpoint_state_dict = model.load_checkpoint(load_dir, tag=tag, **kwargs)
|
||||
epoch = checkpoint_state_dict["epoch"]
|
||||
last_global_step = checkpoint_state_dict["last_global_step"]
|
||||
del checkpoint_state_dict
|
||||
return (epoch, last_global_step)
|
||||
|
||||
|
||||
# New Code #
|
||||
def evaluate(args, model, eval_dataloader, accelerator, eval_dataset):
|
||||
model.eval()
|
||||
@ -302,9 +269,20 @@ def main():
|
||||
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
|
||||
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
|
||||
# in the environment
|
||||
|
||||
# when using DeepSpeed, the `gradient_accumulation_steps` is properly set from the DeepSpeed plugin/config
|
||||
# or from `accelerate launch` via `--gradient_accumulation_steps` else
|
||||
# defaulting to the passed `args.gradient_accumulation_steps`
|
||||
accelerator = (
|
||||
Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator()
|
||||
Accelerator(
|
||||
log_with=args.report_to,
|
||||
project_dir=args.output_dir,
|
||||
gradient_accumulation_steps=args.gradient_accumulation_steps,
|
||||
)
|
||||
if args.with_tracking
|
||||
else Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps)
|
||||
)
|
||||
|
||||
# Make one log on every process with the configuration for debugging.
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
||||
@ -538,17 +516,11 @@ def main():
|
||||
model.tie_weights()
|
||||
|
||||
# Scheduler and math around the number of training steps.
|
||||
|
||||
# New Code
|
||||
# Get gradient accumulation steps from deepspeed config if available
|
||||
if accelerator.state.deepspeed_plugin is not None:
|
||||
args.gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
|
||||
"gradient_accumulation_steps"
|
||||
]
|
||||
|
||||
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
||||
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / accelerator.gradient_accumulation_steps)
|
||||
overrode_max_train_steps = False
|
||||
if args.max_train_steps is None:
|
||||
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
||||
overrode_max_train_steps = True
|
||||
else:
|
||||
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
|
||||
|
||||
@ -575,16 +547,16 @@ def main():
|
||||
)
|
||||
|
||||
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
|
||||
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
||||
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
||||
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / accelerator.gradient_accumulation_steps)
|
||||
if overrode_max_train_steps:
|
||||
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
||||
# Afterwards we recalculate our number of training epochs
|
||||
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
|
||||
|
||||
# Figure out how many steps we should save the Accelerator states
|
||||
if hasattr(args.checkpointing_steps, "isdigit"):
|
||||
checkpointing_steps = args.checkpointing_steps
|
||||
if args.checkpointing_steps.isdigit():
|
||||
checkpointing_steps = int(args.checkpointing_steps)
|
||||
else:
|
||||
checkpointing_steps = None
|
||||
checkpointing_steps = args.checkpointing_steps
|
||||
if checkpointing_steps is not None and checkpointing_steps.isdigit():
|
||||
checkpointing_steps = int(checkpointing_steps)
|
||||
|
||||
# We need to initialize the trackers we use, and also store our configuration.
|
||||
# The trackers initializes automatically on the main process.
|
||||
@ -595,14 +567,16 @@ def main():
|
||||
accelerator.init_trackers("clm_no_trainer", experiment_config)
|
||||
|
||||
# Train!
|
||||
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
|
||||
total_batch_size = (
|
||||
args.per_device_train_batch_size * accelerator.num_processes * accelerator.gradient_accumulation_steps
|
||||
)
|
||||
|
||||
logger.info("***** Running training *****")
|
||||
logger.info(f" Num examples = {len(train_dataset)}")
|
||||
logger.info(f" Num Epochs = {args.num_train_epochs}")
|
||||
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
|
||||
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
|
||||
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
|
||||
logger.info(f" Gradient Accumulation steps = {accelerator.gradient_accumulation_steps}")
|
||||
logger.info(f" Total optimization steps = {args.max_train_steps}")
|
||||
# Only show the progress bar once on each machine.
|
||||
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
|
||||
@ -613,45 +587,61 @@ def main():
|
||||
|
||||
# Potentially load in the weights and states from a previous save
|
||||
if args.resume_from_checkpoint:
|
||||
# New Code #
|
||||
# Loads the DeepSpeed checkpoint from the specified path
|
||||
_, last_global_step = load_training_checkpoint(
|
||||
model,
|
||||
args.resume_from_checkpoint,
|
||||
**{"load_optimizer_states": True, "load_lr_scheduler_states": True},
|
||||
)
|
||||
accelerator.load_state(args.resume_from_checkpoint)
|
||||
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
|
||||
resume_step = last_global_step
|
||||
starting_epoch = resume_step // len(train_dataloader)
|
||||
resume_step -= starting_epoch * len(train_dataloader)
|
||||
path = os.path.basename(args.resume_from_checkpoint)
|
||||
training_difference = os.path.splitext(path)[0]
|
||||
|
||||
if "epoch" in training_difference:
|
||||
starting_epoch = int(training_difference.replace("epoch_", "")) + 1
|
||||
resume_step = None
|
||||
completed_steps = starting_epoch * num_update_steps_per_epoch
|
||||
else:
|
||||
resume_step = int(training_difference.replace("step_", ""))
|
||||
starting_epoch = resume_step // num_update_steps_per_epoch
|
||||
resume_step -= starting_epoch * num_update_steps_per_epoch
|
||||
completed_steps = resume_step
|
||||
|
||||
# update progress bar if resumed from checkpoint
|
||||
progress_bar.update(completed_steps)
|
||||
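A worked example of the resume bookkeeping above, with illustrative numbers (not part of the diff):

# Suppose each epoch contains 100 optimizer updates and we resume from "step_250".
num_update_steps_per_epoch = 100
resume_step = 250                                            # parsed from the folder name
starting_epoch = resume_step // num_update_steps_per_epoch   # -> 2
resume_step -= starting_epoch * num_update_steps_per_epoch   # -> 50
completed_steps = resume_step                                # -> 50
# A folder named "epoch_3" instead restarts cleanly at epoch 4 with resume_step = None;
# in the loop below, `accelerator.skip_first_batches` fast-forwards the resumed epoch.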
|
||||
for epoch in range(starting_epoch, args.num_train_epochs):
|
||||
model.train()
|
||||
if args.with_tracking:
|
||||
total_loss = 0
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
|
||||
# use the new `skip_first_batches` helper to skip already-seen batches when resuming from a checkpoint
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
|
||||
# We need to skip steps until we reach the resumed step
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch:
|
||||
if resume_step is not None and step < resume_step:
|
||||
completed_steps += 1
|
||||
continue
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
# We keep track of the loss at each epoch
|
||||
if args.with_tracking:
|
||||
total_loss += loss.detach().float()
|
||||
loss = loss / args.gradient_accumulation_steps
|
||||
accelerator.backward(loss)
|
||||
if (step + 1) % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
|
||||
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
|
||||
else:
|
||||
# After the first iteration though, we need to go back to the original dataloader
|
||||
active_dataloader = train_dataloader
|
||||
for step, batch in enumerate(active_dataloader):
|
||||
# In particular, DeepSpeed handles `gradient_accumulation` via `DeepSpeedEngine`.
|
||||
# Below, we use `accelerator.accumulate` if the user
|
||||
# wants to switch to other approaches such as plain DDP, PyTorch FSDP ...
|
||||
# This avoids having to change any code as things are all handled across different distributed setups.
|
||||
with accelerator.accumulate(model):
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
progress_bar.update(1)
|
||||
completed_steps += 1
|
||||
|
||||
if accelerator.sync_gradients:
|
||||
progress_bar.update(1)
|
||||
completed_steps += 1
|
||||
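To make the accumulation pattern above easier to lift out of this script, here is a minimal sketch, assuming `model`, `optimizer`, `lr_scheduler` and the dataloader have already been passed through `accelerator.prepare`:

for batch in active_dataloader:
    with accelerator.accumulate(model):
        loss = model(**batch).loss
        accelerator.backward(loss)      # handles loss scaling for accumulation internally
        optimizer.step()                # only performs a real update on sync boundaries
        lr_scheduler.step()
        optimizer.zero_grad()
    if accelerator.sync_gradients:      # True exactly when an optimizer update happened
        completed_steps += 1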
|
||||
# We keep track of the loss at each epoch
|
||||
if args.with_tracking:
|
||||
step_loss = accelerator.reduce(loss.detach().clone()).item()
|
||||
total_loss += step_loss
|
||||
|
||||
if isinstance(checkpointing_steps, int):
|
||||
if completed_steps % checkpointing_steps == 0:
|
||||
output_dir = f"step_{completed_steps }"
|
||||
output_dir = f"step_{completed_steps}"
|
||||
if args.output_dir is not None:
|
||||
output_dir = os.path.join(args.output_dir, output_dir)
|
||||
accelerator.save_state(output_dir)
|
||||
@ -666,34 +656,29 @@ def main():
|
||||
{
|
||||
"perplexity": perplexity,
|
||||
"eval_loss": eval_loss,
|
||||
"train_loss": total_loss.item() / len(train_dataloader),
|
||||
"train_loss": total_loss / len(train_dataloader),
|
||||
"epoch": epoch,
|
||||
"step": completed_steps,
|
||||
},
|
||||
step=completed_steps,
|
||||
)
|
||||
|
||||
# New Code #
|
||||
# Save the DeepSpeed checkpoint to the specified path
|
||||
checkpoint_model(args.output_dir, epoch, model, epoch, completed_steps)
|
||||
if isinstance(checkpointing_steps, str) and checkpointing_steps == "epoch":
|
||||
accelerator.save_state(os.path.join(args.output_dir, f"epoch_{epoch}"))
|
||||
|
||||
# New Code #
|
||||
# Tracks the best checkpoint and best metric
|
||||
if best_metric is None or best_metric > perplexity:
|
||||
best_metric = perplexity
|
||||
best_metric_checkpoint = os.path.join(args.output_dir, str(epoch))
|
||||
best_metric_checkpoint = os.path.join(args.output_dir, "best_checkpoint")
|
||||
accelerator.save_state(best_metric_checkpoint)
|
||||
accelerator.print(f"New best metric: {best_metric} at epoch {epoch}")
|
||||
accelerator.print(f"best_metric_checkpoint: {best_metric_checkpoint}")
|
||||
|
||||
# New Code #
|
||||
# Loads the best checkpoint after the training is finished
|
||||
if args.load_best_model:
|
||||
_, last_global_step = load_training_checkpoint(
|
||||
model,
|
||||
"/".join(best_metric_checkpoint.split("/")[:-1]),
|
||||
tag=best_metric_checkpoint.split("/")[-1],
|
||||
**{"load_optimizer_states": True, "load_lr_scheduler_states": True},
|
||||
)
|
||||
accelerator.load_state(best_metric_checkpoint)
|
||||
|
||||
# New Code #
|
||||
# Evaluates using the best checkpoint
|
||||
|
||||
examples/by_feature/early_stopping.py (new file, 246 lines)
@ -0,0 +1,246 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate
|
||||
# specifically showcasing how to perform early stopping,
|
||||
# and builds off the `nlp_example.py` script
|
||||
#
|
||||
# This example trains a Bert base model on GLUE MRPC
|
||||
# in any of the following settings (with the same script):
|
||||
# - single CPU or single GPU
|
||||
# - multi GPUS (using PyTorch distributed mode)
|
||||
# - (multi) TPUs
|
||||
# - fp16 (mixed-precision) or fp32 (normal precision)
|
||||
#
|
||||
# To run it in each of these various modes, follow the instructions
|
||||
# in the readme for examples:
|
||||
# https://github.com/huggingface/accelerate/tree/main/examples
|
||||
#
|
||||
########################################################################
|
||||
|
||||
|
||||
MAX_GPU_BATCH_SIZE = 16
|
||||
EVAL_BATCH_SIZE = 32
|
||||
|
||||
|
||||
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
"""
|
||||
Creates a set of `DataLoader`s for the `glue` dataset,
|
||||
using "bert-base-cased" as the tokenizer.
|
||||
|
||||
Args:
|
||||
accelerator (`Accelerator`):
|
||||
An `Accelerator` object
|
||||
batch_size (`int`, *optional*):
|
||||
The batch size for the train and validation DataLoaders.
|
||||
"""
|
||||
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
|
||||
datasets = load_dataset("glue", "mrpc")
|
||||
|
||||
def tokenize_function(examples):
|
||||
# max_length=None => use the model max length (it's actually the default)
|
||||
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
|
||||
return outputs
|
||||
|
||||
# Apply the method we just defined to all the examples in all the splits of the dataset
|
||||
# starting with the main process first:
|
||||
with accelerator.main_process_first():
|
||||
tokenized_datasets = datasets.map(
|
||||
tokenize_function,
|
||||
batched=True,
|
||||
remove_columns=["idx", "sentence1", "sentence2"],
|
||||
)
|
||||
|
||||
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
|
||||
# transformers library
|
||||
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
|
||||
)
|
||||
eval_dataloader = DataLoader(
|
||||
tokenized_datasets["validation"],
|
||||
shuffle=False,
|
||||
collate_fn=collate_fn,
|
||||
batch_size=EVAL_BATCH_SIZE,
|
||||
drop_last=(accelerator.mixed_precision == "fp8"),
|
||||
)
|
||||
|
||||
return train_dataloader, eval_dataloader
|
||||
|
||||
|
||||
# New code
|
||||
class EarlyStoppingCallback:
|
||||
"A callback class that helps with early stopping"
|
||||
|
||||
def __init__(self, min_delta=0, patience=5):
|
||||
self.min_delta = min_delta
|
||||
self.patience = patience
|
||||
self.counter = 0
|
||||
self.lowest_loss = float("inf")
|
||||
|
||||
def check_early_stopping(self, eval_loss):
|
||||
delta = self.lowest_loss - eval_loss
|
||||
if delta >= self.min_delta:
|
||||
self.lowest_loss = eval_loss
|
||||
self.counter = 0
|
||||
else:
|
||||
self.counter += 1
|
||||
if self.counter >= self.patience:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
callback = EarlyStoppingCallback()
|
||||
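Worth noting about the module-level `callback` above: the loss check itself runs per process, and it is the `accelerator.set_trigger()` / `accelerator.check_trigger()` pair used inside the training loop below that turns it into a global decision, so every rank breaks on the same step instead of some ranks hanging in collective ops. A stricter configuration is just a matter of the constructor arguments (illustrative values only):

callback = EarlyStoppingCallback(min_delta=1e-3, patience=2)  # stop after 2 non-improving checks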
|
||||
|
||||
def training_function(config, args):
|
||||
# Initialize accelerator
|
||||
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
|
||||
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
|
||||
lr = config["lr"]
|
||||
num_epochs = int(config["num_epochs"])
|
||||
seed = int(config["seed"])
|
||||
batch_size = int(config["batch_size"])
|
||||
|
||||
metric = evaluate.load("glue", "mrpc")
|
||||
|
||||
# If the batch size is too big we use gradient accumulation
|
||||
gradient_accumulation_steps = 1
|
||||
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
|
||||
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
|
||||
batch_size = MAX_GPU_BATCH_SIZE
|
||||
|
||||
set_seed(seed)
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
|
||||
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
|
||||
|
||||
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
|
||||
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
|
||||
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
|
||||
model = model.to(accelerator.device)
|
||||
# Instantiate optimizer
|
||||
optimizer = AdamW(params=model.parameters(), lr=lr)
|
||||
|
||||
# Instantiate scheduler
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=100,
|
||||
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
|
||||
)
|
||||
|
||||
# Prepare everything
|
||||
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
|
||||
# prepare method.
|
||||
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
|
||||
# Now we train the model
|
||||
for epoch in range(num_epochs):
|
||||
model.train()
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
loss = loss / gradient_accumulation_steps
|
||||
accelerator.backward(loss)
|
||||
if step % gradient_accumulation_steps == 0:
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
# New code
|
||||
# Check if we should stop the training on any processes
|
||||
if callback.check_early_stopping(loss.item()):
|
||||
accelerator.set_trigger()
|
||||
|
||||
# If so, we break the loop
|
||||
if accelerator.check_trigger():
|
||||
break
|
||||
|
||||
model.eval()
|
||||
for step, batch in enumerate(eval_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch)
|
||||
predictions = outputs.logits.argmax(dim=-1)
|
||||
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
|
||||
metric.add_batch(
|
||||
predictions=predictions,
|
||||
references=references,
|
||||
)
|
||||
|
||||
eval_metric = metric.compute()
|
||||
|
||||
# Use accelerator.print to print only on the main process.
|
||||
accelerator.print(f"epoch {epoch}:", eval_metric)
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Simple example of training script.")
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
)
|
||||
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
|
||||
args = parser.parse_args()
|
||||
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
|
||||
training_function(config, args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@ -15,14 +15,23 @@
|
||||
import argparse
|
||||
import gc
|
||||
import os
|
||||
|
||||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
import threading
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
import psutil
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
from torch.distributed.fsdp.fully_sharded_data_parallel import FullOptimStateDictConfig, FullStateDictConfig
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import (
|
||||
AutoModelForSequenceClassification,
|
||||
AutoTokenizer,
|
||||
get_linear_schedule_with_warmup,
|
||||
set_seed,
|
||||
)
|
||||
|
||||
from accelerate import Accelerator, DistributedType, FullyShardedDataParallelPlugin
|
||||
from accelerate.utils import is_npu_available, is_xpu_available
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -60,18 +69,65 @@ def b2mb(x):
|
||||
class TorchTracemalloc:
|
||||
def __enter__(self):
|
||||
gc.collect()
|
||||
torch.cuda.empty_cache()
|
||||
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
|
||||
self.begin = torch.cuda.memory_allocated()
|
||||
if torch.cuda.is_available():
|
||||
torch.cuda.empty_cache()
|
||||
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
|
||||
self.begin = torch.cuda.memory_allocated()
|
||||
elif is_xpu_available():
|
||||
torch.xpu.empty_cache()
|
||||
torch.xpu.reset_max_memory_allocated() # reset the peak gauge to zero
|
||||
self.begin = torch.xpu.memory_allocated()
|
||||
elif is_npu_available():
|
||||
torch.npu.empty_cache()
|
||||
torch.npu.reset_max_memory_allocated() # reset the peak gauge to zero
|
||||
self.begin = torch.npu.memory_allocated()
|
||||
self.process = psutil.Process()
|
||||
|
||||
self.cpu_begin = self.cpu_mem_used()
|
||||
self.peak_monitoring = True
|
||||
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
|
||||
peak_monitor_thread.daemon = True
|
||||
peak_monitor_thread.start()
|
||||
return self
|
||||
|
||||
def cpu_mem_used(self):
|
||||
"""get resident set size memory for the current process"""
|
||||
return self.process.memory_info().rss
|
||||
|
||||
def peak_monitor_func(self):
|
||||
self.cpu_peak = -1
|
||||
|
||||
while True:
|
||||
self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak)
|
||||
|
||||
# can't sleep or will not catch the peak right (this comment is here on purpose)
|
||||
# time.sleep(0.001) # 1msec
|
||||
|
||||
if not self.peak_monitoring:
|
||||
break
|
||||
|
||||
def __exit__(self, *exc):
|
||||
self.peak_monitoring = False
|
||||
|
||||
gc.collect()
|
||||
torch.cuda.empty_cache()
|
||||
self.end = torch.cuda.memory_allocated()
|
||||
self.peak = torch.cuda.max_memory_allocated()
|
||||
if torch.cuda.is_available():
|
||||
torch.cuda.empty_cache()
|
||||
self.end = torch.cuda.memory_allocated()
|
||||
self.peak = torch.cuda.max_memory_allocated()
|
||||
elif is_xpu_available():
|
||||
torch.xpu.empty_cache()
|
||||
self.end = torch.xpu.memory_allocated()
|
||||
self.peak = torch.xpu.max_memory_allocated()
|
||||
elif is_npu_available():
|
||||
torch.npu.empty_cache()
|
||||
self.end = torch.npu.memory_allocated()
|
||||
self.peak = torch.npu.max_memory_allocated()
|
||||
self.used = b2mb(self.end - self.begin)
|
||||
self.peaked = b2mb(self.peak - self.begin)
|
||||
|
||||
self.cpu_end = self.cpu_mem_used()
|
||||
self.cpu_used = b2mb(self.cpu_end - self.cpu_begin)
|
||||
self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin)
|
||||
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
|
||||
|
||||
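A short usage sketch for the context manager above; the training call is a placeholder, everything else reads the attributes set in `__exit__`:

with TorchTracemalloc() as tracemalloc:
    run_one_epoch()  # placeholder for the training loop being measured
accelerator.print(f"GPU memory delta/peak: {tracemalloc.used} MB / {tracemalloc.peaked} MB")
accelerator.print(f"CPU memory delta/peak: {tracemalloc.cpu_used} MB / {tracemalloc.cpu_peaked} MB")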
|
||||
@ -86,13 +142,25 @@ def training_function(config, args):
|
||||
# For testing only
|
||||
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
|
||||
config["num_epochs"] = 2
|
||||
|
||||
# New Code #
|
||||
# Pass the advanced FSDP settings not part of the accelerate config by creating fsdp_plugin
|
||||
fsdp_plugin = FullyShardedDataParallelPlugin(
|
||||
state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),
|
||||
optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False),
|
||||
)
|
||||
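As an aside (not part of this diff), the two config objects above are standard torch FSDP settings; a commonly used variant when saving very large models gathers full state dicts on rank 0 and offloads them to CPU to keep GPU memory flat during saving, which would look like this:

fsdp_plugin = FullyShardedDataParallelPlugin(
    state_dict_config=FullStateDictConfig(offload_to_cpu=True, rank0_only=True),
    optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True),
)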
|
||||
# Initialize accelerator
|
||||
if args.with_tracking:
|
||||
accelerator = Accelerator(
|
||||
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="wandb", logging_dir=args.logging_dir
|
||||
cpu=args.cpu,
|
||||
mixed_precision=args.mixed_precision,
|
||||
log_with="wandb",
|
||||
project_dir=args.logging_dir,
|
||||
fsdp_plugin=fsdp_plugin,
|
||||
)
|
||||
else:
|
||||
accelerator = Accelerator()
|
||||
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
|
||||
accelerator.print(accelerator.distributed_type)
|
||||
|
||||
if hasattr(args.checkpointing_steps, "isdigit"):
|
||||
@ -147,9 +215,22 @@ def training_function(config, args):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -162,17 +243,23 @@ def training_function(config, args):
|
||||
set_seed(seed)
|
||||
|
||||
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
|
||||
model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, return_dict=True)
|
||||
# New Code #
|
||||
# For FSDP feature, it is highly recommended and efficient to prepare the model before creating optimizer
|
||||
model = accelerator.prepare(model)
|
||||
accelerator.print(model)
|
||||
model = AutoModelForSequenceClassification.from_pretrained(
|
||||
args.model_name_or_path, return_dict=True, low_cpu_mem_usage=True
|
||||
)
|
||||
|
||||
# Instantiate optimizer
|
||||
# New Code #
|
||||
# For FSDP feature, at present it doesn't support multiple parameter groups,
|
||||
# so we need to create a single parameter group for the whole model
|
||||
optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr, weight_decay=2e-4)
|
||||
no_decay = ["bias", "LayerNorm.weight"]
|
||||
optimizer_grouped_parameters = [
|
||||
{
|
||||
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
|
||||
"weight_decay": 0.003,
|
||||
},
|
||||
{
|
||||
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
|
||||
"weight_decay": 0.0,
|
||||
},
|
||||
]
|
||||
|
||||
optimizer = torch.optim.AdamW(params=optimizer_grouped_parameters, lr=lr, weight_decay=2e-4)
|
||||
|
||||
# Instantiate scheduler
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
@ -181,13 +268,8 @@ def training_function(config, args):
|
||||
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
|
||||
)
|
||||
|
||||
# New Code #
|
||||
# For FSDP feature, prepare everything except the model as we have already prepared the model
|
||||
# before creating the optimizer
|
||||
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
|
||||
# prepare method.
|
||||
optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
|
||||
overall_step = 0
|
||||
@ -232,7 +314,6 @@ def training_function(config, args):
|
||||
batch.to(accelerator.device)
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
loss = loss / gradient_accumulation_steps
|
||||
# We keep track of the loss at each epoch
|
||||
if args.with_tracking:
|
||||
total_loss += loss.detach().float()
|
||||
@ -330,8 +411,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
@ -373,7 +454,7 @@ def main():
|
||||
required=True,
|
||||
)
|
||||
args = parser.parse_args()
|
||||
config = {"lr": 2e-5, "num_epochs": 3, "seed": 1, "batch_size": 16}
|
||||
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
|
||||
training_function(config, args)
|
||||
|
||||
|
||||
|
||||
@ -15,15 +15,15 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate
|
||||
@ -81,9 +81,22 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -192,8 +205,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
examples/by_feature/local_sgd.py (new file, 238 lines)
@ -0,0 +1,238 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.local_sgd import LocalSGD
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate
|
||||
# with LocalSGD, which is a method to synchronize model
|
||||
# parameters every K batches. It is different, but complementary
|
||||
# to gradient accumulation.
|
||||
#
|
||||
# This example trains a Bert base model on GLUE MRPC
|
||||
# in any of the following settings (with the same script):
|
||||
# - single CPU or single GPU
|
||||
# - multi GPUS (using PyTorch distributed mode)
|
||||
# - (multi) TPUs
|
||||
# - fp16 (mixed-precision) or fp32 (normal precision)
|
||||
#
|
||||
# To run it in each of these various modes, follow the instructions
|
||||
# in the readme for examples:
|
||||
# https://github.com/huggingface/accelerate/tree/main/examples
|
||||
#
|
||||
########################################################################
|
||||
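As the banner above says, LocalSGD trades per-step gradient synchronization for a periodic parameter average. Distilled to its core (and assuming `accelerator`, `model`, `optimizer` and `train_dataloader` have already gone through `accelerator.prepare`, as the full example below does), the pattern is roughly:

with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for batch in train_dataloader:
        with accelerator.accumulate(model):
            loss = model(**batch).loss
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        local_sgd.step()  # every `local_sgd_steps` steps, parameters are averaged across workers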
|
||||
|
||||
MAX_GPU_BATCH_SIZE = 16
|
||||
EVAL_BATCH_SIZE = 32
|
||||
|
||||
|
||||
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
"""
|
||||
Creates a set of `DataLoader`s for the `glue` dataset,
|
||||
using "bert-base-cased" as the tokenizer.
|
||||
|
||||
Args:
|
||||
accelerator (`Accelerator`):
|
||||
An `Accelerator` object
|
||||
batch_size (`int`, *optional*):
|
||||
The batch size for the train and validation DataLoaders.
|
||||
"""
|
||||
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
|
||||
datasets = load_dataset("glue", "mrpc")
|
||||
|
||||
def tokenize_function(examples):
|
||||
# max_length=None => use the model max length (it's actually the default)
|
||||
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
|
||||
return outputs
|
||||
|
||||
# Apply the method we just defined to all the examples in all the splits of the dataset
|
||||
# starting with the main process first:
|
||||
with accelerator.main_process_first():
|
||||
tokenized_datasets = datasets.map(
|
||||
tokenize_function,
|
||||
batched=True,
|
||||
remove_columns=["idx", "sentence1", "sentence2"],
|
||||
)
|
||||
|
||||
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
|
||||
# transformers library
|
||||
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
|
||||
)
|
||||
eval_dataloader = DataLoader(
|
||||
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
|
||||
)
|
||||
|
||||
return train_dataloader, eval_dataloader
|
||||
|
||||
|
||||
# For testing only
|
||||
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
|
||||
from accelerate.test_utils.training import mocked_dataloaders
|
||||
|
||||
get_dataloaders = mocked_dataloaders # noqa: F811
|
||||
|
||||
|
||||
def training_function(config, args):
|
||||
# For testing only
|
||||
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
|
||||
config["num_epochs"] = 2
|
||||
# New Code #
|
||||
gradient_accumulation_steps = int(args.gradient_accumulation_steps)
|
||||
local_sgd_steps = int(args.local_sgd_steps)
|
||||
# Initialize accelerator
|
||||
accelerator = Accelerator(
|
||||
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
|
||||
)
|
||||
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
|
||||
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
|
||||
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
|
||||
lr = config["lr"]
|
||||
num_epochs = int(config["num_epochs"])
|
||||
seed = int(config["seed"])
|
||||
batch_size = int(config["batch_size"])
|
||||
|
||||
metric = evaluate.load("glue", "mrpc")
|
||||
|
||||
set_seed(seed)
|
||||
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
|
||||
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
|
||||
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
|
||||
|
||||
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
|
||||
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
|
||||
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
|
||||
model = model.to(accelerator.device)
|
||||
|
||||
# Instantiate optimizer
|
||||
optimizer = AdamW(params=model.parameters(), lr=lr)
|
||||
|
||||
# Instantiate scheduler
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=100,
|
||||
num_training_steps=(len(train_dataloader) * num_epochs),
|
||||
)
|
||||
|
||||
# Prepare everything
|
||||
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
|
||||
# prepare method.
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
|
||||
# Now we train the model
|
||||
for epoch in range(num_epochs):
|
||||
model.train()
|
||||
with LocalSGD(
|
||||
accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
|
||||
) as local_sgd:
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
# New code #
|
||||
# We use the new `accumulate` context manager to perform gradient accumulation
|
||||
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
|
||||
with accelerator.accumulate(model):
|
||||
output = model(**batch)
|
||||
loss = output.loss
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
# LocalSGD-specific line
|
||||
local_sgd.step()
|
||||
|
||||
model.eval()
|
||||
for step, batch in enumerate(eval_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch)
|
||||
predictions = outputs.logits.argmax(dim=-1)
|
||||
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
|
||||
metric.add_batch(
|
||||
predictions=predictions,
|
||||
references=references,
|
||||
)
|
||||
|
||||
eval_metric = metric.compute()
|
||||
# Use accelerator.print to print only on the main process.
|
||||
accelerator.print(f"epoch {epoch}:", eval_metric)
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Simple example of training script.")
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
)
|
||||
# New Code #
|
||||
parser.add_argument(
|
||||
"--gradient_accumulation_steps",
|
||||
type=int,
|
||||
default=1,
|
||||
help="The number of minibatches to be ran before gradients are accumulated.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
|
||||
)
|
||||
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
|
||||
args = parser.parse_args()
|
||||
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
|
||||
training_function(config, args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@ -31,16 +31,12 @@ import random
|
||||
from itertools import chain
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import datasets
|
||||
import torch
|
||||
import transformers
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.logging import get_logger
|
||||
from accelerate.utils import MegatronLMDummyScheduler, set_seed
|
||||
from datasets import load_dataset
|
||||
from huggingface_hub import Repository
|
||||
from torch.utils.data import DataLoader
|
||||
from tqdm.auto import tqdm
|
||||
from transformers import (
|
||||
CONFIG_MAPPING,
|
||||
@ -55,6 +51,10 @@ from transformers import (
|
||||
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
|
||||
from transformers.utils.versions import require_version
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.logging import get_logger
|
||||
from accelerate.utils import MegatronLMDummyScheduler, set_seed
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
@ -216,7 +216,7 @@ def parse_args():
|
||||
default="all",
|
||||
help=(
|
||||
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
|
||||
' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
|
||||
' `"wandb"`, `"comet_ml"`, and `"dvclive"`. Use `"all"` (default) to report to all integrations.'
|
||||
"Only applicable when `--with_tracking` is passed."
|
||||
),
|
||||
)
|
||||
|
||||
@ -14,16 +14,16 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import torch
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
# New Code #
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.utils import find_executable_batch_size
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
|
||||
########################################################################
|
||||
@ -86,9 +86,22 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -204,8 +217,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -15,15 +15,15 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate,
|
||||
@ -88,9 +88,22 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -209,8 +222,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
|
||||
@ -15,15 +15,15 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate,
|
||||
@ -86,9 +86,22 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -120,7 +133,7 @@ def training_function(config, args):
|
||||
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
|
||||
if args.with_tracking:
|
||||
accelerator = Accelerator(
|
||||
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", logging_dir=args.logging_dir
|
||||
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
|
||||
)
|
||||
else:
|
||||
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
|
||||
@ -236,8 +249,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
@ -249,10 +262,10 @@ def main():
|
||||
help="Whether to load in all available experiment trackers from the environment and use them for logging.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--logging_dir",
|
||||
"--project_dir",
|
||||
type=str,
|
||||
default="logs",
|
||||
help="Location on where to store experiment tracking logs`",
|
||||
help="Location on where to store experiment tracking logs` and relevent project information",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
|
||||
|
||||
@ -17,15 +17,15 @@ import os
|
||||
import re
|
||||
|
||||
import numpy as np
|
||||
import PIL
|
||||
import torch
|
||||
from timm import create_model
|
||||
from torch.optim.lr_scheduler import OneCycleLR
|
||||
from torch.utils.data import DataLoader, Dataset
|
||||
|
||||
import PIL
|
||||
from accelerate import Accelerator
|
||||
from timm import create_model
|
||||
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
|
||||
|
||||
from accelerate import Accelerator
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate
|
||||
@ -75,7 +75,7 @@ def training_function(config, args):
|
||||
# Initialize accelerator
|
||||
if args.with_tracking:
|
||||
accelerator = Accelerator(
|
||||
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", logging_dir=args.logging_dir
|
||||
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
|
||||
)
|
||||
else:
|
||||
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
|
||||
@ -173,7 +173,7 @@ def training_function(config, args):
|
||||
)
|
||||
# We need to keep track of how many total steps we have iterated over
|
||||
overall_step = 0
|
||||
# We also need to keep track of the stating epoch so files are named properly
|
||||
# We also need to keep track of the starting epoch so files are named properly
|
||||
starting_epoch = 0
|
||||
|
||||
# Potentially load in the weights and states from a previous save
|
||||
@ -203,12 +203,14 @@ def training_function(config, args):
|
||||
model.train()
|
||||
if args.with_tracking:
|
||||
total_loss = 0
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
|
||||
# We need to skip steps until we reach the resumed step
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch:
|
||||
if resume_step is not None and step < resume_step:
|
||||
overall_step += 1
|
||||
continue
|
||||
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
|
||||
overall_step += resume_step
|
||||
else:
|
||||
# After the first iteration though, we need to go back to the original dataloader
|
||||
active_dataloader = train_dataloader
|
||||
for batch in active_dataloader:
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch = {k: v.to(accelerator.device) for k, v in batch.items()}
|
||||
inputs = (batch["image"] - mean) / std
|
||||
@ -272,8 +274,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
@ -303,10 +305,10 @@ def main():
|
||||
help="Whether to load in all available experiment trackers from the environment and use them for logging.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--logging_dir",
|
||||
"--project_dir",
|
||||
type=str,
|
||||
default="logs",
|
||||
help="Location on where to store experiment tracking logs`",
|
||||
help="Location on where to store experiment tracking logs` and relevent project information",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
|
||||
|
||||
@ -15,15 +15,15 @@
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import evaluate
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from torch.optim import AdamW
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
import evaluate
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
||||
|
||||
from accelerate import Accelerator, DistributedType
|
||||
|
||||
|
||||
########################################################################
|
||||
# This is a fully working simple example to use Accelerate
|
||||
@ -52,7 +52,7 @@ def training_function(config, args):
|
||||
# Initialize accelerator
|
||||
if args.with_tracking:
|
||||
accelerator = Accelerator(
|
||||
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", logging_dir=args.logging_dir
|
||||
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
|
||||
)
|
||||
else:
|
||||
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
|
||||
@ -109,9 +109,22 @@ def training_function(config, args):
|
||||
|
||||
def collate_fn(examples):
|
||||
# On TPU it's best to pad everything to the same length or training will be very slow.
|
||||
if accelerator.distributed_type == DistributedType.TPU:
|
||||
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
||||
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
||||
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
|
||||
# When using mixed precision we want round multiples of 8/16
|
||||
if accelerator.mixed_precision == "fp8":
|
||||
pad_to_multiple_of = 16
|
||||
elif accelerator.mixed_precision != "no":
|
||||
pad_to_multiple_of = 8
|
||||
else:
|
||||
pad_to_multiple_of = None
|
||||
|
||||
return tokenizer.pad(
|
||||
examples,
|
||||
padding="longest",
|
||||
max_length=max_length,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
# Instantiate dataloaders.
|
||||
train_dataloader = DataLoader(
|
||||
@ -180,12 +193,14 @@ def training_function(config, args):
|
||||
model.train()
|
||||
if args.with_tracking:
|
||||
total_loss = 0
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
|
||||
# We need to skip steps until we reach the resumed step
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch:
|
||||
if resume_step is not None and step < resume_step:
|
||||
overall_step += 1
|
||||
continue
|
||||
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
|
||||
overall_step += resume_step
|
||||
else:
|
||||
# After the first iteration though, we need to go back to the original dataloader
|
||||
active_dataloader = train_dataloader
|
||||
for step, batch in enumerate(active_dataloader):
|
||||
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
||||
batch.to(accelerator.device)
|
||||
outputs = model(**batch)
|
||||
@ -251,8 +266,8 @@ def main():
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default="no",
|
||||
choices=["no", "fp16", "bf16"],
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16", "fp8"],
|
||||
help="Whether to use mixed precision. Choose"
|
||||
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
||||
"and an Nvidia Ampere GPU.",
|
||||
@@ -282,10 +297,10 @@ def main():
         help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
     )
     parser.add_argument(
-        "--logging_dir",
+        "--project_dir",
         type=str,
         default="logs",
-        help="Location on where to store experiment tracking logs`",
+        help="Location on where to store experiment tracking logs` and relevent project information",
     )
     args = parser.parse_args()
     config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
@@ -17,15 +17,15 @@ import os
 import re
 
 import numpy as np
+import PIL
 import torch
+from timm import create_model
 from torch.optim.lr_scheduler import OneCycleLR
 from torch.utils.data import DataLoader, Dataset
 
-import PIL
-from accelerate import Accelerator
-from timm import create_model
 from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
 
+from accelerate import Accelerator
 
 
 ########################################################################
 # This is a fully working simple example to use Accelerate
@@ -189,8 +189,8 @@ def main():
     parser.add_argument(
         "--mixed_precision",
         type=str,
-        default="no",
-        choices=["no", "fp16", "bf16"],
+        default=None,
+        choices=["no", "fp16", "bf16", "fp8"],
         help="Whether to use mixed precision. Choose"
         "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
         "and an Nvidia Ampere GPU.",
examples/multigpu_remote_launcher.py (new file, 55 lines)
@@ -0,0 +1,55 @@
+import argparse
+
+import runhouse as rh
+import torch
+from nlp_example import training_function
+
+from accelerate.utils import PrepareForLaunch, patch_environment
+
+
+def launch_train(*args):
+    num_processes = torch.cuda.device_count()
+    print(f"Device count: {num_processes}")
+    with patch_environment(
+        world_size=num_processes, master_addr="127.0.0.1", master_port="29500", mixed_precision=args[1].mixed_precision
+    ):
+        launcher = PrepareForLaunch(training_function, distributed_type="MULTI_GPU")
+        torch.multiprocessing.start_processes(launcher, args=args, nprocs=num_processes, start_method="spawn")
+
+
+if __name__ == "__main__":
+    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/main/rh_primitives/cluster.html#hardware-setup
+    # for cloud access setup instructions (if using on-demand hardware), and for API specifications.
+
+    # on-demand GPU
+    # gpu = rh.cluster(name='rh-cluster', instance_type='V100:1', provider='cheapest', use_spot=False)  # single GPU
+    gpu = rh.cluster(name="rh-cluster", instance_type="V100:4", provider="cheapest", use_spot=False)  # multi GPU
+    gpu.up_if_not()
+
+    # on-prem GPU
+    # gpu = rh.cluster(
+    #     ips=["ip_addr"], ssh_creds={ssh_user:"<username>", ssh_private_key:"<key_path>"}, name="rh-cluster"
+    # )
+
+    # Set up remote function
+    reqs = [
+        "pip:./",
+        "transformers",
+        "datasets",
+        "evaluate",
+        "tqdm",
+        "scipy",
+        "scikit-learn",
+        "tensorboard",
+        "torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117",
+    ]
+    launch_train_gpu = rh.function(fn=launch_train, system=gpu, reqs=reqs, name="train_bert_glue")
+
+    # Define train args/config, run train function
+    train_args = argparse.Namespace(cpu=False, mixed_precision="fp16")
+    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
+    launch_train_gpu(config, train_args, stream_logs=True)
+
+    # Alternatively, we can just run as instructed in the README (but only because there's already a wrapper CLI):
+    # gpu.install_packages(reqs)
+    # gpu.run(['accelerate launch --multi_gpu accelerate/examples/nlp_example.py'])
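Note (illustrative, not part of the diff): of the two utilities imported above, `patch_environment` is the less obvious one; it temporarily sets the given environment variables (upper-cased) for the duration of the with block and restores the previous environment afterwards. A minimal sketch:

import os

from accelerate.utils import patch_environment

with patch_environment(world_size=4, master_addr="127.0.0.1", master_port="29500"):
    # Inside the block the distributed bootstrap variables are visible to torch.
    print(os.environ["WORLD_SIZE"], os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"])
# Outside the block they are gone again (assuming they were not set beforehand).
print("WORLD_SIZE" in os.environ)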
@@ -14,15 +14,15 @@
 # limitations under the License.
 import argparse
 
+import evaluate
 import torch
+from datasets import load_dataset
 from torch.optim import AdamW
 from torch.utils.data import DataLoader
 
-import evaluate
-from accelerate import Accelerator, DistributedType
-from datasets import load_dataset
 from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
 
+from accelerate import Accelerator, DistributedType
 
 
 ########################################################################
 # This is a fully working simple example to use Accelerate
@@ -79,16 +79,33 @@ def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
 
     def collate_fn(examples):
         # On TPU it's best to pad everything to the same length or training will be very slow.
-        if accelerator.distributed_type == DistributedType.TPU:
-            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
-        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
+        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
+        # When using mixed precision we want round multiples of 8/16
+        if accelerator.mixed_precision == "fp8":
+            pad_to_multiple_of = 16
+        elif accelerator.mixed_precision != "no":
+            pad_to_multiple_of = 8
+        else:
+            pad_to_multiple_of = None
+
+        return tokenizer.pad(
+            examples,
+            padding="longest",
+            max_length=max_length,
+            pad_to_multiple_of=pad_to_multiple_of,
+            return_tensors="pt",
+        )
 
     # Instantiate dataloaders.
     train_dataloader = DataLoader(
-        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
+        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
     )
     eval_dataloader = DataLoader(
-        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
+        tokenized_datasets["validation"],
+        shuffle=False,
+        collate_fn=collate_fn,
+        batch_size=EVAL_BATCH_SIZE,
+        drop_last=(accelerator.mixed_precision == "fp8"),
     )
 
     return train_dataloader, eval_dataloader
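Note (illustrative, not part of the diff): the added drop_last flags keep every batch the same size, presumably because fp8 kernels want fixed, evenly divisible shapes. A toy illustration of what drop_last does:

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(10, dtype=torch.float32))
loader = DataLoader(dataset, batch_size=4, drop_last=True)
print([batch[0].shape[0] for batch in loader])  # [4, 4] -- the trailing batch of 2 is dropped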
@@ -120,7 +137,6 @@ def training_function(config, args):
     # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
     # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
     model = model.to(accelerator.device)
 
     # Instantiate optimizer
     optimizer = AdamW(params=model.parameters(), lr=lr)
 
@@ -134,6 +150,7 @@ def training_function(config, args):
     # Prepare everything
     # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
     # prepare method.
 
     model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
         model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
     )
@@ -176,8 +193,8 @@ def main():
     parser.add_argument(
         "--mixed_precision",
         type=str,
-        default="no",
-        choices=["no", "fp16", "bf16"],
+        default=None,
+        choices=["no", "fp16", "bf16", "fp8"],
         help="Whether to use mixed precision. Choose"
         "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
         "and an Nvidia Ampere GPU.",
examples/slurm/submit_multigpu.sh (new file, 27 lines)
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+#SBATCH --job-name=multigpu
+#SBATCH -D .
+#SBATCH --output=O-%x.%j
+#SBATCH --error=E-%x.%j
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1         # number of MP tasks
+#SBATCH --gres=gpu:4                # number of GPUs per node
+#SBATCH --cpus-per-task=160         # number of cores per tasks
+#SBATCH --time=01:59:00             # maximum execution time (HH:MM:SS)
+
+######################
+### Set enviroment ###
+######################
+source activateEnviroment.sh
+export GPUS_PER_NODE=4
+######################
+
+export SCRIPT=/accelerate/examples/complete_nlp_example.py
+export SCRIPT_ARGS=" \
+    --mixed_precision fp16 \
+    --output_dir /accelerate/examples/output \
+    --with_tracking \
+    "
+
+accelerate launch --num_processes $GPUS_PER_NODE $SCRIPT $SCRIPT_ARGS
examples/slurm/submit_multinode.sh (new file, 41 lines)
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+#SBATCH --job-name=multinode
+#SBATCH -D .
+#SBATCH --output=O-%x.%j
+#SBATCH --error=E-%x.%j
+#SBATCH --nodes=4                   # number of nodes
+#SBATCH --ntasks-per-node=1         # number of MP tasks
+#SBATCH --gres=gpu:4                # number of GPUs per node
+#SBATCH --cpus-per-task=160         # number of cores per tasks
+#SBATCH --time=01:59:00             # maximum execution time (HH:MM:SS)
+
+######################
+### Set enviroment ###
+######################
+source activateEnviroment.sh
+export GPUS_PER_NODE=4
+######################
+
+######################
+#### Set network #####
+######################
+head_node_ip=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+######################
+
+export LAUNCHER="accelerate launch \
+    --num_processes $((SLURM_NNODES * GPUS_PER_NODE)) \
+    --num_machines $SLURM_NNODES \
+    --rdzv_backend c10d \
+    --main_process_ip $head_node_ip \
+    --main_process_port 29500 \
+    "
+export SCRIPT="/accelerate/examples/complete_nlp_example.py"
+export SCRIPT_ARGS=" \
+    --mixed_precision fp16 \
+    --output_dir /accelerate/examples/output \
+    "
+
+# This step is necessary because accelerate launch does not handle multiline arguments properly
+export CMD="$LAUNCHER $PYTHON_FILE $ARGS"
+srun $CMD
@@ -1,3 +1,17 @@
 [tool.black]
 line-length = 119
-target-version = ['py36']
+target-version = ['py37']
+
+[tool.ruff]
+# Never enforce `E501` (line length violations).
+ignore = ["E501", "E741", "W605"]
+select = ["E", "F", "I", "W"]
+line-length = 119
+
+# Ignore import violations in all `__init__.py` files.
+[tool.ruff.per-file-ignores]
+"__init__.py" = ["E402", "F401", "F403", "F811"]
+
+[tool.ruff.isort]
+lines-after-imports = 2
+known-first-party = ["accelerate"]
@@ -4,11 +4,6 @@ ensure_newline_before_comments = True
 force_grid_wrap = 0
 include_trailing_comma = True
 known_first_party = accelerate
-known_third_party =
-    numpy
-    torch
-    torch_xla
-
 line_length = 119
 lines_after_imports = 2
 multi_line_output = 3
setup.py (49 changed lines)
@@ -16,14 +16,16 @@ from setuptools import setup
 from setuptools import find_packages
 
 extras = {}
-extras["quality"] = ["black ~= 22.0", "isort >= 5.5.4", "flake8 >= 3.8.3", "hf-doc-builder >= 0.3.0"]
+extras["quality"] = ["black ~= 23.1", "ruff >= 0.0.241", "hf-doc-builder >= 0.3.0", "urllib3 < 2.0.0"]
 extras["docs"] = []
 extras["test_prod"] = ["pytest", "pytest-xdist", "pytest-subtests", "parameterized"]
-extras["test_dev"] = ["datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed<0.7.0", "tqdm"]
+extras["test_dev"] = [
+    "datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed", "tqdm", "bitsandbytes", "timm"
+]
 extras["testing"] = extras["test_prod"] + extras["test_dev"]
 extras["rich"] = ["rich"]
 
-extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard"]
+extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard", "dvclive"]
 extras["dev"] = extras["quality"] + extras["testing"] + extras["rich"]
 
 extras["sagemaker"] = [
@@ -32,7 +34,7 @@ extras["sagemaker"] = [
 
 setup(
     name="accelerate",
-    version="0.15.0.dev0",
+    version="0.27.0.dev0",
     description="Accelerate",
     long_description=open("README.md", "r", encoding="utf-8").read(),
     long_description_content_type="text/markdown",
@@ -47,11 +49,12 @@ setup(
         "console_scripts": [
            "accelerate=accelerate.commands.accelerate_cli:main",
             "accelerate-config=accelerate.commands.config:main",
+            "accelerate-estimate-memory=accelerate.commands.estimate:main",
             "accelerate-launch=accelerate.commands.launch:main",
         ]
     },
-    python_requires=">=3.7.0",
-    install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.4.0"],
+    python_requires=">=3.8.0",
+    install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.10.0", "huggingface_hub", "safetensors>=0.3.1"],
     extras_require=extras,
     classifiers=[
         "Development Status :: 5 - Production/Stable",
@@ -61,27 +64,35 @@ setup(
         "License :: OSI Approved :: Apache Software License",
         "Operating System :: OS Independent",
         "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
     ],
 )
 
 # Release checklist
-# 1. Change the version in __init__.py and setup.py.
-# 2. Commit these changes with the message: "Release: VERSION"
-# 3. Add a tag in git to mark the release: "git tag VERSION -m 'Adds tag VERSION for pypi' "
-#    Push the tag to git: git push --tags origin main
-# 4. Run the following commands in the top-level directory:
+# 1. Checkout the release branch (for a patch the current release branch, for a new minor version, create one):
+#      git checkout -b vXX.xx-release
+#    The -b is only necessary for creation (so remove it when doing a patch)
+# 2. Change the version in __init__.py and setup.py to the proper value.
+# 3. Commit these changes with the message: "Release: v<VERSION>"
+# 4. Add a tag in git to mark the release:
+#      git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi'
+#    Push the tag and release commit to git: git push --tags origin vXX.xx-release
+# 5. Run the following commands in the top-level directory:
+#      rm -rf dist
+#      rm -rf build
 #      python setup.py bdist_wheel
 #      python setup.py sdist
-# 5. Upload the package to the pypi test server first:
-#      twine upload dist/* -r pypitest
-#      twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
-# 6. Check that you can install it in a virtualenv by running:
+# 6. Upload the package to the pypi test server first:
+#      twine upload dist/* -r testpypi
+# 7. Check that you can install it in a virtualenv by running:
+#      pip install accelerate
+#      pip uninstall accelerate
 #      pip install -i https://testpypi.python.org/pypi accelerate
 #      accelerate env
 #      accelerate test
-# 7. Upload the final version to actual pypi:
+# 8. Upload the final version to actual pypi:
 #      twine upload dist/* -r pypi
-# 8. Add release notes to the tag in github once everything is looking hunky-dory.
-# 9. Update the version in __init__.py, setup.py to the new version "-dev" and push to master
+# 9. Add release notes to the tag in github once everything is looking hunky-dory.
+# 10. Go back to the main branch and update the version in __init__.py, setup.py to the new version ".dev" and push to
+#     main.
Some files were not shown because too many files have changed in this diff.